
[3/4] drm/ttm: add input parameter force_alloc for ttm_bo_evict_mm

Message ID 1518080761-12952-3-git-send-email-Hongbo.He@amd.com (mailing list archive)
State New, archived

Commit Message

He, Hongbo Feb. 8, 2018, 9:06 a.m. UTC
If true, allocate TTM pages regardless of the zone's global memory
accounting limit. For suspend, we should avoid a TTM memory allocation
failure that would then result in a suspend failure.

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  2 +-
 drivers/gpu/drm/nouveau/nouveau_drm.c       |  2 +-
 drivers/gpu/drm/qxl/qxl_object.c            |  4 ++--
 drivers/gpu/drm/radeon/radeon_device.c      |  6 +++---
 drivers/gpu/drm/radeon/radeon_object.c      |  5 +++--
 drivers/gpu/drm/radeon/radeon_object.h      |  3 ++-
 drivers/gpu/drm/ttm/ttm_bo.c                | 16 ++++++++++------
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |  6 +++---
 include/drm/ttm/ttm_bo_api.h                |  5 ++++-
 12 files changed, 34 insertions(+), 25 deletions(-)
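
For orientation, here is a minimal sketch of the call pattern this change
targets. example_suspend_evict() is a hypothetical helper, but the
ttm_bo_evict_mm() signature and the TTM_OPT_FLAG_FORCE_ALLOC behaviour it
relies on are the ones introduced by this series.

/*
 * Hypothetical suspend-path helper (not part of the patch): passing
 * force_alloc = true makes the eviction path use TTM_OPT_FLAG_FORCE_ALLOC,
 * so any TTM pages needed to move buffers out of VRAM are allocated even
 * when the zone accounting limit has been reached, and suspend does not
 * fail on that limit.
 */
static int example_suspend_evict(struct ttm_bo_device *bdev)
{
	return ttm_bo_evict_mm(bdev, TTM_PL_VRAM, true);
}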

Comments

Christian König Feb. 8, 2018, 12:58 p.m. UTC | #1
Am 08.02.2018 um 10:06 schrieb Roger He:
> If true, allocate TTM pages regardless of the zone's global memory
> accounting limit. For suspend, we should avoid a TTM memory allocation
> failure that would then result in a suspend failure.

Why the extra parameter for amdgpu_bo_evict_vram ?

I can't think of a use case when we don't want this to succeed.

Christian.
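
A minimal sketch of what the suggestion amounts to (an interpretation of the
comment above, not code from the series): keep the amdgpu wrapper's original
signature and force the allocation unconditionally at the call into TTM.

int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (adev->flags & AMD_IS_APU)) {
		/* Useless to evict on IGP chips */
		return 0;
	}
	/* Every caller wants this eviction to succeed, so always force it. */
	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM, true);
}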

>
> Signed-off-by: Roger He <Hongbo.He@amd.com>
> ---
>   drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c |  2 +-
>   drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  4 ++--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  4 ++--
>   drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  2 +-
>   drivers/gpu/drm/nouveau/nouveau_drm.c       |  2 +-
>   drivers/gpu/drm/qxl/qxl_object.c            |  4 ++--
>   drivers/gpu/drm/radeon/radeon_device.c      |  6 +++---
>   drivers/gpu/drm/radeon/radeon_object.c      |  5 +++--
>   drivers/gpu/drm/radeon/radeon_object.h      |  3 ++-
>   drivers/gpu/drm/ttm/ttm_bo.c                | 16 ++++++++++------
>   drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |  6 +++---
>   include/drm/ttm/ttm_bo_api.h                |  5 ++++-
>   12 files changed, 34 insertions(+), 25 deletions(-)
>
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
> index ee76b46..59ee12c 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
> @@ -763,7 +763,7 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
>   	struct drm_device *dev = node->minor->dev;
>   	struct amdgpu_device *adev = dev->dev_private;
>   
> -	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
> +	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev, true));
>   	return 0;
>   }
>   
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> index e3fa3d7..3c5f9ca 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
> @@ -2168,7 +2168,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
>   		}
>   	}
>   	/* evict vram memory */
> -	amdgpu_bo_evict_vram(adev);
> +	amdgpu_bo_evict_vram(adev, true);
>   
>   	amdgpu_fence_driver_suspend(adev);
>   
> @@ -2178,7 +2178,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
>   	 * This second call to evict vram is to evict the gart page table
>   	 * using the CPU.
>   	 */
> -	amdgpu_bo_evict_vram(adev);
> +	amdgpu_bo_evict_vram(adev, true);
>   
>   	pci_save_state(dev->pdev);
>   	if (suspend) {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> index 0338ef6..db813f9 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
> @@ -803,14 +803,14 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
>   	return r;
>   }
>   
> -int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
> +int amdgpu_bo_evict_vram(struct amdgpu_device *adev, bool force_alloc)
>   {
>   	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
>   	if (0 && (adev->flags & AMD_IS_APU)) {
>   		/* Useless to evict on IGP chips */
>   		return 0;
>   	}
> -	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
> +	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM, force_alloc);
>   }
>   
>   static const char *amdgpu_vram_names[] = {
> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> index c2b02f5..6724cdc 100644
> --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
> @@ -227,7 +227,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
>   			     u64 min_offset, u64 max_offset,
>   			     u64 *gpu_addr);
>   int amdgpu_bo_unpin(struct amdgpu_bo *bo);
> -int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
> +int amdgpu_bo_evict_vram(struct amdgpu_device *adev, bool force_alloc);
>   int amdgpu_bo_init(struct amdgpu_device *adev);
>   void amdgpu_bo_fini(struct amdgpu_device *adev);
>   int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
> diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
> index 8d4a5be..c9627ef 100644
> --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
> +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
> @@ -702,7 +702,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
>   	}
>   
>   	NV_DEBUG(drm, "evicting buffers...\n");
> -	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
> +	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM, true);
>   
>   	NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
>   	if (drm->cechan) {
> diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
> index f6b80fe..d8d26c8 100644
> --- a/drivers/gpu/drm/qxl/qxl_object.c
> +++ b/drivers/gpu/drm/qxl/qxl_object.c
> @@ -350,10 +350,10 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
>   
>   int qxl_surf_evict(struct qxl_device *qdev)
>   {
> -	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
> +	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV, true);
>   }
>   
>   int qxl_vram_evict(struct qxl_device *qdev)
>   {
> -	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
> +	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM, true);
>   }
> diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
> index 8d3e3d2..c11ee06 100644
> --- a/drivers/gpu/drm/radeon/radeon_device.c
> +++ b/drivers/gpu/drm/radeon/radeon_device.c
> @@ -1522,7 +1522,7 @@ void radeon_device_fini(struct radeon_device *rdev)
>   	DRM_INFO("radeon: finishing device.\n");
>   	rdev->shutdown = true;
>   	/* evict vram memory */
> -	radeon_bo_evict_vram(rdev);
> +	radeon_bo_evict_vram(rdev, true);
>   	radeon_fini(rdev);
>   	if (!pci_is_thunderbolt_attached(rdev->pdev))
>   		vga_switcheroo_unregister_client(rdev->pdev);
> @@ -1607,7 +1607,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
>   		}
>   	}
>   	/* evict vram memory */
> -	radeon_bo_evict_vram(rdev);
> +	radeon_bo_evict_vram(rdev, true);
>   
>   	/* wait for gpu to finish processing current batch */
>   	for (i = 0; i < RADEON_NUM_RINGS; i++) {
> @@ -1626,7 +1626,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
>   	 * This second call to evict vram is to evict the gart page table
>   	 * using the CPU.
>   	 */
> -	radeon_bo_evict_vram(rdev);
> +	radeon_bo_evict_vram(rdev, true);
>   
>   	radeon_agp_suspend(rdev);
>   
> diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
> index 15404af..99a9a45 100644
> --- a/drivers/gpu/drm/radeon/radeon_object.c
> +++ b/drivers/gpu/drm/radeon/radeon_object.c
> @@ -420,7 +420,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
>   	return r;
>   }
>   
> -int radeon_bo_evict_vram(struct radeon_device *rdev)
> +int
> +radeon_bo_evict_vram(struct radeon_device *rdev, bool force_alloc)
>   {
>   	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
>   	if (0 && (rdev->flags & RADEON_IS_IGP)) {
> @@ -428,7 +429,7 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
>   			/* Useless to evict on IGP chips */
>   			return 0;
>   	}
> -	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
> +	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM, force_alloc);
>   }
>   
>   void radeon_bo_force_delete(struct radeon_device *rdev)
> diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
> index 9ffd821..757ba88 100644
> --- a/drivers/gpu/drm/radeon/radeon_object.h
> +++ b/drivers/gpu/drm/radeon/radeon_object.h
> @@ -136,7 +136,8 @@ extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
>   extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
>   				    u64 max_offset, u64 *gpu_addr);
>   extern int radeon_bo_unpin(struct radeon_bo *bo);
> -extern int radeon_bo_evict_vram(struct radeon_device *rdev);
> +extern int radeon_bo_evict_vram(struct radeon_device *rdev,
> +				bool force_alloc);
>   extern void radeon_bo_force_delete(struct radeon_device *rdev);
>   extern int radeon_bo_init(struct radeon_device *rdev);
>   extern void radeon_bo_fini(struct radeon_device *rdev);
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index a907311..31d10f1 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
>   EXPORT_SYMBOL(ttm_bo_create);
>   
>   static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
> -				   unsigned mem_type)
> +			unsigned mem_type, bool force_alloc)
>   {
> -	struct ttm_operation_ctx ctx = { false, false };
> +	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
>   	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
>   	struct ttm_bo_global *glob = bdev->glob;
>   	struct dma_fence *fence;
>   	int ret;
>   	unsigned i;
>   
> +	if (force_alloc)
> +		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;
>   	/*
>   	 * Can't use standard list traversal since we're unlocking.
>   	 */
> @@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
>   	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
>   		while (!list_empty(&man->lru[i])) {
>   			spin_unlock(&glob->lru_lock);
> -			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
> +			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
> +						  &ttm_opt_ctx);
>   			if (ret)
>   				return ret;
>   			spin_lock(&glob->lru_lock);
> @@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   
>   	ret = 0;
>   	if (mem_type > 0) {
> -		ret = ttm_bo_force_list_clean(bdev, mem_type);
> +		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
>   		if (ret) {
>   			pr_err("Cleanup eviction failed\n");
>   			return ret;
> @@ -1419,7 +1422,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   }
>   EXPORT_SYMBOL(ttm_bo_clean_mm);
>   
> -int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
> +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type,
> +			bool force_alloc)
>   {
>   	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
>   
> @@ -1433,7 +1437,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
>   		return 0;
>   	}
>   
> -	return ttm_bo_force_list_clean(bdev, mem_type);
> +	return ttm_bo_force_list_clean(bdev, mem_type, force_alloc);
>   }
>   EXPORT_SYMBOL(ttm_bo_evict_mm);
>   
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> index 184340d..28f8e4f 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
> @@ -430,7 +430,7 @@ static int vmw_request_device(struct vmw_private *dev_priv)
>   	if (dev_priv->cman)
>   		vmw_cmdbuf_remove_pool(dev_priv->cman);
>   	if (dev_priv->has_mob) {
> -		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
> +		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB, true);
>   		vmw_otables_takedown(dev_priv);
>   	}
>   	if (dev_priv->cman)
> @@ -463,7 +463,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
>   		vmw_cmdbuf_remove_pool(dev_priv->cman);
>   
>   	if (dev_priv->has_mob) {
> -		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
> +		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB, true);
>   		vmw_otables_takedown(dev_priv);
>   	}
>   }
> @@ -1342,7 +1342,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
>   	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
>   		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
>   		spin_unlock(&dev_priv->svga_lock);
> -		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
> +		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM, true))
>   			DRM_ERROR("Failed evicting VRAM buffers.\n");
>   		vmw_write(dev_priv, SVGA_REG_ENABLE,
>   			  SVGA_REG_ENABLE_HIDE |
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 2142639..6b5db9c 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -636,6 +636,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
>    *
>    * @bdev: Pointer to a ttm_bo_device struct.
>    * @mem_type: The memory type.
> + * @force_alloc: if true, always allow TTM page allocation regardless
> + * of the zone memory accounting limit
>    *
>    * Evicts all buffers on the lru list of the memory type.
>    * This is normally part of a VT switch or an
> @@ -649,7 +651,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
>    * -ERESTARTSYS: The call was interrupted by a signal while waiting to
>    * evict a buffer.
>    */
> -int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
> +int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type,
> +			bool force_alloc);
>   
>   /**
>    * ttm_kmap_obj_virtual
He, Hongbo Feb. 9, 2018, 4:34 a.m. UTC | #2
> I can't think of a use case when we don't want this to succeed.

That is true. It seems I can simplify more here.

Thanks
Roger(Hongbo.He)
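
One plausible shape of that simplification, assuming the follow-up keeps
ttm_bo_evict_mm()'s original two-argument signature and instead lets
ttm_bo_force_list_clean() always permit forced allocation (a sketch, not
code from a posted revision):

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
				   unsigned mem_type)
{
	/* Evictions from here must not fail on the accounting limit. */
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
		.flags = TTM_OPT_FLAG_FORCE_ALLOC
	};
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;
	unsigned i;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */
	spin_lock(&glob->lru_lock);
	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
		while (!list_empty(&man->lru[i])) {
			spin_unlock(&glob->lru_lock);
			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
			if (ret)
				return ret;
			spin_lock(&glob->lru_lock);
		}
	}
	spin_unlock(&glob->lru_lock);

	/* The wait on man->move, outside the hunks above, stays unchanged. */
	return 0;
}

That would leave every existing ttm_bo_evict_mm() caller untouched while
still guaranteeing that the suspend-time eviction cannot fail on the memory
accounting limit.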

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index ee76b46..59ee12c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -763,7 +763,7 @@  static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct amdgpu_device *adev = dev->dev_private;
 
-	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+	seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev, true));
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e3fa3d7..3c5f9ca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2168,7 +2168,7 @@  int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 		}
 	}
 	/* evict vram memory */
-	amdgpu_bo_evict_vram(adev);
+	amdgpu_bo_evict_vram(adev, true);
 
 	amdgpu_fence_driver_suspend(adev);
 
@@ -2178,7 +2178,7 @@  int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	 * This second call to evict vram is to evict the gart page table
 	 * using the CPU.
 	 */
-	amdgpu_bo_evict_vram(adev);
+	amdgpu_bo_evict_vram(adev, true);
 
 	pci_save_state(dev->pdev);
 	if (suspend) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0338ef6..db813f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -803,14 +803,14 @@  int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	return r;
 }
 
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
+int amdgpu_bo_evict_vram(struct amdgpu_device *adev, bool force_alloc)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
 	if (0 && (adev->flags & AMD_IS_APU)) {
 		/* Useless to evict on IGP chips */
 		return 0;
 	}
-	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+	return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM, force_alloc);
 }
 
 static const char *amdgpu_vram_names[] = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index c2b02f5..6724cdc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -227,7 +227,7 @@  int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			     u64 min_offset, u64 max_offset,
 			     u64 *gpu_addr);
 int amdgpu_bo_unpin(struct amdgpu_bo *bo);
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+int amdgpu_bo_evict_vram(struct amdgpu_device *adev, bool force_alloc);
 int amdgpu_bo_init(struct amdgpu_device *adev);
 void amdgpu_bo_fini(struct amdgpu_device *adev);
 int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 8d4a5be..c9627ef 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -702,7 +702,7 @@  nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	}
 
 	NV_DEBUG(drm, "evicting buffers...\n");
-	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+	ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM, true);
 
 	NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
 	if (drm->cechan) {
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index f6b80fe..d8d26c8 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -350,10 +350,10 @@  int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
 
 int qxl_surf_evict(struct qxl_device *qdev)
 {
-	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV, true);
 }
 
 int qxl_vram_evict(struct qxl_device *qdev)
 {
-	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM, true);
 }
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 8d3e3d2..c11ee06 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1522,7 +1522,7 @@  void radeon_device_fini(struct radeon_device *rdev)
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
 	/* evict vram memory */
-	radeon_bo_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev, true);
 	radeon_fini(rdev);
 	if (!pci_is_thunderbolt_attached(rdev->pdev))
 		vga_switcheroo_unregister_client(rdev->pdev);
@@ -1607,7 +1607,7 @@  int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 		}
 	}
 	/* evict vram memory */
-	radeon_bo_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev, true);
 
 	/* wait for gpu to finish processing current batch */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1626,7 +1626,7 @@  int radeon_suspend_kms(struct drm_device *dev, bool suspend,
 	 * This second call to evict vram is to evict the gart page table
 	 * using the CPU.
 	 */
-	radeon_bo_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev, true);
 
 	radeon_agp_suspend(rdev);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 15404af..99a9a45 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -420,7 +420,8 @@  int radeon_bo_unpin(struct radeon_bo *bo)
 	return r;
 }
 
-int radeon_bo_evict_vram(struct radeon_device *rdev)
+int
+radeon_bo_evict_vram(struct radeon_device *rdev, bool force_alloc)
 {
 	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
 	if (0 && (rdev->flags & RADEON_IS_IGP)) {
@@ -428,7 +429,7 @@  int radeon_bo_evict_vram(struct radeon_device *rdev)
 			/* Useless to evict on IGP chips */
 			return 0;
 	}
-	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM, force_alloc);
 }
 
 void radeon_bo_force_delete(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9ffd821..757ba88 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -136,7 +136,8 @@  extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
 extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
 				    u64 max_offset, u64 *gpu_addr);
 extern int radeon_bo_unpin(struct radeon_bo *bo);
-extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev,
+				bool force_alloc);
 extern void radeon_bo_force_delete(struct radeon_device *rdev);
 extern int radeon_bo_init(struct radeon_device *rdev);
 extern void radeon_bo_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a907311..31d10f1 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1342,15 +1342,17 @@  int ttm_bo_create(struct ttm_bo_device *bdev,
 EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   unsigned mem_type)
+			unsigned mem_type, bool force_alloc)
 {
-	struct ttm_operation_ctx ctx = { false, false };
+	struct ttm_operation_ctx ttm_opt_ctx = { false, false };
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
 	struct dma_fence *fence;
 	int ret;
 	unsigned i;
 
+	if (force_alloc)
+		ttm_opt_ctx.flags = TTM_OPT_FLAG_FORCE_ALLOC;
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
@@ -1359,7 +1361,8 @@  static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		while (!list_empty(&man->lru[i])) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+			ret = ttm_mem_evict_first(bdev, mem_type, NULL,
+						  &ttm_opt_ctx);
 			if (ret)
 				return ret;
 			spin_lock(&glob->lru_lock);
@@ -1403,7 +1406,7 @@  int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ret = ttm_bo_force_list_clean(bdev, mem_type);
+		ret = ttm_bo_force_list_clean(bdev, mem_type, true);
 		if (ret) {
 			pr_err("Cleanup eviction failed\n");
 			return ret;
@@ -1419,7 +1422,8 @@  int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 }
 EXPORT_SYMBOL(ttm_bo_clean_mm);
 
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type,
+			bool force_alloc)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 
@@ -1433,7 +1437,7 @@  int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, mem_type);
+	return ttm_bo_force_list_clean(bdev, mem_type, force_alloc);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 184340d..28f8e4f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -430,7 +430,7 @@  static int vmw_request_device(struct vmw_private *dev_priv)
 	if (dev_priv->cman)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
 	if (dev_priv->has_mob) {
-		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		(void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB, true);
 		vmw_otables_takedown(dev_priv);
 	}
 	if (dev_priv->cman)
@@ -463,7 +463,7 @@  static void vmw_release_device_early(struct vmw_private *dev_priv)
 		vmw_cmdbuf_remove_pool(dev_priv->cman);
 
 	if (dev_priv->has_mob) {
-		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+		ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB, true);
 		vmw_otables_takedown(dev_priv);
 	}
 }
@@ -1342,7 +1342,7 @@  void vmw_svga_disable(struct vmw_private *dev_priv)
 	if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
 		dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
 		spin_unlock(&dev_priv->svga_lock);
-		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+		if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM, true))
 			DRM_ERROR("Failed evicting VRAM buffers.\n");
 		vmw_write(dev_priv, SVGA_REG_ENABLE,
 			  SVGA_REG_ENABLE_HIDE |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2142639..6b5db9c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -636,6 +636,8 @@  int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
  *
  * @bdev: Pointer to a ttm_bo_device struct.
  * @mem_type: The memory type.
+ * @force_alloc: if true, always allow TTM page allocation regardless
+ * of the zone memory accounting limit
  *
  * Evicts all buffers on the lru list of the memory type.
  * This is normally part of a VT switch or an
@@ -649,7 +651,8 @@  int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
  * -ERESTARTSYS: The call was interrupted by a signal while waiting to
  * evict a buffer.
  */
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type,
+			bool force_alloc);
 
 /**
  * ttm_kmap_obj_virtual