@@ -763,7 +763,7 @@ static int amdgpu_debugfs_evict_vram(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
- seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev));
+ seq_printf(m, "(%d)\n", amdgpu_bo_evict_vram(adev, true));
return 0;
}
@@ -2168,7 +2168,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
}
}
/* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+ amdgpu_bo_evict_vram(adev, true);
amdgpu_fence_driver_suspend(adev);
@@ -2178,7 +2178,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
* This second call to evict vram is to evict the gart page table
* using the CPU.
*/
- amdgpu_bo_evict_vram(adev);
+ amdgpu_bo_evict_vram(adev, true);
pci_save_state(dev->pdev);
if (suspend) {
@@ -803,14 +803,16 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
return r;
}
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
+int amdgpu_bo_evict_vram(struct amdgpu_device *adev,
+ bool allow_alloc_anyway)
{
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
if (0 && (adev->flags & AMD_IS_APU)) {
/* Useless to evict on IGP chips */
return 0;
}
- return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM);
+ return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM,
+ allow_alloc_anyway);
}
static const char *amdgpu_vram_names[] = {
@@ -227,7 +227,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset,
u64 *gpu_addr);
int amdgpu_bo_unpin(struct amdgpu_bo *bo);
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
+int amdgpu_bo_evict_vram(struct amdgpu_device *adev, bool allow_alloc_anyway);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo,
@@ -702,7 +702,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
}
NV_DEBUG(drm, "evicting buffers...\n");
- ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
+ ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM, true);
NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
if (drm->cechan) {
@@ -350,10 +350,10 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
int qxl_surf_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
+ return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV, true);
}
int qxl_vram_evict(struct qxl_device *qdev)
{
- return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
+ return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM, true);
}
@@ -1522,7 +1522,7 @@ void radeon_device_fini(struct radeon_device *rdev)
DRM_INFO("radeon: finishing device.\n");
rdev->shutdown = true;
/* evict vram memory */
- radeon_bo_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev, true);
radeon_fini(rdev);
if (!pci_is_thunderbolt_attached(rdev->pdev))
vga_switcheroo_unregister_client(rdev->pdev);
@@ -1607,7 +1607,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
}
}
/* evict vram memory */
- radeon_bo_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev, true);
/* wait for gpu to finish processing current batch */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
@@ -1626,7 +1626,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
* This second call to evict vram is to evict the gart page table
* using the CPU.
*/
- radeon_bo_evict_vram(rdev);
+ radeon_bo_evict_vram(rdev, true);
radeon_agp_suspend(rdev);
@@ -420,7 +420,8 @@ int radeon_bo_unpin(struct radeon_bo *bo)
return r;
}
-int radeon_bo_evict_vram(struct radeon_device *rdev)
+int
+radeon_bo_evict_vram(struct radeon_device *rdev, bool allow_alloc_anyway)
{
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
if (0 && (rdev->flags & RADEON_IS_IGP)) {
@@ -428,7 +429,8 @@ int radeon_bo_evict_vram(struct radeon_device *rdev)
/* Useless to evict on IGP chips */
return 0;
}
- return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
+ return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+ allow_alloc_anyway);
}
void radeon_bo_force_delete(struct radeon_device *rdev)
@@ -136,7 +136,8 @@ extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
u64 max_offset, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
-extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev,
+ bool allow_alloc_anyway);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
extern int radeon_bo_init(struct radeon_device *rdev);
extern void radeon_bo_fini(struct radeon_device *rdev);
@@ -1342,15 +1342,17 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
EXPORT_SYMBOL(ttm_bo_create);
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
- unsigned mem_type)
+ unsigned mem_type, bool allow_alloc_anyway)
{
- struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_operation_ctx ttm_opt_ctx = { false, false };
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
struct ttm_bo_global *glob = bdev->glob;
struct dma_fence *fence;
int ret;
unsigned i;
+ if (allow_alloc_anyway)
+ ttm_opt_ctx.flags = TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY;
/*
* Can't use standard list traversal since we're unlocking.
*/
@@ -1359,7 +1361,8 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
spin_unlock(&glob->lru_lock);
- ret = ttm_mem_evict_first(bdev, mem_type, NULL, &ctx);
+ ret = ttm_mem_evict_first(bdev, mem_type, NULL,
+ &ttm_opt_ctx);
if (ret)
return ret;
spin_lock(&glob->lru_lock);
@@ -1403,7 +1406,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
ret = 0;
if (mem_type > 0) {
- ret = ttm_bo_force_list_clean(bdev, mem_type);
+ ret = ttm_bo_force_list_clean(bdev, mem_type, true);
if (ret) {
pr_err("Cleanup eviction failed\n");
return ret;
@@ -1419,7 +1422,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
}
EXPORT_SYMBOL(ttm_bo_clean_mm);
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type,
+ bool allow_alloc_anyway)
{
struct ttm_mem_type_manager *man = &bdev->man[mem_type];
@@ -1433,7 +1437,7 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
return 0;
}
- return ttm_bo_force_list_clean(bdev, mem_type);
+ return ttm_bo_force_list_clean(bdev, mem_type, allow_alloc_anyway);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
@@ -430,7 +430,7 @@ static int vmw_request_device(struct vmw_private *dev_priv)
if (dev_priv->cman)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
- (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB, true);
vmw_otables_takedown(dev_priv);
}
if (dev_priv->cman)
@@ -463,7 +463,7 @@ static void vmw_release_device_early(struct vmw_private *dev_priv)
vmw_cmdbuf_remove_pool(dev_priv->cman);
if (dev_priv->has_mob) {
- ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB);
+ ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB, true);
vmw_otables_takedown(dev_priv);
}
}
@@ -1342,7 +1342,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
if (dev_priv->bdev.man[TTM_PL_VRAM].use_type) {
dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;
spin_unlock(&dev_priv->svga_lock);
- if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM))
+ if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM, true))
DRM_ERROR("Failed evicting VRAM buffers.\n");
vmw_write(dev_priv, SVGA_REG_ENABLE,
SVGA_REG_ENABLE_HIDE |
@@ -636,6 +636,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
*
* @bdev: Pointer to a ttm_bo_device struct.
* @mem_type: The memory type.
+ * @allow_alloc_anyway: If true, always allow TTM page allocation,
+ * regardless of the zone memory accounting limit.
*
* Evicts all buffers on the lru list of the memory type.
* This is normally part of a VT switch or an
@@ -649,7 +651,8 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type);
* -ERESTARTSYS: The call was interrupted by a signal while waiting to
* evict a buffer.
*/
-int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type);
+int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type,
+ bool allow_alloc_anyway);
/**
* ttm_kmap_obj_virtual
If the new allow_alloc_anyway parameter is true, allocate TTM pages
regardless of the zone global memory accounting limit. This covers
another special case, suspend, which should not be constrained by the
zone global memory accounting limit.

Signed-off-by: Roger He <Hongbo.He@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  |  4 ++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  |  6 ++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.h  |  2 +-
 drivers/gpu/drm/nouveau/nouveau_drm.c       |  2 +-
 drivers/gpu/drm/qxl/qxl_object.c            |  4 ++--
 drivers/gpu/drm/radeon/radeon_device.c      |  6 +++---
 drivers/gpu/drm/radeon/radeon_object.c      |  6 ++++--
 drivers/gpu/drm/radeon/radeon_object.h      |  3 ++-
 drivers/gpu/drm/ttm/ttm_bo.c                | 16 ++++++++++------
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.c         |  6 +++---
 include/drm/ttm/ttm_bo_api.h                |  5 ++++-
 12 files changed, 37 insertions(+), 25 deletions(-)
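
For context, the new parameter ends up as TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY in
the ttm_operation_ctx that ttm_bo_force_list_clean() passes down to
ttm_mem_evict_first(). The companion change that actually consults the flag
in the zone accounting code is not part of this diff, so the snippet below is
only a minimal, self-contained sketch of the intended decision. The struct
and function names (sketch_*) are simplified stand-ins, not the ttm_memory.c
implementation; only the flag name is taken from this series, and its value
here is illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Flag value is illustrative; only the name comes from this series. */
#define TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY (1U << 1)

/* Simplified stand-ins for ttm_operation_ctx and one accounting zone. */
struct sketch_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	uint32_t flags;
};

struct sketch_mem_zone {
	uint64_t used_mem;
	uint64_t max_mem;
};

/*
 * Model of the accounting decision: normally refuse an allocation that
 * would push the zone over its limit, but let it through when the caller
 * (e.g. the suspend path) sets ALLOW_ALLOC_ANYWAY, since failing eviction
 * there would be worse than briefly exceeding the limit.
 */
static bool sketch_zone_may_alloc(const struct sketch_mem_zone *zone,
				  uint64_t size,
				  const struct sketch_operation_ctx *ctx)
{
	if (ctx->flags & TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY)
		return true;
	return zone->used_mem + size <= zone->max_mem;
}

int main(void)
{
	struct sketch_mem_zone zone = { .used_mem = 900, .max_mem = 1000 };
	struct sketch_operation_ctx normal_ctx = { false, false, 0 };
	struct sketch_operation_ctx suspend_ctx = {
		false, false, TTM_OPT_FLAG_ALLOW_ALLOC_ANYWAY
	};

	/* Over the limit: refused normally, allowed on the suspend path. */
	printf("normal : %d\n", sketch_zone_may_alloc(&zone, 200, &normal_ctx));
	printf("suspend: %d\n", sketch_zone_may_alloc(&zone, 200, &suspend_ctx));
	return 0;
}

Note that in this diff every existing ttm_bo_evict_mm() caller passes true,
so behavior only diverges once a caller chooses to opt out with false and
keep the zone limit enforced.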