[1/8] drm/ttm: add operation ctx to ttm_bo_validate v2

Message ID 20171117104935.4150-2-christian.koenig@amd.com (mailing list archive)
State New, archived

Commit Message

Christian König Nov. 17, 2017, 10:49 a.m. UTC
Give moving a BO into place an operation context to work with.

v2: rebased

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c      | 14 ++++++++------
 drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c     |  3 ++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  | 12 ++++++++----
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c     |  6 ++++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c     |  3 ++-
 drivers/gpu/drm/ast/ast_ttm.c               |  9 ++++++---
 drivers/gpu/drm/bochs/bochs_mm.c            |  6 ++++--
 drivers/gpu/drm/cirrus/cirrus_ttm.c         |  6 ++++--
 drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c |  6 ++++--
 drivers/gpu/drm/mgag200/mgag200_ttm.c       |  9 ++++++---
 drivers/gpu/drm/nouveau/nouveau_bo.c        |  4 ++--
 drivers/gpu/drm/qxl/qxl_ioctl.c             |  4 ++--
 drivers/gpu/drm/qxl/qxl_object.c            |  6 ++++--
 drivers/gpu/drm/qxl/qxl_release.c           |  4 ++--
 drivers/gpu/drm/radeon/radeon_gem.c         |  3 ++-
 drivers/gpu/drm/radeon/radeon_mn.c          |  3 ++-
 drivers/gpu/drm/radeon/radeon_object.c      | 14 +++++++++-----
 drivers/gpu/drm/radeon/radeon_vm.c          |  3 ++-
 drivers/gpu/drm/ttm/ttm_bo.c                | 16 +++++++++-------
 drivers/gpu/drm/virtio/virtgpu_ioctl.c      | 11 ++++++-----
 drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c     |  3 ++-
 drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c      | 21 +++++++++++++--------
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c     |  9 ++++-----
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c    |  6 ++++--
 drivers/gpu/drm/vmwgfx/vmwgfx_shader.c      |  3 ++-
 include/drm/ttm/ttm_bo_api.h                | 20 ++++++++++++++++----
 26 files changed, 129 insertions(+), 75 deletions(-)
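
The conversion is mechanical at every call site: the two bool parameters of ttm_bo_validate() are replaced by an on-stack struct ttm_operation_ctx. A minimal sketch of the new calling convention follows; foo_bo and foo_bo_validate() are placeholders, not identifiers from this series, and the initializer uses designated fields for readability where the patch itself uses positional ones.

```c
/* foo_bo is a placeholder for a driver's BO wrapper type. */
static int foo_bo_validate(struct foo_bo *bo)
{
	/* Previously: ttm_bo_validate(&bo->tbo, &bo->placement, true, false); */
	struct ttm_operation_ctx ctx = { .interruptible = true,
					 .no_wait_gpu = false };

	/* The flags now travel in one context object, which later patches
	 * can extend without touching every caller again.
	 */
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
```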

Comments

Daniel Vetter Nov. 20, 2017, 10:14 a.m. UTC | #1
On Fri, Nov 17, 2017 at 11:49:28AM +0100, Christian König wrote:
> Give moving a BO into place an operation context to work with.
> 
> v2: rebased
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> Reviewed-by: Michel Dänzer <michel.daenzer@amd.com>
> ---
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 833c3ad24091..097951e999bc 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -259,6 +259,20 @@ struct ttm_bo_kmap_obj {
>  };
>  
>  /**
> + * struct ttm_operation_ctx
> + *
> + * @interruptible: Sleep interruptible if sleeping.
> + * @no_wait_gpu: Return immediately if the GPU is busy.
> + *
> + * Context for TTM operations like changing buffer placement or general memory
> + * allocation.
> + */
> +struct ttm_operation_ctx {
> +	bool interruptible;
> +	bool no_wait_gpu;

Random drive-by comment without looking at how this would work out, but
maybe embed the ww_acquire_ctx in here too? That's at least the design we
ended up with on the kms side for a (at a quick glance) fairly similar
problem. See struct drm_modeset_acquire_ctx.

Cheers, Daniel
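
For illustration only (not part of this patch): the layout Daniel is hinting at would look roughly like the sketch below, with the reservation acquire context embedded the way struct drm_modeset_acquire_ctx carries one on the KMS side. Whether TTM embeds the ww_acquire_ctx or keeps passing it separately is left open here.

```c
/* Hypothetical extension along the lines of the comment above --
 * NOT what this patch adds; shown only to make the suggestion concrete.
 */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
	struct ww_acquire_ctx ww_ctx;	/* reservation acquire context */
};
```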

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index ee7736419411..41994b87c76e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -343,6 +343,7 @@  static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 				 struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { true, false };
 	u64 initial_bytes_moved, bytes_moved;
 	uint32_t domain;
 	int r;
@@ -374,7 +375,7 @@  static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 		      initial_bytes_moved;
 	p->bytes_moved += bytes_moved;
@@ -396,6 +397,7 @@  static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 				struct amdgpu_bo *validated)
 {
 	uint32_t domain = validated->allowed_domains;
+	struct ttm_operation_ctx ctx = { true, false };
 	int r;
 
 	if (!p->evictable)
@@ -433,7 +435,7 @@  static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
 			bo->tbo.mem.mem_type == TTM_PL_VRAM &&
 			bo->tbo.mem.start < adev->mc.visible_vram_size >> PAGE_SHIFT;
 		initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		bytes_moved = atomic64_read(&adev->num_bytes_moved) -
 			initial_bytes_moved;
 		p->bytes_moved += bytes_moved;
@@ -472,6 +474,7 @@  static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo)
 static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 			    struct list_head *validated)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_bo_list_entry *lobj;
 	int r;
 
@@ -489,8 +492,7 @@  static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 		    lobj->user_pages) {
 			amdgpu_ttm_placement_from_domain(bo,
 							 AMDGPU_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
-					    false);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (r)
 				return r;
 			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
@@ -1573,6 +1575,7 @@  int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 			   struct amdgpu_bo_va_mapping **map)
 {
 	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
+	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va_mapping *mapping;
 	int r;
@@ -1593,8 +1596,7 @@  int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
 		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
-		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false,
-				    false);
+		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
 		if (r)
 			return r;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 04ddd782bf6d..b3aea454c5a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -282,6 +282,7 @@  int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *filp)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
@@ -335,7 +336,7 @@  int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			goto free_pages;
 
 		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		amdgpu_bo_unreserve(bo);
 		if (r)
 			goto free_pages;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 3233d5988f66..c2419bc6b3df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -552,6 +552,7 @@  int amdgpu_bo_backup_to_shadow(struct amdgpu_device *adev,
 
 int amdgpu_bo_validate(struct amdgpu_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	uint32_t domain;
 	int r;
 
@@ -562,7 +563,7 @@  int amdgpu_bo_validate(struct amdgpu_bo *bo)
 
 retry:
 	amdgpu_ttm_placement_from_domain(bo, domain);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
 		domain = bo->allowed_domains;
 		goto retry;
@@ -673,6 +674,7 @@  int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 			     u64 *gpu_addr)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
@@ -723,7 +725,7 @@  int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p pin failed\n", bo);
 		goto error;
@@ -760,6 +762,7 @@  int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain, u64 *gpu_addr)
 int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
 	if (!bo->pin_count) {
@@ -773,7 +776,7 @@  int amdgpu_bo_unpin(struct amdgpu_bo *bo)
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r)) {
 		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
 		goto error;
@@ -945,6 +948,7 @@  void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo *abo;
 	unsigned long offset, size;
 	int r;
@@ -978,7 +982,7 @@  int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	abo->placement.num_busy_placement = 1;
 	abo->placement.busy_placement = &abo->placements[1];
 
-	r = ttm_bo_validate(bo, &abo->placement, false, false);
+	r = ttm_bo_validate(bo, &abo->placement, &ctx);
 	if (unlikely(r != 0))
 		return r;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index be607b2be4e9..2f2a9e17fdb4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -408,6 +408,7 @@  static u64 amdgpu_uvd_get_addr_from_ctx(struct amdgpu_uvd_cs_ctx *ctx)
  */
 static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 {
+	struct ttm_operation_ctx tctx = { false, false };
 	struct amdgpu_bo_va_mapping *mapping;
 	struct amdgpu_bo *bo;
 	uint32_t cmd;
@@ -430,7 +431,7 @@  static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
 		}
 		amdgpu_uvd_force_into_uvd_segment(bo);
 
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &tctx);
 	}
 
 	return r;
@@ -949,6 +950,7 @@  int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 			       bool direct, struct dma_fence **fence)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
@@ -975,7 +977,7 @@  static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 		amdgpu_uvd_force_into_uvd_segment(bo);
 	}
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto err;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index dabaca4da7f2..df218df332b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -274,6 +274,7 @@  int amdgpu_vcn_dec_ring_test_ring(struct amdgpu_ring *ring)
 static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 			       bool direct, struct dma_fence **fence)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct list_head head;
@@ -294,7 +295,7 @@  static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *b
 	if (r)
 		return r;
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto err;
 
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 696a15dc2f3f..28da7c2b7ed9 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -354,6 +354,7 @@  static inline u64 ast_bo_gpu_offset(struct ast_bo *bo)
 
 int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (bo->pin_count) {
@@ -365,7 +366,7 @@  int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	ast_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -377,6 +378,7 @@  int ast_bo_pin(struct ast_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int ast_bo_unpin(struct ast_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -388,11 +390,12 @@  int ast_bo_unpin(struct ast_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 }
 
 int ast_bo_push_sysram(struct ast_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -409,7 +412,7 @@  int ast_bo_push_sysram(struct ast_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index c4cadb638460..8250b5e612d2 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -283,6 +283,7 @@  static inline u64 bochs_bo_gpu_offset(struct bochs_bo *bo)
 
 int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (bo->pin_count) {
@@ -295,7 +296,7 @@  int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	bochs_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -307,6 +308,7 @@  int bochs_bo_pin(struct bochs_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int bochs_bo_unpin(struct bochs_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (!bo->pin_count) {
@@ -320,7 +322,7 @@  int bochs_bo_unpin(struct bochs_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1ff1838c0d44..2a5b54d3a03a 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -358,6 +358,7 @@  static inline u64 cirrus_bo_gpu_offset(struct cirrus_bo *bo)
 
 int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (bo->pin_count) {
@@ -369,7 +370,7 @@  int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	cirrus_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -381,6 +382,7 @@  int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -397,7 +399,7 @@  int cirrus_bo_push_sysram(struct cirrus_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 3518167a7dc4..ab4ee5953615 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -344,6 +344,7 @@  int hibmc_bo_create(struct drm_device *dev, int size, int align,
 
 int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (bo->pin_count) {
@@ -356,7 +357,7 @@  int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	hibmc_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -368,6 +369,7 @@  int hibmc_bo_pin(struct hibmc_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int hibmc_bo_unpin(struct hibmc_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (!bo->pin_count) {
@@ -380,7 +382,7 @@  int hibmc_bo_unpin(struct hibmc_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("validate failed for unpin: %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3e7e1cd31395..f03da63abc7b 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -354,6 +354,7 @@  static inline u64 mgag200_bo_gpu_offset(struct mgag200_bo *bo)
 
 int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 
 	if (bo->pin_count) {
@@ -366,7 +367,7 @@  int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 	mgag200_ttm_placement(bo, pl_flag);
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret)
 		return ret;
 
@@ -378,6 +379,7 @@  int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
 
 int mgag200_bo_unpin(struct mgag200_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -389,11 +391,12 @@  int mgag200_bo_unpin(struct mgag200_bo *bo)
 
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	return ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 }
 
 int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int i, ret;
 	if (!bo->pin_count) {
 		DRM_ERROR("unpin bad %p\n", bo);
@@ -410,7 +413,7 @@  int mgag200_bo_push_sysram(struct mgag200_bo *bo)
 	for (i = 0; i < bo->placement.num_placement ; i++)
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 
-	ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
+	ret = ttm_bo_validate(&bo->bo, &bo->placement, &ctx);
 	if (ret) {
 		DRM_ERROR("pushing to VRAM failed\n");
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index e427f80344c4..16caca2647b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -486,10 +486,10 @@  int
 nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
 		    bool no_wait_gpu)
 {
+	struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu };
 	int ret;
 
-	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-			      interruptible, no_wait_gpu);
+	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, &ctx);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 31effed4a3c8..e8c0b1037230 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -309,6 +309,7 @@  static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 	int ret;
 	struct drm_gem_object *gobj = NULL;
 	struct qxl_bo *qobj = NULL;
+	struct ttm_operation_ctx ctx = { true, false };
 
 	if (update_area->left >= update_area->right ||
 	    update_area->top >= update_area->bottom)
@@ -326,8 +327,7 @@  static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
 
 	if (!qobj->pin_count) {
 		qxl_ttm_placement_from_domain(qobj, qobj->type, false);
-		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-				      true, false);
+		ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
 		if (unlikely(ret))
 			goto out;
 	}
diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c
index 0a67ddf19c3d..f6b80fe47d1f 100644
--- a/drivers/gpu/drm/qxl/qxl_object.c
+++ b/drivers/gpu/drm/qxl/qxl_object.c
@@ -223,6 +223,7 @@  struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
 
 static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct drm_device *ddev = bo->gem_base.dev;
 	int r;
 
@@ -233,7 +234,7 @@  static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 		return 0;
 	}
 	qxl_ttm_placement_from_domain(bo, domain, true);
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -246,6 +247,7 @@  static int __qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
 
 static int __qxl_bo_unpin(struct qxl_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct drm_device *ddev = bo->gem_base.dev;
 	int r, i;
 
@@ -258,7 +260,7 @@  static int __qxl_bo_unpin(struct qxl_bo *bo)
 		return 0;
 	for (i = 0; i < bo->placement.num_placement; i++)
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (unlikely(r != 0))
 		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
 	return r;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index a5acd723be41..17c556570be6 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -231,12 +231,12 @@  int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
 
 static int qxl_release_validate_bo(struct qxl_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	int ret;
 
 	if (!bo->pin_count) {
 		qxl_ttm_placement_from_domain(bo, bo->type, false);
-		ret = ttm_bo_validate(&bo->tbo, &bo->placement,
-				      true, false);
+		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index 3386452bd2f0..31d3cb38dfe3 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -285,6 +285,7 @@  int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *filp)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_radeon_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
@@ -343,7 +344,7 @@  int radeon_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		}
 
 		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_GTT);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 		radeon_bo_unreserve(bo);
 		up_read(&current->mm->mmap_sem);
 		if (r)
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 1d62288b7ee3..abd24975c9b1 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -124,6 +124,7 @@  static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 					     unsigned long end)
 {
 	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
+	struct ttm_operation_ctx ctx = { false, false };
 	struct interval_tree_node *it;
 
 	/* notification is exclusive, but interval is inclusive */
@@ -157,7 +158,7 @@  static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 				DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
 			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			if (r)
 				DRM_ERROR("(%ld) failed to validate user bo\n", r);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 093594976126..15404af9d740 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -329,6 +329,7 @@  void radeon_bo_unref(struct radeon_bo **bo)
 int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 			     u64 *gpu_addr)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
 	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
@@ -371,7 +372,7 @@  int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
 		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
 	}
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		bo->pin_count = 1;
 		if (gpu_addr != NULL)
@@ -393,6 +394,7 @@  int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 
 int radeon_bo_unpin(struct radeon_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	int r, i;
 
 	if (!bo->pin_count) {
@@ -406,7 +408,7 @@  int radeon_bo_unpin(struct radeon_bo *bo)
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (likely(r == 0)) {
 		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
 			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
@@ -531,6 +533,7 @@  int radeon_bo_list_validate(struct radeon_device *rdev,
 			    struct ww_acquire_ctx *ticket,
 			    struct list_head *head, int ring)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_bo_list *lobj;
 	struct list_head duplicates;
 	int r;
@@ -572,7 +575,7 @@  int radeon_bo_list_validate(struct radeon_device *rdev,
 				radeon_uvd_force_into_uvd_segment(bo, allowed);
 
 			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
-			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
 				       initial_bytes_moved;
 
@@ -792,6 +795,7 @@  void radeon_bo_move_notify(struct ttm_buffer_object *bo,
 
 int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct radeon_device *rdev;
 	struct radeon_bo *rbo;
 	unsigned long offset, size, lpfn;
@@ -823,10 +827,10 @@  int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
 			rbo->placements[i].lpfn = lpfn;
 	}
-	r = ttm_bo_validate(bo, &rbo->placement, false, false);
+	r = ttm_bo_validate(bo, &rbo->placement, &ctx);
 	if (unlikely(r == -ENOMEM)) {
 		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
-		return ttm_bo_validate(bo, &rbo->placement, false, false);
+		return ttm_bo_validate(bo, &rbo->placement, &ctx);
 	} else if (unlikely(r != 0)) {
 		return r;
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index e5c0e635e371..7f1a9c787bd1 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -387,6 +387,7 @@  static void radeon_vm_set_pages(struct radeon_device *rdev,
 static int radeon_vm_clear_bo(struct radeon_device *rdev,
 			      struct radeon_bo *bo)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct radeon_ib ib;
 	unsigned entries;
 	uint64_t addr;
@@ -396,7 +397,7 @@  static int radeon_vm_clear_bo(struct radeon_device *rdev,
 	if (r)
 		return r;
 
-	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
+	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
 	if (r)
 		goto error_unreserve;
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 77a0fd22e5ba..5347c3f3e2f4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1091,9 +1091,8 @@  bool ttm_bo_mem_compat(struct ttm_placement *placement,
 EXPORT_SYMBOL(ttm_bo_mem_compat);
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
-			struct ttm_placement *placement,
-			bool interruptible,
-			bool no_wait_gpu)
+		    struct ttm_placement *placement,
+		    struct ttm_operation_ctx *ctx)
 {
 	int ret;
 	uint32_t new_flags;
@@ -1103,8 +1102,8 @@  int ttm_bo_validate(struct ttm_buffer_object *bo,
 	 * Check whether we need to move buffer.
 	 */
 	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
-		ret = ttm_bo_move_buffer(bo, placement, interruptible,
-					 no_wait_gpu);
+		ret = ttm_bo_move_buffer(bo, placement, ctx->interruptible,
+					 ctx->no_wait_gpu);
 		if (ret)
 			return ret;
 	} else {
@@ -1219,8 +1218,11 @@  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		WARN_ON(!locked);
 	}
 
-	if (likely(!ret))
-		ret = ttm_bo_validate(bo, placement, interruptible, false);
+	if (likely(!ret)) {
+		struct ttm_operation_ctx ctx = { interruptible, false };
+
+		ret = ttm_bo_validate(bo, placement, &ctx);
+	}
 
 	if (unlikely(ret)) {
 		if (!resv)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 0528edb4a2bf..11a4db518639 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -56,6 +56,7 @@  static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
 static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
 					   struct list_head *head)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct ttm_validate_buffer *buf;
 	struct ttm_buffer_object *bo;
 	struct virtio_gpu_object *qobj;
@@ -68,7 +69,7 @@  static int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
 	list_for_each_entry(buf, head, head) {
 		bo = buf->bo;
 		qobj = container_of(bo, struct virtio_gpu_object, tbo);
-		ret = ttm_bo_validate(bo, &qobj->placement, false, false);
+		ret = ttm_bo_validate(bo, &qobj->placement, &ctx);
 		if (ret) {
 			ttm_eu_backoff_reservation(ticket, head);
 			return ret;
@@ -352,6 +353,7 @@  static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_from_host *args = data;
+	struct ttm_operation_ctx ctx = { true, false };
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 	struct virtio_gpu_fence *fence;
@@ -372,8 +374,7 @@  static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-			      true, false);
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
 	if (unlikely(ret))
 		goto out_unres;
 
@@ -399,6 +400,7 @@  static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	struct virtio_gpu_device *vgdev = dev->dev_private;
 	struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
 	struct drm_virtgpu_3d_transfer_to_host *args = data;
+	struct ttm_operation_ctx ctx = { true, false };
 	struct drm_gem_object *gobj = NULL;
 	struct virtio_gpu_object *qobj = NULL;
 	struct virtio_gpu_fence *fence;
@@ -416,8 +418,7 @@  static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
-			      true, false);
+	ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx);
 	if (unlikely(ret))
 		goto out_unres;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index d87861bbe971..92df0b08c194 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -387,6 +387,7 @@  static int vmw_cotable_readback(struct vmw_resource *res)
  */
 static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 {
+	struct ttm_operation_ctx ctx = { false, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct vmw_cotable *vcotbl = vmw_cotable(res);
 	struct vmw_dma_buffer *buf, *old_buf = res->backup;
@@ -455,7 +456,7 @@  static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	}
 
 	/* Unpin new buffer, and switch backup buffers. */
-	ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false);
+	ret = ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed validating new COTable backup buffer.\n");
 		goto out_wait;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
index 0cd889015dc5..d45d2caffa5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
@@ -47,6 +47,7 @@  int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 				struct ttm_placement *placement,
 				bool interruptible)
 {
+	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 	uint32_t new_flags;
@@ -65,7 +66,7 @@  int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
 		ret = ttm_bo_mem_compat(placement, &bo->mem,
 					&new_flags) == true ? 0 : -EINVAL;
 	else
-		ret = ttm_bo_validate(bo, placement, interruptible, false);
+		ret = ttm_bo_validate(bo, placement, &ctx);
 
 	if (!ret)
 		vmw_bo_pin_reserved(buf, true);
@@ -95,6 +96,7 @@  int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 				  struct vmw_dma_buffer *buf,
 				  bool interruptible)
 {
+	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
 	int ret;
 	uint32_t new_flags;
@@ -115,12 +117,11 @@  int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
 		goto out_unreserve;
 	}
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
-			      false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 	if (likely(ret == 0) || ret == -ERESTARTSYS)
 		goto out_unreserve;
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 
 out_unreserve:
 	if (!ret)
@@ -170,6 +171,7 @@  int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
 				    struct vmw_dma_buffer *buf,
 				    bool interruptible)
 {
+	struct ttm_operation_ctx ctx = {interruptible, false };
 	struct ttm_buffer_object *bo = &buf->base;
 	struct ttm_placement placement;
 	struct ttm_place place;
@@ -200,14 +202,16 @@  int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
 	if (bo->mem.mem_type == TTM_PL_VRAM &&
 	    bo->mem.start < bo->num_pages &&
 	    bo->mem.start > 0 &&
-	    buf->pin_count == 0)
-		(void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
+	    buf->pin_count == 0) {
+		ctx.interruptible = false;
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, &ctx);
+	}
 
 	if (buf->pin_count > 0)
 		ret = ttm_bo_mem_compat(&placement, &bo->mem,
 					&new_flags) == true ? 0 : -EINVAL;
 	else
-		ret = ttm_bo_validate(bo, &placement, interruptible, false);
+		ret = ttm_bo_validate(bo, &placement, &ctx);
 
 	/* For some reason we didn't end up at the start of vram */
 	WARN_ON(ret == 0 && bo->offset != 0);
@@ -286,6 +290,7 @@  void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
  */
 void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 {
+	struct ttm_operation_ctx ctx = { false, true };
 	struct ttm_place pl;
 	struct ttm_placement placement;
 	struct ttm_buffer_object *bo = &vbo->base;
@@ -314,7 +319,7 @@  void vmw_bo_pin_reserved(struct vmw_dma_buffer *vbo, bool pin)
 	placement.num_placement = 1;
 	placement.placement = &pl;
 
-	ret = ttm_bo_validate(bo, &placement, false, true);
+	ret = ttm_bo_validate(bo, &placement, &ctx);
 
 	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 21c62a34e558..b700667f6f0b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3701,14 +3701,14 @@  int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
 	struct vmw_dma_buffer *vbo = container_of(bo, struct vmw_dma_buffer,
 						  base);
+	struct ttm_operation_ctx ctx = { interruptible, true };
 	int ret;
 
 	if (vbo->pin_count > 0)
 		return 0;
 
 	if (validate_as_mob)
-		return ttm_bo_validate(bo, &vmw_mob_placement, interruptible,
-				       false);
+		return ttm_bo_validate(bo, &vmw_mob_placement, &ctx);
 
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
@@ -3717,8 +3717,7 @@  int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * used as a GMR, this will return -ENOMEM.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
-			      false);
+	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, &ctx);
 	if (likely(ret == 0 || ret == -ERESTARTSYS))
 		return ret;
 
@@ -3727,7 +3726,7 @@  int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 	 * previous contents.
 	 */
 
-	ret = ttm_bo_validate(bo, &vmw_vram_placement, interruptible, false);
+	ret = ttm_bo_validate(bo, &vmw_vram_placement, &ctx);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a96f90f017d1..200904ff9a22 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -968,6 +968,7 @@  vmw_resource_check_buffer(struct vmw_resource *res,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
+	struct ttm_operation_ctx ctx = { true, false };
 	struct list_head val_list;
 	bool backup_dirty = false;
 	int ret;
@@ -992,7 +993,7 @@  vmw_resource_check_buffer(struct vmw_resource *res,
 	backup_dirty = res->backup_dirty;
 	ret = ttm_bo_validate(&res->backup->base,
 			      res->func->backup_placement,
-			      true, false);
+			      &ctx);
 
 	if (unlikely(ret != 0))
 		goto out_no_validate;
@@ -1446,6 +1447,7 @@  void vmw_resource_evict_all(struct vmw_private *dev_priv)
  */
 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 {
+	struct ttm_operation_ctx ctx = { interruptible, false };
 	struct vmw_private *dev_priv = res->dev_priv;
 	int ret;
 
@@ -1466,7 +1468,7 @@  int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
 				ret = ttm_bo_validate
 					(&vbo->base,
 					 res->func->backup_placement,
-					 interruptible, false);
+					 &ctx);
 				if (ret) {
 					ttm_bo_unreserve(&vbo->base);
 					goto out_no_validate;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 9b832f136813..004e18b8832c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -970,6 +970,7 @@  int vmw_compat_shader_add(struct vmw_private *dev_priv,
 			  size_t size,
 			  struct list_head *list)
 {
+	struct ttm_operation_ctx ctx = { false, true };
 	struct vmw_dma_buffer *buf;
 	struct ttm_bo_kmap_obj map;
 	bool is_iomem;
@@ -1005,7 +1006,7 @@  int vmw_compat_shader_add(struct vmw_private *dev_priv,
 	WARN_ON(is_iomem);
 
 	ttm_bo_kunmap(&map);
-	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+	ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, &ctx);
 	WARN_ON(ret != 0);
 	ttm_bo_unreserve(&buf->base);
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 833c3ad24091..097951e999bc 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -259,6 +259,20 @@  struct ttm_bo_kmap_obj {
 };
 
 /**
+ * struct ttm_operation_ctx
+ *
+ * @interruptible: Sleep interruptible if sleeping.
+ * @no_wait_gpu: Return immediately if the GPU is busy.
+ *
+ * Context for TTM operations like changing buffer placement or general memory
+ * allocation.
+ */
+struct ttm_operation_ctx {
+	bool interruptible;
+	bool no_wait_gpu;
+};
+
+/**
  * ttm_bo_reference - reference a struct ttm_buffer_object
  *
  * @bo: The buffer object.
@@ -306,8 +320,7 @@  bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
  *
  * @bo: The buffer object.
  * @placement: Proposed placement for the buffer object.
- * @interruptible: Sleep interruptible if sleeping.
- * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @ctx: validation parameters.
  *
  * Changes placement and caching policy of the buffer object
  * according proposed placement.
@@ -319,8 +332,7 @@  bool ttm_bo_mem_compat(struct ttm_placement *placement, struct ttm_mem_reg *mem,
  */
 int ttm_bo_validate(struct ttm_buffer_object *bo,
 		    struct ttm_placement *placement,
-		    bool interruptible,
-		    bool no_wait_gpu);
+		    struct ttm_operation_ctx *ctx);
 
 /**
  * ttm_bo_unref