diff mbox series

drm/ttm: move cpu_writers handling into vmwgfx

Message ID 20190607144758.60076-1-christian.koenig@amd.com (mailing list archive)
State New, archived
Headers show
Series drm/ttm: move cpu_writers handling into vmwgfx | expand

Commit Message

Christian König June 7, 2019, 2:47 p.m. UTC
This feature is only used by vmwgfx and is superfluous for everybody else.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/ttm/ttm_bo.c             | 27 ------------------
 drivers/gpu/drm/ttm/ttm_bo_util.c        |  1 -
 drivers/gpu/drm/ttm/ttm_execbuf_util.c   |  7 +----
 drivers/gpu/drm/vmwgfx/vmwgfx_bo.c       | 35 ++++++++++++++++++++----
 drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  2 ++
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  8 ++++++
 drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  4 +++
 include/drm/ttm/ttm_bo_api.h             | 31 ---------------------
 8 files changed, 45 insertions(+), 70 deletions(-)

Comments

Thomas Hellstrom June 12, 2019, 3:27 p.m. UTC | #1
Hi, Christian,

This looks OK, although there are a couple of minor alterations needed
in the vmwgfx driver:

- We should operate on vmw_buffer_objects rather than on
user_buffer_objects.
- vmw_user_bo_verify_synccpu should move to the validate code.

I can take care of that if it's ok with you.

Thanks,
Thomas


On Fri, 2019-06-07 at 16:47 +0200, Christian König wrote:
> This feature is only used by vmwgfx and superflous for everybody
> else.
> 
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>  drivers/gpu/drm/ttm/ttm_bo.c             | 27 ------------------
>  drivers/gpu/drm/ttm/ttm_bo_util.c        |  1 -
>  drivers/gpu/drm/ttm/ttm_execbuf_util.c   |  7 +----
>  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c       | 35 ++++++++++++++++++++
> ----
>  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  2 ++
>  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  8 ++++++
>  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  4 +++
>  include/drm/ttm/ttm_bo_api.h             | 31 ---------------------
>  8 files changed, 45 insertions(+), 70 deletions(-)
> 
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c
> b/drivers/gpu/drm/ttm/ttm_bo.c
> index c7de667d482a..4ec055ffd6a7 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -153,7 +153,6 @@ static void ttm_bo_release_list(struct kref
> *list_kref)
>  
>  	BUG_ON(kref_read(&bo->list_kref));
>  	BUG_ON(kref_read(&bo->kref));
> -	BUG_ON(atomic_read(&bo->cpu_writers));
>  	BUG_ON(bo->mem.mm_node != NULL);
>  	BUG_ON(!list_empty(&bo->lru));
>  	BUG_ON(!list_empty(&bo->ddestroy));
> @@ -1308,7 +1307,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device
> *bdev,
>  
>  	kref_init(&bo->kref);
>  	kref_init(&bo->list_kref);
> -	atomic_set(&bo->cpu_writers, 0);
>  	INIT_LIST_HEAD(&bo->lru);
>  	INIT_LIST_HEAD(&bo->ddestroy);
>  	INIT_LIST_HEAD(&bo->swap);
> @@ -1814,31 +1812,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
>  }
>  EXPORT_SYMBOL(ttm_bo_wait);
>  
> -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool
> no_wait)
> -{
> -	int ret = 0;
> -
> -	/*
> -	 * Using ttm_bo_reserve makes sure the lru lists are updated.
> -	 */
> -
> -	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
> -	if (unlikely(ret != 0))
> -		return ret;
> -	ret = ttm_bo_wait(bo, true, no_wait);
> -	if (likely(ret == 0))
> -		atomic_inc(&bo->cpu_writers);
> -	ttm_bo_unreserve(bo);
> -	return ret;
> -}
> -EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
> -
> -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
> -{
> -	atomic_dec(&bo->cpu_writers);
> -}
> -EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
> -
>  /**
>   * A buffer object shrink method that tries to swap out the first
>   * buffer object on the bo_global::swap_lru list.
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c
> b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 895d77d799e4..6f43f1f0de7c 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -511,7 +511,6 @@ static int ttm_buffer_object_transfer(struct
> ttm_buffer_object *bo,
>  	mutex_init(&fbo->base.wu_mutex);
>  	fbo->base.moving = NULL;
>  	drm_vma_node_reset(&fbo->base.vma_node);
> -	atomic_set(&fbo->base.cpu_writers, 0);
>  
>  	kref_init(&fbo->base.list_kref);
>  	kref_init(&fbo->base.kref);
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 957ec375a4ba..80fa52b36d5c 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -113,12 +113,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx
> *ticket,
>  		struct ttm_buffer_object *bo = entry->bo;
>  
>  		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL),
> ticket);
> -		if (!ret && unlikely(atomic_read(&bo->cpu_writers) >
> 0)) {
> -			reservation_object_unlock(bo->resv);
> -
> -			ret = -EBUSY;
> -
> -		} else if (ret == -EALREADY && dups) {
> +		if (ret == -EALREADY && dups) {
>  			struct ttm_validate_buffer *safe = entry;
>  			entry = list_prev_entry(entry, head);
>  			list_del(&safe->head);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index 5d5c2bce01f3..457861c5047f 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -565,7 +565,7 @@ static void vmw_user_bo_ref_obj_release(struct
> ttm_base_object *base,
>  
>  	switch (ref_type) {
>  	case TTM_REF_SYNCCPU_WRITE:
> -		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
> +		atomic_dec(&user_bo->vbo.cpu_writers);
>  		break;
>  	default:
>  		WARN_ONCE(true, "Undefined buffer object reference
> release.\n");
> @@ -681,12 +681,12 @@ static int vmw_user_bo_synccpu_grab(struct
> vmw_user_buffer_object *user_bo,
>  				    struct ttm_object_file *tfile,
>  				    uint32_t flags)
>  {
> +	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
>  	struct ttm_buffer_object *bo = &user_bo->vbo.base;
>  	bool existed;
>  	int ret;
>  
>  	if (flags & drm_vmw_synccpu_allow_cs) {
> -		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
>  		long lret;
>  
>  		lret = reservation_object_wait_timeout_rcu
> @@ -699,15 +699,20 @@ static int vmw_user_bo_synccpu_grab(struct
> vmw_user_buffer_object *user_bo,
>  		return 0;
>  	}
>  
> -	ret = ttm_bo_synccpu_write_grab
> -		(bo, !!(flags & drm_vmw_synccpu_dontblock));
> +	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
> +	if (unlikely(ret != 0))
> +		return ret;
> +	ret = ttm_bo_wait(bo, true, nonblock);
> +	if (likely(ret == 0))
> +		atomic_inc(&user_bo->vbo.cpu_writers);
> +	ttm_bo_unreserve(bo);
>  	if (unlikely(ret != 0))
>  		return ret;
>  
>  	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
>  				 TTM_REF_SYNCCPU_WRITE, &existed,
> false);
>  	if (ret != 0 || existed)
> -		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
> +		atomic_dec(&user_bo->vbo.cpu_writers);
>  
>  	return ret;
>  }
> @@ -731,6 +736,26 @@ static int vmw_user_bo_synccpu_release(uint32_t
> handle,
>  	return 0;
>  }
>  
> +/**
> + * vmw_user_bo_verify_synccpu - Verify if grab for CPU access exists
> + *
> + * @list: list of ttm_validate_buffer objects
> + *
> + * Return:
> + * -EBUSY if a CPU grab is found, 0 otherwise.
> + */
> +int vmw_user_bo_verify_synccpu(struct list_head *list)
> +{
> +	struct ttm_validate_buffer *entry;
> +
> +	list_for_each_entry(entry, list, head) {
> +		struct vmw_buffer_object *bo = vmw_buffer_object(entry-
> >bo);
> +
> +                if (unlikely(atomic_read(&bo->cpu_writers) > 0))
> +			return -EBUSY;
> +	}
> +	return 0;
> +}
>  
>  /**
>   * vmw_user_bo_synccpu_ioctl - ioctl function implementing the
> synccpu
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> index 96983c47fb40..b3988c957f0e 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> @@ -90,6 +90,7 @@ struct vmw_buffer_object {
>  	struct ttm_buffer_object base;
>  	struct list_head res_list;
>  	s32 pin_count;
> +	atomic_t cpu_writers;
>  	/* Not ref-counted.  Protected by binding_mutex */
>  	struct vmw_resource *dx_query_ctx;
>  	/* Protected by reservation */
> @@ -749,6 +750,7 @@ extern int vmw_bo_init(struct vmw_private
> *dev_priv,
>  		       void (*bo_free)(struct ttm_buffer_object *bo));
>  extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
>  				     struct ttm_object_file *tfile);
> +extern int vmw_user_bo_verify_synccpu(struct list_head *list);
>  extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
>  			     struct ttm_object_file *tfile,
>  			     uint32_t size,
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> index 2ff7ba04d8c8..271217f6e3a3 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> @@ -3719,6 +3719,10 @@ int vmw_execbuf_process(struct drm_file
> *file_priv,
>  	if (unlikely(ret != 0))
>  		goto out_err_nores;
>  
> +	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
> +	if (unlikely(ret != 0))
> +		goto out_err;
> +
>  	ret = vmw_validation_bo_validate(&val_ctx, true);
>  	if (unlikely(ret != 0))
>  		goto out_err;
> @@ -3918,6 +3922,10 @@ void __vmw_execbuf_release_pinned_bo(struct
> vmw_private *dev_priv,
>  	if (ret)
>  		goto out_no_reserve;
>  
> +	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
> +	if (unlikely(ret != 0))
> +		goto out_no_emit;
> +
>  	if (dev_priv->query_cid_valid) {
>  		BUG_ON(fence != NULL);
>  		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv-
> >query_cid);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 1d38a8b2f2ec..c55da6f0ff03 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -469,6 +469,10 @@ vmw_resource_check_buffer(struct ww_acquire_ctx
> *ticket,
>  	if (unlikely(ret != 0))
>  		goto out_no_reserve;
>  
> +	ret = vmw_user_bo_verify_synccpu(&val_list);
> +	if (unlikely(ret != 0))
> +		goto out_no_validate;
> +
>  	if (res->func->needs_backup && list_empty(&res->mob_head))
>  		return 0;
>  
> diff --git a/include/drm/ttm/ttm_bo_api.h
> b/include/drm/ttm/ttm_bo_api.h
> index 49d9cdfc58f2..e358e965d4c0 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -145,7 +145,6 @@ struct ttm_tt;
>   * holds a pointer to a persistent shmem object.
>   * @ttm: TTM structure holding system pages.
>   * @evicted: Whether the object was evicted without user-space
> knowing.
> - * @cpu_writes: For synchronization. Number of cpu writers.
>   * @lru: List head for the lru list.
>   * @ddestroy: List head for the delayed destroy list.
>   * @swap: List head for swap LRU list.
> @@ -195,11 +194,6 @@ struct ttm_buffer_object {
>  	struct ttm_tt *ttm;
>  	bool evicted;
>  
> -	/**
> -	 * Members protected by the bo::reserved lock only when written
> to.
> -	 */
> -
> -	atomic_t cpu_writers;
>  
>  	/**
>  	 * Members protected by the bdev::lru_lock.
> @@ -443,31 +437,6 @@ void ttm_bo_unlock_delayed_workqueue(struct
> ttm_bo_device *bdev, int resched);
>  bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>  			      const struct ttm_place *place);
>  
> -/**
> - * ttm_bo_synccpu_write_grab
> - *
> - * @bo: The buffer object:
> - * @no_wait: Return immediately if buffer is busy.
> - *
> - * Synchronizes a buffer object for CPU RW access. This means
> - * command submission that affects the buffer will return -EBUSY
> - * until ttm_bo_synccpu_write_release is called.
> - *
> - * Returns
> - * -EBUSY if the buffer is busy and no_wait is true.
> - * -ERESTARTSYS if interrupted by a signal.
> - */
> -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool
> no_wait);
> -
> -/**
> - * ttm_bo_synccpu_write_release:
> - *
> - * @bo : The buffer object.
> - *
> - * Releases a synccpu lock.
> - */
> -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
> -
>  /**
>   * ttm_bo_acc_size
>   *
Christian König June 14, 2019, 12:58 p.m. UTC | #2
Thomas just a gentle ping on this.

It's not that my life depends on this, but it would still be a nice-to-have
cleanup.

Thanks,
Christian.

Am 07.06.19 um 16:47 schrieb Christian König:
> This feature is only used by vmwgfx and superflous for everybody else.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>
> ---
>   drivers/gpu/drm/ttm/ttm_bo.c             | 27 ------------------
>   drivers/gpu/drm/ttm/ttm_bo_util.c        |  1 -
>   drivers/gpu/drm/ttm/ttm_execbuf_util.c   |  7 +----
>   drivers/gpu/drm/vmwgfx/vmwgfx_bo.c       | 35 ++++++++++++++++++++----
>   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  2 ++
>   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  8 ++++++
>   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  4 +++
>   include/drm/ttm/ttm_bo_api.h             | 31 ---------------------
>   8 files changed, 45 insertions(+), 70 deletions(-)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
> index c7de667d482a..4ec055ffd6a7 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> @@ -153,7 +153,6 @@ static void ttm_bo_release_list(struct kref *list_kref)
>   
>   	BUG_ON(kref_read(&bo->list_kref));
>   	BUG_ON(kref_read(&bo->kref));
> -	BUG_ON(atomic_read(&bo->cpu_writers));
>   	BUG_ON(bo->mem.mm_node != NULL);
>   	BUG_ON(!list_empty(&bo->lru));
>   	BUG_ON(!list_empty(&bo->ddestroy));
> @@ -1308,7 +1307,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
>   
>   	kref_init(&bo->kref);
>   	kref_init(&bo->list_kref);
> -	atomic_set(&bo->cpu_writers, 0);
>   	INIT_LIST_HEAD(&bo->lru);
>   	INIT_LIST_HEAD(&bo->ddestroy);
>   	INIT_LIST_HEAD(&bo->swap);
> @@ -1814,31 +1812,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
>   }
>   EXPORT_SYMBOL(ttm_bo_wait);
>   
> -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
> -{
> -	int ret = 0;
> -
> -	/*
> -	 * Using ttm_bo_reserve makes sure the lru lists are updated.
> -	 */
> -
> -	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
> -	if (unlikely(ret != 0))
> -		return ret;
> -	ret = ttm_bo_wait(bo, true, no_wait);
> -	if (likely(ret == 0))
> -		atomic_inc(&bo->cpu_writers);
> -	ttm_bo_unreserve(bo);
> -	return ret;
> -}
> -EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
> -
> -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
> -{
> -	atomic_dec(&bo->cpu_writers);
> -}
> -EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
> -
>   /**
>    * A buffer object shrink method that tries to swap out the first
>    * buffer object on the bo_global::swap_lru list.
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 895d77d799e4..6f43f1f0de7c 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -511,7 +511,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
>   	mutex_init(&fbo->base.wu_mutex);
>   	fbo->base.moving = NULL;
>   	drm_vma_node_reset(&fbo->base.vma_node);
> -	atomic_set(&fbo->base.cpu_writers, 0);
>   
>   	kref_init(&fbo->base.list_kref);
>   	kref_init(&fbo->base.kref);
> diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> index 957ec375a4ba..80fa52b36d5c 100644
> --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> @@ -113,12 +113,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
>   		struct ttm_buffer_object *bo = entry->bo;
>   
>   		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
> -		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
> -			reservation_object_unlock(bo->resv);
> -
> -			ret = -EBUSY;
> -
> -		} else if (ret == -EALREADY && dups) {
> +		if (ret == -EALREADY && dups) {
>   			struct ttm_validate_buffer *safe = entry;
>   			entry = list_prev_entry(entry, head);
>   			list_del(&safe->head);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> index 5d5c2bce01f3..457861c5047f 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> @@ -565,7 +565,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
>   
>   	switch (ref_type) {
>   	case TTM_REF_SYNCCPU_WRITE:
> -		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
> +		atomic_dec(&user_bo->vbo.cpu_writers);
>   		break;
>   	default:
>   		WARN_ONCE(true, "Undefined buffer object reference release.\n");
> @@ -681,12 +681,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
>   				    struct ttm_object_file *tfile,
>   				    uint32_t flags)
>   {
> +	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
>   	struct ttm_buffer_object *bo = &user_bo->vbo.base;
>   	bool existed;
>   	int ret;
>   
>   	if (flags & drm_vmw_synccpu_allow_cs) {
> -		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
>   		long lret;
>   
>   		lret = reservation_object_wait_timeout_rcu
> @@ -699,15 +699,20 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
>   		return 0;
>   	}
>   
> -	ret = ttm_bo_synccpu_write_grab
> -		(bo, !!(flags & drm_vmw_synccpu_dontblock));
> +	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
> +	if (unlikely(ret != 0))
> +		return ret;
> +	ret = ttm_bo_wait(bo, true, nonblock);
> +	if (likely(ret == 0))
> +		atomic_inc(&user_bo->vbo.cpu_writers);
> +	ttm_bo_unreserve(bo);
>   	if (unlikely(ret != 0))
>   		return ret;
>   
>   	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
>   				 TTM_REF_SYNCCPU_WRITE, &existed, false);
>   	if (ret != 0 || existed)
> -		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
> +		atomic_dec(&user_bo->vbo.cpu_writers);
>   
>   	return ret;
>   }
> @@ -731,6 +736,26 @@ static int vmw_user_bo_synccpu_release(uint32_t handle,
>   	return 0;
>   }
>   
> +/**
> + * vmw_user_bo_verify_synccpu - Verify if grab for CPU access exists
> + *
> + * @list: list of ttm_validate_buffer objects
> + *
> + * Return:
> + * -EBUSY if a CPU grab is found, 0 otherwise.
> + */
> +int vmw_user_bo_verify_synccpu(struct list_head *list)
> +{
> +	struct ttm_validate_buffer *entry;
> +
> +	list_for_each_entry(entry, list, head) {
> +		struct vmw_buffer_object *bo = vmw_buffer_object(entry->bo);
> +
> +                if (unlikely(atomic_read(&bo->cpu_writers) > 0))
> +			return -EBUSY;
> +	}
> +	return 0;
> +}
>   
>   /**
>    * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> index 96983c47fb40..b3988c957f0e 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> @@ -90,6 +90,7 @@ struct vmw_buffer_object {
>   	struct ttm_buffer_object base;
>   	struct list_head res_list;
>   	s32 pin_count;
> +	atomic_t cpu_writers;
>   	/* Not ref-counted.  Protected by binding_mutex */
>   	struct vmw_resource *dx_query_ctx;
>   	/* Protected by reservation */
> @@ -749,6 +750,7 @@ extern int vmw_bo_init(struct vmw_private *dev_priv,
>   		       void (*bo_free)(struct ttm_buffer_object *bo));
>   extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
>   				     struct ttm_object_file *tfile);
> +extern int vmw_user_bo_verify_synccpu(struct list_head *list);
>   extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
>   			     struct ttm_object_file *tfile,
>   			     uint32_t size,
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> index 2ff7ba04d8c8..271217f6e3a3 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> @@ -3719,6 +3719,10 @@ int vmw_execbuf_process(struct drm_file *file_priv,
>   	if (unlikely(ret != 0))
>   		goto out_err_nores;
>   
> +	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
> +	if (unlikely(ret != 0))
> +		goto out_err;
> +
>   	ret = vmw_validation_bo_validate(&val_ctx, true);
>   	if (unlikely(ret != 0))
>   		goto out_err;
> @@ -3918,6 +3922,10 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
>   	if (ret)
>   		goto out_no_reserve;
>   
> +	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
> +	if (unlikely(ret != 0))
> +		goto out_no_emit;
> +
>   	if (dev_priv->query_cid_valid) {
>   		BUG_ON(fence != NULL);
>   		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> index 1d38a8b2f2ec..c55da6f0ff03 100644
> --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> @@ -469,6 +469,10 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
>   	if (unlikely(ret != 0))
>   		goto out_no_reserve;
>   
> +	ret = vmw_user_bo_verify_synccpu(&val_list);
> +	if (unlikely(ret != 0))
> +		goto out_no_validate;
> +
>   	if (res->func->needs_backup && list_empty(&res->mob_head))
>   		return 0;
>   
> diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
> index 49d9cdfc58f2..e358e965d4c0 100644
> --- a/include/drm/ttm/ttm_bo_api.h
> +++ b/include/drm/ttm/ttm_bo_api.h
> @@ -145,7 +145,6 @@ struct ttm_tt;
>    * holds a pointer to a persistent shmem object.
>    * @ttm: TTM structure holding system pages.
>    * @evicted: Whether the object was evicted without user-space knowing.
> - * @cpu_writes: For synchronization. Number of cpu writers.
>    * @lru: List head for the lru list.
>    * @ddestroy: List head for the delayed destroy list.
>    * @swap: List head for swap LRU list.
> @@ -195,11 +194,6 @@ struct ttm_buffer_object {
>   	struct ttm_tt *ttm;
>   	bool evicted;
>   
> -	/**
> -	 * Members protected by the bo::reserved lock only when written to.
> -	 */
> -
> -	atomic_t cpu_writers;
>   
>   	/**
>   	 * Members protected by the bdev::lru_lock.
> @@ -443,31 +437,6 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
>   bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
>   			      const struct ttm_place *place);
>   
> -/**
> - * ttm_bo_synccpu_write_grab
> - *
> - * @bo: The buffer object:
> - * @no_wait: Return immediately if buffer is busy.
> - *
> - * Synchronizes a buffer object for CPU RW access. This means
> - * command submission that affects the buffer will return -EBUSY
> - * until ttm_bo_synccpu_write_release is called.
> - *
> - * Returns
> - * -EBUSY if the buffer is busy and no_wait is true.
> - * -ERESTARTSYS if interrupted by a signal.
> - */
> -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
> -
> -/**
> - * ttm_bo_synccpu_write_release:
> - *
> - * @bo : The buffer object.
> - *
> - * Releases a synccpu lock.
> - */
> -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
> -
>   /**
>    * ttm_bo_acc_size
>    *
Thomas Hellstrom June 14, 2019, 2:36 p.m. UTC | #3
Hi, Christian,

On Fri, 2019-06-14 at 14:58 +0200, Christian König wrote:
> Thomas just a gentle ping on this.
> 
> It's not that my live depends on this, but it would still be a nice
> to 
> have cleanup.
> 
> Thanks,
> Christian.
> 

I thought I had answered this, but I can't find it in my outgoing
folder. Sorry about that.

In principle I'm fine with it, but the vmwgfx part needs some changes:
1) We need to operate on struct vmwgfx_buffer_object rather than struct
vmwgfx_user_buffer_object. Not all buffer objects are user buffer
objects...

2) Need to look at the moving the list verifying or at least its calls
into the vmwgfx_validate.c code.

I hopefully can have a quick look at this next week.

/Thomas




> Am 07.06.19 um 16:47 schrieb Christian König:
> > This feature is only used by vmwgfx and superflous for everybody
> > else.
> > 
> > Signed-off-by: Christian König <christian.koenig@amd.com>
> > ---
> >   drivers/gpu/drm/ttm/ttm_bo.c             | 27 ------------------
> >   drivers/gpu/drm/ttm/ttm_bo_util.c        |  1 -
> >   drivers/gpu/drm/ttm/ttm_execbuf_util.c   |  7 +----
> >   drivers/gpu/drm/vmwgfx/vmwgfx_bo.c       | 35
> > ++++++++++++++++++++----
> >   drivers/gpu/drm/vmwgfx/vmwgfx_drv.h      |  2 ++
> >   drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c  |  8 ++++++
> >   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c |  4 +++
> >   include/drm/ttm/ttm_bo_api.h             | 31 -----------------
> > ----
> >   8 files changed, 45 insertions(+), 70 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo.c
> > b/drivers/gpu/drm/ttm/ttm_bo.c
> > index c7de667d482a..4ec055ffd6a7 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo.c
> > @@ -153,7 +153,6 @@ static void ttm_bo_release_list(struct kref
> > *list_kref)
> >   
> >   	BUG_ON(kref_read(&bo->list_kref));
> >   	BUG_ON(kref_read(&bo->kref));
> > -	BUG_ON(atomic_read(&bo->cpu_writers));
> >   	BUG_ON(bo->mem.mm_node != NULL);
> >   	BUG_ON(!list_empty(&bo->lru));
> >   	BUG_ON(!list_empty(&bo->ddestroy));
> > @@ -1308,7 +1307,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device
> > *bdev,
> >   
> >   	kref_init(&bo->kref);
> >   	kref_init(&bo->list_kref);
> > -	atomic_set(&bo->cpu_writers, 0);
> >   	INIT_LIST_HEAD(&bo->lru);
> >   	INIT_LIST_HEAD(&bo->ddestroy);
> >   	INIT_LIST_HEAD(&bo->swap);
> > @@ -1814,31 +1812,6 @@ int ttm_bo_wait(struct ttm_buffer_object
> > *bo,
> >   }
> >   EXPORT_SYMBOL(ttm_bo_wait);
> >   
> > -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool
> > no_wait)
> > -{
> > -	int ret = 0;
> > -
> > -	/*
> > -	 * Using ttm_bo_reserve makes sure the lru lists are updated.
> > -	 */
> > -
> > -	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
> > -	if (unlikely(ret != 0))
> > -		return ret;
> > -	ret = ttm_bo_wait(bo, true, no_wait);
> > -	if (likely(ret == 0))
> > -		atomic_inc(&bo->cpu_writers);
> > -	ttm_bo_unreserve(bo);
> > -	return ret;
> > -}
> > -EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
> > -
> > -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
> > -{
> > -	atomic_dec(&bo->cpu_writers);
> > -}
> > -EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
> > -
> >   /**
> >    * A buffer object shrink method that tries to swap out the first
> >    * buffer object on the bo_global::swap_lru list.
> > diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c
> > b/drivers/gpu/drm/ttm/ttm_bo_util.c
> > index 895d77d799e4..6f43f1f0de7c 100644
> > --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> > +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> > @@ -511,7 +511,6 @@ static int ttm_buffer_object_transfer(struct
> > ttm_buffer_object *bo,
> >   	mutex_init(&fbo->base.wu_mutex);
> >   	fbo->base.moving = NULL;
> >   	drm_vma_node_reset(&fbo->base.vma_node);
> > -	atomic_set(&fbo->base.cpu_writers, 0);
> >   
> >   	kref_init(&fbo->base.list_kref);
> >   	kref_init(&fbo->base.kref);
> > diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> > b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> > index 957ec375a4ba..80fa52b36d5c 100644
> > --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> > +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
> > @@ -113,12 +113,7 @@ int ttm_eu_reserve_buffers(struct
> > ww_acquire_ctx *ticket,
> >   		struct ttm_buffer_object *bo = entry->bo;
> >   
> >   		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL),
> > ticket);
> > -		if (!ret && unlikely(atomic_read(&bo->cpu_writers) >
> > 0)) {
> > -			reservation_object_unlock(bo->resv);
> > -
> > -			ret = -EBUSY;
> > -
> > -		} else if (ret == -EALREADY && dups) {
> > +		if (ret == -EALREADY && dups) {
> >   			struct ttm_validate_buffer *safe = entry;
> >   			entry = list_prev_entry(entry, head);
> >   			list_del(&safe->head);
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> > b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> > index 5d5c2bce01f3..457861c5047f 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
> > @@ -565,7 +565,7 @@ static void vmw_user_bo_ref_obj_release(struct
> > ttm_base_object *base,
> >   
> >   	switch (ref_type) {
> >   	case TTM_REF_SYNCCPU_WRITE:
> > -		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
> > +		atomic_dec(&user_bo->vbo.cpu_writers);
> >   		break;
> >   	default:
> >   		WARN_ONCE(true, "Undefined buffer object reference
> > release.\n");
> > @@ -681,12 +681,12 @@ static int vmw_user_bo_synccpu_grab(struct
> > vmw_user_buffer_object *user_bo,
> >   				    struct ttm_object_file *tfile,
> >   				    uint32_t flags)
> >   {
> > +	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
> >   	struct ttm_buffer_object *bo = &user_bo->vbo.base;
> >   	bool existed;
> >   	int ret;
> >   
> >   	if (flags & drm_vmw_synccpu_allow_cs) {
> > -		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
> >   		long lret;
> >   
> >   		lret = reservation_object_wait_timeout_rcu
> > @@ -699,15 +699,20 @@ static int vmw_user_bo_synccpu_grab(struct
> > vmw_user_buffer_object *user_bo,
> >   		return 0;
> >   	}
> >   
> > -	ret = ttm_bo_synccpu_write_grab
> > -		(bo, !!(flags & drm_vmw_synccpu_dontblock));
> > +	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
> > +	if (unlikely(ret != 0))
> > +		return ret;
> > +	ret = ttm_bo_wait(bo, true, nonblock);
> > +	if (likely(ret == 0))
> > +		atomic_inc(&user_bo->vbo.cpu_writers);
> > +	ttm_bo_unreserve(bo);
> >   	if (unlikely(ret != 0))
> >   		return ret;
> >   
> >   	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
> >   				 TTM_REF_SYNCCPU_WRITE, &existed,
> > false);
> >   	if (ret != 0 || existed)
> > -		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
> > +		atomic_dec(&user_bo->vbo.cpu_writers);
> >   
> >   	return ret;
> >   }
> > @@ -731,6 +736,26 @@ static int
> > vmw_user_bo_synccpu_release(uint32_t handle,
> >   	return 0;
> >   }
> >   
> > +/**
> > + * vmw_user_bo_verify_synccpu - Verify if grab for CPU access
> > exists
> > + *
> > + * @list: list of ttm_validate_buffer objects
> > + *
> > + * Return:
> > + * -EBUSY if a CPU grab is found, 0 otherwise.
> > + */
> > +int vmw_user_bo_verify_synccpu(struct list_head *list)
> > +{
> > +	struct ttm_validate_buffer *entry;
> > +
> > +	list_for_each_entry(entry, list, head) {
> > +		struct vmw_buffer_object *bo = vmw_buffer_object(entry-
> > >bo);
> > +
> > +                if (unlikely(atomic_read(&bo->cpu_writers) > 0))
> > +			return -EBUSY;
> > +	}
> > +	return 0;
> > +}
> >   
> >   /**
> >    * vmw_user_bo_synccpu_ioctl - ioctl function implementing the
> > synccpu
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > index 96983c47fb40..b3988c957f0e 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
> > @@ -90,6 +90,7 @@ struct vmw_buffer_object {
> >   	struct ttm_buffer_object base;
> >   	struct list_head res_list;
> >   	s32 pin_count;
> > +	atomic_t cpu_writers;
> >   	/* Not ref-counted.  Protected by binding_mutex */
> >   	struct vmw_resource *dx_query_ctx;
> >   	/* Protected by reservation */
> > @@ -749,6 +750,7 @@ extern int vmw_bo_init(struct vmw_private
> > *dev_priv,
> >   		       void (*bo_free)(struct ttm_buffer_object *bo));
> >   extern int vmw_user_bo_verify_access(struct ttm_buffer_object
> > *bo,
> >   				     struct ttm_object_file *tfile);
> > +extern int vmw_user_bo_verify_synccpu(struct list_head *list);
> >   extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
> >   			     struct ttm_object_file *tfile,
> >   			     uint32_t size,
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > index 2ff7ba04d8c8..271217f6e3a3 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
> > @@ -3719,6 +3719,10 @@ int vmw_execbuf_process(struct drm_file
> > *file_priv,
> >   	if (unlikely(ret != 0))
> >   		goto out_err_nores;
> >   
> > +	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
> > +	if (unlikely(ret != 0))
> > +		goto out_err;
> > +
> >   	ret = vmw_validation_bo_validate(&val_ctx, true);
> >   	if (unlikely(ret != 0))
> >   		goto out_err;
> > @@ -3918,6 +3922,10 @@ void __vmw_execbuf_release_pinned_bo(struct
> > vmw_private *dev_priv,
> >   	if (ret)
> >   		goto out_no_reserve;
> >   
> > +	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
> > +	if (unlikely(ret != 0))
> > +		goto out_no_emit;
> > +
> >   	if (dev_priv->query_cid_valid) {
> >   		BUG_ON(fence != NULL);
> >   		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv-
> > >query_cid);
> > diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > index 1d38a8b2f2ec..c55da6f0ff03 100644
> > --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
> > @@ -469,6 +469,10 @@ vmw_resource_check_buffer(struct
> > ww_acquire_ctx *ticket,
> >   	if (unlikely(ret != 0))
> >   		goto out_no_reserve;
> >   
> > +	ret = vmw_user_bo_verify_synccpu(&val_list);
> > +	if (unlikely(ret != 0))
> > +		goto out_no_validate;
> > +
> >   	if (res->func->needs_backup && list_empty(&res->mob_head))
> >   		return 0;
> >   
> > diff --git a/include/drm/ttm/ttm_bo_api.h
> > b/include/drm/ttm/ttm_bo_api.h
> > index 49d9cdfc58f2..e358e965d4c0 100644
> > --- a/include/drm/ttm/ttm_bo_api.h
> > +++ b/include/drm/ttm/ttm_bo_api.h
> > @@ -145,7 +145,6 @@ struct ttm_tt;
> >    * holds a pointer to a persistent shmem object.
> >    * @ttm: TTM structure holding system pages.
> >    * @evicted: Whether the object was evicted without user-space
> > knowing.
> > - * @cpu_writes: For synchronization. Number of cpu writers.
> >    * @lru: List head for the lru list.
> >    * @ddestroy: List head for the delayed destroy list.
> >    * @swap: List head for swap LRU list.
> > @@ -195,11 +194,6 @@ struct ttm_buffer_object {
> >   	struct ttm_tt *ttm;
> >   	bool evicted;
> >   
> > -	/**
> > -	 * Members protected by the bo::reserved lock only when written
> > to.
> > -	 */
> > -
> > -	atomic_t cpu_writers;
> >   
> >   	/**
> >   	 * Members protected by the bdev::lru_lock.
> > @@ -443,31 +437,6 @@ void ttm_bo_unlock_delayed_workqueue(struct
> > ttm_bo_device *bdev, int resched);
> >   bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
> >   			      const struct ttm_place *place);
> >   
> > -/**
> > - * ttm_bo_synccpu_write_grab
> > - *
> > - * @bo: The buffer object:
> > - * @no_wait: Return immediately if buffer is busy.
> > - *
> > - * Synchronizes a buffer object for CPU RW access. This means
> > - * command submission that affects the buffer will return -EBUSY
> > - * until ttm_bo_synccpu_write_release is called.
> > - *
> > - * Returns
> > - * -EBUSY if the buffer is busy and no_wait is true.
> > - * -ERESTARTSYS if interrupted by a signal.
> > - */
> > -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool
> > no_wait);
> > -
> > -/**
> > - * ttm_bo_synccpu_write_release:
> > - *
> > - * @bo : The buffer object.
> > - *
> > - * Releases a synccpu lock.
> > - */
> > -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
> > -
> >   /**
> >    * ttm_bo_acc_size
> >    *
diff mbox series

Patch

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index c7de667d482a..4ec055ffd6a7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -153,7 +153,6 @@  static void ttm_bo_release_list(struct kref *list_kref)
 
 	BUG_ON(kref_read(&bo->list_kref));
 	BUG_ON(kref_read(&bo->kref));
-	BUG_ON(atomic_read(&bo->cpu_writers));
 	BUG_ON(bo->mem.mm_node != NULL);
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));
@@ -1308,7 +1307,6 @@  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 
 	kref_init(&bo->kref);
 	kref_init(&bo->list_kref);
-	atomic_set(&bo->cpu_writers, 0);
 	INIT_LIST_HEAD(&bo->lru);
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
@@ -1814,31 +1812,6 @@  int ttm_bo_wait(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_wait);
 
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
-{
-	int ret = 0;
-
-	/*
-	 * Using ttm_bo_reserve makes sure the lru lists are updated.
-	 */
-
-	ret = ttm_bo_reserve(bo, true, no_wait, NULL);
-	if (unlikely(ret != 0))
-		return ret;
-	ret = ttm_bo_wait(bo, true, no_wait);
-	if (likely(ret == 0))
-		atomic_inc(&bo->cpu_writers);
-	ttm_bo_unreserve(bo);
-	return ret;
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
-
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
-{
-	atomic_dec(&bo->cpu_writers);
-}
-EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
-
 /**
  * A buffer object shrink method that tries to swap out the first
  * buffer object on the bo_global::swap_lru list.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 895d77d799e4..6f43f1f0de7c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -511,7 +511,6 @@  static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	mutex_init(&fbo->base.wu_mutex);
 	fbo->base.moving = NULL;
 	drm_vma_node_reset(&fbo->base.vma_node);
-	atomic_set(&fbo->base.cpu_writers, 0);
 
 	kref_init(&fbo->base.list_kref);
 	kref_init(&fbo->base.kref);
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 957ec375a4ba..80fa52b36d5c 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -113,12 +113,7 @@  int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
 		struct ttm_buffer_object *bo = entry->bo;
 
 		ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
-		if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			reservation_object_unlock(bo->resv);
-
-			ret = -EBUSY;
-
-		} else if (ret == -EALREADY && dups) {
+		if (ret == -EALREADY && dups) {
 			struct ttm_validate_buffer *safe = entry;
 			entry = list_prev_entry(entry, head);
 			list_del(&safe->head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 5d5c2bce01f3..457861c5047f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -565,7 +565,7 @@  static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base,
 
 	switch (ref_type) {
 	case TTM_REF_SYNCCPU_WRITE:
-		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		atomic_dec(&user_bo->vbo.cpu_writers);
 		break;
 	default:
 		WARN_ONCE(true, "Undefined buffer object reference release.\n");
@@ -681,12 +681,12 @@  static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 				    struct ttm_object_file *tfile,
 				    uint32_t flags)
 {
+	bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 	struct ttm_buffer_object *bo = &user_bo->vbo.base;
 	bool existed;
 	int ret;
 
 	if (flags & drm_vmw_synccpu_allow_cs) {
-		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);
 		long lret;
 
 		lret = reservation_object_wait_timeout_rcu
@@ -699,15 +699,20 @@  static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo,
 		return 0;
 	}
 
-	ret = ttm_bo_synccpu_write_grab
-		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	ret = ttm_bo_reserve(bo, true, nonblock, NULL);
+	if (unlikely(ret != 0))
+		return ret;
+	ret = ttm_bo_wait(bo, true, nonblock);
+	if (likely(ret == 0))
+		atomic_inc(&user_bo->vbo.cpu_writers);
+	ttm_bo_unreserve(bo);
 	if (unlikely(ret != 0))
 		return ret;
 
 	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
 				 TTM_REF_SYNCCPU_WRITE, &existed, false);
 	if (ret != 0 || existed)
-		ttm_bo_synccpu_write_release(&user_bo->vbo.base);
+		atomic_dec(&user_bo->vbo.cpu_writers);
 
 	return ret;
 }
@@ -731,6 +736,26 @@  static int vmw_user_bo_synccpu_release(uint32_t handle,
 	return 0;
 }
 
+/**
+ * vmw_user_bo_verify_synccpu - Check that no buffer has an active CPU grab
+ *
+ * @list: List of ttm_validate_buffer objects to check.
+ *
+ * Return:
+ * -EBUSY if a synccpu CPU-access grab is found on any buffer, 0 otherwise.
+ */
+int vmw_user_bo_verify_synccpu(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct vmw_buffer_object *bo = vmw_buffer_object(entry->bo);
+
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0))
+			return -EBUSY;
+	}
+	return 0;
+}
 
 /**
  * vmw_user_bo_synccpu_ioctl - ioctl function implementing the synccpu
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 96983c47fb40..b3988c957f0e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -90,6 +90,7 @@  struct vmw_buffer_object {
 	struct ttm_buffer_object base;
 	struct list_head res_list;
 	s32 pin_count;
+	atomic_t cpu_writers;
 	/* Not ref-counted.  Protected by binding_mutex */
 	struct vmw_resource *dx_query_ctx;
 	/* Protected by reservation */
@@ -749,6 +750,7 @@  extern int vmw_bo_init(struct vmw_private *dev_priv,
 		       void (*bo_free)(struct ttm_buffer_object *bo));
 extern int vmw_user_bo_verify_access(struct ttm_buffer_object *bo,
 				     struct ttm_object_file *tfile);
+extern int vmw_user_bo_verify_synccpu(struct list_head *list);
 extern int vmw_user_bo_alloc(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     uint32_t size,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 2ff7ba04d8c8..271217f6e3a3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3719,6 +3719,10 @@  int vmw_execbuf_process(struct drm_file *file_priv,
 	if (unlikely(ret != 0))
 		goto out_err_nores;
 
+	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
+	if (unlikely(ret != 0))
+		goto out_err;
+
 	ret = vmw_validation_bo_validate(&val_ctx, true);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -3918,6 +3922,10 @@  void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
 	if (ret)
 		goto out_no_reserve;
 
+	ret = vmw_user_bo_verify_synccpu(&val_ctx.bo_list);
+	if (unlikely(ret != 0))
+		goto out_no_emit;
+
 	if (dev_priv->query_cid_valid) {
 		BUG_ON(fence != NULL);
 		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1d38a8b2f2ec..c55da6f0ff03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -469,6 +469,10 @@  vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
+	ret = vmw_user_bo_verify_synccpu(&val_list);
+	if (unlikely(ret != 0))
+		goto out_no_validate;
+
 	if (res->func->needs_backup && list_empty(&res->mob_head))
 		return 0;
 
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 49d9cdfc58f2..e358e965d4c0 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -145,7 +145,6 @@  struct ttm_tt;
  * holds a pointer to a persistent shmem object.
  * @ttm: TTM structure holding system pages.
  * @evicted: Whether the object was evicted without user-space knowing.
- * @cpu_writes: For synchronization. Number of cpu writers.
  * @lru: List head for the lru list.
  * @ddestroy: List head for the delayed destroy list.
  * @swap: List head for swap LRU list.
@@ -195,11 +194,6 @@  struct ttm_buffer_object {
 	struct ttm_tt *ttm;
 	bool evicted;
 
-	/**
-	 * Members protected by the bo::reserved lock only when written to.
-	 */
-
-	atomic_t cpu_writers;
 
 	/**
 	 * Members protected by the bdev::lru_lock.
@@ -443,31 +437,6 @@  void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
 bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 			      const struct ttm_place *place);
 
-/**
- * ttm_bo_synccpu_write_grab
- *
- * @bo: The buffer object:
- * @no_wait: Return immediately if buffer is busy.
- *
- * Synchronizes a buffer object for CPU RW access. This means
- * command submission that affects the buffer will return -EBUSY
- * until ttm_bo_synccpu_write_release is called.
- *
- * Returns
- * -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
-
-/**
- * ttm_bo_synccpu_write_release:
- *
- * @bo : The buffer object.
- *
- * Releases a synccpu lock.
- */
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
-
 /**
  * ttm_bo_acc_size
  *