[3/3] drm/ttm: remove pointers to globals

Message ID 20190925105530.2261-3-christian.koenig@amd.com (mailing list archive)
State New, archived
Series [1/3] drm/ttm: fix busy reference in ttm_mem_evict_first

Commit Message

Christian König Sept. 25, 2019, 10:55 a.m. UTC
As the name says, global memory and bo accounting is global. So it doesn't make
too much sense to have pointers to global structures all around the code.

Signed-off-by: Christian König <christian.koenig@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c   |  9 ++--
 drivers/gpu/drm/drm_gem_vram_helper.c    |  5 +-
 drivers/gpu/drm/qxl/qxl_release.c        |  7 +--
 drivers/gpu/drm/qxl/qxl_ttm.c            |  7 +--
 drivers/gpu/drm/ttm/ttm_agp_backend.c    |  2 +-
 drivers/gpu/drm/ttm/ttm_bo.c             | 65 +++++++++++-------------
 drivers/gpu/drm/ttm/ttm_bo_util.c        |  2 +-
 drivers/gpu/drm/ttm/ttm_bo_vm.c          |  4 +-
 drivers/gpu/drm/ttm/ttm_execbuf_util.c   | 25 +++------
 drivers/gpu/drm/ttm/ttm_memory.c         |  2 +-
 drivers/gpu/drm/ttm/ttm_page_alloc.c     |  4 +-
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c |  4 +-
 include/drm/ttm/ttm_bo_driver.h          |  6 +--
 include/drm/ttm/ttm_memory.h             |  1 -
 15 files changed, 57 insertions(+), 88 deletions(-)
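
For driver code the change is mechanical: instead of chasing a per-device pointer
to the single global instance, callers name the global directly. A minimal
before/after sketch of the pattern the diff applies throughout (the
lock/move/unlock sequence is lifted from the patch itself):

	/* Before: indirect access through the device's pointer. */
	spin_lock(&bo->bdev->glob->lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&bo->bdev->glob->lru_lock);

	/* After: reference the one global instance directly. */
	spin_lock(&ttm_bo_glob.lru_lock);
	ttm_bo_move_to_lru_tail(bo, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);

Because drivers such as amdgpu and qxl now reference ttm_bo_glob directly, the
patch also adds EXPORT_SYMBOL(ttm_bo_glob) so that modular drivers can still link.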

Comments

Thomas Hellström (Intel) Sept. 25, 2019, 11:56 a.m. UTC | #1
On 9/25/19 12:55 PM, Christian König wrote:
> As the name says, global memory and bo accounting is global. So it doesn't make
> too much sense to have pointers to global structures all around the code.
>
> Signed-off-by: Christian König <christian.koenig@amd.com>

lgtm.

Reviewed-by: Thomas Hellström <thellstrom@vmware.com>

Patch

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index d79ab1da9e07..c392f7672e06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -71,7 +71,7 @@ 
  */
 static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
 {
-	struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page;
+	struct page *dummy_page = ttm_bo_glob.dummy_read_page;
 
 	if (adev->dummy_page_addr)
 		return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index f555843daef0..cca1887465a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -600,21 +600,20 @@  void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo)
 void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm)
 {
-	struct ttm_bo_global *glob = adev->mman.bdev.glob;
 	struct amdgpu_vm_bo_base *bo_base;
 
 #if 0
 	if (vm->bulk_moveable) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&ttm_bo_glob.lru_lock);
 		ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 		return;
 	}
 #endif
 
 	memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	list_for_each_entry(bo_base, &vm->idle, vm_status) {
 		struct amdgpu_bo *bo = bo_base->bo;
 
@@ -626,7 +625,7 @@  void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
 						&vm->lru_bulk_move);
 	}
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	vm->bulk_moveable = true;
 }
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 49588de88959..3695168206b5 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -896,12 +896,11 @@  static int drm_vram_mm_debugfs(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_vram_mm *vmm = node->minor->dev->vram_mm;
 	struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv;
-	struct ttm_bo_global *glob = vmm->bdev.glob;
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	drm_mm_print(mm, &p);
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 71f1b51a1350..a0d388b7b53b 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -429,7 +429,6 @@  void qxl_release_unmap(struct qxl_device *qdev,
 void qxl_release_fence_buffer_objects(struct qxl_release *release)
 {
 	struct ttm_buffer_object *bo;
-	struct ttm_bo_global *glob;
 	struct ttm_bo_device *bdev;
 	struct ttm_validate_buffer *entry;
 	struct qxl_device *qdev;
@@ -451,9 +450,7 @@  void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		       release->id | 0xf0000000, release->base.seqno);
 	trace_dma_fence_emit(&release->base);
 
-	glob = bdev->glob;
-
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 
 	list_for_each_entry(entry, &release->bos, head) {
 		bo = entry->bo;
@@ -462,7 +459,7 @@  void qxl_release_fence_buffer_objects(struct qxl_release *release)
 		ttm_bo_move_to_lru_tail(bo, NULL);
 		dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	dma_resv_ctx_fini(&release->ticket);
 }
 
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index cbc6c2ba8630..8d779ca2682a 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -369,14 +369,11 @@  static int qxl_mm_dump_table(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
 	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	struct qxl_device *rdev = dev->dev_private;
-	struct ttm_bo_global *glob = rdev->mman.bdev.glob;
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	drm_mm_print(mm, &p);
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	return 0;
 }
 #endif
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index ea4d59eb8966..6050dc846894 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -51,7 +51,7 @@  struct ttm_agp_backend {
 static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 {
 	struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
-	struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page;
+	struct page *dummy_read_page = ttm_bo_glob.dummy_read_page;
 	struct drm_mm_node *node = bo_mem->mm_node;
 	struct agp_memory *mem;
 	int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index ab9967b84e0b..57cb77811dd5 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -51,6 +51,7 @@  static void ttm_bo_global_kobj_release(struct kobject *kobj);
 DEFINE_MUTEX(ttm_global_mutex);
 unsigned ttm_bo_glob_use_count;
 struct ttm_bo_global ttm_bo_glob;
+EXPORT_SYMBOL(ttm_bo_glob);
 
 static struct attribute ttm_bo_count = {
 	.name = "bo_count",
@@ -148,7 +149,6 @@  static void ttm_bo_release_list(struct kref *list_kref)
 {
 	struct ttm_buffer_object *bo =
 	    container_of(list_kref, struct ttm_buffer_object, list_kref);
-	struct ttm_bo_device *bdev = bo->bdev;
 	size_t acc_size = bo->acc_size;
 
 	BUG_ON(kref_read(&bo->list_kref));
@@ -158,13 +158,13 @@  static void ttm_bo_release_list(struct kref *list_kref)
 	BUG_ON(!list_empty(&bo->lru));
 	BUG_ON(!list_empty(&bo->ddestroy));
 	ttm_tt_destroy(bo->ttm);
-	atomic_dec(&bo->bdev->glob->bo_count);
+	atomic_dec(&ttm_bo_glob.bo_count);
 	dma_fence_put(bo->moving);
 	if (!ttm_bo_uses_embedded_gem_object(bo))
 		dma_resv_fini(&bo->base._resv);
 	mutex_destroy(&bo->wu_mutex);
 	bo->destroy(bo);
-	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
+	ttm_mem_global_free(&ttm_mem_glob, acc_size);
 }
 
 static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
@@ -187,7 +187,7 @@  static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
 
 	if (bo->ttm && !(bo->ttm->page_flags &
 			 (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) {
-		list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]);
+		list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
 		kref_get(&bo->list_kref);
 	}
 }
@@ -294,7 +294,7 @@  void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk)
 		dma_resv_assert_held(pos->first->base.resv);
 		dma_resv_assert_held(pos->last->base.resv);
 
-		lru = &pos->first->bdev->glob->swap_lru[i];
+		lru = &ttm_bo_glob.swap_lru[i];
 		list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap);
 	}
 }
@@ -458,7 +458,6 @@  static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
 static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bdev->glob;
 	int ret;
 
 	ret = ttm_bo_individualize_resv(bo);
@@ -468,16 +467,16 @@  static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		 */
 		dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
 						    30 * HZ);
-		spin_lock(&glob->lru_lock);
+		spin_lock(&ttm_bo_glob.lru_lock);
 		goto error;
 	}
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
 	if (!ret) {
 		if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
 			ttm_bo_del_from_lru(bo);
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&ttm_bo_glob.lru_lock);
 			if (bo->base.resv != &bo->base._resv)
 				dma_resv_unlock(&bo->base._resv);
 
@@ -506,7 +505,7 @@  static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 error:
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	schedule_delayed_work(&bdev->wq,
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
@@ -529,7 +528,6 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			       bool interruptible, bool no_wait_gpu,
 			       bool unlock_resv)
 {
-	struct ttm_bo_global *glob = bo->bdev->glob;
 	struct dma_resv *resv;
 	int ret;
 
@@ -548,7 +546,7 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 
 		if (unlock_resv)
 			dma_resv_unlock(bo->base.resv);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 
 		lret = dma_resv_wait_timeout_rcu(resv, true,
 							   interruptible,
@@ -559,7 +557,7 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 		else if (lret == 0)
 			return -EBUSY;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&ttm_bo_glob.lru_lock);
 		if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
 			/*
 			 * We raced, and lost, someone else holds the reservation now,
@@ -569,7 +567,7 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 			 * delayed destruction would succeed, so just return success
 			 * here.
 			 */
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&ttm_bo_glob.lru_lock);
 			return 0;
 		}
 		ret = 0;
@@ -578,7 +576,7 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
 		if (unlock_resv)
 			dma_resv_unlock(bo->base.resv);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 		return ret;
 	}
 
@@ -586,7 +584,7 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	list_del_init(&bo->ddestroy);
 	kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	ttm_bo_cleanup_memtype_use(bo);
 
 	if (unlock_resv)
@@ -601,7 +599,7 @@  static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
  */
 static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 {
-	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
 	struct list_head removed;
 	bool empty;
 
@@ -822,13 +820,12 @@  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 			       struct ww_acquire_ctx *ticket)
 {
 	struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	bool locked = false;
 	unsigned i;
 	int ret;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
 		list_for_each_entry(bo, &man->lru[i], lru) {
 			bool busy;
@@ -860,7 +857,7 @@  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 	if (!bo) {
 		if (busy_bo)
 			kref_get(&busy_bo->list_kref);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 		ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
 		if (busy_bo)
 			kref_put(&busy_bo->list_kref, ttm_bo_release_list);
@@ -876,7 +873,7 @@  static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	ret = ttm_bo_evict(bo, ctx);
 	if (locked)
@@ -1042,10 +1039,10 @@  static int ttm_bo_mem_placement(struct ttm_buffer_object *bo,
 	mem->mem_type = mem_type;
 	mem->placement = cur_flags;
 
-	spin_lock(&bo->bdev->glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	ttm_bo_del_from_lru(bo);
 	ttm_bo_add_mem_to_lru(bo, mem);
-	spin_unlock(&bo->bdev->glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	return 0;
 }
@@ -1132,9 +1129,9 @@  int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 
 error:
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) {
-		spin_lock(&bo->bdev->glob->lru_lock);
+		spin_lock(&ttm_bo_glob.lru_lock);
 		ttm_bo_move_to_lru_tail(bo, NULL);
-		spin_unlock(&bo->bdev->glob->lru_lock);
+		spin_unlock(&ttm_bo_glob.lru_lock);
 	}
 
 	return ret;
@@ -1258,9 +1255,9 @@  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 			 struct dma_resv *resv,
 			 void (*destroy) (struct ttm_buffer_object *))
 {
+	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
 	int ret = 0;
 	unsigned long num_pages;
-	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
 	bool locked;
 
 	ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx);
@@ -1321,7 +1318,7 @@  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		dma_resv_init(&bo->base._resv);
 		drm_vma_node_reset(&bo->base.vma_node);
 	}
-	atomic_inc(&bo->bdev->glob->bo_count);
+	atomic_inc(&ttm_bo_glob.bo_count);
 
 	/*
 	 * For ttm_bo_type_device buffers, allocate
@@ -1351,9 +1348,9 @@  int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
 		return ret;
 	}
 
-	spin_lock(&bdev->glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	ttm_bo_move_to_lru_tail(bo, NULL);
-	spin_unlock(&bdev->glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	return ret;
 }
@@ -1451,7 +1448,7 @@  static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 		.flags = TTM_OPT_FLAG_FORCE_ALLOC
 	};
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_bo_global *glob = &ttm_bo_glob;
 	struct dma_fence *fence;
 	int ret;
 	unsigned i;
@@ -1620,8 +1617,6 @@  static int ttm_bo_global_init(void)
 		goto out;
 
 	spin_lock_init(&glob->lru_lock);
-	glob->mem_glob = &ttm_mem_glob;
-	glob->mem_glob->bo_glob = glob;
 	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
 
 	if (unlikely(glob->dummy_read_page == NULL)) {
@@ -1645,10 +1640,10 @@  static int ttm_bo_global_init(void)
 
 int ttm_bo_device_release(struct ttm_bo_device *bdev)
 {
+	struct ttm_bo_global *glob = &ttm_bo_glob;
 	int ret = 0;
 	unsigned i = TTM_NUM_MEM_TYPES;
 	struct ttm_mem_type_manager *man;
-	struct ttm_bo_global *glob = bdev->glob;
 
 	while (i--) {
 		man = &bdev->man[i];
@@ -1717,7 +1712,6 @@  int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = mapping;
-	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 	mutex_lock(&ttm_global_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
@@ -1921,8 +1915,7 @@  void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 		.no_wait_gpu = false
 	};
 
-	while (ttm_bo_swapout(bdev->glob, &ctx) == 0)
-		;
+	while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0);
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
 
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index fe81c565e7ef..3940afa6f3ad 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -503,7 +503,7 @@  static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	atomic_inc(&bo->bdev->glob->bo_count);
+	atomic_inc(&ttm_bo_glob.bo_count);
 	INIT_LIST_HEAD(&fbo->base.ddestroy);
 	INIT_LIST_HEAD(&fbo->base.lru);
 	INIT_LIST_HEAD(&fbo->base.swap);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 4aa007edffb0..263f4c33079a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -177,9 +177,9 @@  static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 		}
 
 		if (bo->moving != moving) {
-			spin_lock(&bdev->glob->lru_lock);
+			spin_lock(&ttm_bo_glob.lru_lock);
 			ttm_bo_move_to_lru_tail(bo, NULL);
-			spin_unlock(&bdev->glob->lru_lock);
+			spin_unlock(&ttm_bo_glob.lru_lock);
 		}
 		dma_fence_put(moving);
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index b09c2c8caf13..8f1dcdd0c3f5 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -37,15 +37,11 @@  void ttm_eu_backoff_reservation(struct dma_resv_ctx *ticket,
 				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
-	struct ttm_bo_global *glob;
 
 	if (list_empty(list))
 		return;
 
-	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
@@ -53,7 +49,7 @@  void ttm_eu_backoff_reservation(struct dma_resv_ctx *ticket,
 		if (!ticket)
 			dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 
 	if (ticket) {
 		dma_resv_ctx_unlock_all(ticket);
@@ -79,15 +75,11 @@  int ttm_eu_reserve_buffers(struct dma_resv_ctx *ticket,
 			   struct list_head *dups)
 {
 	struct ttm_validate_buffer *entry;
-	struct ttm_bo_global *glob;
 	int ret;
 
 	if (list_empty(list))
 		return 0;
 
-	entry = list_first_entry(list, struct ttm_validate_buffer, head);
-	glob = entry->bo->bdev->glob;
-
 	if (ticket)
 		dma_resv_ctx_init(ticket);
 
@@ -153,19 +145,14 @@  void ttm_eu_fence_buffer_objects(struct dma_resv_ctx *ticket,
 				 struct dma_fence *fence)
 {
 	struct ttm_validate_buffer *entry;
-	struct ttm_buffer_object *bo;
-	struct ttm_bo_global *glob;
 
 	if (list_empty(list))
 		return;
 
-	bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
-	glob = bo->bdev->glob;
-
-	spin_lock(&glob->lru_lock);
-
+	spin_lock(&ttm_bo_glob.lru_lock);
 	list_for_each_entry(entry, list, head) {
-		bo = entry->bo;
+		struct ttm_buffer_object *bo = entry->bo;
+
 		if (entry->num_shared)
 			dma_resv_add_shared_fence(bo->base.resv, fence);
 		else
@@ -174,7 +161,7 @@  void ttm_eu_fence_buffer_objects(struct dma_resv_ctx *ticket,
 		if (!ticket)
 			dma_resv_unlock(bo->base.resv);
 	}
-	spin_unlock(&glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	if (ticket) {
 		dma_resv_ctx_unlock_all(ticket);
 		dma_resv_ctx_fini(ticket);
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index 8617958b7ae6..acd63b70d814 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -275,7 +275,7 @@  static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq,
 
 	while (ttm_zones_above_swap_target(glob, from_wq, extra)) {
 		spin_unlock(&glob->lock);
-		ret = ttm_bo_swapout(glob->bo_glob, ctx);
+		ret = ttm_bo_swapout(&ttm_bo_glob, ctx);
 		spin_lock(&glob->lock);
 		if (unlikely(ret != 0))
 			break;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 627f8dc91d0e..b40a4678c296 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -1028,7 +1028,7 @@  void ttm_page_alloc_fini(void)
 static void
 ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 {
-	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
 	unsigned i;
 
 	if (mem_count_update == 0)
@@ -1049,7 +1049,7 @@  ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update)
 
 int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
+	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
 	unsigned i;
 	int ret;
 
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index d594f7520b7b..9cb588a93d43 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -882,8 +882,8 @@  static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
 			struct ttm_operation_ctx *ctx)
 {
+	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
 	struct ttm_tt *ttm = &ttm_dma->ttm;
-	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
 	unsigned long num_pages = ttm->num_pages;
 	struct dma_pool *pool;
 	struct dma_page *d_page;
@@ -987,8 +987,8 @@  EXPORT_SYMBOL_GPL(ttm_dma_populate);
 /* Put all pages in pages list to correct pool to wait for reuse */
 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 {
+	struct ttm_mem_global *mem_glob = &ttm_mem_glob;
 	struct ttm_tt *ttm = &ttm_dma->ttm;
-	struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob;
 	struct dma_pool *pool;
 	struct dma_page *d_page, *next;
 	enum pool_type type;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4332cc036483..381961979302 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -413,7 +413,6 @@  extern struct ttm_bo_global {
 	 */
 
 	struct kobject kobj;
-	struct ttm_mem_global *mem_glob;
 	struct page *dummy_read_page;
 	spinlock_t lru_lock;
 
@@ -457,7 +456,6 @@  struct ttm_bo_device {
 	 * Constant after bo device init / atomic.
 	 */
 	struct list_head device_list;
-	struct ttm_bo_global *glob;
 	struct ttm_bo_driver *driver;
 	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
 
@@ -758,9 +756,9 @@  static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
  */
 static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
 {
-	spin_lock(&bo->bdev->glob->lru_lock);
+	spin_lock(&ttm_bo_glob.lru_lock);
 	ttm_bo_move_to_lru_tail(bo, NULL);
-	spin_unlock(&bo->bdev->glob->lru_lock);
+	spin_unlock(&ttm_bo_glob.lru_lock);
 	dma_resv_unlock(bo->base.resv);
 }
 
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 3ff48a0a2d7b..c78ea99c42cf 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -65,7 +65,6 @@ 
 struct ttm_mem_zone;
 extern struct ttm_mem_global {
 	struct kobject kobj;
-	struct ttm_bo_global *bo_glob;
 	struct workqueue_struct *swap_queue;
 	struct work_struct work;
 	spinlock_t lock;