@@ -860,7 +860,7 @@ struct radeon_vm {
unsigned id;
/* contains the page directory */
- struct radeon_sa_bo *page_directory;
+ struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
/* array of page tables, one for each page directory entry */
@@ -1008,6 +1008,7 @@ struct radeon_cs_parser {
unsigned nrelocs;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
+ struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
@@ -2790,9 +2791,12 @@ extern void radeon_program_register_sequence(struct radeon_device *rdev,
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
+struct radeon_bo_list *radeon_vm_add_bos(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct list_head *head);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
@@ -109,6 +109,11 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
radeon_bo_list_add_object(&p->relocs[i].lobj,
&p->validated);
}
+
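+	/* for VM submissions the page directory BO has to join the validation list */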
+ if (p->cs_flags & RADEON_CS_USE_VM)
+ p->vm_bos = radeon_vm_add_bos(p->rdev, p->ib.vm,
+ &p->validated);
+
return radeon_bo_list_validate(&p->ticket, &p->validated, p->ring);
}
@@ -320,6 +325,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
kfree(parser->track);
kfree(parser->relocs);
kfree(parser->relocs_ptr);
+ kfree(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
kfree(parser->chunks);
@@ -359,24 +365,28 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
return r;
}
-static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
struct radeon_vm *vm)
{
- struct radeon_device *rdev = parser->rdev;
- struct radeon_bo_list *lobj;
- struct radeon_bo *bo;
- int r;
+ struct radeon_device *rdev = p->rdev;
+ int i, r;
- r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo, &rdev->ring_tmp_bo.bo->tbo.mem);
- if (r) {
+ r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+ &rdev->ring_tmp_bo.bo->tbo.mem);
+ if (r)
return r;
- }
- list_for_each_entry(lobj, &parser->validated, tv.head) {
- bo = lobj->bo;
- r = radeon_vm_bo_update(parser->rdev, vm, bo, &bo->tbo.mem);
- if (r) {
+
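+	/* walk the relocation list and update the VM mapping of each BO */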
+ for (i = 0; i < p->nrelocs; i++) {
+ struct radeon_bo *bo;
+
+ /* ignore duplicates */
+ if (p->relocs_ptr[i] != &p->relocs[i])
+ continue;
+
+ bo = p->relocs[i].robj;
+ r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+ if (r)
return r;
- }
}
return 0;
}
@@ -544,7 +544,9 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
return -ENOMEM;
}
- radeon_vm_init(rdev, &fpriv->vm);
+	r = radeon_vm_init(rdev, &fpriv->vm);
+	if (r) {
+		kfree(fpriv);
+		return r;
+	}
r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
if (r)
@@ -117,7 +117,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
- if (vm->page_directory == NULL)
+ if (vm->page_tables == NULL)
continue;
list_for_each_entry(bo_va, &vm->va, vm_list) {
@@ -143,12 +143,10 @@ static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_bo_va *bo_va;
int i;
- if (!vm->page_directory)
+ if (!vm->page_tables)
return;
list_del_init(&vm->list);
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
-
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
@@ -237,69 +235,19 @@ static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
- unsigned pd_size, pd_entries, pts_size;
- struct radeon_ib ib;
- int r;
+ unsigned pts_size;
- if (vm == NULL) {
+ if (vm == NULL)
return -EINVAL;
- }
- if (vm->page_directory != NULL) {
+ if (vm->page_tables != NULL)
return 0;
- }
-
- pd_size = radeon_vm_directory_size(rdev);
- pd_entries = radeon_vm_num_pdes(rdev);
-
-retry:
- r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
- &vm->page_directory, pd_size,
- RADEON_VM_PTB_ALIGN_SIZE, false);
- if (r == -ENOMEM) {
- r = radeon_vm_evict(rdev, vm);
- if (r)
- return r;
- goto retry;
-
- } else if (r) {
- return r;
- }
-
- vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
-
- /* Initially clear the page directory */
- r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
- NULL, pd_entries * 2 + 64);
- if (r) {
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
- return r;
- }
-
- ib.length_dw = 0;
-
- radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
- 0, pd_entries, 0, 0);
-
- radeon_semaphore_sync_to(ib.semaphore, vm->fence);
- r = radeon_ib_schedule(rdev, &ib, NULL);
- if (r) {
- radeon_ib_free(rdev, &ib);
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
- return r;
- }
- radeon_fence_unref(&vm->fence);
- vm->fence = radeon_fence_ref(ib.fence);
- radeon_ib_free(rdev, &ib);
- radeon_fence_unref(&vm->last_flush);
/* allocate page table array */
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
-
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
- radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
return -ENOMEM;
}
@@ -323,6 +271,33 @@ void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
}
/**
+ * radeon_vm_add_bos - add the vm BOs to a CS list
+ *
+ * @rdev: radeon_device pointer
+ * @vm: vm providing the BOs
+ * @head: head of CS validation list
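+ *
+ * Adds the page directory BO to the CS validation list and returns the
+ * allocated list entry (NULL on allocation failure); the CS parser frees
+ * it again after submission.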
+ */
+struct radeon_bo_list *radeon_vm_add_bos(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct list_head *head)
+{
+ struct radeon_bo_list *list;
+
+ list = kmalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
+ if (!list)
+ return NULL;
+
+	/* add the vm page directory to the list */
+ list[0].bo = vm->page_directory;
+ list[0].written = 1;
+ list[0].domain = RADEON_GEM_DOMAIN_VRAM;
+ list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
+ list[0].tv.bo = &vm->page_directory->tbo;
+ radeon_bo_list_add_object(&list[0], head);
+
+ return list;
+}
+
+/**
* radeon_vm_grab_id - allocate the next free VMID
*
* @rdev: radeon_device pointer
@@ -393,10 +368,14 @@ void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
int ring)
{
+ uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
+
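+	/* the page directory is a normal BO now and may have moved since the last flush */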
/* if we can't remember our last VM flush then flush now! */
/* XXX figure out why we have to flush all the time */
- if (!vm->last_flush || true)
+ if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
+ vm->pd_gpu_addr = pd_addr;
radeon_ring_vm_flush(rdev, ring, vm);
+ }
}
/**
@@ -668,7 +647,7 @@ retry:
return r;
}
- pde = vm->pd_gpu_addr + pt_idx * 8;
+ pde = radeon_bo_gpu_offset(vm->page_directory) + pt_idx * 8;
pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
@@ -800,7 +779,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
int r;
/* nothing to do if vm isn't bound */
- if (vm->page_directory == NULL)
+ if (vm->page_tables == NULL)
return 0;
bo_va = radeon_vm_bo_find(vm, bo);
@@ -957,8 +936,15 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
*
* Init @vm fields (cayman+).
*/
-void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
+ unsigned pd_size, pd_entries;
+ struct ttm_validate_buffer tv;
+ struct ww_acquire_ctx ticket;
+ struct list_head head;
+ struct radeon_ib ib;
+ int r;
+
vm->id = 0;
vm->fence = NULL;
vm->last_flush = NULL;
@@ -966,6 +952,68 @@ void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
+
+ pd_size = radeon_vm_directory_size(rdev);
+ pd_entries = radeon_vm_num_pdes(rdev);
+
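+	/* allocate the page directory as a normal BO in VRAM */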
+ r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
+ RADEON_GEM_DOMAIN_VRAM, NULL,
+ &vm->page_directory);
+ if (r)
+ return r;
+
+ /* Initially clear the page directory */
+
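+	/* reserve and validate the new BO through the TTM execbuf helpers so
+	 * it is actually placed in VRAM before the clearing IB runs
+	 */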
+ memset(&tv, 0, sizeof(tv));
+ tv.bo = &vm->page_directory->tbo;
+
+ INIT_LIST_HEAD(&head);
+ list_add(&tv.head, &head);
+
+ r = ttm_eu_reserve_buffers(&ticket, &head);
+ if (r)
+ goto error_free;
+
+ r = ttm_bo_validate(&vm->page_directory->tbo,
+ &vm->page_directory->placement,
+ true, false);
+ if (r)
+ goto error_unreserve;
+
+ vm->pd_gpu_addr = radeon_bo_gpu_offset(vm->page_directory);
+
+ r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
+ NULL, pd_entries * 2 + 64);
+ if (r)
+ goto error_unreserve;
+
+ ib.length_dw = 0;
+
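+	/* write pd_entries zeroed (invalid) PDEs starting at the directory address */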
+ radeon_asic_vm_set_page(rdev, &ib, vm->pd_gpu_addr,
+ 0, pd_entries, 0, 0);
+
+ radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
+ r = radeon_ib_schedule(rdev, &ib, NULL);
+ if (r)
+ goto error_unreserve;
+
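+	/* attach the clearing IB's fence to the page directory BO and unreserve it */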
+ ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
+ radeon_fence_unref(&vm->fence);
+ vm->fence = radeon_fence_ref(ib.fence);
+ radeon_fence_unref(&vm->last_flush);
+
+ radeon_ib_free(rdev, &ib);
+
+ return 0;
+
+error_unreserve:
+ ttm_eu_backoff_reservation(&ticket, &head);
+
+error_free:
+ radeon_bo_unref(&vm->page_directory);
+ vm->page_directory = NULL;
+	return r;
}
/**
@@ -1003,4 +1051,6 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
radeon_fence_unref(&vm->last_flush);
radeon_fence_unref(&vm->last_id_use);
mutex_unlock(&vm->mutex);
+
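+	/* the page directory is a real BO now and needs an explicit unref */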
+ radeon_bo_unref(&vm->page_directory);
}