
[RFC,v2,04/12] drm/i915/svm: Page table update support for SVM

Message ID 20191213215614.24558-5-niranjana.vishwanathapura@intel.com
State New, archived
Series drm/i915/svm: Add SVM support

Commit Message

Niranjana Vishwanathapura Dec. 13, 2019, 9:56 p.m. UTC
For the Shared Virtual Memory (SVM) system (SYS) allocator, there is no
backing buffer object (BO). Add support for binding a VA to PA mapping
in the device page table.
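
As an illustration only (not part of this patch), a caller that has already
built an sg_table for the CPU pages of a range might use the new helpers
roughly as follows; the example_* wrapper names are hypothetical:

static int example_svm_bind(struct i915_address_space *vm,
			    u64 start, u64 size, bool readonly,
			    struct sg_table *st, u32 sg_page_sizes)
{
	u64 flags = readonly ? I915_GTT_SVM_READONLY : 0;

	/* Allocate the page-table levels, then write the PTEs. */
	return svm_bind_addr(vm, start, size, flags, st, sg_page_sizes);
}

static void example_svm_unbind(struct i915_address_space *vm,
			       u64 start, u64 size)
{
	/* Clear the range; the backing pages belong to the CPU MM. */
	svm_unbind_addr(vm, start, size);
}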

Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 60 ++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem_gtt.h | 10 +++++
 2 files changed, 68 insertions(+), 2 deletions(-)

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7d4f5fa84b02..6657ff41dc3f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -195,6 +195,50 @@  static void ppgtt_unbind_vma(struct i915_vma *vma)
 		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
 }
 
+int svm_bind_addr_prepare(struct i915_address_space *vm, u64 start, u64 size)
+{
+	return vm->allocate_va_range(vm, start, size);
+}
+
+int svm_bind_addr_commit(struct i915_address_space *vm, u64 start, u64 size,
+			 u64 flags, struct sg_table *st, u32 sg_page_sizes)
+{
+	struct i915_vma vma = {0};
+	u32 pte_flags = 0;
+
+	/* use a vma wrapper */
+	vma.page_sizes.sg = sg_page_sizes;
+	vma.node.start = start;
+	vma.node.size = size;
+	vma.pages = st;
+	vma.vm = vm;
+
+	/* Applicable to VLV, and gen8+ */
+	if (flags & I915_GTT_SVM_READONLY)
+		pte_flags |= PTE_READ_ONLY;
+
+	vm->insert_entries(vm, &vma, 0, pte_flags);
+	return 0;
+}
+
+int svm_bind_addr(struct i915_address_space *vm, u64 start, u64 size,
+		  u64 flags, struct sg_table *st, u32 sg_page_sizes)
+{
+	int ret;
+
+	ret = svm_bind_addr_prepare(vm, start, size);
+	if (ret)
+		return ret;
+
+	return svm_bind_addr_commit(vm, start, size, flags, st, sg_page_sizes);
+}
+
+void svm_unbind_addr(struct i915_address_space *vm,
+		     u64 start, u64 size)
+{
+	vm->clear_range(vm, start, size);
+}
+
 static int ppgtt_set_pages(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->pages);
@@ -985,11 +1029,21 @@  static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
 	DBG("%s(%p):{ lvl:%d, start:%llx, end:%llx, idx:%d, len:%d, used:%d }\n",
 	    __func__, vm, lvl + 1, start, end,
 	    idx, len, atomic_read(px_used(pd)));
-	GEM_BUG_ON(!len || len >= atomic_read(px_used(pd)));
+	/*
+	 * FIXME: In the SVM case, during MMU invalidation we need to clear
+	 * the ppgtt, but we don't know whether the entry exists or not. So
+	 * we can't assume this is called only when the entry exists. Revisit.
+	 * Also need to add the ability to properly handle partial
+	 * invalidations by downgrading the large mappings.
+	 */
+	GEM_BUG_ON(!len);
 
 	do {
 		struct i915_page_table *pt = pd->entry[idx];
 
+		if (!pt)
+			continue;
+
 		if (atomic_fetch_inc(&pt->used) >> gen8_pd_shift(1) &&
 		    gen8_pd_contains(start, end, lvl)) {
 			DBG("%s(%p):{ lvl:%d, idx:%d, start:%llx, end:%llx } removing pd\n",
@@ -1012,7 +1066,9 @@  static u64 __gen8_ppgtt_clear(struct i915_address_space * const vm,
 			    __func__, vm, lvl, start, end,
 			    gen8_pd_index(start, 0), count,
 			    atomic_read(&pt->used));
-			GEM_BUG_ON(!count || count >= atomic_read(&pt->used));
+			GEM_BUG_ON(!count);
+			if (count > atomic_read(&pt->used))
+				count = atomic_read(&pt->used);
 
 			vaddr = kmap_atomic_px(pt);
 			memset64(vaddr + gen8_pd_index(start, 0),
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 7c1b54c9677d..8a8a314e1295 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -39,6 +39,7 @@ 
 #include <linux/mm.h>
 #include <linux/pagevec.h>
 #include <linux/workqueue.h>
+#include <linux/scatterlist.h>
 
 #include <drm/drm_mm.h>
 
@@ -679,4 +680,13 @@  int i915_gem_gtt_insert(struct i915_address_space *vm,
 
 #define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)
 
+/* SVM UAPI */
+#define I915_GTT_SVM_READONLY  BIT(0)
+
+int svm_bind_addr_prepare(struct i915_address_space *vm, u64 start, u64 size);
+int svm_bind_addr_commit(struct i915_address_space *vm, u64 start, u64 size,
+			 u64 flags, struct sg_table *st, u32 sg_page_sizes);
+int svm_bind_addr(struct i915_address_space *vm, u64 start, u64 size,
+		  u64 flags, struct sg_table *st, u32 sg_page_sizes);
+void svm_unbind_addr(struct i915_address_space *vm, u64 start, u64 size);
 #endif
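
For context, a sketch of how svm_unbind_addr() might be driven from an MMU
interval-notifier invalidation, which is what the FIXME in
__gen8_ppgtt_clear() alludes to. The i915_svm_notifier structure and its
registration are assumptions for illustration; the actual wiring is expected
in later patches of this series:

#include <linux/mmu_notifier.h>

/* Hypothetical per-mm tracking structure, for illustration only. */
struct i915_svm_notifier {
	struct mmu_interval_notifier notifier;
	struct i915_address_space *vm;
};

static bool i915_svm_invalidate(struct mmu_interval_notifier *mni,
				const struct mmu_notifier_range *range,
				unsigned long cur_seq)
{
	struct i915_svm_notifier *sn =
		container_of(mni, struct i915_svm_notifier, notifier);

	/*
	 * Entries may or may not exist for the invalidated range, which
	 * is why __gen8_ppgtt_clear() above has to tolerate missing page
	 * tables (see the FIXME in the hunk above).
	 */
	mmu_interval_set_seq(mni, cur_seq);
	svm_unbind_addr(sn->vm, range->start, range->end - range->start);

	return true;
}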