[1/5] drm/i915/gtt: Add read only pages to gen8_pte_encode

Message ID 20180531113552.13152-1-chris@chris-wilson.co.uk (mailing list archive)
State New, archived

Commit Message

Chris Wilson May 31, 2018, 11:35 a.m. UTC
From: Jon Bloomfield <jon.bloomfield@intel.com>

We can set a bit inside the ppGTT PTE to indicate a page is read-only;
writes from the GPU will be discarded. We can use this to protect pages
and in particular support read-only userptr mappings (necessary for
importing PROT_READ vma).

Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)
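For illustration, a minimal userspace model of the encoding change. The constants below are stand-ins chosen for this sketch (the real definitions live in the i915 headers, and the PTE_READ_ONLY value here is assumed, not taken from the patch); only the control flow mirrors the patched gen8_pte_encode(), minus the caching bits.

/*
 * Stand-in constants for illustration only; real definitions live in the
 * i915 headers. PTE_READ_ONLY's value is an assumption for this sketch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gen8_pte_t;

#define _PAGE_PRESENT   (1ull << 0)
#define _PAGE_RW        (1ull << 1)
#define PTE_READ_ONLY   (1u << 0)   /* hypothetical flag value */

/* Mirrors the patched gen8_pte_encode() flow, without the cache-level bits. */
static gen8_pte_t pte_encode_sketch(uint64_t addr, uint32_t flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (flags & PTE_READ_ONLY)
		pte &= ~_PAGE_RW;

	return pte;
}

int main(void)
{
	uint64_t addr = 0x1234ull << 12;

	/* Default encoding keeps the RW bit set: GPU writes are allowed. */
	assert(pte_encode_sketch(addr, 0) & _PAGE_RW);

	/* With PTE_READ_ONLY the RW bit is cleared: GPU writes are discarded. */
	assert(!(pte_encode_sketch(addr, PTE_READ_ONLY) & _PAGE_RW));

	printf("read-only PTE: %#llx\n",
	       (unsigned long long)pte_encode_sketch(addr, PTE_READ_ONLY));
	return 0;
}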

Comments

Matthew Auld May 31, 2018, 2:43 p.m. UTC | #1
On 31 May 2018 at 12:35, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> From: Jon Bloomfield <jon.bloomfield@intel.com>
>
> We can set a bit inside the ppGTT PTE to indicate a page is read-only;
> writes from the GPU will be discarded. We can use this to protect pages
> and in particular support read-only userptr mappings (necessary for
> importing PROT_READ vma).
>
> Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Matthew Auld <matthew.william.auld@gmail.com>
> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

fwiw I didn't see any surprises with ro 2M/64K PTEs when testing locally.

For the series:
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Chris Wilson May 31, 2018, 3 p.m. UTC | #2
Quoting Matthew Auld (2018-05-31 15:43:14)
> On 31 May 2018 at 12:35, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > From: Jon Bloomfield <jon.bloomfield@intel.com>
> >
> > We can set a bit inside the ppGTT PTE to indicate a page is read-only;
> > writes from the GPU will be discarded. We can use this to protect pages
> > and in particular support read-only userptr mappings (necessary for
> > importing PROT_READ vma).
> >
> > Signed-off-by: Jon Bloomfield <jon.bloomfield@intel.com>
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > Cc: Matthew Auld <matthew.william.auld@gmail.com>
> > Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> 
> fwiw I didn't see any surprises with ro 2M/64K PTEs when testing locally.
> 
> For the series:
> Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>

I think I'd like to see a selftest also exercising the ro nature before
pushing. Thanks for the review; stick around, there'll be another patch
to come :)
-Chris
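As a rough picture of what such a selftest would exercise, here is a userspace model using the same kind of stand-in constants as the sketch above: a store through a read-only entry must be discarded, while a store through a writable entry lands. The "GPU" is just a function that honours the RW bit; nothing here is the i915 selftest API.

/*
 * Userspace model of the behaviour a read-only selftest would check.
 * All names and constants are stand-ins for this sketch.
 */
#include <assert.h>
#include <stdint.h>

#define _PAGE_PRESENT (1ull << 0)
#define _PAGE_RW      (1ull << 1)

struct fake_page {
	uint64_t pte;      /* encoded entry guarding the page */
	uint32_t data;     /* backing storage */
};

/* Model of a GPU store: silently dropped when the PTE is not writable. */
static void gpu_store(struct fake_page *page, uint32_t value)
{
	if (page->pte & _PAGE_RW)
		page->data = value;
}

int main(void)
{
	struct fake_page rw = { .pte = _PAGE_PRESENT | _PAGE_RW, .data = 0 };
	struct fake_page ro = { .pte = _PAGE_PRESENT, .data = 0 };

	gpu_store(&rw, 0xdeadbeef);
	gpu_store(&ro, 0xdeadbeef);

	assert(rw.data == 0xdeadbeef);  /* writable mapping takes the store */
	assert(ro.data == 0);           /* read-only mapping discards it    */
	return 0;
}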
Patch

diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 7f4def556f40..5936f0bfdd19 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -244,10 +244,13 @@  static void clear_pages(struct i915_vma *vma)
 }
 
 static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
-				  enum i915_cache_level level)
+				  enum i915_cache_level level,
+				  u32 flags)
 {
-	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
-	pte |= addr;
+	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+	if (unlikely(flags & PTE_READ_ONLY))
+		pte &= ~_PAGE_RW;
 
 	switch (level) {
 	case I915_CACHE_NONE:
@@ -637,7 +640,7 @@  static void gen8_initialize_pt(struct i915_address_space *vm,
 			       struct i915_page_table *pt)
 {
 	fill_px(vm, pt,
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
 }
 
 static void gen6_initialize_pt(struct i915_address_space *vm,
@@ -833,7 +836,7 @@  static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
 	unsigned int pte = gen8_pte_index(start);
 	unsigned int pte_end = pte + num_entries;
 	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t *vaddr;
 
 	GEM_BUG_ON(num_entries > pt->used_ptes);
@@ -1008,7 +1011,7 @@  gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
 			      enum i915_cache_level cache_level)
 {
 	struct i915_page_directory *pd;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, 0);
 	gen8_pte_t *vaddr;
 	bool ret;
 
@@ -1076,7 +1079,7 @@  static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
 					   struct sgt_dma *iter,
 					   enum i915_cache_level cache_level)
 {
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, 0);
 	u64 start = vma->node.start;
 	dma_addr_t rem = iter->sg->length;
 
@@ -1542,7 +1545,7 @@  static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
 	struct i915_address_space *vm = &ppgtt->base;
 	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	u64 start = 0, length = ppgtt->base.total;
 
 	if (use_4lvl(vm)) {
@@ -2419,7 +2422,7 @@  static void gen8_ggtt_insert_page(struct i915_address_space *vm,
 	gen8_pte_t __iomem *pte =
 		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
 
-	gen8_set_pte(pte, gen8_pte_encode(addr, level));
+	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
 
 	ggtt->invalidate(vm->i915);
 }
@@ -2432,7 +2435,7 @@  static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
 	struct sgt_iter sgt_iter;
 	gen8_pte_t __iomem *gtt_entries;
-	const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
+	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
 	dma_addr_t addr;
 
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
@@ -2500,7 +2503,7 @@  static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	const gen8_pte_t scratch_pte =
-		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
+		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
 	gen8_pte_t __iomem *gtt_base =
 		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	const int max_entries = ggtt_total_entries(ggtt) - first_entry;