
[2/2] drm/i915/gem: Fix Virtual Memory mapping boundaries calculation

Message ID 20240802083850.103694-3-andi.shyti@linux.intel.com (mailing list archive)
State New, archived
Series Fix mmap memory boundary calculation

Commit Message

Andi Shyti Aug. 2, 2024, 8:38 a.m. UTC
Calculating the size of the mapped area as the lesser of the
requested size and the actual size does not take the partial
mapping offset into account. This can cause the fault handler to
map pages beyond the end of the user's VMA.

Fix the calculation of the starting and ending addresses; the
total size is now derived from the difference between the end and
start addresses.

Additionally, the calculations have been rewritten in a clearer,
more understandable form.

Fixes: c58305af1835 ("drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass")
Reported-by: Jann Horn <jannh@google.com>
Co-developed-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: <stable@vger.kernel.org> # v4.9+
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 53 +++++++++++++++++++++---
 1 file changed, 47 insertions(+), 6 deletions(-)
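
To illustrate the miscalculation described above, here is a small,
self-contained sketch of the arithmetic (the page counts are made up
for illustration and are not taken from the report): the old code
offsets the start by the partial-view offset but sizes the mapping
with min_t(), so the end of the remapped range can land past vm_end;
the new helper computes both ends and clamps them to the user's VMA.

/*
 * Illustrative-only sketch of the boundary arithmetic; the numbers
 * below are hypothetical.  Compare the old min_t()-based size with
 * the clamped start/end used by set_address_limits().
 */
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* hypothetical user mapping: 8 pages, no extra mmap offset */
	unsigned long vm_start = 0x10000000UL >> PAGE_SHIFT;
	unsigned long vm_end   = vm_start + 8;
	unsigned long obj_offset = 0;

	/* hypothetical partial GGTT view: starts at object page 4, 8 pages long */
	unsigned long partial_offset = 4;
	unsigned long vma_size = 8;

	/* old code: the start is shifted by the partial offset, the size is not */
	unsigned long old_start = vm_start + partial_offset;
	unsigned long old_size  = vma_size < (vm_end - vm_start) ?
				  vma_size : (vm_end - vm_start);
	printf("old: maps pages [%lu, %lu), vm_end is %lu -> %lu page(s) past the VMA\n",
	       old_start, old_start + old_size, vm_end,
	       old_start + old_size - vm_end);

	/* new code: compute both ends, then clamp to the user's VMA */
	long start = vm_start - obj_offset + partial_offset;
	long end   = start + vma_size;
	if (start < (long)vm_start)
		start = vm_start;
	if (end > (long)vm_end)
		end = vm_end;
	printf("new: maps pages [%ld, %ld), fully inside the VMA\n", start, end);

	return 0;
}

Clamping both ends keeps remap_io_mapping() strictly within the user's
VMA even when the partial view does not start at the first page the
user mapped.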

Comments

Jann Horn Aug. 2, 2024, 8:43 a.m. UTC | #1
On Fri, Aug 2, 2024 at 10:39 AM Andi Shyti <andi.shyti@linux.intel.com> wrote:
> Calculating the size of the mapped area as the lesser of the
> requested size and the actual size does not take the partial
> mapping offset into account. This can cause the fault handler to
> map pages beyond the end of the user's VMA.
>
> Fix the calculation of the starting and ending addresses; the
> total size is now derived from the difference between the end and
> start addresses.
>
> Additionally, the calculations have been rewritten in a clearer,
> more understandable form.
>
> Fixes: c58305af1835 ("drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass")
> Reported-by: Jann Horn <jannh@google.com>
> Co-developed-by: Chris Wilson <chris.p.wilson@linux.intel.com>
> Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: <stable@vger.kernel.org> # v4.9+

Reviewed-by: Jann Horn <jannh@google.com>
Cavitt, Jonathan Aug. 2, 2024, 5:25 p.m. UTC | #2
-----Original Message-----
From: Intel-gfx <intel-gfx-bounces@lists.freedesktop.org> On Behalf Of Andi Shyti
Sent: Friday, August 2, 2024 1:39 AM
To: intel-gfx <intel-gfx@lists.freedesktop.org>; dri-devel <dri-devel@lists.freedesktop.org>
Cc: Jann Horn <jannh@chromium.org>; Jani Nikula <jani.nikula@linux.intel.com>; Joonas Lahtinen <joonas.lahtinen@linux.intel.com>; Vivi, Rodrigo <rodrigo.vivi@intel.com>; Tvrtko Ursulin <tursulin@ursulin.net>; Jann Horn <jannh@google.com>; Chris Wilson <chris.p.wilson@linux.intel.com>; Niemiec, Krzysztof <krzysztof.niemiec@intel.com>; Andi Shyti <andi.shyti@kernel.org>; Auld, Matthew <matthew.auld@intel.com>; Andi Shyti <andi.shyti@linux.intel.com>
Subject: [PATCH 2/2] drm/i915/gem: Fix Virtual Memory mapping boundaries calculation
> 
> Calculating the size of the mapped area as the lesser of the
> requested size and the actual size does not take the partial
> mapping offset into account. This can cause the fault handler to
> map pages beyond the end of the user's VMA.
> 
> Fix the calculation of the starting and ending addresses; the
> total size is now derived from the difference between the end and
> start addresses.
> 
> Additionally, the calculations have been rewritten in a clearer,
> more understandable form.
> 
> Fixes: c58305af1835 ("drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass")
> Reported-by: Jann Horn <jannh@google.com>
> Co-developed-by: Chris Wilson <chris.p.wilson@linux.intel.com>
> Signed-off-by: Chris Wilson <chris.p.wilson@linux.intel.com>
> Signed-off-by: Andi Shyti <andi.shyti@linux.intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Matthew Auld <matthew.auld@intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Cc: <stable@vger.kernel.org> # v4.9+
> ---
>  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 53 +++++++++++++++++++++---
>  1 file changed, 47 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> index ce10dd259812..cac6d4184506 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
> @@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
>  	return i915_error_to_vmf_fault(err);
>  }
>  
> +static void set_address_limits(struct vm_area_struct *area,
> +			       struct i915_vma *vma,
> +			       unsigned long obj_offset,
> +			       unsigned long *start_vaddr,
> +			       unsigned long *end_vaddr)
> +{
> +	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
> +	long start, end; /* memory boundaries */
> +
> +	/*
> +	 * Let's move into the ">> PAGE_SHIFT"
> +	 * domain to be sure not to lose bits
> +	 */
> +	vm_start = area->vm_start >> PAGE_SHIFT;
> +	vm_end = area->vm_end >> PAGE_SHIFT;
> +	vma_size = vma->size >> PAGE_SHIFT;
> +
> +	/*
> +	 * Calculate the memory boundaries by considering the offset
> +	 * provided by the user during memory mapping and the offset
> +	 * provided for the partial mapping.
> +	 */
> +	start = vm_start;
> +	start -= obj_offset;
> +	start += vma->gtt_view.partial.offset;
> +	end = start + vma_size;
> +
> +	start = max_t(long, start, vm_start);
> +	end = min_t(long, end, vm_end);
> +
> +	/* Let's move back into the "<< PAGE_SHIFT" domain */
> +	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
> +	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
> +}
> +
>  static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
>  {
>  #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
> @@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
>  	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
>  	bool write = area->vm_flags & VM_WRITE;
>  	struct i915_gem_ww_ctx ww;
> +	unsigned long obj_offset;
> +	unsigned long start, end; /* memory boundaries */
>  	intel_wakeref_t wakeref;
>  	struct i915_vma *vma;
>  	pgoff_t page_offset;
> +	unsigned long pfn;
>  	int srcu;
>  	int ret;
>  
> -	/* We don't use vmf->pgoff since that has the fake offset */
> +	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
>  	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
> +	page_offset += obj_offset;
>  
>  	trace_i915_gem_object_fault(obj, page_offset, true, write);
>  
> @@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
>  	if (ret)
>  		goto err_unpin;
>  
> +	set_address_limits(area, vma, obj_offset, &start, &end);
> +
> +	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
> +	pfn += (start - area->vm_start) >> PAGE_SHIFT;
> +	pfn += obj_offset - vma->gtt_view.partial.offset;

I don't know how viable it would be, but maybe we could
calculate pfn as part of set_address_limits?
Just a suggestion, not blocking.
Reviewed-by: Jonathan Cavitt <Jonathan.cavitt@intel.com>
-Jonathan Cavitt

> +
>  	/* Finally, remap it using the new GTT offset */
> -	ret = remap_io_mapping(area,
> -			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
> -			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
> -			       min_t(u64, vma->size, area->vm_end - area->vm_start),
> -			       &ggtt->iomap);
> +	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
>  	if (ret)
>  		goto err_fence;
>  
> -- 
> 2.45.2
> 
>
Andi Shyti Aug. 5, 2024, 9:39 a.m. UTC | #3
Hi Jonathan,

...

> > +	set_address_limits(area, vma, obj_offset, &start, &end);
> > +
> > +	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
> > +	pfn += (start - area->vm_start) >> PAGE_SHIFT;
> > +	pfn += obj_offset - vma->gtt_view.partial.offset;
> 
> I don't know how viable it would be, but maybe we could
> calculate pfn as a part of set_address_limits?

I was a bit afraid of dumping a massive parameter list...

> Just a suggestion, not blocking

... but now that I'm looking at it more carefully, it would be
just two parameters more (&pfn and ggtt->gmadr.start) and would
be cleaner. Will send a v2.

> Reviewed-by: Jonathan Cavitt <Jonathan.cavitt@intel.com>
> -Jonathan Cavitt

Thanks!

Andi
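
For reference, a rough sketch of what folding the pfn computation into
the helper could look like, based only on the exchange above (this is
not the posted v2; the extra gmadr_start and pfn parameters are
assumptions taken from Andi's reply):

/*
 * Sketch only -- one possible way a v2 could fold the pfn computation
 * into set_address_limits(), as discussed above.
 */
static void set_address_limits(struct vm_area_struct *area,
			       struct i915_vma *vma,
			       unsigned long obj_offset,
			       resource_size_t gmadr_start,
			       unsigned long *start_vaddr,
			       unsigned long *end_vaddr,
			       unsigned long *pfn)
{
	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
	long start, end; /* memory boundaries */

	/* work in page units to avoid losing bits */
	vm_start = area->vm_start >> PAGE_SHIFT;
	vm_end = area->vm_end >> PAGE_SHIFT;
	vma_size = vma->size >> PAGE_SHIFT;

	/* apply the user's mmap offset and the partial-view offset */
	start = vm_start;
	start -= obj_offset;
	start += vma->gtt_view.partial.offset;
	end = start + vma_size;

	/* clamp both ends to the user's VMA */
	start = max_t(long, start, vm_start);
	end = min_t(long, end, vm_end);

	/* back to byte addresses */
	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
	*end_vaddr = (unsigned long)end << PAGE_SHIFT;

	/* first pfn to hand to remap_io_mapping() */
	*pfn = (gmadr_start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
	*pfn += (*start_vaddr - area->vm_start) >> PAGE_SHIFT;
	*pfn += obj_offset - vma->gtt_view.partial.offset;
}

The call site in vm_fault_gtt() would then collapse to something like:
set_address_limits(area, vma, obj_offset, ggtt->gmadr.start, &start, &end, &pfn);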

Patch

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index ce10dd259812..cac6d4184506 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -290,6 +290,41 @@  static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	return i915_error_to_vmf_fault(err);
 }
 
+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +337,18 @@  static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
 	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
+	unsigned long pfn;
 	int srcu;
 	int ret;
 
-	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
 
@@ -402,12 +441,14 @@  static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
+	set_address_limits(area, vma, obj_offset, &start, &end);
+
+	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	pfn += (start - area->vm_start) >> PAGE_SHIFT;
+	pfn += obj_offset - vma->gtt_view.partial.offset;
+
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
 	if (ret)
 		goto err_fence;