
[-next,v2] riscv/mm/fault: simplify code for do_page_fault()

Message ID 20221029084715.3669204-1-tongtiangen@huawei.com (mailing list archive)
State Changes Requested
Delegated to: Palmer Dabbelt

Checks

Context                    Check    Description
conchuod/patch_count       success  Link
conchuod/fixes_present     success  Fixes tag not required for -next series
conchuod/tree_selection    success  Guessed tree name to be for-next
conchuod/cover_letter      success  Single patches do not need cover letters
conchuod/kdoc              success  Errors and warnings before: 0 this patch: 0
conchuod/source_inline     success  Was 0 now: 0
conchuod/verify_signedoff  success  Signed-off-by tag matches author and committer
conchuod/cc_maintainers    warning  1 maintainers not CCed: akpm@linux-foundation.org
conchuod/module_param      success  Was 0 now: 0
conchuod/verify_fixes      success  No Fixes tag
conchuod/checkpatch        warning  CHECK: Alignment should match open parenthesis
conchuod/header_inline     success  No static functions without inline keyword in header files
conchuod/build_warn_rv64   fail     Errors and warnings before: 0 this patch: 0

Commit Message

Tong Tiangen Oct. 29, 2022, 8:47 a.m. UTC
To make the code better layered and more readable, fold the
vma-related checks into __do_page_fault(). To simplify the code
further, move the setting of tsk->thread.bad_cause into bad_area().
No functional change intended.

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
v1 -> v2:
  Fixed incorrect use of macro VM_FAULT_BADMAP.

 arch/riscv/mm/fault.c | 77 +++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 36 deletions(-)

Comments

Palmer Dabbelt Dec. 3, 2022, 3:08 a.m. UTC | #1
On Sat, 29 Oct 2022 01:47:15 PDT (-0700), tongtiangen@huawei.com wrote:
> To make the code better layered and more readable, fold the
> vma-related checks into __do_page_fault(). To simplify the code
> further, move the setting of tsk->thread.bad_cause into bad_area().
> No functional change intended.
>
> Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
> ---
> v1 -> v2:
>   Fixed incorrect use of macro VM_FAULT_BADMAP.
>
>  arch/riscv/mm/fault.c | 77 +++++++++++++++++++++++--------------------
>  1 file changed, 41 insertions(+), 36 deletions(-)
>
> diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
> index d86f7cebd4a7..3fdc2eebdd36 100644
> --- a/arch/riscv/mm/fault.c
> +++ b/arch/riscv/mm/fault.c
> @@ -85,6 +85,8 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
>
>  static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
>  {
> +	current->thread.bad_cause = regs->cause;
> +
>  	/*
>  	 * Something tried to access memory that isn't in our memory map.
>  	 * Fix it, but check if it's kernel or user first.
> @@ -200,6 +202,38 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
>  	return false;
>  }
>
> +#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
> +#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

These alias with VM_FAULT_HINDEX_MASK.  I just sent along 
<https://lore.kernel.org/all/20221203030356.3917-1-palmer@rivosinc.com/T/#t> 
to clean that up for the other ports that also do so; I'm going to hold 
off on this patch until I see what the comments on that one look like 
(the generic definitions in question are excerpted after this reply).

Otherwise this looks fine; it might not make 6.2, pending the discussion 
over there.

Thanks!

> +
> +static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
> +				unsigned int mm_flags, struct pt_regs *regs)
> +{
> +	struct vm_area_struct *vma = find_vma(mm, addr);
> +
> +	if (unlikely(!vma))
> +		return VM_FAULT_BADMAP;
> +
> +	if (unlikely(vma->vm_start > addr)) {
> +		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN) ||
> +				expand_stack(vma, addr)))
> +			return VM_FAULT_BADMAP;
> +	}
> +
> +	/*
> +	 * Ok, we have a good vm_area for this memory access, so
> +	 * we can handle it.
> +	 */
> +	if (unlikely(access_error(regs->cause, vma)))
> +		return VM_FAULT_BADACCESS;
> +
> +	/*
> +	 * If for any reason at all we could not handle the fault,
> +	 * make sure we exit gracefully rather than endlessly redo
> +	 * the fault.
> +	 */
> +	return handle_mm_fault(vma, addr, mm_flags, regs);
> +}
> +
>  /*
>   * This routine handles page faults.  It determines the address and the
>   * problem, and then passes it off to one of the appropriate routines.
> @@ -207,7 +241,6 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
>  asmlinkage void do_page_fault(struct pt_regs *regs)
>  {
>  	struct task_struct *tsk;
> -	struct vm_area_struct *vma;
>  	struct mm_struct *mm;
>  	unsigned long addr, cause;
>  	unsigned int flags = FAULT_FLAG_DEFAULT;
> @@ -280,44 +313,16 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
>  		flags |= FAULT_FLAG_INSTRUCTION;
>  retry:
>  	mmap_read_lock(mm);
> -	vma = find_vma(mm, addr);
> -	if (unlikely(!vma)) {
> -		tsk->thread.bad_cause = cause;
> -		bad_area(regs, mm, code, addr);
> -		return;
> -	}
> -	if (likely(vma->vm_start <= addr))
> -		goto good_area;
> -	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
> -		tsk->thread.bad_cause = cause;
> -		bad_area(regs, mm, code, addr);
> -		return;
> -	}
> -	if (unlikely(expand_stack(vma, addr))) {
> -		tsk->thread.bad_cause = cause;
> -		bad_area(regs, mm, code, addr);
> -		return;
> -	}
>
> -	/*
> -	 * Ok, we have a good vm_area for this memory access, so
> -	 * we can handle it.
> -	 */
> -good_area:
> -	code = SEGV_ACCERR;
> +	fault = __do_page_fault(mm, addr, flags, regs);
>
> -	if (unlikely(access_error(cause, vma))) {
> -		tsk->thread.bad_cause = cause;
> -		bad_area(regs, mm, code, addr);
> -		return;
> -	}
> +	if (unlikely(fault & VM_FAULT_BADMAP))
> +		return bad_area(regs, mm, code, addr);
>
> -	/*
> -	 * If for any reason at all we could not handle the fault,
> -	 * make sure we exit gracefully rather than endlessly redo
> -	 * the fault.
> -	 */
> -	fault = handle_mm_fault(vma, addr, flags, regs);
> +	if (unlikely(fault & VM_FAULT_BADACCESS)) {
> +		code = SEGV_ACCERR;
> +		return bad_area(regs, mm, code, addr);
> +	}
>
>  	/*
>  	 * If we need to retry but a fatal signal is pending, handle the
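
For reference, the aliasing Palmer points out comes from the hugetlb
page-size index that the generic fault code packs into bits 16-19 of
vm_fault_t. A minimal excerpt, assuming the layout of
include/linux/mm_types.h around v6.1 (trimmed to the relevant entries):

	enum vm_fault_reason {
		VM_FAULT_OOM         = (__force vm_fault_t)0x000001,
		/* ... intermediate codes elided ... */
		VM_FAULT_COMPLETED   = (__force vm_fault_t)0x004000,
		/* hugetlb page-size index occupies bits 16-19 */
		VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,
	};

	/* encode/decode a hugetlb size index in those same bits */
	#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
	#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)

With that encoding, the patch's arch-private VM_FAULT_BADMAP (0x010000)
is indistinguishable from VM_FAULT_SET_HINDEX(1), and VM_FAULT_BADACCESS
(0x020000) from VM_FAULT_SET_HINDEX(2), which is the collision Palmer's
cleanup series aims to untangle for the ports that use this pattern.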