
[v2,2/5] parisc: deduplicate code in flush_cache_mm() and flush_cache_range()

Message ID 20211009182439.30016-3-svens@stackframe.org (mailing list archive)
State Accepted, archived
Series parisc: fixes for CONFIG_PREEMPT

Commit Message

Sven Schnelle Oct. 9, 2021, 6:24 p.m. UTC
Parts of both functions are the same, so deduplicate them. No functional
change.

Signed-off-by: Sven Schnelle <svens@stackframe.org>
---
 arch/parisc/kernel/cache.c | 81 ++++++++++++++------------------------
 1 file changed, 30 insertions(+), 51 deletions(-)
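
For orientation before reading the diff: stitched together from the hunks below, flush_cache_mm() after this patch reads roughly as follows. The unchanged cache-flush-threshold check in the middle is elided because it is outside the diff context, and the added comments are editorial, not part of the patch:

	void flush_cache_mm(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;

		/* Flushing the whole cache on each cpu takes forever on
		   rp3440, etc.  So, avoid it if the mm isn't too big.  */
		/* ... unchanged threshold check elided (not shown in the hunks) ... */

		preempt_disable();
		if (mm->context == mfsp(3)) {
			/* mm is the address space currently loaded in space
			   register 3: flush the user ranges and TLB directly */
			for (vma = mm->mmap; vma; vma = vma->vm_next)
				flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
			preempt_enable();
			return;
		}

		/* otherwise walk the page tables and flush page by page */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
		preempt_enable();
	}

flush_cache_range() ends up with the same two paths: flush_user_cache_tlb() for the mfsp(3) case and flush_cache_pages() for the page-table walk.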

Comments

Rolf Eike Beer Oct. 11, 2021, 3:06 p.m. UTC | #1
On Saturday, 9 October 2021 at 20:24:36 CEST, Sven Schnelle wrote:
> Parts of both functions are the same, so deduplicate them. No functional
> change.
> 
> Signed-off-by: Sven Schnelle <svens@stackframe.org>
> ---
>  arch/parisc/kernel/cache.c | 81 ++++++++++++++------------------------
>  1 file changed, 30 insertions(+), 51 deletions(-)
> 
> diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
> index a1a7e2b0812f..c61827e4928a 100644
> --- a/arch/parisc/kernel/cache.c
> +++ b/arch/parisc/kernel/cache.c
> @@ -543,10 +543,33 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
>  	return ptep;
>  }
> 
> +static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
> +			      unsigned long start, unsigned long end)
> +{
> +	unsigned long addr, pfn;
> +	pte_t *ptep;
> +
> +	for (addr = start; addr < end; addr += PAGE_SIZE) {
> +		ptep = get_ptep(mm->pgd, addr);
> +		if (ptep) {
> +			pfn = pte_pfn(*ptep);
> +			flush_cache_page(vma, addr, pfn);
> +		}
> +	}
> +}
> +
> +static void flush_user_cache_tlb(struct vm_area_struct *vma,
> +				 unsigned long start, unsigned long end)
> +{
> +	flush_user_dcache_range_asm(start, end);
> +	if (vma->vm_flags & VM_EXEC)
> +		flush_user_icache_range_asm(start, end);
> +	flush_tlb_range(vma, start, end);
> +}

What you add here is less than what you removed below. If that is intentional,
I would welcome a description in the commit message of why it is correct.

>  void flush_cache_mm(struct mm_struct *mm)
>  {
>  	struct vm_area_struct *vma;
> -	pgd_t *pgd;
> 
>  	/* Flushing the whole cache on each cpu takes forever on
>  	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
> @@ -560,46 +583,20 @@ void flush_cache_mm(struct mm_struct *mm)
> 
>  	preempt_disable();
>  	if (mm->context == mfsp(3)) {
> -		for (vma = mm->mmap; vma; vma = vma->vm_next) {
> -			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
> -			if (vma->vm_flags & VM_EXEC)
> -				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
> -			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
> -		}
> +		for (vma = mm->mmap; vma; vma = vma->vm_next)
> +			flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
>  		preempt_enable();
>  		return;
>  	}
> 
> -	pgd = mm->pgd;
> -	for (vma = mm->mmap; vma; vma = vma->vm_next) {
> -		unsigned long addr;
> -
> -		for (addr = vma->vm_start; addr < vma->vm_end;
> -		     addr += PAGE_SIZE) {
> -			unsigned long pfn;
> -			pte_t *ptep = get_ptep(pgd, addr);
> -			if (!ptep)
> -				continue;
> -			pfn = pte_pfn(*ptep);
> -			if (!pfn_valid(pfn))
> -				continue;
> -			if (unlikely(mm->context)) {
> -				flush_tlb_page(vma, addr);
> -				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
> -			} else {
> -				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
> -			}
> -		}
> -	}
> +	for (vma = mm->mmap; vma; vma = vma->vm_next)
> +		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
>  	preempt_enable();
>  }
> 
>  void flush_cache_range(struct vm_area_struct *vma,
>  		unsigned long start, unsigned long end)
>  {
> -	pgd_t *pgd;
> -	unsigned long addr;
> -
>  	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
>  	    end - start >= parisc_cache_flush_threshold) {
>  		if (vma->vm_mm->context)
> @@ -610,30 +607,12 @@ void flush_cache_range(struct vm_area_struct *vma,
> 
>  	preempt_disable();
>  	if (vma->vm_mm->context == mfsp(3)) {
> -		flush_user_dcache_range_asm(start, end);
> -		if (vma->vm_flags & VM_EXEC)
> -			flush_user_icache_range_asm(start, end);
> -		flush_tlb_range(vma, start, end);
> +		flush_user_cache_tlb(vma, start, end);
>  		preempt_enable();
>  		return;
>  	}
> 
> -	pgd = vma->vm_mm->pgd;
> -	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
> -		unsigned long pfn;
> -		pte_t *ptep = get_ptep(pgd, addr);
> -		if (!ptep)
> -			continue;
> -		pfn = pte_pfn(*ptep);
> -		if (pfn_valid(pfn)) {
> -			if (unlikely(vma->vm_mm->context)) {
> -				flush_tlb_page(vma, addr);
> -				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
> -			} else {
> -				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
> -			}
> -		}
> -	}
> +	flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
>  	preempt_enable();
>  }
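
Regarding the question above about the dropped pfn_valid()/context handling: the new flush_cache_pages() delegates the per-page work to flush_cache_page(), which (if memory serves) is defined earlier in the same arch/parisc/kernel/cache.c and already performs those checks. A rough sketch of that pre-existing helper, from memory, so details such as the likely() hint may differ:

	/* approximate body of the existing helper in arch/parisc/kernel/cache.c */
	void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
			      unsigned long pfn)
	{
		if (pfn_valid(pfn)) {
			if (likely(vma->vm_mm->context)) {
				flush_tlb_page(vma, vmaddr);
				__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
			}
		}
	}

If that is the helper being relied on, the checks are not lost but merely move behind the flush_cache_page() call; spelling that out in the commit message would address the reviewer's concern.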

Patch

diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index a1a7e2b0812f..c61827e4928a 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -543,10 +543,33 @@  static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 	return ptep;
 }
 
+static void flush_cache_pages(struct vm_area_struct *vma, struct mm_struct *mm,
+			      unsigned long start, unsigned long end)
+{
+	unsigned long addr, pfn;
+	pte_t *ptep;
+
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
+		ptep = get_ptep(mm->pgd, addr);
+		if (ptep) {
+			pfn = pte_pfn(*ptep);
+			flush_cache_page(vma, addr, pfn);
+		}
+	}
+}
+
+static void flush_user_cache_tlb(struct vm_area_struct *vma,
+				 unsigned long start, unsigned long end)
+{
+	flush_user_dcache_range_asm(start, end);
+	if (vma->vm_flags & VM_EXEC)
+		flush_user_icache_range_asm(start, end);
+	flush_tlb_range(vma, start, end);
+}
+
 void flush_cache_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
-	pgd_t *pgd;
 
 	/* Flushing the whole cache on each cpu takes forever on
 	   rp3440, etc.  So, avoid it if the mm isn't too big.  */
@@ -560,46 +583,20 @@  void flush_cache_mm(struct mm_struct *mm)
 
 	preempt_disable();
 	if (mm->context == mfsp(3)) {
-		for (vma = mm->mmap; vma; vma = vma->vm_next) {
-			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
-			if (vma->vm_flags & VM_EXEC)
-				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
-			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
-		}
+		for (vma = mm->mmap; vma; vma = vma->vm_next)
+			flush_user_cache_tlb(vma, vma->vm_start, vma->vm_end);
 		preempt_enable();
 		return;
 	}
 
-	pgd = mm->pgd;
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
-		unsigned long addr;
-
-		for (addr = vma->vm_start; addr < vma->vm_end;
-		     addr += PAGE_SIZE) {
-			unsigned long pfn;
-			pte_t *ptep = get_ptep(pgd, addr);
-			if (!ptep)
-				continue;
-			pfn = pte_pfn(*ptep);
-			if (!pfn_valid(pfn))
-				continue;
-			if (unlikely(mm->context)) {
-				flush_tlb_page(vma, addr);
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			} else {
-				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	for (vma = mm->mmap; vma; vma = vma->vm_next)
+		flush_cache_pages(vma, mm, vma->vm_start, vma->vm_end);
 	preempt_enable();
 }
 
 void flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end)
 {
-	pgd_t *pgd;
-	unsigned long addr;
-
 	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
 	    end - start >= parisc_cache_flush_threshold) {
 		if (vma->vm_mm->context)
@@ -610,30 +607,12 @@  void flush_cache_range(struct vm_area_struct *vma,
 
 	preempt_disable();
 	if (vma->vm_mm->context == mfsp(3)) {
-		flush_user_dcache_range_asm(start, end);
-		if (vma->vm_flags & VM_EXEC)
-			flush_user_icache_range_asm(start, end);
-		flush_tlb_range(vma, start, end);
+		flush_user_cache_tlb(vma, start, end);
 		preempt_enable();
 		return;
 	}
 
-	pgd = vma->vm_mm->pgd;
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
-		unsigned long pfn;
-		pte_t *ptep = get_ptep(pgd, addr);
-		if (!ptep)
-			continue;
-		pfn = pte_pfn(*ptep);
-		if (pfn_valid(pfn)) {
-			if (unlikely(vma->vm_mm->context)) {
-				flush_tlb_page(vma, addr);
-				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
-			} else {
-				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
-			}
-		}
-	}
+	flush_cache_pages(vma, vma->vm_mm, vma->vm_start, vma->vm_end);
 	preempt_enable();
 }