[v4,08/12] riscv: mm: Introduce cntx2asid/cntx2version helper macros

Message ID 20240102220134.3229156-9-samuel.holland@sifive.com (mailing list archive)
State Superseded
Series riscv: ASID-related and UP-related TLB flush enhancements

Checks

Context Check Description
conchuod/vmtest-for-next-PR success PR summary
conchuod/vmtest-fixes-PR success PR summary
conchuod/patch-8-test-1 success .github/scripts/patches/tests/build_rv32_defconfig.sh
conchuod/patch-8-test-2 success .github/scripts/patches/tests/build_rv64_clang_allmodconfig.sh
conchuod/patch-8-test-3 success .github/scripts/patches/tests/build_rv64_gcc_allmodconfig.sh
conchuod/patch-8-test-4 success .github/scripts/patches/tests/build_rv64_nommu_k210_defconfig.sh
conchuod/patch-8-test-5 success .github/scripts/patches/tests/build_rv64_nommu_virt_defconfig.sh
conchuod/patch-8-test-6 success .github/scripts/patches/tests/checkpatch.sh
conchuod/patch-8-test-7 success .github/scripts/patches/tests/dtb_warn_rv64.sh
conchuod/patch-8-test-8 success .github/scripts/patches/tests/header_inline.sh
conchuod/patch-8-test-9 success .github/scripts/patches/tests/kdoc.sh
conchuod/patch-8-test-10 success .github/scripts/patches/tests/module_param.sh
conchuod/patch-8-test-11 success .github/scripts/patches/tests/verify_fixes.sh
conchuod/patch-8-test-12 success .github/scripts/patches/tests/verify_signedoff.sh

Commit Message

Samuel Holland Jan. 2, 2024, 10 p.m. UTC
When using the ASID allocator, the MM context ID contains two values:
the ASID in the lower bits, and the allocator version number in the
remaining bits. Use macros to make this separation more obvious.

Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
---

(no changes since v1)

 arch/riscv/include/asm/mmu.h |  3 +++
 arch/riscv/mm/context.c      | 12 ++++++------
 arch/riscv/mm/tlbflush.c     |  2 +-
 3 files changed, 10 insertions(+), 7 deletions(-)
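
For illustration, a minimal sketch of the split these macros express (assuming 16 ASID bits for the example; in the real code, asid_mask is a variable probed from the hardware at boot):

	#define EXAMPLE_ASID_MASK	0xffffUL
	#define cntx2asid(cntx)		((cntx) & EXAMPLE_ASID_MASK)
	#define cntx2version(cntx)	((cntx) & ~EXAMPLE_ASID_MASK)

	/* mm->context.id packs both values, e.g. version 0x30000, ASID 0x2a: */
	unsigned long cntx = 0x30000UL | 0x2aUL;	/* == 0x3002a */
	/* cntx2asid(cntx)    == 0x2a    - programmed into satp.ASID         */
	/* cntx2version(cntx) == 0x30000 - compared against current_version  */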

Comments

Alexandre Ghiti Jan. 4, 2024, 12:39 p.m. UTC | #1
On Tue, Jan 2, 2024 at 11:01 PM Samuel Holland
<samuel.holland@sifive.com> wrote:
>
> When using the ASID allocator, the MM context ID contains two values:
> the ASID in the lower bits, and the allocator version number in the
> remaining bits. Use macros to make this separation more obvious.
>
> Signed-off-by: Samuel Holland <samuel.holland@sifive.com>
> ---
>
> (no changes since v1)
>
>  arch/riscv/include/asm/mmu.h |  3 +++
>  arch/riscv/mm/context.c      | 12 ++++++------
>  arch/riscv/mm/tlbflush.c     |  2 +-
>  3 files changed, 10 insertions(+), 7 deletions(-)
>
> diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
> index 355504b37f8e..a550fbf770be 100644
> --- a/arch/riscv/include/asm/mmu.h
> +++ b/arch/riscv/include/asm/mmu.h
> @@ -26,6 +26,9 @@ typedef struct {
>  #endif
>  } mm_context_t;
>
> +#define cntx2asid(cntx)	((cntx) & asid_mask)
> +#define cntx2version(cntx)	((cntx) & ~asid_mask)

Not a big fan of the naming, I would have something like
get_asid_from_context() and get_version_from_context() or something
like that, but up to you of course.
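
For comparison, the suggestion above might look something like this (a hypothetical sketch, not code from the series, assuming asid_mask is visible where these are defined):

	static inline unsigned long get_asid_from_context(unsigned long cntx)
	{
		return cntx & asid_mask;
	}

	static inline unsigned long get_version_from_context(unsigned long cntx)
	{
		return cntx & ~asid_mask;
	}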

You can add:

Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>

Thanks,

Alex
Samuel Holland Jan. 4, 2024, 3:42 p.m. UTC | #2
Hi Alex,

On 2024-01-04 6:39 AM, Alexandre Ghiti wrote:
> On Tue, Jan 2, 2024 at 11:01 PM Samuel Holland
> <samuel.holland@sifive.com> wrote:
>> [...]
>> +#define cntx2asid(cntx)	((cntx) & asid_mask)
>> +#define cntx2version(cntx)	((cntx) & ~asid_mask)
> 
> Not a big fan of the naming, I would have something like
> get_asid_from_context() and get_version_from_context() or something
> like that, but up to you of course.

Thanks for the review. I'm not really a fan of it either, but I tried to match
precedent from other architectures. These loosely follow the naming of
ctxid2asid() from arm64 and idx2asid() from csky. arm uses ASID().
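
For reference, the arm64 counterpart reads roughly like this (paraphrased from arch/arm64/mm/context.c; approximate, not verbatim):

	#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
	#define asid2ctxid(asid, genid)	((asid) | (genid))

There, ASID_MASK covers the generation bits rather than the ASID bits, so the sense of the mask is inverted relative to the riscv macros.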

Regards,
Samuel

Patch

diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
index 355504b37f8e..a550fbf770be 100644
--- a/arch/riscv/include/asm/mmu.h
+++ b/arch/riscv/include/asm/mmu.h
@@ -26,6 +26,9 @@  typedef struct {
 #endif
 } mm_context_t;
 
+#define cntx2asid(cntx)		((cntx) & asid_mask)
+#define cntx2version(cntx)	((cntx) & ~asid_mask)
+
 void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
 			       phys_addr_t sz, pgprot_t prot);
 #endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 217fd4de6134..43d005f63253 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -81,7 +81,7 @@  static void __flush_context(void)
 		if (cntx == 0)
 			cntx = per_cpu(reserved_context, i);
 
-		__set_bit(cntx & asid_mask, context_asid_map);
+		__set_bit(cntx2asid(cntx), context_asid_map);
 		per_cpu(reserved_context, i) = cntx;
 	}
 
@@ -102,7 +102,7 @@  static unsigned long __new_context(struct mm_struct *mm)
 	lockdep_assert_held(&context_lock);
 
 	if (cntx != 0) {
-		unsigned long newcntx = ver | (cntx & asid_mask);
+		unsigned long newcntx = ver | cntx2asid(cntx);
 
 		/*
 		 * If our current CONTEXT was active during a rollover, we
@@ -115,7 +115,7 @@  static unsigned long __new_context(struct mm_struct *mm)
 		 * We had a valid CONTEXT in a previous life, so try to
 		 * re-use it if possible.
 		 */
-		if (!__test_and_set_bit(cntx & asid_mask, context_asid_map))
+		if (!__test_and_set_bit(cntx2asid(cntx), context_asid_map))
 			return newcntx;
 	}
 
@@ -168,7 +168,7 @@  static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 	 */
 	old_active_cntx = atomic_long_read(&per_cpu(active_context, cpu));
 	if (old_active_cntx &&
-	    ((cntx & ~asid_mask) == atomic_long_read(&current_version)) &&
+	    (cntx2version(cntx) == atomic_long_read(&current_version)) &&
 	    atomic_long_cmpxchg_relaxed(&per_cpu(active_context, cpu),
 					old_active_cntx, cntx))
 		goto switch_mm_fast;
@@ -177,7 +177,7 @@  static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
 	/* Check that our ASID belongs to the current_version. */
 	cntx = atomic_long_read(&mm->context.id);
-	if ((cntx & ~asid_mask) != atomic_long_read(&current_version)) {
+	if (cntx2version(cntx) != atomic_long_read(&current_version)) {
 		cntx = __new_context(mm);
 		atomic_long_set(&mm->context.id, cntx);
 	}
@@ -191,7 +191,7 @@  static void set_mm_asid(struct mm_struct *mm, unsigned int cpu)
 
 switch_mm_fast:
 	csr_write(CSR_SATP, virt_to_pfn(mm->pgd) |
-		  ((cntx & asid_mask) << SATP_ASID_SHIFT) |
+		  (cntx2asid(cntx) << SATP_ASID_SHIFT) |
 		  satp_mode);
 
 	if (need_flush_tlb)
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 76b24d4ed4ab..5ec621545c69 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -85,7 +85,7 @@  static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 			return;
 
 		if (static_branch_unlikely(&use_asid_allocator))
-			asid = atomic_long_read(&mm->context.id) & asid_mask;
+			asid = cntx2asid(atomic_long_read(&mm->context.id));
 	} else {
 		cmask = cpu_online_mask;
 	}
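
As a worked illustration of the switch_mm_fast satp write above (a sketch assuming Sv39 on RV64, where SATP_ASID_SHIFT is 44; field layout per the RISC-V privileged spec, simplified from the hunk above):

	/* RV64 satp layout: MODE[63:60] | ASID[59:44] | PPN[43:0] */
	unsigned long satp = virt_to_pfn(mm->pgd)	/* root page table PPN   */
			| (cntx2asid(cntx) << 44)	/* current ASID          */
			| SATP_MODE_39;			/* Sv39 translation mode */
	csr_write(CSR_SATP, satp);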