
arm64: kernel: Replace manual computation with macro

Message ID 20240517132421.285667-1-dev.jain@arm.com (mailing list archive)
State New
Series arm64: kernel: Replace manual computation with macro

Commit Message

Dev Jain May 17, 2024, 1:24 p.m. UTC
Use the ARM64_HW_PGTABLE_LEVEL_SHIFT() macro from pgtable-hwdef.h for better
readability.

I have boot-tested this patch on all page table level configurations,
including LVA/LPA/LPA2, and with forced 52-bit userspace VA.

Claim: There should be no functional change after applying this patch.
Proof: lshift is used only to compute lmask and tbl, so it suffices to prove
that lmask and tbl remain invariant. Let s = PAGE_SHIFT, l = level. Expanding
the macro:

new_lshift = (s - 3) * (4 - l) + 3
           = 4s - sl - 12 + 3l + 3
           = 4s - sl + 3l - 9
           = (3s - sl + 3l - 9) + s
           = s * (3 - l) + 3 * (l - 3) + s
           = (3 - l) * (s - 3) + s
           = old_lshift + s

Hence, new_lshift = old_lshift + PAGE_SHIFT, so tbl is invariant. Noting that
old_lmask = (PAGE_SIZE << old_lshift) - 1 = ((1 << PAGE_SHIFT) << old_lshift) - 1
= (1 << (PAGE_SHIFT + old_lshift)) - 1 = (1 << new_lshift) - 1 = new_lmask,
we are done. Q.E.D.
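
For completeness, the identity can also be checked mechanically. The following
is a minimal standalone userspace sketch (not kernel code; LEVEL_SHIFT() merely
mirrors the ARM64_HW_PGTABLE_LEVEL_SHIFT() definition, and the page shifts
12/14/16 correspond to 4K/16K/64K pages) that verifies both the shift and the
mask equalities for levels 0..3:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Mirrors ARM64_HW_PGTABLE_LEVEL_SHIFT() from pgtable-hwdef.h */
	#define LEVEL_SHIFT(ps, l)	(((ps) - 3) * (4 - (l)) + 3)

	int main(void)
	{
		for (int ps = 12; ps <= 16; ps += 2) {		/* 4K, 16K, 64K */
			for (int level = 0; level <= 3; level++) {
				int old_lshift = (3 - level) * (ps - 3);
				int new_lshift = LEVEL_SHIFT(ps, level);
				uint64_t page_size = (uint64_t)1 << ps;
				uint64_t old_lmask = (page_size << old_lshift) - 1;
				uint64_t new_lmask = ((uint64_t)1 << new_lshift) - 1;

				/* new shift = old shift + page shift; masks agree */
				assert(new_lshift == old_lshift + ps);
				assert(old_lmask == new_lmask);
			}
		}
		printf("lshift/lmask identities hold for 4K/16K/64K, levels 0..3\n");
		return 0;
	}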
 
Signed-off-by: Dev Jain <dev.jain@arm.com>
---
 arch/arm64/kernel/pi/map_range.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

Comments

Catalin Marinas May 17, 2024, 3:31 p.m. UTC | #1
On Fri, May 17, 2024 at 06:54:21PM +0530, Dev Jain wrote:
> diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
> index 5410b2cac590..4b145be7f846 100644
> --- a/arch/arm64/kernel/pi/map_range.c
> +++ b/arch/arm64/kernel/pi/map_range.c
> @@ -31,14 +31,14 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
>  {
>  	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
>  	u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
> -	int lshift = (3 - level) * (PAGE_SHIFT - 3);
> -	u64 lmask = (PAGE_SIZE << lshift) - 1;
> +	int lshift = ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
> +	u64 lmask = ((u64)1 << lshift) - 1;

Nitpick: you can use 1UL instead of (u64)1.

>  
>  	start	&= PAGE_MASK;
>  	pa	&= PAGE_MASK;
>  
>  	/* Advance tbl to the entry that covers start */
> -	tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;
> +	tbl += (start >> lshift) % PTRS_PER_PTE;

I did the maths my own way as well and the change is correct:

	lshift + PAGE_SHIFT == ARM64_HW_PGTABLE_LEVEL_SHIFT(level)

It probably is easier to read this way, so:

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
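
As an aside, since PAGE_SHIFT and the macro are compile-time constants, the
identity can also be verified at build time. A hypothetical throwaway check
(not part of the patch), dropped at file scope into map_range.c, could look
like:

	#include <linux/build_bug.h>

	/* Throwaway check: old lshift + PAGE_SHIFT == macro shift, per level */
	#define OLD_LSHIFT(level)	((3 - (level)) * (PAGE_SHIFT - 3))

	static_assert(OLD_LSHIFT(0) + PAGE_SHIFT == ARM64_HW_PGTABLE_LEVEL_SHIFT(0));
	static_assert(OLD_LSHIFT(1) + PAGE_SHIFT == ARM64_HW_PGTABLE_LEVEL_SHIFT(1));
	static_assert(OLD_LSHIFT(2) + PAGE_SHIFT == ARM64_HW_PGTABLE_LEVEL_SHIFT(2));
	static_assert(OLD_LSHIFT(3) + PAGE_SHIFT == ARM64_HW_PGTABLE_LEVEL_SHIFT(3));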
Dave Martin May 23, 2024, 1:02 p.m. UTC | #2
On Fri, May 17, 2024 at 04:31:12PM +0100, Catalin Marinas wrote:
> On Fri, May 17, 2024 at 06:54:21PM +0530, Dev Jain wrote:
> > diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
> > index 5410b2cac590..4b145be7f846 100644
> > --- a/arch/arm64/kernel/pi/map_range.c
> > +++ b/arch/arm64/kernel/pi/map_range.c
> > @@ -31,14 +31,14 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
> >  {
> >  	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
> >  	u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
> > -	int lshift = (3 - level) * (PAGE_SHIFT - 3);
> > -	u64 lmask = (PAGE_SIZE << lshift) - 1;
> > +	int lshift = ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
> > +	u64 lmask = ((u64)1 << lshift) - 1;
> 
> Nitpick: you can use 1UL instead of (u64)1.

Or, rather than reinventing this again, how about:

#include <linux/bits.h>

	u64 lmask = GENMASK_ULL(lshift - 1, 0);

[...]

Cheers
---Dave
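
For illustration only, a sketch of how the two declarations might look with
GENMASK_ULL() folded in (hypothetical, not a posted respin). GENMASK_ULL(h, l)
from include/linux/bits.h builds a u64 with bits h..l set, so
GENMASK_ULL(lshift - 1, 0) has the same value as ((u64)1 << lshift) - 1:

	int lshift = ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
	u64 lmask = GENMASK_ULL(lshift - 1, 0);	/* bits [lshift - 1 : 0] set */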
Catalin Marinas May 23, 2024, 4:59 p.m. UTC | #3
On Thu, May 23, 2024 at 02:02:37PM +0100, Dave P Martin wrote:
> On Fri, May 17, 2024 at 04:31:12PM +0100, Catalin Marinas wrote:
> > On Fri, May 17, 2024 at 06:54:21PM +0530, Dev Jain wrote:
> > > diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
> > > index 5410b2cac590..4b145be7f846 100644
> > > --- a/arch/arm64/kernel/pi/map_range.c
> > > +++ b/arch/arm64/kernel/pi/map_range.c
> > > @@ -31,14 +31,14 @@ void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
> > >  {
> > >  	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
> > >  	u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
> > > -	int lshift = (3 - level) * (PAGE_SHIFT - 3);
> > > -	u64 lmask = (PAGE_SIZE << lshift) - 1;
> > > +	int lshift = ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
> > > +	u64 lmask = ((u64)1 << lshift) - 1;
> > 
> > Nitpick: you can use 1UL instead of (u64)1.
> 
> Or, rather than reinventing this again, how about:
> 
> #include <linux/bits.h>
> 
> 	u64 lmask = GENMASK_ULL(lshift - 1, 0);

Even better. Thanks Dave.

Patch

diff --git a/arch/arm64/kernel/pi/map_range.c b/arch/arm64/kernel/pi/map_range.c
index 5410b2cac590..4b145be7f846 100644
--- a/arch/arm64/kernel/pi/map_range.c
+++ b/arch/arm64/kernel/pi/map_range.c
@@ -31,14 +31,14 @@  void __init map_range(u64 *pte, u64 start, u64 end, u64 pa, pgprot_t prot,
 {
 	u64 cmask = (level == 3) ? CONT_PTE_SIZE - 1 : U64_MAX;
 	u64 protval = pgprot_val(prot) & ~PTE_TYPE_MASK;
-	int lshift = (3 - level) * (PAGE_SHIFT - 3);
-	u64 lmask = (PAGE_SIZE << lshift) - 1;
+	int lshift = ARM64_HW_PGTABLE_LEVEL_SHIFT(level);
+	u64 lmask = ((u64)1 << lshift) - 1;
 
 	start	&= PAGE_MASK;
 	pa	&= PAGE_MASK;
 
 	/* Advance tbl to the entry that covers start */
-	tbl += (start >> (lshift + PAGE_SHIFT)) % PTRS_PER_PTE;
+	tbl += (start >> lshift) % PTRS_PER_PTE;
 
 	/*
 	 * Set the right block/page bits for this level unless we are