diff mbox series

xen/arm64: Correctly compute the virtual address in maddr_to_virt()

Message ID 20190718115714.634-1-julien.grall@arm.com (mailing list archive)
State New, archived
Headers show
Series xen/arm64: Correctly compute the virtual address in maddr_to_virt() | expand

Commit Message

Julien Grall July 18, 2019, 11:57 a.m. UTC
The helper maddr_to_virt() is used to translate a machine address to a
virtual address. To save some valuable address space, some part of the
machine address may be compressed.

In theory the PDX code is free to compress any bits, so there is no
guarantee the machine index computed will always be greater than
xenheap_mfn_start. This would result in returning a virtual address that is
not part of the direct map and trigger a crash, at least on debug builds, later
on because of the check in virt_to_page().

A recently reverted patch (see 1191156361 "xen/arm: fix mask calculation
in pdx_init_mask") allows the PDX to compress more bits and triggered a
crash on AMD Seattle Platform.

Avoid the crash by keeping track of the base PDX for the xenheap and use
it for computing the virtual address.

Note that virt_to_maddr() does not need to have similar modification as
it is using the hardware to translate the virtual address to a machine
address.

Take the opportunity to fix the ASSERT() as the direct map base address
corresponds to the start of the RAM (this is not always 0).

Signed-off-by: Julien Grall <julien.grall@arm.com>

---

With that, the patch 1191156361 "xen/arm: fix mask calculation in
pdx_init_mask" could be re-instated.
---
 xen/arch/arm/mm.c        | 2 ++
 xen/include/asm-arm/mm.h | 6 ++++--
 2 files changed, 6 insertions(+), 2 deletions(-)

Comments

Stefano Stabellini July 24, 2019, 12:17 a.m. UTC | #1
On Thu, 18 Jul 2019, Julien Grall wrote:
> The helper maddr_to_virt() is used to translate a machine address to a
> virtual address. To save some valuable address space, some part of the
> machine address may be compressed.
> 
> In theory the PDX code is free to compress any bits so there are no
> guarantee the machine index computed will be always greater than
> xenheap_mfn_start. This would result to return a virtual address that is
> not part of the direct map and trigger a crash at least on debug-build later
> on because of the check in virt_to_page().
> 
> A recently reverted patch (see 1191156361 "xen/arm: fix mask calculation
> in pdx_init_mask") allows the PDX to compress more bits and triggered a
> crash on AMD Seattle Platform.
> 
> Avoid the crash by keeping track of the base PDX for the xenheap and use
> it for computing the virtual address.
> 
> Note that virt_to_maddr() does not need to have similar modification as
> it is using the hardware to translate the virtual address to a machine
> address.
> 
> Take the opportunity to fix the ASSERT() as the direct map base address
> correspond to the start of the RAM (this is not always 0).
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Thank you very much for debugging and fixing this problem! It is
surprising that it has been working at all :-)


> ---
> 
> With that, the patch 1191156361 "xen/arm: fix mask calculation in
> pdx_init_mask" could be re-instated.
> ---
>  xen/arch/arm/mm.c        | 2 ++
>  xen/include/asm-arm/mm.h | 6 ++++--
>  2 files changed, 6 insertions(+), 2 deletions(-)
> 
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index 44258ad89c..e1cdeaaf2f 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -165,6 +165,7 @@ mfn_t xenheap_mfn_end __read_mostly;
>  vaddr_t xenheap_virt_end __read_mostly;
>  #ifdef CONFIG_ARM_64
>  vaddr_t xenheap_virt_start __read_mostly;
> +unsigned long xenheap_base_pdx __read_mostly;
>  #endif
>  
>  unsigned long frametable_base_pdx __read_mostly;
> @@ -796,6 +797,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>      if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
>      {
>          xenheap_mfn_start = _mfn(base_mfn);
> +        xenheap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
>          xenheap_virt_start = DIRECTMAP_VIRT_START +
>              (base_mfn - mfn) * PAGE_SIZE;
>      }

I can see that this would work, but wouldn't it be a better fit to set
xenheap_base_pdx in xen/arch/arm/setup.c:setup_mm, where we already set:


    xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
    xenheap_mfn_start = maddr_to_mfn(ram_start);
    xenheap_mfn_end = maddr_to_mfn(ram_end);

Or is it too late by then?



> diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
> index 3dbc8a6469..d6b5544015 100644
> --- a/xen/include/asm-arm/mm.h
> +++ b/xen/include/asm-arm/mm.h
> @@ -135,6 +135,7 @@ extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
>  extern vaddr_t xenheap_virt_end;
>  #ifdef CONFIG_ARM_64
>  extern vaddr_t xenheap_virt_start;
> +extern unsigned long xenheap_base_pdx;
>  #endif
>  
>  #ifdef CONFIG_ARM_32
> @@ -253,9 +254,10 @@ static inline void *maddr_to_virt(paddr_t ma)
>  #else
>  static inline void *maddr_to_virt(paddr_t ma)
>  {
> -    ASSERT(mfn_to_pdx(maddr_to_mfn(ma)) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
> +    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) <
> +           (DIRECTMAP_SIZE >> PAGE_SHIFT));
>      return (void *)(XENHEAP_VIRT_START -
> -                    mfn_to_maddr(xenheap_mfn_start) +
> +                    (xenheap_base_pdx << PAGE_SHIFT) +
>                      ((ma & ma_va_bottom_mask) |
>                       ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
>  }
Julien Grall July 24, 2019, 11:21 a.m. UTC | #2
Hi Stefano,

On 24/07/2019 01:17, Stefano Stabellini wrote:
> On Thu, 18 Jul 2019, Julien Grall wrote:
>> With that, the patch 1191156361 "xen/arm: fix mask calculation in
>> pdx_init_mask" could be re-instated.
>> ---
>>   xen/arch/arm/mm.c        | 2 ++
>>   xen/include/asm-arm/mm.h | 6 ++++--
>>   2 files changed, 6 insertions(+), 2 deletions(-)
>>
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index 44258ad89c..e1cdeaaf2f 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -165,6 +165,7 @@ mfn_t xenheap_mfn_end __read_mostly;
>>   vaddr_t xenheap_virt_end __read_mostly;
>>   #ifdef CONFIG_ARM_64
>>   vaddr_t xenheap_virt_start __read_mostly;
>> +unsigned long xenheap_base_pdx __read_mostly;
>>   #endif
>>   
>>   unsigned long frametable_base_pdx __read_mostly;
>> @@ -796,6 +797,7 @@ void __init setup_xenheap_mappings(unsigned long base_mfn,
>>       if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
>>       {
>>           xenheap_mfn_start = _mfn(base_mfn);
>> +        xenheap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
>>           xenheap_virt_start = DIRECTMAP_VIRT_START +
>>               (base_mfn - mfn) * PAGE_SIZE;
>>       }
> 
> I can see that this would work, but wouldn't it be a better fit to set
> xenheap_base_pdx in xen/arch/arm/setup.c:setup_mm, where we already set:
> 
> 
>      xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
>      xenheap_mfn_start = maddr_to_mfn(ram_start);
>      xenheap_mfn_end = maddr_to_mfn(ram_end);
> 
> Or it too late by then?

Yes, setup_xenheap_mappings() uses __mfn_to_virt(), which will call 
maddr_to_virt(). So we need to set up xenheap_base_pdx earlier.

TBH, these 3 variables should be set within the xenheap code as it makes it clearer how they 
are computed. Actually, xenheap_mfn_start will be overridden; thankfully the new 
and old values are exactly the same...

I have plans to rewrite the xenheap code as there are a few problems with it:
   1) Chicken-and-egg problem with alloc_boot_pages(...). We may need to 
allocate memory while doing the xenheap mapping, but pages are not given to the 
allocator until late. But if you give a page to the allocator and it is not 
yet mapped, then you would receive a data abort.
   2) We are mapping all the RAM, including reserved-memory marked no-map. This 
may result in caching problems later on.
   3) We are using 1GB mappings; however, if the RAM is less than 1GB, we will 
end up covering non-RAM. With bad luck, this may cover device memory, leading to 
interesting results. AFAIK, the RPi4 has this exact use case.

Cheers,
Stefano Stabellini July 24, 2019, 3:22 p.m. UTC | #3
On Wed, 24 Jul 2019, Julien Grall wrote:
> Hi Stefano,
> 
> On 24/07/2019 01:17, Stefano Stabellini wrote:
> > On Thu, 18 Jul 2019, Julien Grall wrote:
> > > With that, the patch 1191156361 "xen/arm: fix mask calculation in
> > > pdx_init_mask" could be re-instated.
> > > ---
> > >   xen/arch/arm/mm.c        | 2 ++
> > >   xen/include/asm-arm/mm.h | 6 ++++--
> > >   2 files changed, 6 insertions(+), 2 deletions(-)
> > > 
> > > diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> > > index 44258ad89c..e1cdeaaf2f 100644
> > > --- a/xen/arch/arm/mm.c
> > > +++ b/xen/arch/arm/mm.c
> > > @@ -165,6 +165,7 @@ mfn_t xenheap_mfn_end __read_mostly;
> > >   vaddr_t xenheap_virt_end __read_mostly;
> > >   #ifdef CONFIG_ARM_64
> > >   vaddr_t xenheap_virt_start __read_mostly;
> > > +unsigned long xenheap_base_pdx __read_mostly;
> > >   #endif
> > >     unsigned long frametable_base_pdx __read_mostly;
> > > @@ -796,6 +797,7 @@ void __init setup_xenheap_mappings(unsigned long
> > > base_mfn,
> > >       if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
> > >       {
> > >           xenheap_mfn_start = _mfn(base_mfn);
> > > +        xenheap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
> > >           xenheap_virt_start = DIRECTMAP_VIRT_START +
> > >               (base_mfn - mfn) * PAGE_SIZE;
> > >       }
> > 
> > I can see that this would work, but wouldn't it be a better fit to set
> > xenheap_base_pdx in xen/arch/arm/setup.c:setup_mm, where we already set:
> > 
> > 
> >      xenheap_virt_end = XENHEAP_VIRT_START + ram_end - ram_start;
> >      xenheap_mfn_start = maddr_to_mfn(ram_start);
> >      xenheap_mfn_end = maddr_to_mfn(ram_end);
> > 
> > Or it too late by then?
> 
> Yes setup_xenheap_mappings() is using __mfn_to_virt() that will call
> maddr_to_virt(). So we need to setup xenheam_base_start earlier.

OK then:

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>


> TBH, this 3 variables should be set within xenheap as it makes clearer how
> they are computed. Actually, xenheam_mfn_start will be overidden, thankfully
> the new and old values are exactly the same...
> 
> I have plan to rewrite the xenheap code as there are few problems with it:
>   1) Chicken and eggs problem with the alloc_boot_pages(...). We may need to
> allocate memory while doing the xenheap mapping but page are not given to the
> allocator until late. But if you give to the allocator the page and it is not
> yet unmap, then you would receive a data abort.
>   2) We are mapping all the RAMs, include reserved-memory marked no-map. This
> may result to caching problem later on.
>   3) We are using 1GB mapping, however if the RAM is less than a 1GB, we will
> end-up to cover non-RAM. With bad luck, this may cover device memory leading
> to interesting result. AFAIK, the RPI4 has this exact use case.

Thanks for the write-up, I wasn't aware of all the issues.
diff mbox series

Patch

diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 44258ad89c..e1cdeaaf2f 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -165,6 +165,7 @@  mfn_t xenheap_mfn_end __read_mostly;
 vaddr_t xenheap_virt_end __read_mostly;
 #ifdef CONFIG_ARM_64
 vaddr_t xenheap_virt_start __read_mostly;
+unsigned long xenheap_base_pdx __read_mostly;
 #endif
 
 unsigned long frametable_base_pdx __read_mostly;
@@ -796,6 +797,7 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
     if ( mfn_eq(xenheap_mfn_start, INVALID_MFN) )
     {
         xenheap_mfn_start = _mfn(base_mfn);
+        xenheap_base_pdx = mfn_to_pdx(_mfn(base_mfn));
         xenheap_virt_start = DIRECTMAP_VIRT_START +
             (base_mfn - mfn) * PAGE_SIZE;
     }
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 3dbc8a6469..d6b5544015 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -135,6 +135,7 @@  extern mfn_t xenheap_mfn_start, xenheap_mfn_end;
 extern vaddr_t xenheap_virt_end;
 #ifdef CONFIG_ARM_64
 extern vaddr_t xenheap_virt_start;
+extern unsigned long xenheap_base_pdx;
 #endif
 
 #ifdef CONFIG_ARM_32
@@ -253,9 +254,10 @@  static inline void *maddr_to_virt(paddr_t ma)
 #else
 static inline void *maddr_to_virt(paddr_t ma)
 {
-    ASSERT(mfn_to_pdx(maddr_to_mfn(ma)) < (DIRECTMAP_SIZE >> PAGE_SHIFT));
+    ASSERT((mfn_to_pdx(maddr_to_mfn(ma)) - xenheap_base_pdx) <
+           (DIRECTMAP_SIZE >> PAGE_SHIFT));
     return (void *)(XENHEAP_VIRT_START -
-                    mfn_to_maddr(xenheap_mfn_start) +
+                    (xenheap_base_pdx << PAGE_SHIFT) +
                     ((ma & ma_va_bottom_mask) |
                      ((ma & ma_top_mask) >> pfn_pdx_hole_shift)));
 }