diff mbox series

[v4,1/6] libnvdimm/namespace: Make namespace size validation arch dependent

Message ID 20200120140749.69549-2-aneesh.kumar@linux.ibm.com (mailing list archive)
State New, archived
Headers show
Series Validating namespace size and start address attributes.

Commit Message

Aneesh Kumar K.V Jan. 20, 2020, 2:07 p.m. UTC
The page size used to map the namespace is arch dependent. For example,
architectures like ppc64 use a 16MB page size for the direct mapping. If the
namespace size is not aligned to the mapping page size, users can observe a
kernel crash during namespace init and destroy.

This is due to the kernel doing a partial map/unmap of the resource range:

BUG: Unable to handle kernel data access at 0xc001000406000000
Faulting instruction address: 0xc000000000090790
NIP [c000000000090790] arch_add_memory+0xc0/0x130
LR [c000000000090744] arch_add_memory+0x74/0x130
Call Trace:
 arch_add_memory+0x74/0x130 (unreliable)
 memremap_pages+0x74c/0xa30
 devm_memremap_pages+0x3c/0xa0
 pmem_attach_disk+0x188/0x770
 nvdimm_bus_probe+0xd8/0x470
 really_probe+0x148/0x570
 driver_probe_device+0x19c/0x1d0
 device_driver_attach+0xcc/0x100
 bind_store+0x134/0x1c0
 drv_attr_store+0x44/0x60
 sysfs_kf_write+0x74/0xc0
 kernfs_fop_write+0x1b4/0x290
 __vfs_write+0x3c/0x70
 vfs_write+0xd0/0x260
 ksys_write+0xdc/0x130
 system_call+0x5c/0x68

The kernel should also ensure that the namespace size is a multiple of the subsection size.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 arch/arm64/mm/flush.c     | 6 ++++++
 arch/powerpc/lib/pmem.c   | 9 +++++++++
 arch/x86/mm/pageattr.c    | 7 +++++++
 include/linux/libnvdimm.h | 1 +
 4 files changed, 23 insertions(+)

Comments

Dan Williams Jan. 24, 2020, 5:57 a.m. UTC | #1
On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> The page size used to map the namespace is arch dependent. For example,
> architectures like ppc64 use a 16MB page size for the direct mapping. If the
> namespace size is not aligned to the mapping page size, users can observe a
> kernel crash during namespace init and destroy.
>
> This is due to the kernel doing a partial map/unmap of the resource range:
>
> BUG: Unable to handle kernel data access at 0xc001000406000000
> Faulting instruction address: 0xc000000000090790
> NIP [c000000000090790] arch_add_memory+0xc0/0x130
> LR [c000000000090744] arch_add_memory+0x74/0x130
> Call Trace:
>  arch_add_memory+0x74/0x130 (unreliable)
>  memremap_pages+0x74c/0xa30
>  devm_memremap_pages+0x3c/0xa0
>  pmem_attach_disk+0x188/0x770
>  nvdimm_bus_probe+0xd8/0x470
>  really_probe+0x148/0x570
>  driver_probe_device+0x19c/0x1d0
>  device_driver_attach+0xcc/0x100
>  bind_store+0x134/0x1c0
>  drv_attr_store+0x44/0x60
>  sysfs_kf_write+0x74/0xc0
>  kernfs_fop_write+0x1b4/0x290
>  __vfs_write+0x3c/0x70
>  vfs_write+0xd0/0x260
>  ksys_write+0xdc/0x130
>  system_call+0x5c/0x68
>
> The kernel should also ensure that the namespace size is a multiple of the subsection size.
>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>  arch/arm64/mm/flush.c     | 6 ++++++
>  arch/powerpc/lib/pmem.c   | 9 +++++++++
>  arch/x86/mm/pageattr.c    | 7 +++++++
>  include/linux/libnvdimm.h | 1 +
>  4 files changed, 23 insertions(+)
>
> diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
> index ac485163a4a7..95cb5538bc6e 100644
> --- a/arch/arm64/mm/flush.c
> +++ b/arch/arm64/mm/flush.c
> @@ -91,4 +91,10 @@ void arch_invalidate_pmem(void *addr, size_t size)
>         __inval_dcache_area(addr, size);
>  }
>  EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
> +
> +unsigned long arch_namespace_map_size(void)
> +{
> +       return PAGE_SIZE;
> +}
> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
>  #endif
> diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
> index 0666a8d29596..63dca24e4a18 100644
> --- a/arch/powerpc/lib/pmem.c
> +++ b/arch/powerpc/lib/pmem.c
> @@ -26,6 +26,15 @@ void arch_invalidate_pmem(void *addr, size_t size)
>  }
>  EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
>
> +unsigned long arch_namespace_map_size(void)
> +{
> +       if (radix_enabled())
> +               return PAGE_SIZE;
> +       return (1UL << mmu_psize_defs[mmu_linear_psize].shift);
> +
> +}
> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
> +
>  /*
>   * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
>   */
> diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
> index 1b99ad05b117..d78b5082f376 100644
> --- a/arch/x86/mm/pageattr.c
> +++ b/arch/x86/mm/pageattr.c
> @@ -310,6 +310,13 @@ void arch_invalidate_pmem(void *addr, size_t size)
>  }
>  EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
>
> +unsigned long arch_namespace_map_size(void)
> +{
> +       return PAGE_SIZE;
> +}
> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
> +
> +
>  static void __cpa_flush_all(void *arg)
>  {
>         unsigned long cache = (unsigned long)arg;
> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
> index 9df091bd30ba..a3476dbd2656 100644
> --- a/include/linux/libnvdimm.h
> +++ b/include/linux/libnvdimm.h
> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
>  }
>  #endif
>
> +unsigned long arch_namespace_map_size(void);

This property is more generic than the nvdimm namespace mapping size,
it's more the fundamental remap granularity that the architecture
supports. So I would expect this to be defined in core header files.
Something like:

diff --git a/include/linux/io.h b/include/linux/io.h
index a59834bc0a11..58b3b2091dbb 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -155,6 +155,13 @@ enum {
 void *memremap(resource_size_t offset, size_t size, unsigned long flags);
 void memunmap(void *addr);

+#ifndef memremap_min_align
+static inline unsigned int memremap_min_align(void)
+{
+       return PAGE_SIZE;
+}
+#endif
+
 /*
  * On x86 PAT systems we have memory tracking that keeps track of
  * the allowed mappings on memory ranges. This tracking works for

...and then have a definition in asm/io.h like this:

unsigned int memremap_min_align(void);
#define memremap_min_align memremap_min_align

That way only architectures that want to opt out of the default need
to define something in their local header.
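
For the opt-out side, a minimal sketch of what a Power override might look
like, assuming the hash linear-map page size is the extra constraint (this
mirrors arch_namespace_map_size() from the patch and was not posted in this
thread):

/* arch/powerpc/include/asm/io.h (sketch) */
unsigned int memremap_min_align(void);
#define memremap_min_align memremap_min_align

/* arch/powerpc/lib/pmem.c (sketch) */
unsigned int memremap_min_align(void)
{
	/* radix can map the range with PAGE_SIZE granularity */
	if (radix_enabled())
		return PAGE_SIZE;
	/* hash uses a single linear-map page size, typically 16MB */
	return 1U << mmu_psize_defs[mmu_linear_psize].shift;
}
EXPORT_SYMBOL_GPL(memremap_min_align);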
Aneesh Kumar K.V Jan. 24, 2020, 7:34 a.m. UTC | #2
On 1/24/20 11:27 AM, Dan Williams wrote:
> On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
>

....

>>
>> +unsigned long arch_namespace_map_size(void)
>> +{
>> +       return PAGE_SIZE;
>> +}
>> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
>> +
>> +
>>   static void __cpa_flush_all(void *arg)
>>   {
>>          unsigned long cache = (unsigned long)arg;
>> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
>> index 9df091bd30ba..a3476dbd2656 100644
>> --- a/include/linux/libnvdimm.h
>> +++ b/include/linux/libnvdimm.h
>> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
>>   }
>>   #endif
>>
>> +unsigned long arch_namespace_map_size(void);
> 
> This property is more generic than the nvdimm namespace mapping size,
> it's more the fundamental remap granularity that the architecture
> supports. So I would expect this to be defined in core header files.
> Something like:
> 
> diff --git a/include/linux/io.h b/include/linux/io.h
> index a59834bc0a11..58b3b2091dbb 100644
> --- a/include/linux/io.h
> +++ b/include/linux/io.h
> @@ -155,6 +155,13 @@ enum {
>   void *memremap(resource_size_t offset, size_t size, unsigned long flags);
>   void memunmap(void *addr);
> 
> +#ifndef memremap_min_align
> +static inline unsigned int memremap_min_align(void)
> +{
> +       return PAGE_SIZE;
> +}
> +#endif
> +


Should that be memremap_pages_min_align()?

>   /*
>    * On x86 PAT systems we have memory tracking that keeps track of
>    * the allowed mappings on memory ranges. This tracking works for
> 
> ...and then have a definition in asm/io.h like this:
> 
> unsigned int memremap_min_align(void);
> #define memremap_min_align memremap_min_align
> 
> That way only architectures that want to opt out of the default need
> to define something in their local header.
> 

-aneesh
Dan Williams Jan. 24, 2020, 4:45 p.m. UTC | #3
On Thu, Jan 23, 2020 at 11:34 PM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> On 1/24/20 11:27 AM, Dan Williams wrote:
> > On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
> >
>
> ....
>
> >>
> >> +unsigned long arch_namespace_map_size(void)
> >> +{
> >> +       return PAGE_SIZE;
> >> +}
> >> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
> >> +
> >> +
> >>   static void __cpa_flush_all(void *arg)
> >>   {
> >>          unsigned long cache = (unsigned long)arg;
> >> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
> >> index 9df091bd30ba..a3476dbd2656 100644
> >> --- a/include/linux/libnvdimm.h
> >> +++ b/include/linux/libnvdimm.h
> >> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
> >>   }
> >>   #endif
> >>
> >> +unsigned long arch_namespace_map_size(void);
> >
> > This property is more generic than the nvdimm namespace mapping size,
> > it's more the fundamental remap granularity that the architecture
> > supports. So I would expect this to be defined in core header files.
> > Something like:
> >
> > diff --git a/include/linux/io.h b/include/linux/io.h
> > index a59834bc0a11..58b3b2091dbb 100644
> > --- a/include/linux/io.h
> > +++ b/include/linux/io.h
> > @@ -155,6 +155,13 @@ enum {
> >   void *memremap(resource_size_t offset, size_t size, unsigned long flags);
> >   void memunmap(void *addr);
> >
> > +#ifndef memremap_min_align
> > +static inline unsigned int memremap_min_align(void)
> > +{
> > +       return PAGE_SIZE;
> > +}
> > +#endif
> > +
>
>
> Should that be memremap_pages_min_align()?

No, and on second look it needs to be a common value that results in
properly aligned / sized namespaces across architectures.

What would it take for Power to make its minimum mapping granularity
SUBSECTION_SIZE? The minute that the minimum alignment changes across
architectures we lose compatibility.

The namespaces need to be sized such that the mode can be changed freely.
Aneesh Kumar K.V Jan. 24, 2020, 5:07 p.m. UTC | #4
On 1/24/20 10:15 PM, Dan Williams wrote:
> On Thu, Jan 23, 2020 at 11:34 PM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
>>
>> On 1/24/20 11:27 AM, Dan Williams wrote:
>>> On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
>>>
>>
>> ....
>>
>>>>
>>>> +unsigned long arch_namespace_map_size(void)
>>>> +{
>>>> +       return PAGE_SIZE;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
>>>> +
>>>> +
>>>>    static void __cpa_flush_all(void *arg)
>>>>    {
>>>>           unsigned long cache = (unsigned long)arg;
>>>> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
>>>> index 9df091bd30ba..a3476dbd2656 100644
>>>> --- a/include/linux/libnvdimm.h
>>>> +++ b/include/linux/libnvdimm.h
>>>> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
>>>>    }
>>>>    #endif
>>>>
>>>> +unsigned long arch_namespace_map_size(void);
>>>
>>> This property is more generic than the nvdimm namespace mapping size,
>>> it's more the fundamental remap granularity that the architecture
>>> supports. So I would expect this to be defined in core header files.
>>> Something like:
>>>
>>> diff --git a/include/linux/io.h b/include/linux/io.h
>>> index a59834bc0a11..58b3b2091dbb 100644
>>> --- a/include/linux/io.h
>>> +++ b/include/linux/io.h
>>> @@ -155,6 +155,13 @@ enum {
>>>    void *memremap(resource_size_t offset, size_t size, unsigned long flags);
>>>    void memunmap(void *addr);
>>>
>>> +#ifndef memremap_min_align
>>> +static inline unsigned int memremap_min_align(void)
>>> +{
>>> +       return PAGE_SIZE;
>>> +}
>>> +#endif
>>> +
>>
>>
>> Should that be memremap_pages_min_align()?
> 
> No, and on second look it needs to be a common value that results in
> properly aligned / sized namespaces across architectures.
> 
> > What would it take for Power to make its minimum mapping granularity
> SUBSECTION_SIZE? The minute that the minimum alignment changes across
> architectures we lose compatibility.
> 
> The namespaces need to be sized such that the mode can be changed freely.
> 

Linux on ppc64 with hash translation uses just one page size for direct 
mapping and that is 16MB.

-aneesh
Aneesh Kumar K.V Jan. 24, 2020, 5:08 p.m. UTC | #5
On 1/24/20 10:15 PM, Dan Williams wrote:
> On Thu, Jan 23, 2020 at 11:34 PM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
>>
>> On 1/24/20 11:27 AM, Dan Williams wrote:
>>> On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
>>>
>>
>> ....
>>
>>>>
>>>> +unsigned long arch_namespace_map_size(void)
>>>> +{
>>>> +       return PAGE_SIZE;
>>>> +}
>>>> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
>>>> +
>>>> +
>>>>    static void __cpa_flush_all(void *arg)
>>>>    {
>>>>           unsigned long cache = (unsigned long)arg;
>>>> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
>>>> index 9df091bd30ba..a3476dbd2656 100644
>>>> --- a/include/linux/libnvdimm.h
>>>> +++ b/include/linux/libnvdimm.h
>>>> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
>>>>    }
>>>>    #endif
>>>>
>>>> +unsigned long arch_namespace_map_size(void);
>>>
>>> This property is more generic than the nvdimm namespace mapping size,
>>> it's more the fundamental remap granularity that the architecture
>>> supports. So I would expect this to be defined in core header files.
>>> Something like:
>>>
>>> diff --git a/include/linux/io.h b/include/linux/io.h
>>> index a59834bc0a11..58b3b2091dbb 100644
>>> --- a/include/linux/io.h
>>> +++ b/include/linux/io.h
>>> @@ -155,6 +155,13 @@ enum {
>>>    void *memremap(resource_size_t offset, size_t size, unsigned long flags);
>>>    void memunmap(void *addr);
>>>
>>> +#ifndef memremap_min_align
>>> +static inline unsigned int memremap_min_align(void)
>>> +{
>>> +       return PAGE_SIZE;
>>> +}
>>> +#endif
>>> +
>>
>>
>> Should that be memremap_pages_min_align()?
> 
> No, and on second look it needs to be a common value that results in
> properly aligned / sized namespaces across architectures.
> 
> > What would it take for Power to make its minimum mapping granularity
> SUBSECTION_SIZE? The minute that the minimum alignment changes across
> architectures we lose compatibility.
> 
> The namespaces need to be sized such that the mode can be changed freely.
> 

We should have a discussion at LSF/MM about the architecture compatibility 
challenges with NVDIMM. I did post a request to attend covering that, but 
never got a response on that submission.



-aneesh
Dan Williams Jan. 24, 2020, 6:25 p.m. UTC | #6
On Fri, Jan 24, 2020 at 9:07 AM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> On 1/24/20 10:15 PM, Dan Williams wrote:
> > On Thu, Jan 23, 2020 at 11:34 PM Aneesh Kumar K.V
> > <aneesh.kumar@linux.ibm.com> wrote:
> >>
> >> On 1/24/20 11:27 AM, Dan Williams wrote:
> >>> On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
> >>>
> >>
> >> ....
> >>
> >>>>
> >>>> +unsigned long arch_namespace_map_size(void)
> >>>> +{
> >>>> +       return PAGE_SIZE;
> >>>> +}
> >>>> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
> >>>> +
> >>>> +
> >>>>    static void __cpa_flush_all(void *arg)
> >>>>    {
> >>>>           unsigned long cache = (unsigned long)arg;
> >>>> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
> >>>> index 9df091bd30ba..a3476dbd2656 100644
> >>>> --- a/include/linux/libnvdimm.h
> >>>> +++ b/include/linux/libnvdimm.h
> >>>> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
> >>>>    }
> >>>>    #endif
> >>>>
> >>>> +unsigned long arch_namespace_map_size(void);
> >>>
> >>> This property is more generic than the nvdimm namespace mapping size,
> >>> it's more the fundamental remap granularity that the architecture
> >>> supports. So I would expect this to be defined in core header files.
> >>> Something like:
> >>>
> >>> diff --git a/include/linux/io.h b/include/linux/io.h
> >>> index a59834bc0a11..58b3b2091dbb 100644
> >>> --- a/include/linux/io.h
> >>> +++ b/include/linux/io.h
> >>> @@ -155,6 +155,13 @@ enum {
> >>>    void *memremap(resource_size_t offset, size_t size, unsigned long flags);
> >>>    void memunmap(void *addr);
> >>>
> >>> +#ifndef memremap_min_align
> >>> +static inline unsigned int memremap_min_align(void)
> >>> +{
> >>> +       return PAGE_SIZE;
> >>> +}
> >>> +#endif
> >>> +
> >>
> >>
> >> Should that be memremap_pages_min_align()?
> >
> > No, and on second look it needs to be a common value that results in
> > properly aligned / sized namespaces across architectures.
> >
> > What would it take for Power to make its minimum mapping granularity
> > SUBSECTION_SIZE? The minute that the minimum alignment changes across
> > architectures we lose compatibility.
> >
> > The namespaces need to be sized such that the mode can be changed freely.
> >
>
> Linux on ppc64 with hash translation uses just one page size for direct
> mapping and that is 16MB.

Ok, I think this means that the dream of SUBSECTION_SIZE being the
minimum compat alignment is dead, or at least a dream deferred.

Let's do this, change the name of this function to:

    memremap_compat_align()

...and define it to be the max() of all the alignment constraints that
the arch may require through either memremap(), or memremap_pages().
Then, teach ndctl to make its default alignment compatible by default,
16MiB, with an override to allow namespace creation with the current
architecture's memremap_compat_align(), exported via sysfs, if it
happens to be less than 16MiB. Finally, cross our fingers and hope
that Power remains the only arch that violates the SUBSECTION_SIZE
minimum value for memremap_compat_align().
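
A rough sketch of how that could be wired up, leaving the sysfs export aside
(the Kconfig symbol and header placement here are assumptions, not something
settled in this thread):

/* include/linux/memremap.h (sketch) */
#ifdef CONFIG_ARCH_HAS_MEMREMAP_COMPAT_ALIGN
unsigned long memremap_compat_align(void);
#else
static inline unsigned long memremap_compat_align(void)
{
	/* default: the subsection granularity used by memremap_pages() */
	return SUBSECTION_SIZE;
}
#endif

/* arch/powerpc (sketch, selects ARCH_HAS_MEMREMAP_COMPAT_ALIGN):
 * the hash MMU direct-maps with 16MB pages */
unsigned long memremap_compat_align(void)
{
	return max_t(unsigned long, SUBSECTION_SIZE,
		     1UL << mmu_psize_defs[mmu_linear_psize].shift);
}

ndctl would then default its namespace alignment to 16MiB, and only drop down
to the architecture's exported memremap_compat_align() when the user
explicitly asks for it.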
Aneesh Kumar K.V Jan. 26, 2020, 11:41 a.m. UTC | #7
Dan Williams <dan.j.williams@intel.com> writes:

> On Fri, Jan 24, 2020 at 9:07 AM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
>>
>> On 1/24/20 10:15 PM, Dan Williams wrote:
>> > On Thu, Jan 23, 2020 at 11:34 PM Aneesh Kumar K.V
>> > <aneesh.kumar@linux.ibm.com> wrote:
>> >>
>> >> On 1/24/20 11:27 AM, Dan Williams wrote:
>> >>> On Mon, Jan 20, 2020 at 6:08 AM Aneesh Kumar K.V
>> >>>
>> >>
>> >> ....
>> >>
>> >>>>
>> >>>> +unsigned long arch_namespace_map_size(void)
>> >>>> +{
>> >>>> +       return PAGE_SIZE;
>> >>>> +}
>> >>>> +EXPORT_SYMBOL_GPL(arch_namespace_map_size);
>> >>>> +
>> >>>> +
>> >>>>    static void __cpa_flush_all(void *arg)
>> >>>>    {
>> >>>>           unsigned long cache = (unsigned long)arg;
>> >>>> diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
>> >>>> index 9df091bd30ba..a3476dbd2656 100644
>> >>>> --- a/include/linux/libnvdimm.h
>> >>>> +++ b/include/linux/libnvdimm.h
>> >>>> @@ -284,4 +284,5 @@ static inline void arch_invalidate_pmem(void *addr, size_t size)
>> >>>>    }
>> >>>>    #endif
>> >>>>
>> >>>> +unsigned long arch_namespace_map_size(void);
>> >>>
>> >>> This property is more generic than the nvdimm namespace mapping size,
>> >>> it's more the fundamental remap granularity that the architecture
>> >>> supports. So I would expect this to be defined in core header files.
>> >>> Something like:
>> >>>
>> >>> diff --git a/include/linux/io.h b/include/linux/io.h
>> >>> index a59834bc0a11..58b3b2091dbb 100644
>> >>> --- a/include/linux/io.h
>> >>> +++ b/include/linux/io.h
>> >>> @@ -155,6 +155,13 @@ enum {
>> >>>    void *memremap(resource_size_t offset, size_t size, unsigned long flags);
>> >>>    void memunmap(void *addr);
>> >>>
>> >>> +#ifndef memremap_min_align
>> >>> +static inline unsigned int memremap_min_align(void)
>> >>> +{
>> >>> +       return PAGE_SIZE;
>> >>> +}
>> >>> +#endif
>> >>> +
>> >>
>> >>
>> >> Should that be memremap_pages_min_align()?
>> >
>> > No, and on second look it needs to be a common value that results in
>> > properly aligned / sized namespaces across architectures.
>> >
>> > What would it take for Power to make its minimum mapping granularity
>> > SUBSECTION_SIZE? The minute that the minimum alignment changes across
>> > architectures we lose compatibility.
>> >
>> > The namespaces need to be sized such that the mode can be changed freely.
>> >
>>
>> Linux on ppc64 with hash translation uses just one page size for direct
>> mapping and that is 16MB.
>
> Ok, I think this means that the dream of SUBSECTION_SIZE being the
> minimum compat alignment is dead, or at least a dream deferred.
>
> Let's do this, change the name of this function to:
>
>     memremap_compat_align()
>
> ...and define it to be the max() of all the alignment constraints that
> the arch may require through either memremap(), or memremap_pages().
> Then, teach ndctl to make its default alignment compatible by default,
> 16MiB, with an override to allow namespace creation with the current
> architecture's memremap_compat_align(), exported via sysfs, if it
> happens to be less than 16MiB. Finally, cross our fingers and hope
> that Power remains the only arch that violates the SUBSECTION_SIZE
> minimum value for memremap_compat_align().

We do have two issues related to alignment here.

1) With the upstream kernel, we don't validate the namespace start and size
values and hence we can end up creating namespaces that are not aligned to
SUBSECTION_SIZE. This was observed by Jeff Moyer in his test. That means
we will fail to enable already-created namespaces if we use
SUBSECTION_SIZE to validate their alignment.

The solution I came up with was arch_namespace_map_size(), which depends on the
direct-map page size. On architectures like ppc64, this value can
be 16MB.

2) For new namespaces, we can now ensure they are properly
aligned. For architectures other than ppc64 that value is SUBSECTION_SIZE;
i.e., the resource start address and the size value should be aligned to
SUBSECTION_SIZE. For ppc64 this value should be 16MB, because if they are
not 16MB aligned we cannot direct-map them.

I guess this can be memremap_compat_align() and we can expose this value via a
namespace attribute. By default, all architectures will now try to
align things to 16MB unless --nocompat is specified as an ndctl
create-namespace command-line option. When that is used, we use the
architecture-specific sysfs value (SUBSECTION_SIZE) to align things
correctly.
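
For illustration, the validation for new namespaces could then boil down to a
single alignment check against that value; a minimal sketch (the helper name
and placement here are made up, the real check is part of this series):

/* drivers/nvdimm/namespace_devs.c (sketch) */
static bool nd_namespace_aligned(resource_size_t start, resource_size_t size)
{
	unsigned long align = memremap_compat_align();

	/* both the start address and the size must be mapping-granule aligned */
	return IS_ALIGNED(start, align) && IS_ALIGNED(size, align);
}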


-aneesh
diff mbox series

Patch

diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index ac485163a4a7..95cb5538bc6e 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -91,4 +91,10 @@  void arch_invalidate_pmem(void *addr, size_t size)
 	__inval_dcache_area(addr, size);
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+
+unsigned long arch_namespace_map_size(void)
+{
+	return PAGE_SIZE;
+}
+EXPORT_SYMBOL_GPL(arch_namespace_map_size);
 #endif
diff --git a/arch/powerpc/lib/pmem.c b/arch/powerpc/lib/pmem.c
index 0666a8d29596..63dca24e4a18 100644
--- a/arch/powerpc/lib/pmem.c
+++ b/arch/powerpc/lib/pmem.c
@@ -26,6 +26,15 @@  void arch_invalidate_pmem(void *addr, size_t size)
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 
+unsigned long arch_namespace_map_size(void)
+{
+	if (radix_enabled())
+		return PAGE_SIZE;
+	return (1UL << mmu_psize_defs[mmu_linear_psize].shift);
+
+}
+EXPORT_SYMBOL_GPL(arch_namespace_map_size);
+
 /*
  * CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE symbols
  */
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1b99ad05b117..d78b5082f376 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -310,6 +310,13 @@  void arch_invalidate_pmem(void *addr, size_t size)
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 
+unsigned long arch_namespace_map_size(void)
+{
+	return PAGE_SIZE;
+}
+EXPORT_SYMBOL_GPL(arch_namespace_map_size);
+
+
 static void __cpa_flush_all(void *arg)
 {
 	unsigned long cache = (unsigned long)arg;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 9df091bd30ba..a3476dbd2656 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -284,4 +284,5 @@  static inline void arch_invalidate_pmem(void *addr, size_t size)
 }
 #endif
 
+unsigned long arch_namespace_map_size(void);
 #endif /* __LIBNVDIMM_H__ */