
[v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping

Message ID 20191017073308.32645-1-aneesh.kumar@linux.ibm.com (mailing list archive)
State New, archived
Series [v3] libnvdimm/nsio: differentiate between probe mapping and runtime mapping

Commit Message

Aneesh Kumar K.V Oct. 17, 2019, 7:33 a.m. UTC
nvdimm core currently maps the full namespace to an ioremap range
while probing the namespace mode. This can result in probe failures
on architectures that have limited ioremap space.

For example, with a large btt namespace that consumes most of I/O remap
range, depending on the sequence of namespace initialization, the user can find
a pfn namespace initialization failure due to unavailable I/O remap space
which nvdimm core uses for temporary mapping.

nvdimm core can avoid this failure by only mapping the reserver block area to
check for pfn superblock type and map the full namespace resource only before
using the namespace.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
Changes from v2:
* update changelog

Changes from V1:
* update changelog
* update patch based on review feedback.

 drivers/dax/pmem/core.c   |  2 +-
 drivers/nvdimm/claim.c    |  7 +++----
 drivers/nvdimm/nd.h       |  4 ++--
 drivers/nvdimm/pfn.h      |  6 ++++++
 drivers/nvdimm/pfn_devs.c |  5 -----
 drivers/nvdimm/pmem.c     | 15 ++++++++++++---
 6 files changed, 24 insertions(+), 15 deletions(-)
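
In short, the fix makes the mapping size an explicit argument to devm_nsio_enable(): probe-time callers pass only the small info-block reserve, and the full namespace resource is mapped only on the path that actually uses the namespace. A minimal sketch of the two kinds of call sites, assuming the helpers introduced in the patch below (example_probe() and example_attach() are illustrative names, not part of the patch):

/* probe time: map just enough to read and validate an info block */
static int example_probe(struct device *dev, struct nd_namespace_io *nsio)
{
	return devm_nsio_enable(dev, nsio, info_block_reserve());
}

/* runtime (e.g. attaching a btt): map the whole namespace resource */
static int example_attach(struct device *dev, struct nd_namespace_io *nsio)
{
	return devm_nsio_enable(dev, nsio, resource_size(&nsio->res));
}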

Comments

Dan Williams Oct. 24, 2019, 2:06 a.m. UTC | #1
On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> nvdimm core currently maps the full namespace to an ioremap range
> while probing the namespace mode. This can result in probe failures
> on architectures that have limited ioremap space.
>
> For example, with a large btt namespace that consumes most of I/O remap
> range, depending on the sequence of namespace initialization, the user can find
> a pfn namespace initialization failure due to unavailable I/O remap space
> which nvdimm core uses for temporary mapping.
>
> nvdimm core can avoid this failure by only mapping the reserver block area to

s/reserver/reserved/

> check for pfn superblock type and map the full namespace resource only before
> using the namespace.

Are you going to follow up with the BTT patch that uses this new facility?

>
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
> Changes from v2:
> * update changelog
>
> Changes from V1:
> * update changelog
> * update patch based on review feedback.
>
>  drivers/dax/pmem/core.c   |  2 +-
>  drivers/nvdimm/claim.c    |  7 +++----
>  drivers/nvdimm/nd.h       |  4 ++--
>  drivers/nvdimm/pfn.h      |  6 ++++++
>  drivers/nvdimm/pfn_devs.c |  5 -----
>  drivers/nvdimm/pmem.c     | 15 ++++++++++++---
>  6 files changed, 24 insertions(+), 15 deletions(-)
>
> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
> index 6eb6dfdf19bf..f174dbfbe1c4 100644
> --- a/drivers/dax/pmem/core.c
> +++ b/drivers/dax/pmem/core.c
> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
>         nsio = to_nd_namespace_io(&ndns->dev);
>
>         /* parse the 'pfn' info block via ->rw_bytes */
> -       rc = devm_nsio_enable(dev, nsio);
> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
>         if (rc)
>                 return ERR_PTR(rc);
>         rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
> index 2985ca949912..d89d2c039e25 100644
> --- a/drivers/nvdimm/claim.c
> +++ b/drivers/nvdimm/claim.c
> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
>         return rc;
>  }
>
> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
>  {
>         struct resource *res = &nsio->res;
>         struct nd_namespace_common *ndns = &nsio->common;
>
> -       nsio->size = resource_size(res);
> +       nsio->size = size;

This needs a:

if (nsio->size)
   devm_memunmap(dev, nsio->addr);


>         if (!devm_request_mem_region(dev, res->start, resource_size(res),
>                                 dev_name(&ndns->dev))) {

Also don't repeat the devm_request_mem_region() if one was already done.

Another thing to check is whether nsio->size gets cleared when the
namespace is disabled; if not, that will need to be added in the
shutdown path.
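
One possible shape of those guards, sketched against the devm_nsio_enable() in this patch (illustrative only; as the discussion below concludes, the helper ends up being called just once per probe, so they may not be needed, and the rw_bytes/badblocks setup from the original function is omitted here):

int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio,
		unsigned long size)
{
	struct resource *res = &nsio->res;
	struct nd_namespace_common *ndns = &nsio->common;

	if (nsio->size) {
		/* already enabled with a smaller size: drop the old mapping */
		devm_memunmap(dev, nsio->addr);
	} else if (!devm_request_mem_region(dev, res->start,
				resource_size(res), dev_name(&ndns->dev))) {
		/* only request the region on the first enable */
		dev_warn(dev, "could not reserve region %pR\n", res);
		return -EBUSY;
	}

	nsio->size = size;
	nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
	return PTR_ERR_OR_ZERO(nsio->addr);
}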


>                 dev_warn(dev, "could not reserve region %pR\n", res);
> @@ -318,8 +318,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>         nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
>                         &nsio->res);
>
> -       nsio->addr = devm_memremap(dev, res->start, resource_size(res),
> -                       ARCH_MEMREMAP_PMEM);
> +       nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
>
>         return PTR_ERR_OR_ZERO(nsio->addr);
>  }
> diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
> index ee5c04070ef9..93d3c760c0f3 100644
> --- a/drivers/nvdimm/nd.h
> +++ b/drivers/nvdimm/nd.h
> @@ -376,7 +376,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
>  #define MAX_STRUCT_PAGE_SIZE 64
>
>  int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size);
>  void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
>  #else
>  static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
> @@ -385,7 +385,7 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
>         return -ENXIO;
>  }
>  static inline int devm_nsio_enable(struct device *dev,
> -               struct nd_namespace_io *nsio)
> +               struct nd_namespace_io *nsio, unsigned long size)
>  {
>         return -ENXIO;
>  }
> diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
> index acb19517f678..f4856c87d01c 100644
> --- a/drivers/nvdimm/pfn.h
> +++ b/drivers/nvdimm/pfn.h
> @@ -36,4 +36,10 @@ struct nd_pfn_sb {
>         __le64 checksum;
>  };
>
> +static inline u32 info_block_reserve(void)
> +{
> +       return ALIGN(SZ_8K, PAGE_SIZE);
> +}
> +
> +
>  #endif /* __NVDIMM_PFN_H */
> diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
> index 60d81fae06ee..e49aa9a0fd04 100644
> --- a/drivers/nvdimm/pfn_devs.c
> +++ b/drivers/nvdimm/pfn_devs.c
> @@ -635,11 +635,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
>  }
>  EXPORT_SYMBOL(nd_pfn_probe);
>
> -static u32 info_block_reserve(void)
> -{
> -       return ALIGN(SZ_8K, PAGE_SIZE);
> -}
> -
>  /*
>   * We hotplug memory at sub-section granularity, pad the reserved area
>   * from the previous section base to the namespace base address.
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index f9f76f6ba07b..3c188ffeff11 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -491,17 +491,26 @@ static int pmem_attach_disk(struct device *dev,
>  static int nd_pmem_probe(struct device *dev)
>  {
>         int ret;
> +       struct nd_namespace_io *nsio;
>         struct nd_namespace_common *ndns;
>
>         ndns = nvdimm_namespace_common_probe(dev);
>         if (IS_ERR(ndns))
>                 return PTR_ERR(ndns);
>
> -       if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
> -               return -ENXIO;
> +       nsio = to_nd_namespace_io(&ndns->dev);
>
> -       if (is_nd_btt(dev))
> +       if (is_nd_btt(dev)) {
> +               /*
> +                * Map with resource size
> +                */
> +               if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
> +                       return -ENXIO;
>                 return nvdimm_namespace_attach_btt(ndns);
> +       }
> +
> +       if (devm_nsio_enable(dev, nsio, info_block_reserve()))
> +               return -ENXIO;
>
>         if (is_nd_pfn(dev))
>                 return pmem_attach_disk(dev, ndns);
> --
> 2.21.0
>
Aneesh Kumar K.V Oct. 24, 2019, 9:07 a.m. UTC | #2
On 10/24/19 7:36 AM, Dan Williams wrote:
> On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
>>
>> nvdimm core currently maps the full namespace to an ioremap range
>> while probing the namespace mode. This can result in probe failures
>> on architectures that have limited ioremap space.
>>
>> For example, with a large btt namespace that consumes most of I/O remap
>> range, depending on the sequence of namespace initialization, the user can find
>> a pfn namespace initialization failure due to unavailable I/O remap space
>> which nvdimm core uses for temporary mapping.
>>
>> nvdimm core can avoid this failure by only mapping the reserver block area to
> 
> s/reserver/reserved/

Will fix


> 
>> check for pfn superblock type and map the full namespace resource only before
>> using the namespace.
> 
> Are you going to follow up with the BTT patch that uses this new facility?
> 

I am not yet sure about that. The ioremap/vmap area is how we get a 
kernel mapping without struct page backing. What we are suggesting here 
is the ability to have a direct-mapped mapping without struct page. I 
need to look more closely at what that implies.

>>
>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>> ---
>> Changes from v2:
>> * update changelog
>>
>> Changes from V1:
>> * update changelog
>> * update patch based on review feedback.
>>
>>   drivers/dax/pmem/core.c   |  2 +-
>>   drivers/nvdimm/claim.c    |  7 +++----
>>   drivers/nvdimm/nd.h       |  4 ++--
>>   drivers/nvdimm/pfn.h      |  6 ++++++
>>   drivers/nvdimm/pfn_devs.c |  5 -----
>>   drivers/nvdimm/pmem.c     | 15 ++++++++++++---
>>   6 files changed, 24 insertions(+), 15 deletions(-)
>>
>> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
>> index 6eb6dfdf19bf..f174dbfbe1c4 100644
>> --- a/drivers/dax/pmem/core.c
>> +++ b/drivers/dax/pmem/core.c
>> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
>>          nsio = to_nd_namespace_io(&ndns->dev);
>>
>>          /* parse the 'pfn' info block via ->rw_bytes */
>> -       rc = devm_nsio_enable(dev, nsio);
>> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
>>          if (rc)
>>                  return ERR_PTR(rc);
>>          rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
>> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
>> index 2985ca949912..d89d2c039e25 100644
>> --- a/drivers/nvdimm/claim.c
>> +++ b/drivers/nvdimm/claim.c
>> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
>>          return rc;
>>   }
>>
>> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
>>   {
>>          struct resource *res = &nsio->res;
>>          struct nd_namespace_common *ndns = &nsio->common;
>>
>> -       nsio->size = resource_size(res);
>> +       nsio->size = size;
> 
> This needs a:
> 
> if (nsio->size)
>     devm_memunmap(dev, nsio->addr);


Why? We should not be calling devm_nsio_enable() twice now.

> 
> 
>>          if (!devm_request_mem_region(dev, res->start, resource_size(res),
>>                                  dev_name(&ndns->dev))) {
> 
> Also don't repeat the devm_request_mem_region() if one was already done.
> 
> Another thing to check is if nsio->size gets cleared when the
> namespace is disabled, if not that well need to be added in the
> shutdown path.
> 


Not sure I understand that. With this patch, the probe always uses the 
info_block_reserve() size irrespective of the device. That probe results 
in us adding a btt/pfn/dax device, and we return -ENXIO afterwards. The 
-ENXIO return causes the I/O remap mapping we created with the 
info_block_reserve() size to be destroyed, and the nsio data structure 
is freed as well.

nd_pmem_probe() is then called again with the btt device type, and in 
that case we do the devm_memremap again:

if (btt)
     remap the full namespace size.
else
    remap the info_block_reserve_size.


This info block reserve mapping is unmapped in pmem_attach_disk().


Is there anything I am missing in the above flow?
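
Restated as code, that flow looks roughly like the following (a sketch of nd_pmem_probe() with this patch applied; the tail that probes for a claiming personality is simplified from the unpatched driver, and the comments mark where each mapping is created and torn down):

static int nd_pmem_probe(struct device *dev)
{
	struct nd_namespace_io *nsio;
	struct nd_namespace_common *ndns;

	ndns = nvdimm_namespace_common_probe(dev);
	if (IS_ERR(ndns))
		return PTR_ERR(ndns);
	nsio = to_nd_namespace_io(&ndns->dev);

	if (is_nd_btt(dev)) {
		/* second pass, btt claim: map the full namespace */
		if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
			return -ENXIO;
		return nvdimm_namespace_attach_btt(ndns);
	}

	/* first pass (raw namespace) and pfn/dax passes: small mapping */
	if (devm_nsio_enable(dev, nsio, info_block_reserve()))
		return -ENXIO;

	if (is_nd_pfn(dev))
		/* pmem_attach_disk() drops this mapping and remaps via pgmap */
		return pmem_attach_disk(dev, ndns);

	/*
	 * Raw namespace: a successful btt/pfn/dax probe registers the
	 * claiming device; returning -ENXIO lets devm unwind the small
	 * mapping (and free nsio) before the second-pass probe runs.
	 */
	if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
			|| nd_dax_probe(dev, ndns) == 0)
		return -ENXIO;

	return pmem_attach_disk(dev, ndns);
}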



> 
>>                  dev_warn(dev, "could not reserve region %pR\n", res);
>> @@ -318,8 +318,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>>          nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
>>                          &nsio->res);
>>
>> -       nsio->addr = devm_memremap(dev, res->start, resource_size(res),
>> -                       ARCH_MEMREMAP_PMEM);
>> +       nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
>>
>>          return PTR_ERR_OR_ZERO(nsio->addr);
>>   }
>> diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
>> index ee5c04070ef9..93d3c760c0f3 100644
>> --- a/drivers/nvdimm/nd.h
>> +++ b/drivers/nvdimm/nd.h
>> @@ -376,7 +376,7 @@ void nvdimm_badblocks_populate(struct nd_region *nd_region,
>>   #define MAX_STRUCT_PAGE_SIZE 64
>>
>>   int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
>> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
>> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size);
>>   void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
>>   #else
>>   static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
>> @@ -385,7 +385,7 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
>>          return -ENXIO;
>>   }
>>   static inline int devm_nsio_enable(struct device *dev,
>> -               struct nd_namespace_io *nsio)
>> +               struct nd_namespace_io *nsio, unsigned long size)
>>   {
>>          return -ENXIO;
>>   }
>> diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
>> index acb19517f678..f4856c87d01c 100644
>> --- a/drivers/nvdimm/pfn.h
>> +++ b/drivers/nvdimm/pfn.h
>> @@ -36,4 +36,10 @@ struct nd_pfn_sb {
>>          __le64 checksum;
>>   };
>>
>> +static inline u32 info_block_reserve(void)
>> +{
>> +       return ALIGN(SZ_8K, PAGE_SIZE);
>> +}
>> +
>> +
>>   #endif /* __NVDIMM_PFN_H */
>> diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
>> index 60d81fae06ee..e49aa9a0fd04 100644
>> --- a/drivers/nvdimm/pfn_devs.c
>> +++ b/drivers/nvdimm/pfn_devs.c
>> @@ -635,11 +635,6 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
>>   }
>>   EXPORT_SYMBOL(nd_pfn_probe);
>>
>> -static u32 info_block_reserve(void)
>> -{
>> -       return ALIGN(SZ_8K, PAGE_SIZE);
>> -}
>> -
>>   /*
>>    * We hotplug memory at sub-section granularity, pad the reserved area
>>    * from the previous section base to the namespace base address.
>> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
>> index f9f76f6ba07b..3c188ffeff11 100644
>> --- a/drivers/nvdimm/pmem.c
>> +++ b/drivers/nvdimm/pmem.c
>> @@ -491,17 +491,26 @@ static int pmem_attach_disk(struct device *dev,
>>   static int nd_pmem_probe(struct device *dev)
>>   {
>>          int ret;
>> +       struct nd_namespace_io *nsio;
>>          struct nd_namespace_common *ndns;
>>
>>          ndns = nvdimm_namespace_common_probe(dev);
>>          if (IS_ERR(ndns))
>>                  return PTR_ERR(ndns);
>>
>> -       if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
>> -               return -ENXIO;
>> +       nsio = to_nd_namespace_io(&ndns->dev);
>>
>> -       if (is_nd_btt(dev))
>> +       if (is_nd_btt(dev)) {
>> +               /*
>> +                * Map with resource size
>> +                */
>> +               if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
>> +                       return -ENXIO;
>>                  return nvdimm_namespace_attach_btt(ndns);
>> +       }
>> +
>> +       if (devm_nsio_enable(dev, nsio, info_block_reserve()))
>> +               return -ENXIO;
>>
>>          if (is_nd_pfn(dev))
>>                  return pmem_attach_disk(dev, ndns);
>> --
>> 2.21.0
>>
Dan Williams Oct. 30, 2019, 11:33 p.m. UTC | #3
On Thu, Oct 24, 2019 at 2:07 AM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
>
> On 10/24/19 7:36 AM, Dan Williams wrote:
> > On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
> > <aneesh.kumar@linux.ibm.com> wrote:
> >>
> >> nvdimm core currently maps the full namespace to an ioremap range
> >> while probing the namespace mode. This can result in probe failures
> >> on architectures that have limited ioremap space.
> >>
> >> For example, with a large btt namespace that consumes most of I/O remap
> >> range, depending on the sequence of namespace initialization, the user can find
> >> a pfn namespace initialization failure due to unavailable I/O remap space
> >> which nvdimm core uses for temporary mapping.
> >>
> >> nvdimm core can avoid this failure by only mapping the reserver block area to
> >
> > s/reserver/reserved/
>
> Will fix
>
>
> >
> >> check for pfn superblock type and map the full namespace resource only before
> >> using the namespace.
> >
> > Are you going to follow up with the BTT patch that uses this new facility?
> >
>
> I am not yet sure about that. ioremap/vmap area is the way we get a
> kernel mapping without struct page backing. What we are suggesting hee
> is the ability to have a direct mapped mapping without struct page. I
> need to look at closely about what that imply.
>
> >>
> >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> >> ---
> >> Changes from v2:
> >> * update changelog
> >>
> >> Changes from V1:
> >> * update changelog
> >> * update patch based on review feedback.
> >>
> >>   drivers/dax/pmem/core.c   |  2 +-
> >>   drivers/nvdimm/claim.c    |  7 +++----
> >>   drivers/nvdimm/nd.h       |  4 ++--
> >>   drivers/nvdimm/pfn.h      |  6 ++++++
> >>   drivers/nvdimm/pfn_devs.c |  5 -----
> >>   drivers/nvdimm/pmem.c     | 15 ++++++++++++---
> >>   6 files changed, 24 insertions(+), 15 deletions(-)
> >>
> >> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
> >> index 6eb6dfdf19bf..f174dbfbe1c4 100644
> >> --- a/drivers/dax/pmem/core.c
> >> +++ b/drivers/dax/pmem/core.c
> >> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
> >>          nsio = to_nd_namespace_io(&ndns->dev);
> >>
> >>          /* parse the 'pfn' info block via ->rw_bytes */
> >> -       rc = devm_nsio_enable(dev, nsio);
> >> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
> >>          if (rc)
> >>                  return ERR_PTR(rc);
> >>          rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
> >> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
> >> index 2985ca949912..d89d2c039e25 100644
> >> --- a/drivers/nvdimm/claim.c
> >> +++ b/drivers/nvdimm/claim.c
> >> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
> >>          return rc;
> >>   }
> >>
> >> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
> >> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
> >>   {
> >>          struct resource *res = &nsio->res;
> >>          struct nd_namespace_common *ndns = &nsio->common;
> >>
> >> -       nsio->size = resource_size(res);
> >> +       nsio->size = size;
> >
> > This needs a:
> >
> > if (nsio->size)
> >     devm_memunmap(dev, nsio->addr);
>
>
> Why ? we should not be calling devm_nsio_enable twice now.
>
> >
> >
> >>          if (!devm_request_mem_region(dev, res->start, resource_size(res),
> >>                                  dev_name(&ndns->dev))) {
> >
> > Also don't repeat the devm_request_mem_region() if one was already done.
> >
> > Another thing to check is if nsio->size gets cleared when the
> > namespace is disabled, if not that well need to be added in the
> > shutdown path.
> >
>
>
> Not sure I understand that. So with this patch when we probe we always
> use info_block_reserve() size irrespective of the device. This probe
> will result in us adding a btt/pfn/dax device and we return -ENXIO after
> this probe. This return value will cause us to destroy the I/O remap
> mmapping we did with info_block_reserve() size. Also the nsio data
> structure is also freed.
>
> nd_pmem_probe is then again called with btt device type and in that case
> we do  devm_memremap again.
>
> if (btt)
>      remap the full namespace size.
> else
>     remap the info_block_reserve_size.
>
>
> This infor block reserve mapping is unmapped in pmem_attach_disk()
>
>
> Anything i am missing in the above flow?

Oh no, you're right, this does make the implementation only call
devm_nsio_enable() once. However, now that I look I want to suggest
some small reorganizations of where devm_nsio_enable() is called. I'll
take a stab at folding some changes on top of your patch.
Dan Williams Oct. 31, 2019, 3:55 a.m. UTC | #4
On Wed, Oct 30, 2019 at 4:33 PM Dan Williams <dan.j.williams@intel.com> wrote:
>
> On Thu, Oct 24, 2019 at 2:07 AM Aneesh Kumar K.V
> <aneesh.kumar@linux.ibm.com> wrote:
> >
> > On 10/24/19 7:36 AM, Dan Williams wrote:
> > > On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
> > > <aneesh.kumar@linux.ibm.com> wrote:
> > >>
> > >> nvdimm core currently maps the full namespace to an ioremap range
> > >> while probing the namespace mode. This can result in probe failures
> > >> on architectures that have limited ioremap space.
> > >>
> > >> For example, with a large btt namespace that consumes most of I/O remap
> > >> range, depending on the sequence of namespace initialization, the user can find
> > >> a pfn namespace initialization failure due to unavailable I/O remap space
> > >> which nvdimm core uses for temporary mapping.
> > >>
> > >> nvdimm core can avoid this failure by only mapping the reserver block area to
> > >
> > > s/reserver/reserved/
> >
> > Will fix
> >
> >
> > >
> > >> check for pfn superblock type and map the full namespace resource only before
> > >> using the namespace.
> > >
> > > Are you going to follow up with the BTT patch that uses this new facility?
> > >
> >
> > I am not yet sure about that. ioremap/vmap area is the way we get a
> > kernel mapping without struct page backing. What we are suggesting hee
> > is the ability to have a direct mapped mapping without struct page. I
> > need to look at closely about what that imply.
> >
> > >>
> > >> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> > >> ---
> > >> Changes from v2:
> > >> * update changelog
> > >>
> > >> Changes from V1:
> > >> * update changelog
> > >> * update patch based on review feedback.
> > >>
> > >>   drivers/dax/pmem/core.c   |  2 +-
> > >>   drivers/nvdimm/claim.c    |  7 +++----
> > >>   drivers/nvdimm/nd.h       |  4 ++--
> > >>   drivers/nvdimm/pfn.h      |  6 ++++++
> > >>   drivers/nvdimm/pfn_devs.c |  5 -----
> > >>   drivers/nvdimm/pmem.c     | 15 ++++++++++++---
> > >>   6 files changed, 24 insertions(+), 15 deletions(-)
> > >>
> > >> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
> > >> index 6eb6dfdf19bf..f174dbfbe1c4 100644
> > >> --- a/drivers/dax/pmem/core.c
> > >> +++ b/drivers/dax/pmem/core.c
> > >> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
> > >>          nsio = to_nd_namespace_io(&ndns->dev);
> > >>
> > >>          /* parse the 'pfn' info block via ->rw_bytes */
> > >> -       rc = devm_nsio_enable(dev, nsio);
> > >> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
> > >>          if (rc)
> > >>                  return ERR_PTR(rc);
> > >>          rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
> > >> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
> > >> index 2985ca949912..d89d2c039e25 100644
> > >> --- a/drivers/nvdimm/claim.c
> > >> +++ b/drivers/nvdimm/claim.c
> > >> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
> > >>          return rc;
> > >>   }
> > >>
> > >> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
> > >> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
> > >>   {
> > >>          struct resource *res = &nsio->res;
> > >>          struct nd_namespace_common *ndns = &nsio->common;
> > >>
> > >> -       nsio->size = resource_size(res);
> > >> +       nsio->size = size;
> > >
> > > This needs a:
> > >
> > > if (nsio->size)
> > >     devm_memunmap(dev, nsio->addr);
> >
> >
> > Why ? we should not be calling devm_nsio_enable twice now.
> >
> > >
> > >
> > >>          if (!devm_request_mem_region(dev, res->start, resource_size(res),
> > >>                                  dev_name(&ndns->dev))) {
> > >
> > > Also don't repeat the devm_request_mem_region() if one was already done.
> > >
> > > Another thing to check is if nsio->size gets cleared when the
> > > namespace is disabled, if not that well need to be added in the
> > > shutdown path.
> > >
> >
> >
> > Not sure I understand that. So with this patch when we probe we always
> > use info_block_reserve() size irrespective of the device. This probe
> > will result in us adding a btt/pfn/dax device and we return -ENXIO after
> > this probe. This return value will cause us to destroy the I/O remap
> > mmapping we did with info_block_reserve() size. Also the nsio data
> > structure is also freed.
> >
> > nd_pmem_probe is then again called with btt device type and in that case
> > we do  devm_memremap again.
> >
> > if (btt)
> >      remap the full namespace size.
> > else
> >     remap the info_block_reserve_size.
> >
> >
> > This infor block reserve mapping is unmapped in pmem_attach_disk()
> >
> >
> > Anything i am missing in the above flow?
>
> Oh no, you're right, this does make the implementation only call
> devm_nsio_enable() once. However, now that I look I want to suggest
> some small reorganizations of where devm_nsio_enable() is called. I'll
> take a stab at folding some changes on top of your patch.

This change is causing the "pfn-meta-errors.sh" test to fail. It is
due to the fact that info_block_reserve() is not a sufficient
reservation for errors in the page metadata area.
Aneesh Kumar K.V Oct. 31, 2019, 4:10 a.m. UTC | #5
On 10/31/19 9:25 AM, Dan Williams wrote:
> On Wed, Oct 30, 2019 at 4:33 PM Dan Williams <dan.j.williams@intel.com> wrote:
>>
>> On Thu, Oct 24, 2019 at 2:07 AM Aneesh Kumar K.V
>> <aneesh.kumar@linux.ibm.com> wrote:
>>>
>>> On 10/24/19 7:36 AM, Dan Williams wrote:
>>>> On Thu, Oct 17, 2019 at 12:33 AM Aneesh Kumar K.V
>>>> <aneesh.kumar@linux.ibm.com> wrote:
>>>>>
>>>>> nvdimm core currently maps the full namespace to an ioremap range
>>>>> while probing the namespace mode. This can result in probe failures
>>>>> on architectures that have limited ioremap space.
>>>>>
>>>>> For example, with a large btt namespace that consumes most of I/O remap
>>>>> range, depending on the sequence of namespace initialization, the user can find
>>>>> a pfn namespace initialization failure due to unavailable I/O remap space
>>>>> which nvdimm core uses for temporary mapping.
>>>>>
>>>>> nvdimm core can avoid this failure by only mapping the reserver block area to
>>>>
>>>> s/reserver/reserved/
>>>
>>> Will fix
>>>
>>>
>>>>
>>>>> check for pfn superblock type and map the full namespace resource only before
>>>>> using the namespace.
>>>>
>>>> Are you going to follow up with the BTT patch that uses this new facility?
>>>>
>>>
>>> I am not yet sure about that. ioremap/vmap area is the way we get a
>>> kernel mapping without struct page backing. What we are suggesting hee
>>> is the ability to have a direct mapped mapping without struct page. I
>>> need to look at closely about what that imply.
>>>
>>>>>
>>>>> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
>>>>> ---
>>>>> Changes from v2:
>>>>> * update changelog
>>>>>
>>>>> Changes from V1:
>>>>> * update changelog
>>>>> * update patch based on review feedback.
>>>>>
>>>>>    drivers/dax/pmem/core.c   |  2 +-
>>>>>    drivers/nvdimm/claim.c    |  7 +++----
>>>>>    drivers/nvdimm/nd.h       |  4 ++--
>>>>>    drivers/nvdimm/pfn.h      |  6 ++++++
>>>>>    drivers/nvdimm/pfn_devs.c |  5 -----
>>>>>    drivers/nvdimm/pmem.c     | 15 ++++++++++++---
>>>>>    6 files changed, 24 insertions(+), 15 deletions(-)
>>>>>
>>>>> diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
>>>>> index 6eb6dfdf19bf..f174dbfbe1c4 100644
>>>>> --- a/drivers/dax/pmem/core.c
>>>>> +++ b/drivers/dax/pmem/core.c
>>>>> @@ -28,7 +28,7 @@ struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
>>>>>           nsio = to_nd_namespace_io(&ndns->dev);
>>>>>
>>>>>           /* parse the 'pfn' info block via ->rw_bytes */
>>>>> -       rc = devm_nsio_enable(dev, nsio);
>>>>> +       rc = devm_nsio_enable(dev, nsio, info_block_reserve());
>>>>>           if (rc)
>>>>>                   return ERR_PTR(rc);
>>>>>           rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
>>>>> diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
>>>>> index 2985ca949912..d89d2c039e25 100644
>>>>> --- a/drivers/nvdimm/claim.c
>>>>> +++ b/drivers/nvdimm/claim.c
>>>>> @@ -300,12 +300,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
>>>>>           return rc;
>>>>>    }
>>>>>
>>>>> -int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
>>>>> +int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
>>>>>    {
>>>>>           struct resource *res = &nsio->res;
>>>>>           struct nd_namespace_common *ndns = &nsio->common;
>>>>>
>>>>> -       nsio->size = resource_size(res);
>>>>> +       nsio->size = size;
>>>>
>>>> This needs a:
>>>>
>>>> if (nsio->size)
>>>>      devm_memunmap(dev, nsio->addr);
>>>
>>>
>>> Why ? we should not be calling devm_nsio_enable twice now.
>>>
>>>>
>>>>
>>>>>           if (!devm_request_mem_region(dev, res->start, resource_size(res),
>>>>>                                   dev_name(&ndns->dev))) {
>>>>
>>>> Also don't repeat the devm_request_mem_region() if one was already done.
>>>>
>>>> Another thing to check is if nsio->size gets cleared when the
>>>> namespace is disabled, if not that well need to be added in the
>>>> shutdown path.
>>>>
>>>
>>>
>>> Not sure I understand that. So with this patch when we probe we always
>>> use info_block_reserve() size irrespective of the device. This probe
>>> will result in us adding a btt/pfn/dax device and we return -ENXIO after
>>> this probe. This return value will cause us to destroy the I/O remap
>>> mmapping we did with info_block_reserve() size. Also the nsio data
>>> structure is also freed.
>>>
>>> nd_pmem_probe is then again called with btt device type and in that case
>>> we do  devm_memremap again.
>>>
>>> if (btt)
>>>       remap the full namespace size.
>>> else
>>>      remap the info_block_reserve_size.
>>>
>>>
>>> This infor block reserve mapping is unmapped in pmem_attach_disk()
>>>
>>>
>>> Anything i am missing in the above flow?
>>
>> Oh no, you're right, this does make the implementation only call
>> devm_nsio_enable() once. However, now that I look I want to suggest
>> some small reorganizations of where devm_nsio_enable() is called. I'll
>> take a stab at folding some changes on top of your patch.
> 
> This change is causing the "pfn-meta-errors.sh" test to fail. It is
> due to the fact the info_block_reserve() is not a sufficient
> reservation for errors in the page metadata area.
> 

Can you share the call stack? We clear bad blocks on write, don't we? 
And we only read while probing?

We are still working on enabling `ndctl test` to run on POWER, hence I 
was not able to capture these issues. Will try to get that resolved soon.

-aneesh
Dan Williams Oct. 31, 2019, 4:19 a.m. UTC | #6
On Wed, Oct 30, 2019 at 9:10 PM Aneesh Kumar K.V
<aneesh.kumar@linux.ibm.com> wrote:
[..]
> >
>
> Can you share the call stack? we clear bad blocks on write isn't? and we
> do only read while probing?
>
> We are still working on enabling `ndctl test` to run on POWER. Hence was
> not able capture these issues. Will try to get that resolved soon.

Here's the splat. I've also attached a reworked version of the patch
that has code organization fixups. It might be the case that it needs
to fully move to a model where each nd_*_probe() routine does its own
management of devm_namespace_enable() based on the sizes it needs to
complete the probe process.

nd_pmem namespace3.0: request out of range
WARNING: CPU: 16 PID: 1551 at
tools/testing/nvdimm/../../../drivers/nvdimm/claim.c:264
nsio_rw_bytes+0x14c/0x260 [libnvdimm]
RIP: 0010:nsio_rw_bytes+0x14c/0x260 [libnvdimm]
Call Trace:
 nd_pfn_validate+0x3d9/0x560 [libnvdimm]
 nd_pfn_probe+0xc0/0x150 [libnvdimm]
 ? nd_btt_probe+0x1e/0x270 [libnvdimm]
 nd_pmem_probe+0x63/0xc0 [nd_pmem]
 nvdimm_bus_probe+0x91/0x310 [libnvdimm]

That nd_pfn_validate+0x3d9 corresponds to the nvdimm_write_bytes() in
nd_pfn_clear_memmap_errors().
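
For context, every ->rw_bytes access is bounded by the size handed to devm_nsio_enable(), so with this patch the pfn probe path only has the first info_block_reserve() bytes mapped, while nd_pfn_clear_memmap_errors() writes zeroes over badblocks anywhere in the (much larger) struct-page metadata area. Paraphrasing the two relevant pieces (variable names approximate; the real code is in drivers/nvdimm/claim.c and drivers/nvdimm/pfn_devs.c):

	/* nsio_rw_bytes(): reject anything beyond the mapped size */
	if (unlikely(offset + size > nsio->size)) {
		dev_WARN_ONCE(&ndns->dev, 1, "request out of range\n");
		return -EFAULT;
	}

	/*
	 * nd_pfn_clear_memmap_errors(): zero out badblocks in the memmap
	 * reservation, at namespace offsets that can exceed
	 * ALIGN(SZ_8K, PAGE_SIZE)
	 */
	rc = nvdimm_write_bytes(ndns, nsoff, zero_page, len, 0);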

Patch

diff --git a/drivers/dax/pmem/core.c b/drivers/dax/pmem/core.c
index 6eb6dfdf19bf..f174dbfbe1c4 100644
--- a/drivers/dax/pmem/core.c
+++ b/drivers/dax/pmem/core.c
@@ -28,7 +28,7 @@  struct dev_dax *__dax_pmem_probe(struct device *dev, enum dev_dax_subsys subsys)
 	nsio = to_nd_namespace_io(&ndns->dev);
 
 	/* parse the 'pfn' info block via ->rw_bytes */
-	rc = devm_nsio_enable(dev, nsio);
+	rc = devm_nsio_enable(dev, nsio, info_block_reserve());
 	if (rc)
 		return ERR_PTR(rc);
 	rc = nvdimm_setup_pfn(nd_pfn, &pgmap);
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2985ca949912..d89d2c039e25 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -300,12 +300,12 @@  static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	return rc;
 }
 
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size)
 {
 	struct resource *res = &nsio->res;
 	struct nd_namespace_common *ndns = &nsio->common;
 
-	nsio->size = resource_size(res);
+	nsio->size = size;
 	if (!devm_request_mem_region(dev, res->start, resource_size(res),
 				dev_name(&ndns->dev))) {
 		dev_warn(dev, "could not reserve region %pR\n", res);
@@ -318,8 +318,7 @@  int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
 	nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
 			&nsio->res);
 
-	nsio->addr = devm_memremap(dev, res->start, resource_size(res),
-			ARCH_MEMREMAP_PMEM);
+	nsio->addr = devm_memremap(dev, res->start, size, ARCH_MEMREMAP_PMEM);
 
 	return PTR_ERR_OR_ZERO(nsio->addr);
 }
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index ee5c04070ef9..93d3c760c0f3 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -376,7 +376,7 @@  void nvdimm_badblocks_populate(struct nd_region *nd_region,
 #define MAX_STRUCT_PAGE_SIZE 64
 
 int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
-int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
+int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio, unsigned long size);
 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
 #else
 static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
@@ -385,7 +385,7 @@  static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	return -ENXIO;
 }
 static inline int devm_nsio_enable(struct device *dev,
-		struct nd_namespace_io *nsio)
+		struct nd_namespace_io *nsio, unsigned long size)
 {
 	return -ENXIO;
 }
diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h
index acb19517f678..f4856c87d01c 100644
--- a/drivers/nvdimm/pfn.h
+++ b/drivers/nvdimm/pfn.h
@@ -36,4 +36,10 @@  struct nd_pfn_sb {
 	__le64 checksum;
 };
 
+static inline u32 info_block_reserve(void)
+{
+	return ALIGN(SZ_8K, PAGE_SIZE);
+}
+
+
 #endif /* __NVDIMM_PFN_H */
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 60d81fae06ee..e49aa9a0fd04 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -635,11 +635,6 @@  int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 }
 EXPORT_SYMBOL(nd_pfn_probe);
 
-static u32 info_block_reserve(void)
-{
-	return ALIGN(SZ_8K, PAGE_SIZE);
-}
-
 /*
  * We hotplug memory at sub-section granularity, pad the reserved area
  * from the previous section base to the namespace base address.
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f9f76f6ba07b..3c188ffeff11 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -491,17 +491,26 @@  static int pmem_attach_disk(struct device *dev,
 static int nd_pmem_probe(struct device *dev)
 {
 	int ret;
+	struct nd_namespace_io *nsio;
 	struct nd_namespace_common *ndns;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 
-	if (devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev)))
-		return -ENXIO;
+	nsio = to_nd_namespace_io(&ndns->dev);
 
-	if (is_nd_btt(dev))
+	if (is_nd_btt(dev)) {
+		/*
+		 * Map with resource size
+		 */
+		if (devm_nsio_enable(dev, nsio, resource_size(&nsio->res)))
+			return -ENXIO;
 		return nvdimm_namespace_attach_btt(ndns);
+	}
+
+	if (devm_nsio_enable(dev, nsio, info_block_reserve()))
+		return -ENXIO;
 
 	if (is_nd_pfn(dev))
 		return pmem_attach_disk(dev, ndns);