Message ID | 20211109083309.584081-18-hch@lst.de (mailing list archive) |
---|---|
State | New, archived |
Series | [01/29] nvdimm/pmem: move dax_attribute_group from dax to pmem |
On Tue, Nov 9, 2021 at 12:34 AM Christoph Hellwig <hch@lst.de> wrote:
>
> Factor out a helper for the "manual" zeroing of a DAX range to clean
> up dax_iomap_zero a lot.
>

Small / optional fixup below:

Reviewed-by: Dan Williams <dan.j.williams@intel.com>

> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  fs/dax.c | 36 +++++++++++++++++++-----------------
>  1 file changed, 19 insertions(+), 17 deletions(-)
>
> diff --git a/fs/dax.c b/fs/dax.c
> index d7a923d152240..dc9ebeff850ab 100644
> --- a/fs/dax.c
> +++ b/fs/dax.c
> @@ -1121,34 +1121,36 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
>  }
>  #endif /* CONFIG_FS_DAX_PMD */
>
> +static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
> +                unsigned int offset, size_t size)
> +{
> +        void *kaddr;
> +        long rc;
> +
> +        rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
> +        if (rc >= 0) {

Technically this should be "> 0" because dax_direct_access() returns
nr_available_pages @pgoff, but this isn't broken because
dax_direct_access() converts the "zero pages available" case into
-ERANGE.

> +                memset(kaddr + offset, 0, size);
> +                dax_flush(dax_dev, kaddr + offset, size);
> +        }
> +        return rc;
> +}
> +
>  s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
>  {
>          pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
>          long rc, id;
> -        void *kaddr;
> -        bool page_aligned = false;
>          unsigned offset = offset_in_page(pos);
>          unsigned size = min_t(u64, PAGE_SIZE - offset, length);
>
> -        if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
> -                page_aligned = true;
> -
>          id = dax_read_lock();
> -
> -        if (page_aligned)
> +        if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
>                  rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
>          else
> -                rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
> -        if (rc < 0) {
> -                dax_read_unlock(id);
> -                return rc;
> -        }
> -
> -        if (!page_aligned) {
> -                memset(kaddr + offset, 0, size);
> -                dax_flush(iomap->dax_dev, kaddr + offset, size);
> -        }
> +                rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
>          dax_read_unlock(id);
> +
> +        if (rc < 0)
> +                return rc;
>          return size;
>  }
>
> --
> 2.30.2
>
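As an aside, here is a minimal sketch of the return-value contract Dan describes; dax_page_is_mapped() is a hypothetical illustration, not a helper from the kernel tree:

```c
/*
 * Hypothetical illustration only: dax_direct_access() returns how many
 * pages are directly addressable starting at @pgoff, or a negative errno.
 * The "zero pages available" case is reported as -ERANGE rather than 0,
 * so a ">= 0" check never actually sees a zero return.  The caller is
 * assumed to hold dax_read_lock(), as dax_iomap_zero() does.
 */
static bool dax_page_is_mapped(struct dax_device *dax_dev, pgoff_t pgoff)
{
        void *kaddr;
        long nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);

        return nr > 0;  /* negative errno (including -ERANGE) means no mapping */
}
```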
On Tue, Nov 23, 2021 at 01:22:13PM -0800, Dan Williams wrote:
> On Tue, Nov 9, 2021 at 12:34 AM Christoph Hellwig <hch@lst.de> wrote:
> >
> > Factor out a helper for the "manual" zeroing of a DAX range to clean
> > up dax_iomap_zero a lot.
> >
>
> Small / optional fixup below:
>
> Reviewed-by: Dan Williams <dan.j.williams@intel.com>
>
> > Signed-off-by: Christoph Hellwig <hch@lst.de>
> > ---
> >  fs/dax.c | 36 +++++++++++++++++++-----------------
> >  1 file changed, 19 insertions(+), 17 deletions(-)
> >
> > diff --git a/fs/dax.c b/fs/dax.c
> > index d7a923d152240..dc9ebeff850ab 100644
> > --- a/fs/dax.c
> > +++ b/fs/dax.c
> > @@ -1121,34 +1121,36 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
> >  }
> >  #endif /* CONFIG_FS_DAX_PMD */
> >
> > +static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
> > +                unsigned int offset, size_t size)
> > +{
> > +        void *kaddr;
> > +        long rc;
> > +
> > +        rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
> > +        if (rc >= 0) {
>
> Technically this should be "> 0" because dax_direct_access() returns
> nr_available_pages @pgoff, but this isn't broken because
> dax_direct_access() converts the "zero pages available" case into
> -ERANGE.

Agreed.  With that fixed,

Reviewed-by: Darrick J. Wong <djwong@kernel.org>

--D
On Tue, Nov 23, 2021 at 01:22:13PM -0800, Dan Williams wrote:
> On Tue, Nov 9, 2021 at 12:34 AM Christoph Hellwig <hch@lst.de> wrote:
> >
> > Factor out a helper for the "manual" zeroing of a DAX range to clean
> > up dax_iomap_zero a lot.
> >
>
> Small / optional fixup below:

Incorporated.
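For reference, a minimal sketch of the helper with Dan's "> 0" fixup folded in (an assumption based on this thread; the code that was eventually merged may differ in detail):

```c
/*
 * Sketch only: dax_memzero() with the suggested "> 0" check applied.
 * dax_direct_access() returns the number of pages available at @pgoff
 * (or a negative errno; "no pages" is reported as -ERANGE), so a
 * strictly positive return is the only case where kaddr is usable.
 * The caller (dax_iomap_zero()) holds dax_read_lock().
 */
static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
                unsigned int offset, size_t size)
{
        void *kaddr;
        long rc;

        rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
        if (rc > 0) {
                memset(kaddr + offset, 0, size);
                dax_flush(dax_dev, kaddr + offset, size);
        }
        return rc;
}
```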
Factor out a helper for the "manual" zeroing of a DAX range to clean
up dax_iomap_zero a lot.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 fs/dax.c | 36 +++++++++++++++++++-----------------
 1 file changed, 19 insertions(+), 17 deletions(-)

```diff
diff --git a/fs/dax.c b/fs/dax.c
index d7a923d152240..dc9ebeff850ab 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1121,34 +1121,36 @@ static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
 }
 #endif /* CONFIG_FS_DAX_PMD */
 
+static int dax_memzero(struct dax_device *dax_dev, pgoff_t pgoff,
+                unsigned int offset, size_t size)
+{
+        void *kaddr;
+        long rc;
+
+        rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
+        if (rc >= 0) {
+                memset(kaddr + offset, 0, size);
+                dax_flush(dax_dev, kaddr + offset, size);
+        }
+        return rc;
+}
+
 s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap)
 {
         pgoff_t pgoff = dax_iomap_pgoff(iomap, pos);
         long rc, id;
-        void *kaddr;
-        bool page_aligned = false;
         unsigned offset = offset_in_page(pos);
         unsigned size = min_t(u64, PAGE_SIZE - offset, length);
 
-        if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
-                page_aligned = true;
-
         id = dax_read_lock();
-
-        if (page_aligned)
+        if (IS_ALIGNED(pos, PAGE_SIZE) && size == PAGE_SIZE)
                 rc = dax_zero_page_range(iomap->dax_dev, pgoff, 1);
         else
-                rc = dax_direct_access(iomap->dax_dev, pgoff, 1, &kaddr, NULL);
-        if (rc < 0) {
-                dax_read_unlock(id);
-                return rc;
-        }
-
-        if (!page_aligned) {
-                memset(kaddr + offset, 0, size);
-                dax_flush(iomap->dax_dev, kaddr + offset, size);
-        }
+                rc = dax_memzero(iomap->dax_dev, pgoff, offset, size);
         dax_read_unlock(id);
+
+        if (rc < 0)
+                return rc;
         return size;
 }
 
```
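To illustrate how a caller consumes the return value: dax_iomap_zero() zeroes at most one page per call and returns the number of bytes it handled, so zeroing an arbitrary byte range means looping until the length is consumed. The loop below is a hypothetical sketch of that pattern, not the actual iomap zeroing code:

```c
/*
 * Hypothetical caller sketch (not the real iomap_zero_range() path):
 * dax_iomap_zero() handles at most PAGE_SIZE - offset_in_page(pos) bytes
 * per call and returns how many bytes it zeroed, or a negative errno.
 */
static s64 zero_dax_range(loff_t pos, u64 length, struct iomap *iomap)
{
        while (length > 0) {
                s64 bytes = dax_iomap_zero(pos, length, iomap);

                if (bytes < 0)
                        return bytes;   /* propagate dax_direct_access() errors */
                pos += bytes;
                length -= bytes;
        }
        return 0;
}
```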