
[3/3] fs/dax: Assigning NULL to pfn of dax_direct_access if useless

Message ID CAPcyv4hPEch02C7UfCGgpXmsCVyT2zwV=4Tr8jv8W1i6_poAJQ@mail.gmail.com (mailing list archive)
State New, archived

Commit Message

Dan Williams July 4, 2018, 2:37 p.m. UTC
On Wed, Jul 4, 2018 at 6:07 AM, Huaisheng Ye <yehs2007@zoho.com> wrote:
>  ---- On Wed, 04 Jul 2018 19:30:12 +0800 Jan Kara <jack@suse.cz> wrote ----
>  > On Wed 04-07-18 14:40:58, Huaisheng Ye wrote:
>  > > From: Huaisheng Ye <yehs1@lenovo.com>
>  > >
>  > > Some functions within fs/dax don't need to get a pfn from direct_access.
>  > > Assigning NULL to the pfn argument of dax_direct_access is more intuitive
>  > > and simpler than offering a useless local variable.
>  > >
>  > > Signed-off-by: Huaisheng Ye <yehs1@lenovo.com>
>  >
>  > I like this. You can add:
>  >
>  > Reviewed-by: Jan Kara <jack@suse.cz>
>  >
>  > for the series.
>  >
>  >                                 Honza
>  >
> I am so happy you like them, thank you very much.

Yes, I like this too. In fact I have a similar patch in my tree
already that I have been preparing to send out. I am using it to delay
when we need to have the 'struct page' memmap for dax initialized.
Attached is the full patch, but the series is still a work in
progress.
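
As a caller-side illustration of the cleanup, a minimal sketch is below; the
helper name is made up, it assumes the series is applied, and it assumes size
does not exceed the single page being mapped:

#include <linux/dax.h>
#include <linux/string.h>

/* Hypothetical helper: copy up to one page out of a dax device. */
static int dax_copy_example(struct dax_device *dax_dev, pgoff_t pgoff,
			    size_t size, void *dst)
{
	void *kaddr;
	long rc;
	int id;

	id = dax_read_lock();
	/* Only the kernel address is needed, so no pfn_t local is declared. */
	rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
	if (rc >= 0)
		memcpy(dst, kaddr, size);
	dax_read_unlock(id);

	return rc < 0 ? rc : 0;
}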

Comments

Dan Williams July 4, 2018, 2:41 p.m. UTC | #1
On Wed, Jul 4, 2018 at 7:37 AM, Dan Williams <dan.j.williams@intel.com> wrote:
> On Wed, Jul 4, 2018 at 6:07 AM, Huaisheng Ye <yehs2007@zoho.com> wrote:
>>  ---- On Wed, 04 Jul 2018 19:30:12 +0800 Jan Kara <jack@suse.cz> wrote ----
>>  > On Wed 04-07-18 14:40:58, Huaisheng Ye wrote:
>>  > > From: Huaisheng Ye <yehs1@lenovo.com>
>>  > >
>>  > > Some functions within fs/dax don't need to get a pfn from direct_access.
>>  > > Assigning NULL to the pfn argument of dax_direct_access is more intuitive
>>  > > and simpler than offering a useless local variable.
>>  > >
>>  > > Signed-off-by: Huaisheng Ye <yehs1@lenovo.com>
>>  >
>>  > I like this. You can add:
>>  >
>>  > Reviewed-by: Jan Kara <jack@suse.cz>
>>  >
>>  > for the series.
>>  >
>>  >                                 Honza
>>  >
>> I am so happy you like them, thank you very much.
>
> Yes, I like this too. In fact I have a similar patch in my tree
> already that I have been preparing to send out. I am using it to delay
> when we need to have the 'struct page' memmap for dax initialized.
> Attached is the full patch, but the series is still a work in
> progress.

Btw, I'll drop my version and apply your series since you got it
posted first and it can stand alone as its own cleanup.

Huaisheng HS1 Ye July 4, 2018, 3:35 p.m. UTC | #2
From: Dan Williams [mailto:dan.j.williams@intel.com]
Sent: Wednesday, July 04, 2018 10:42 PM
> 
> On Wed, Jul 4, 2018 at 7:37 AM, Dan Williams <dan.j.williams@intel.com> wrote:
> > On Wed, Jul 4, 2018 at 6:07 AM, Huaisheng Ye <yehs2007@zoho.com> wrote:
> >>  ---- On Wed, 04 Jul 2018 19:30:12 +0800 Jan Kara <jack@suse.cz> wrote ----
> >>  > On Wed 04-07-18 14:40:58, Huaisheng Ye wrote:
> >>  > > From: Huaisheng Ye <yehs1@lenovo.com>
> >>  > >
> >>  > > Some functions within fs/dax don't need to get a pfn from direct_access.
> >>  > > Assigning NULL to the pfn argument of dax_direct_access is more intuitive
> >>  > > and simpler than offering a useless local variable.
> >>  > >
> >>  > > Signed-off-by: Huaisheng Ye <yehs1@lenovo.com>
> >>  >
> >>  > I like this. You can add:
> >>  >
> >>  > Reviewed-by: Jan Kara <jack@suse.cz>
> >>  >
> >>  > for the series.
> >>  >
> >>  >                                 Honza
> >>  >
> >> I am so happy you like them, thank you very much.
> >
> > Yes, I like this too. In fact I have a similar patch in my tree
> > already that I have been preparing to send out. I am using it to delay
> > when we need to have the 'struct page' memmap for dax initialized.
> > Attached is the full patch, but the series is still a work in
> > progress.
> 
> Btw, I'll drop my version and apply your series since you got it
> posted first and it can stand alone as its own cleanup.

Hi Dan,

I will resend the series later, and thanks for your help.

Cheers,
Huaisheng Ye

Patch

From 62e12abebacbafdb5218a92501885fe0e80ea922 Mon Sep 17 00:00:00 2001
From: Dan Williams <dan.j.williams@intel.com>
Date: Wed, 27 Jun 2018 22:21:30 -0700
Subject: [PATCH] dax: Elide pfn lookups

Allow drivers to skip returning a pfn when one was not requested. In
support of asynchronous initialization of 'struct page' there needs to
be a sync point at which the memmap is made valid. The latest point
where the kernel can reasonably sync is when a pfn is being used to
populate userspace page tables. Otherwise, when only the kernel address
is needed from dax_direct_access(), as is typical for in-kernel users,
the 'pfn_t *' argument to dax_direct_access() can be NULL.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/dax/super.c             | 17 +++++++++++------
 drivers/nvdimm/pmem.c           |  3 ++-
 drivers/s390/block/dcssblk.c    |  5 +++--
 fs/dax.c                        | 10 +++-------
 tools/testing/nvdimm/pmem-dax.c |  6 ++++--
 5 files changed, 23 insertions(+), 18 deletions(-)

diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 3b1fe4b6b00e..82256506e40b 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -72,7 +72,7 @@  struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
 #endif
 
-static bool validate_dax_pfn(pfn_t pfn)
+static bool validate_dax_pfn(pfn_t *pfn)
 {
 	bool dax_enabled = false;
 
@@ -84,7 +84,7 @@  static bool validate_dax_pfn(pfn_t pfn)
 	if (!IS_ENABLED(CONFIG_DAX_DRIVER_DEBUG))
 		return true;
 
-	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(*pfn)) {
 		/*
 		 * An arch that has enabled the pmem api should also
 		 * have its drivers support pfn_t_devmap()
@@ -95,10 +95,10 @@  static bool validate_dax_pfn(pfn_t pfn)
 		 */
 		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
 		dax_enabled = true;
-	} else if (pfn_t_devmap(pfn)) {
+	} else if (pfn_t_devmap(*pfn)) {
 		struct dev_pagemap *pgmap;
 
-		pgmap = get_dev_pagemap(pfn_t_to_pfn(pfn), NULL);
+		pgmap = get_dev_pagemap(pfn_t_to_pfn(*pfn), NULL);
 		if (pgmap && pgmap->type == MEMORY_DEVICE_FS_DAX)
 			dax_enabled = true;
 		put_dev_pagemap(pgmap);
@@ -120,10 +120,10 @@  static bool validate_dax_pfn(pfn_t pfn)
 bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 {
 	struct dax_device *dax_dev;
+	pfn_t _pfn, *pfn;
 	pgoff_t pgoff;
 	int err, id;
 	void *kaddr;
-	pfn_t pfn;
 	long len;
 	char buf[BDEVNAME_SIZE];
 
@@ -147,8 +147,13 @@  bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
 		return false;
 	}
 
+	if (IS_ENABLED(CONFIG_DAX_DRIVER_DEBUG))
+		pfn = &_pfn;
+	else
+		pfn = NULL;
+
 	id = dax_read_lock();
-	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
+	len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, pfn);
 	dax_read_unlock(id);
 
 	put_dax(dax_dev);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index e8ac6f244d2b..c430536320a5 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -228,7 +228,8 @@  __weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 					PFN_PHYS(nr_pages))))
 		return -EIO;
 	*kaddr = pmem->virt_addr + offset;
-	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+	if (pfn)
+		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
 	/*
 	 * If badblocks are present, limit known good range to the
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index ed607288e696..a645b2c93c34 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -923,8 +923,9 @@  __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
 
 	dev_sz = dev_info->end - dev_info->start + 1;
 	*kaddr = (void *) dev_info->start + offset;
-	*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
-			PFN_DEV|PFN_SPECIAL);
+	if (pfn)
+		*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
+				PFN_DEV|PFN_SPECIAL);
 
 	return (dev_sz - offset) / PAGE_SIZE;
 }
diff --git a/fs/dax.c b/fs/dax.c
index 641192808bb6..28264ff4e343 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -647,7 +647,6 @@  static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 {
 	void *vto, *kaddr;
 	pgoff_t pgoff;
-	pfn_t pfn;
 	long rc;
 	int id;
 
@@ -656,7 +655,7 @@  static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
 		return rc;
 
 	id = dax_read_lock();
-	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
+	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
 	if (rc < 0) {
 		dax_read_unlock(id);
 		return rc;
@@ -1052,15 +1051,13 @@  int __dax_zero_page_range(struct block_device *bdev,
 		pgoff_t pgoff;
 		long rc, id;
 		void *kaddr;
-		pfn_t pfn;
 
 		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
 		if (rc)
 			return rc;
 
 		id = dax_read_lock();
-		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
-				&pfn);
+		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
 		if (rc < 0) {
 			dax_read_unlock(id);
 			return rc;
@@ -1116,7 +1113,6 @@  dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 		ssize_t map_len;
 		pgoff_t pgoff;
 		void *kaddr;
-		pfn_t pfn;
 
 		if (fatal_signal_pending(current)) {
 			ret = -EINTR;
@@ -1128,7 +1124,7 @@  dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
 			break;
 
 		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
-				&kaddr, &pfn);
+				&kaddr, NULL);
 		if (map_len < 0) {
 			ret = map_len;
 			break;
diff --git a/tools/testing/nvdimm/pmem-dax.c b/tools/testing/nvdimm/pmem-dax.c
index b53596ad601b..d4cb5281b30e 100644
--- a/tools/testing/nvdimm/pmem-dax.c
+++ b/tools/testing/nvdimm/pmem-dax.c
@@ -33,7 +33,8 @@  long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 
 		*kaddr = pmem->virt_addr + offset;
 		page = vmalloc_to_page(pmem->virt_addr + offset);
-		*pfn = page_to_pfn_t(page);
+		if (pfn)
+			*pfn = page_to_pfn_t(page);
 		pr_debug_ratelimited("%s: pmem: %p pgoff: %#lx pfn: %#lx\n",
 				__func__, pmem, pgoff, page_to_pfn(page));
 
@@ -41,7 +42,8 @@  long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
 	}
 
 	*kaddr = pmem->virt_addr + offset;
-	*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
+	if (pfn)
+		*pfn = phys_to_pfn_t(pmem->phys_addr + offset, pmem->pfn_flags);
 
 	/*
 	 * If badblocks are present, limit known good range to the
-- 
2.13.6
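
For reference, the provider-side pattern the patch applies to pmem and dcssblk
boils down to the sketch below; example_dev, its fields, and the function name
are hypothetical and only meant to make the optional-pfn contract explicit:

#include <linux/types.h>
#include <linux/dax.h>
#include <linux/pfn.h>
#include <linux/pfn_t.h>

/* Hypothetical device state; fields mirror what pmem/dcssblk track. */
struct example_dev {
	void *virt_addr;
	phys_addr_t phys_addr;
	resource_size_t size;
	u64 pfn_flags;
};

/* Hypothetical ->direct_access() implementation, not part of the patch. */
static long example_dax_direct_access(struct dax_device *dax_dev,
		pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct example_dev *dev = dax_get_private(dax_dev);
	resource_size_t offset = PFN_PHYS(pgoff);

	*kaddr = dev->virt_addr + offset;
	/* The pfn is optional: kernel-only callers may pass NULL. */
	if (pfn)
		*pfn = phys_to_pfn_t(dev->phys_addr + offset, dev->pfn_flags);

	return (dev->size - offset) / PAGE_SIZE;
}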