
[v4,21/23] mm: use custom page_free for P2PDMA pages

Message ID 20211117215410.3695-22-logang@deltatee.com (mailing list archive)
State Superseded
Delegated to: Bjorn Helgaas
Series Userspace P2PDMA with O_DIRECT NVMe devices

Commit Message

Logan Gunthorpe Nov. 17, 2021, 9:54 p.m. UTC
When P2PDMA pages are passed to userspace, they will need to be
reference counted properly and returned to their genalloc after their
reference count returns to 1. This is accomplished with the existing
DEV_PAGEMAP_OPS and the .page_free() operation.

Change CONFIG_PCI_P2PDMA to select CONFIG_DEV_PAGEMAP_OPS and add
MEMORY_DEVICE_PCI_P2PDMA to page_is_devmap_managed(),
devmap_managed_enable_[put|get]() and free_devmap_managed_page().

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
---
 drivers/pci/Kconfig  |  1 +
 drivers/pci/p2pdma.c | 13 +++++++++++++
 include/linux/mm.h   |  1 +
 mm/memremap.c        | 12 +++++++++---
 4 files changed, 24 insertions(+), 3 deletions(-)
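
For context, the release path this patch hooks into looks roughly like the
condensed sketch below. page_is_devmap_managed(), page_ref_dec_return() and
free_devmap_managed_page() are the real helpers touched by the diff; the
wrapper sketch_p2pdma_put() is hypothetical and its body paraphrases (rather
than copies) the v5.16-era put_page()/put_devmap_managed_page() logic.

#include <linux/mm.h>
#include <linux/memremap.h>

/* Hypothetical sketch; not literal kernel code. */
static void sketch_p2pdma_put(struct page *page)
{
	/* put_page(): devmap-managed ZONE_DEVICE pages take a special path */
	if (page_is_devmap_managed(page)) {
		/*
		 * put_devmap_managed_page(): the pgmap pins one reference,
		 * so the page counts as free when the refcount drops back
		 * to 1.  free_devmap_managed_page() then calls
		 * page->pgmap->ops->page_free() -- with this patch applied,
		 * the new p2pdma_page_free(), which hands the page back to
		 * the provider's genpool.
		 */
		if (page_ref_dec_return(page) == 1)
			free_devmap_managed_page(page);
		return;
	}

	put_page(page);		/* ordinary pages: normal freeing path */
}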

Comments

Christoph Hellwig Dec. 21, 2021, 9:06 a.m. UTC | #1
On Wed, Nov 17, 2021 at 02:54:08PM -0700, Logan Gunthorpe wrote:
> When P2PDMA pages are passed to userspace, they will need to be
> reference counted properly and returned to their genalloc after their
> reference count returns to 1. This is accomplished with the existing
> DEV_PAGEMAP_OPS and the .page_free() operation.
> 
> Change CONFIG_PCI_P2PDMA to select CONFIG_DEV_PAGEMAP_OPS and add
> MEMORY_DEVICE_PCI_P2PDMA to page_is_devmap_managed(),
> devmap_managed_enable_[put|get]() and free_devmap_managed_page().

Uuuh.  We are trying hard to kill off this magic free at refcount 1
behavior in the amdgpu device coherent series.  We really should not
add more of this.
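
To make the objection concrete, the two page lifetimes at issue look roughly
like the illustration below; neither helper is taken from this patch or from
the device-coherent cleanup referred to above, and both names are made up.

#include <linux/memremap.h>
#include <linux/page_ref.h>

/* What this patch relies on: the pgmap pins one reference, so the page is
 * "magically" considered free once its count drops back to 1. */
static void put_free_at_one(struct page *page)
{
	if (page_ref_dec_return(page) == 1)
		page->pgmap->ops->page_free(page);
}

/* The lifetime the cleanup work moves toward: an ordinary refcount that
 * frees the page only when it reaches 0. */
static void put_free_at_zero(struct page *page)
{
	if (page_ref_dec_and_test(page))
		page->pgmap->ops->page_free(page);
}
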
Logan Gunthorpe Dec. 21, 2021, 5:27 p.m. UTC | #2
On 2021-12-21 2:06 a.m., Christoph Hellwig wrote:
> On Wed, Nov 17, 2021 at 02:54:08PM -0700, Logan Gunthorpe wrote:
>> When P2PDMA pages are passed to userspace, they will need to be
>> reference counted properly and returned to their genalloc after their
>> reference count returns to 1. This is accomplished with the existing
>> DEV_PAGEMAP_OPS and the .page_free() operation.
>>
>> Change CONFIG_PCI_P2PDMA to select CONFIG_DEV_PAGEMAP_OPS and add
>> MEMORY_DEVICE_PCI_P2PDMA to page_is_devmap_managed(),
>> devmap_managed_enable_[put|get]() and free_devmap_managed_page().
> 
> Uuuh.  We are trying hard to kill off this magic free at refcount 1
> behavior in the amdgpu device coherent series.  We really should not
> add more of this.

Ah, ok. I found Ralph's patch that cleans this up and I can try to
rebase this onto it for future postings until it gets merged.

Your other comments I can address for the next time I post this series.

Thanks for the review!

Logan

Patch

diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 95f29601a4df..da53799cddab 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -170,6 +170,7 @@  config PCI_P2PDMA
 	#
 	select NEED_SG_DMA_BUS_ADDR_FLAG
 	select GENERIC_ALLOCATOR
+	select DEV_PAGEMAP_OPS
 	help
 	  Enables drivers to do PCI peer-to-peer transactions to and from
 	  BARs that are exposed in other devices that are part of
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 563e9be9599e..16992b0f0c36 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -101,6 +101,18 @@  static const struct attribute_group p2pmem_group = {
 	.name = "p2pmem",
 };
 
+static void p2pdma_page_free(struct page *page)
+{
+	struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+
+	gen_pool_free(pgmap->provider->p2pdma->pool,
+		      (uintptr_t)page_to_virt(page), PAGE_SIZE);
+}
+
+static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
+	.page_free = p2pdma_page_free,
+};
+
 static void pci_p2pdma_release(void *data)
 {
 	struct pci_dev *pdev = data;
@@ -198,6 +210,7 @@  int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 	pgmap->range.end = pgmap->range.start + size - 1;
 	pgmap->nr_range = 1;
 	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
+	pgmap->ops = &p2pdma_pgmap_ops;
 
 	p2p_pgmap->provider = pdev;
 	p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 3367d936b256..f26ea7e1fc74 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1168,6 +1168,7 @@  static inline bool page_is_devmap_managed(struct page *page)
 	switch (page->pgmap->type) {
 	case MEMORY_DEVICE_PRIVATE:
 	case MEMORY_DEVICE_FS_DAX:
+	case MEMORY_DEVICE_PCI_P2PDMA:
 		return true;
 	default:
 		break;
diff --git a/mm/memremap.c b/mm/memremap.c
index 5a66a71ab591..ec3143ffdeee 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -44,14 +44,16 @@  EXPORT_SYMBOL(devmap_managed_key);
 static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
 {
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
-	    pgmap->type == MEMORY_DEVICE_FS_DAX)
+	    pgmap->type == MEMORY_DEVICE_FS_DAX ||
+	    pgmap->type == MEMORY_DEVICE_PCI_P2PDMA)
 		static_branch_dec(&devmap_managed_key);
 }
 
 static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
 {
 	if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
-	    pgmap->type == MEMORY_DEVICE_FS_DAX)
+	    pgmap->type == MEMORY_DEVICE_FS_DAX ||
+	    pgmap->type == MEMORY_DEVICE_PCI_P2PDMA)
 		static_branch_inc(&devmap_managed_key);
 }
 #else
@@ -355,6 +357,10 @@  void *memremap_pages(struct dev_pagemap *pgmap, int nid)
 	case MEMORY_DEVICE_GENERIC:
 		break;
 	case MEMORY_DEVICE_PCI_P2PDMA:
+		if (!pgmap->ops->page_free) {
+			WARN(1, "Missing page_free method\n");
+			return ERR_PTR(-EINVAL);
+		}
 		params.pgprot = pgprot_noncached(params.pgprot);
 		break;
 	default:
@@ -498,7 +504,7 @@  EXPORT_SYMBOL_GPL(get_dev_pagemap);
 void free_devmap_managed_page(struct page *page)
 {
 	/* notify page idle for dax */
-	if (!is_device_private_page(page)) {
+	if (!is_device_private_page(page) && !is_pci_p2pdma_page(page)) {
 		wake_up_var(&page->_refcount);
 		return;
 	}
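
For reference, a minimal sketch of how a provider driver would publish BAR
memory through this path. pci_p2pdma_add_resource(), pci_alloc_p2pmem() and
pci_free_p2pmem() are the existing p2pdma APIs; the surrounding
example_probe() driver and its choice of BAR 4 are hypothetical.

#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/pci-p2pdma.h>

/* Hypothetical provider driver; only the p2pdma calls are real APIs. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void *buf;
	int ret;

	/*
	 * Publish all of BAR 4 (size == 0 means the whole BAR) as P2PDMA
	 * memory.  With this patch, pci_p2pdma_add_resource() also sets
	 * pgmap->ops = &p2pdma_pgmap_ops, so any page carved out of this
	 * BAR is returned to the genpool by p2pdma_page_free() when its
	 * last reference is dropped.
	 */
	ret = pci_p2pdma_add_resource(pdev, 4, 0, 0);
	if (ret)
		return ret;

	/* In-kernel users allocate and free against the genpool directly. */
	buf = pci_alloc_p2pmem(pdev, PAGE_SIZE);
	if (!buf)
		return -ENOMEM;
	pci_free_p2pmem(pdev, buf, PAGE_SIZE);

	/*
	 * Pages handed to userspace (the point of this series) instead come
	 * back through put_page() -> free_devmap_managed_page() ->
	 * p2pdma_page_free().
	 */
	return 0;
}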