--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -170,6 +170,7 @@ config PCI_P2PDMA
#
select NEED_SG_DMA_BUS_ADDR_FLAG
select GENERIC_ALLOCATOR
+ select DEV_PAGEMAP_OPS
help
Enables drivers to do PCI peer-to-peer transactions to and from
BARs that are exposed in other devices that are part of
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -101,6 +101,18 @@ static const struct attribute_group p2pmem_group = {
.name = "p2pmem",
};

+static void p2pdma_page_free(struct page *page)
+{
+ struct pci_p2pdma_pagemap *pgmap = to_p2p_pgmap(page->pgmap);
+
+ gen_pool_free(pgmap->provider->p2pdma->pool,
+ (uintptr_t)page_to_virt(page), PAGE_SIZE);
+}
+
+static const struct dev_pagemap_ops p2pdma_pgmap_ops = {
+ .page_free = p2pdma_page_free,
+};
+
static void pci_p2pdma_release(void *data)
{
struct pci_dev *pdev = data;
@@ -198,6 +210,7 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
pgmap->range.end = pgmap->range.start + size - 1;
pgmap->nr_range = 1;
pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
+ pgmap->ops = &p2pdma_pgmap_ops;

p2p_pgmap->provider = pdev;
p2p_pgmap->bus_offset = pci_bus_address(pdev, bar) -
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1168,6 +1168,7 @@ static inline bool page_is_devmap_managed(struct page *page)
switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_FS_DAX:
+ case MEMORY_DEVICE_PCI_P2PDMA:
return true;
default:
break;
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -44,14 +44,16 @@ EXPORT_SYMBOL(devmap_managed_key);
static void devmap_managed_enable_put(struct dev_pagemap *pgmap)
{
if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
- pgmap->type == MEMORY_DEVICE_FS_DAX)
+ pgmap->type == MEMORY_DEVICE_FS_DAX ||
+ pgmap->type == MEMORY_DEVICE_PCI_P2PDMA)
static_branch_dec(&devmap_managed_key);
}

static void devmap_managed_enable_get(struct dev_pagemap *pgmap)
{
if (pgmap->type == MEMORY_DEVICE_PRIVATE ||
- pgmap->type == MEMORY_DEVICE_FS_DAX)
+ pgmap->type == MEMORY_DEVICE_FS_DAX ||
+ pgmap->type == MEMORY_DEVICE_PCI_P2PDMA)
static_branch_inc(&devmap_managed_key);
}
#else
@@ -355,6 +357,10 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
case MEMORY_DEVICE_GENERIC:
break;
case MEMORY_DEVICE_PCI_P2PDMA:
+ if (!pgmap->ops || !pgmap->ops->page_free) {
+ WARN(1, "Missing page_free method\n");
+ return ERR_PTR(-EINVAL);
+ }
params.pgprot = pgprot_noncached(params.pgprot);
break;
default:
@@ -498,7 +504,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
void free_devmap_managed_page(struct page *page)
{
/* notify page idle for dax */
- if (!is_device_private_page(page)) {
+ if (!is_device_private_page(page) && !is_pci_p2pdma_page(page)) {
wake_up_var(&page->_refcount);
return;
}
When P2PDMA pages are passed to userspace, they will need to be
reference counted properly and returned to their genalloc after their
reference count returns to 1. This is accomplished with the existing
DEV_PAGEMAP_OPS and the .page_free() operation.

Change CONFIG_PCI_P2PDMA to select CONFIG_DEV_PAGEMAP_OPS and add
MEMORY_DEVICE_PCI_P2PDMA to page_is_devmap_managed(),
devmap_managed_enable_[put|get]() and free_devmap_managed_page().

Signed-off-by: Logan Gunthorpe <logang@deltatee.com>
---
 drivers/pci/Kconfig  |  1 +
 drivers/pci/p2pdma.c | 13 +++++++++++++
 include/linux/mm.h   |  1 +
 mm/memremap.c        | 12 +++++++++---
 4 files changed, 24 insertions(+), 3 deletions(-)
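
For reference, a simplified sketch of the release path these changes hook
into, paraphrased from the put_page()/put_devmap_managed_page() code in
include/linux/mm.h and mm/memremap.c as I read it; it is not part of this
patch and the exact code in a given tree may differ slightly:

	/* put_page() diverts devmap-managed pages off the normal free path */
	static inline void put_page(struct page *page)
	{
		page = compound_head(page);

		/* With this patch, true for MEMORY_DEVICE_PCI_P2PDMA pages too */
		if (page_is_devmap_managed(page)) {
			put_devmap_managed_page(page);
			return;
		}

		if (put_page_testzero(page))
			__put_page(page);
	}

	/*
	 * Devmap page refcounts are 1-based: a drop to 1 means the page is
	 * idle, so free_devmap_managed_page() runs and, for P2PDMA pages,
	 * now reaches pgmap->ops->page_free(), i.e. p2pdma_page_free(),
	 * which hands the chunk back to the provider's genalloc pool.
	 */
	void put_devmap_managed_page(struct page *page)
	{
		int count = page_ref_dec_return(page);

		if (count == 1)
			free_devmap_managed_page(page);
		else if (!count)
			__put_page(page);
	}

So once CONFIG_PCI_P2PDMA selects DEV_PAGEMAP_OPS and the new type is
handled in page_is_devmap_managed(), the final put_page() on a P2PDMA page
(for example after get_user_pages()) ends up in p2pdma_page_free() and the
memory returns to the genalloc pool, as described above.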