[EARLY,RFC,2/4] dma-buf: pools: Add page-pool for dma-buf pools

Message ID 1550734830-23499-3-git-send-email-john.stultz@linaro.org (mailing list archive)
State New, archived
Series dmabuf pools infrastructure (destaging ION)

Commit Message

John Stultz Feb. 21, 2019, 7:40 a.m. UTC
This adds the page-pool logic to the dma-buf pools, which allows
a pool to keep pre-allocated (and cache-flushed) pages around to
speed up allocation performance.

NOTE: The page-pool name is a term preserved from ION, but it is
easily confused with dma-buf pools. Suggestions for alternative
names here would be great.
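
As a rough usage sketch (not part of this patch -- the mypool_*
helpers here are hypothetical), a dma-buf pool implementation might
drive the API like so:

	static struct dmabuf_page_pool *mypool;

	static int mypool_init(void)
	{
		/* Cache order-0 pages for fast reuse */
		mypool = dmabuf_page_pool_create(GFP_KERNEL, 0);
		return mypool ? 0 : -ENOMEM;
	}

	static struct page *mypool_alloc_page(void)
	{
		/* Served from the pool when possible, else alloc_pages() */
		return dmabuf_page_pool_alloc(mypool);
	}

	static void mypool_release_page(struct page *page)
	{
		/* Return the page to the pool rather than freeing it */
		dmabuf_page_pool_free(mypool, page);
	}

	static void mypool_exit(void)
	{
		/* Drain cached pages (highmem included), then free the pool */
		dmabuf_page_pool_shrink(mypool, GFP_HIGHUSER, INT_MAX);
		dmabuf_page_pool_destroy(mypool);
	}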

Cc: Laura Abbott <labbott@redhat.com>
Cc: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Liam Mark <lmark@codeaurora.org>
Cc: Brian Starkey <Brian.Starkey@arm.com>
Cc: Andrew F. Davis <afd@ti.com>
Cc: Chenbo Feng <fengc@google.com>
Cc: Alistair Strachan <astrachan@google.com>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: John Stultz <john.stultz@linaro.org>
---
 drivers/dma-buf/pools/Makefile       |   2 +-
 drivers/dma-buf/pools/dmabuf-pools.h |  51 ++++++++++++
 drivers/dma-buf/pools/page_pool.c    | 157 +++++++++++++++++++++++++++++++++++
 3 files changed, 209 insertions(+), 1 deletion(-)
 create mode 100644 drivers/dma-buf/pools/page_pool.c

Patch

diff --git a/drivers/dma-buf/pools/Makefile b/drivers/dma-buf/pools/Makefile
index 6cb1284..a51ec25 100644
--- a/drivers/dma-buf/pools/Makefile
+++ b/drivers/dma-buf/pools/Makefile
@@ -1,2 +1,2 @@ 
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DMABUF_POOLS)		+= dmabuf-pools.o pool-ioctl.o pool-helpers.o
+obj-$(CONFIG_DMABUF_POOLS)		+= dmabuf-pools.o pool-ioctl.o pool-helpers.o page_pool.o
diff --git a/drivers/dma-buf/pools/dmabuf-pools.h b/drivers/dma-buf/pools/dmabuf-pools.h
index 12110f2..e3a0aac 100644
--- a/drivers/dma-buf/pools/dmabuf-pools.h
+++ b/drivers/dma-buf/pools/dmabuf-pools.h
@@ -238,6 +238,57 @@  size_t dmabuf_pool_freelist_shrink(struct dmabuf_pool *pool,
  */
 size_t dmabuf_pool_freelist_size(struct dmabuf_pool *pool);
 
+/*
+ * Functions for creating and destroying a page pool -- this lets a
+ * dma-buf pool keep a cache of pre-allocated pages ready for use.
+ * Keeping pages that are ready for DMA, i.e. any cached mappings
+ * have been invalidated, provides a significant performance benefit
+ * on many systems.
+ */
+
+/**
+ * struct dmabuf_page_pool - pagepool struct
+ * @high_count:		number of highmem items in the pool
+ * @low_count:		number of lowmem items in the pool
+ * @high_items:		list of highmem items
+ * @low_items:		list of lowmem items
+ * @mutex:		lock protecting this struct, in particular the
+ *			counts and item lists
+ * @gfp_mask:		gfp_mask to use for allocations
+ * @order:		order of pages in the pool
+ * @list:		plist node for the list of pools
+ *
+ * Allows a dma-buf pool to keep a cache of pre-allocated pages to
+ * allocate from. Keeping a pool of pages that are ready for DMA,
+ * i.e. any cached mappings have been invalidated, provides a
+ * significant performance benefit on many systems.
+ */
+struct dmabuf_page_pool {
+	int high_count;
+	int low_count;
+	struct list_head high_items;
+	struct list_head low_items;
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct plist_node list;
+};
+
+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask,
+						 unsigned int order);
+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool);
+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool);
+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page);
+
+/**
+ * dmabuf_page_pool_shrink - shrink the amount of memory cached in the pool
+ * @pool:		the page pool
+ * @gfp_mask:		the memory type to reclaim
+ * @nr_to_scan:		number of pages to try to free
+ *
+ * Returns the number of pages freed. If @nr_to_scan is zero, the
+ * current number of pages held in the pool is returned instead.
+ */
+int dmabuf_page_pool_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
+			    int nr_to_scan);
 
 long dmabuf_pool_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 
diff --git a/drivers/dma-buf/pools/page_pool.c b/drivers/dma-buf/pools/page_pool.c
new file mode 100644
index 0000000..c1fe994
--- /dev/null
+++ b/drivers/dma-buf/pools/page_pool.c
@@ -0,0 +1,157 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * drivers/dma-buf/pools/page_pool.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+
+#include "dmabuf-pools.h"
+
+static inline struct page *dmabuf_page_pool_alloc_pages(
+						struct dmabuf_page_pool *pool)
+{
+	return alloc_pages(pool->gfp_mask, pool->order);
+}
+
+static void dmabuf_page_pool_free_pages(struct dmabuf_page_pool *pool,
+					struct page *page)
+{
+	__free_pages(page, pool->order);
+}
+
+static void dmabuf_page_pool_add(struct dmabuf_page_pool *pool,
+				 struct page *page)
+{
+	mutex_lock(&pool->mutex);
+	if (PageHighMem(page)) {
+		list_add_tail(&page->lru, &pool->high_items);
+		pool->high_count++;
+	} else {
+		list_add_tail(&page->lru, &pool->low_items);
+		pool->low_count++;
+	}
+
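+	/* Account pooled pages as reclaimable kernel memory in vmstat */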
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+							1 << pool->order);
+	mutex_unlock(&pool->mutex);
+}
+
+static struct page *dmabuf_page_pool_remove(struct dmabuf_page_pool *pool,
+					    bool high)
+{
+	struct page *page;
+
+	if (high) {
+		WARN_ON(!pool->high_count);
+		page = list_first_entry(&pool->high_items, struct page, lru);
+		pool->high_count--;
+	} else {
+		WARN_ON(!pool->low_count);
+		page = list_first_entry(&pool->low_items, struct page, lru);
+		pool->low_count--;
+	}
+
+	list_del(&page->lru);
+	mod_node_page_state(page_pgdat(page), NR_KERNEL_MISC_RECLAIMABLE,
+							-(1 << pool->order));
+	return page;
+}
+
+struct page *dmabuf_page_pool_alloc(struct dmabuf_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	if (WARN_ON(!pool))
+		return NULL;
+
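+	/* Try the pool first: prefer cached highmem pages, then lowmem */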
+	mutex_lock(&pool->mutex);
+	if (pool->high_count)
+		page = dmabuf_page_pool_remove(pool, true);
+	else if (pool->low_count)
+		page = dmabuf_page_pool_remove(pool, false);
+	mutex_unlock(&pool->mutex);
+
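+	/* The pool was empty: fall back to the page allocator */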
+	if (!page)
+		page = dmabuf_page_pool_alloc_pages(pool);
+
+	return page;
+}
+
+void dmabuf_page_pool_free(struct dmabuf_page_pool *pool, struct page *page)
+{
+	WARN_ON(pool->order != compound_order(page));
+
+	dmabuf_page_pool_add(pool, page);
+}
+
+static int dmabuf_page_pool_total(struct dmabuf_page_pool *pool, bool high)
+{
+	int count = pool->low_count;
+
+	if (high)
+		count += pool->high_count;
+
+	return count << pool->order;
+}
+
+int dmabuf_page_pool_shrink(struct dmabuf_page_pool *pool, gfp_t gfp_mask,
+			    int nr_to_scan)
+{
+	int freed = 0;
+	bool high;
+
+	if (current_is_kswapd())
+		high = true;
+	else
+		high = !!(gfp_mask & __GFP_HIGHMEM);
+
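+	/* nr_to_scan == 0 is a query for the current pool size in pages */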
+	if (nr_to_scan == 0)
+		return dmabuf_page_pool_total(pool, high);
+
+	while (freed < nr_to_scan) {
+		struct page *page;
+
+		mutex_lock(&pool->mutex);
+		if (pool->low_count) {
+			page = dmabuf_page_pool_remove(pool, false);
+		} else if (high && pool->high_count) {
+			page = dmabuf_page_pool_remove(pool, true);
+		} else {
+			mutex_unlock(&pool->mutex);
+			break;
+		}
+		mutex_unlock(&pool->mutex);
+		dmabuf_page_pool_free_pages(pool, page);
+		freed += (1 << pool->order);
+	}
+
+	return freed;
+}
+
+struct dmabuf_page_pool *dmabuf_page_pool_create(gfp_t gfp_mask,
+						 unsigned int order)
+{
+	struct dmabuf_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+
+	if (!pool)
+		return NULL;
+	pool->high_count = 0;
+	pool->low_count = 0;
+	INIT_LIST_HEAD(&pool->low_items);
+	INIT_LIST_HEAD(&pool->high_items);
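+	/*
+	 * __GFP_COMP makes higher-order allocations compound pages, which
+	 * the compound_order() check in dmabuf_page_pool_free() relies on.
+	 */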
+	pool->gfp_mask = gfp_mask | __GFP_COMP;
+	pool->order = order;
+	mutex_init(&pool->mutex);
+	plist_node_init(&pool->list, order);
+
+	return pool;
+}
+
+void dmabuf_page_pool_destroy(struct dmabuf_page_pool *pool)
+{
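+	/*
+	 * This only frees the pool struct itself; any cached pages are
+	 * expected to have been reclaimed via dmabuf_page_pool_shrink().
+	 */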
+	kfree(pool);
+}