[v3,1/8] drm/gem: Add helpers to request a range of pages on a GEM

Message ID 20250404092634.2968115-2-boris.brezillon@collabora.com
State New
Series drm: Introduce sparse GEM shmem

Commit Message

Boris Brezillon April 4, 2025, 9:26 a.m. UTC
From: Adrián Larumbe <adrian.larumbe@collabora.com>

This new API provides a way to partially populate/unpopulate a GEM
object, and also lets the caller specify the GFP flags to use for
the allocations.

This will help drivers that need to support sparse/alloc-on-demand
GEM objects.
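
For context, here is a rough sketch of how a driver could consume these
helpers, e.g. to back a 2 MiB window of a sparse GEM object from a GPU fault
handler. The my_sparse_gem structure and the functions below are made up for
the example and are not part of this series:

#include <linux/errno.h>
#include <linux/limits.h>
#include <linux/minmax.h>
#include <linux/sizes.h>
#include <linux/xarray.h>

#include <drm/drm_gem.h>

/* Hypothetical driver-side object embedding the page xarray. */
struct my_sparse_gem {
	struct drm_gem_object base;
	struct xarray pages;	/* indexed by page offset, xa_init()'d at creation */
};

/* Populate up to 2 MiB worth of pages on first access to @pgoffset. */
static int my_sparse_gem_populate_fault(struct my_sparse_gem *bo, pgoff_t pgoffset)
{
	pgoff_t obj_pages = bo->base.size >> PAGE_SHIFT;
	unsigned int npages;

	if (WARN_ON(pgoffset >= obj_pages))
		return -EINVAL;

	/* Clamp to the end of the object: out-of-range requests are rejected. */
	npages = min_t(pgoff_t, SZ_2M >> PAGE_SHIFT, obj_pages - pgoffset);

	return drm_gem_get_page_range(&bo->base, &bo->pages, pgoffset, npages,
				      GFP_KERNEL, GFP_KERNEL);
}

/* Return everything at object teardown, flagging pages dirty and accessed. */
static void my_sparse_gem_put_all(struct my_sparse_gem *bo)
{
	drm_gem_put_page_range(&bo->base, &bo->pages, 0, UINT_MAX, true, true);
}

Whether GFP_KERNEL is the right choice for both the page and xarray
allocations depends on the locking context the driver calls this from.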

Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com>
---
 drivers/gpu/drm/drm_gem.c | 134 ++++++++++++++++++++++++++++++++++++++
 include/drm/drm_gem.h     |  14 ++++
 2 files changed, 148 insertions(+)

Patch

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 1e659d2660f7..769eaf9943d7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -679,6 +679,140 @@  void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 }
 EXPORT_SYMBOL(drm_gem_put_pages);
 
+/**
+ * drm_gem_put_page_range - helper to return a range of pages backing a GEM
+ * @obj: Object this request applies to.
+ * @pa: Page array to unpopulate.
+ * @start: The first page to unpopulate.
+ * @npages: The number of pages to unpopulate.
+ * @dirty: Flag all returned pages dirty if true.
+ * @accessed: Flag all returned pages accessed if true.
+ *
+ * This is used to flag pages as unused. The pages themselves stay
+ * unreclaimable until all pages of the object have been returned, because
+ * we can't partially flag a mapping unevictable.
+ *
+ * @npages is clamped to the object size, so start=0, npages=UINT_MAX
+ * effectively returns all pages.
+ */
+void drm_gem_put_page_range(struct drm_gem_object *obj, struct xarray *pa,
+			    pgoff_t start, unsigned int npages,
+			    bool dirty, bool accessed)
+{
+	struct folio_batch fbatch;
+	unsigned long idx;
+	unsigned long end = start + npages - 1;
+	struct page *page;
+
+	xa_for_each_range(pa, idx, page, start, end)
+		xa_clear_mark(pa, idx, DRM_GEM_PAGE_USED);
+
+	/* If the mapping is still used, we bail out. */
+	if (xa_marked(pa, DRM_GEM_PAGE_USED))
+		return;
+
+	mapping_clear_unevictable(file_inode(obj->filp)->i_mapping);
+
+	folio_batch_init(&fbatch);
+
+	xa_for_each(pa, idx, page) {
+		struct folio *folio = page_folio(page);
+		unsigned long folio_pg_idx = folio_page_idx(folio, page);
+
+		xa_erase(pa, idx);
+
+		if (dirty)
+			folio_mark_dirty(folio);
+
+		if (accessed)
+			folio_mark_accessed(folio);
+
+		/* Undo the reference we took when populating the table */
+		if (!folio_batch_add(&fbatch, folio))
+			drm_gem_check_release_batch(&fbatch);
+
+		idx += folio_nr_pages(folio) - folio_pg_idx - 1;
+	}
+
+	if (folio_batch_count(&fbatch))
+		drm_gem_check_release_batch(&fbatch);
+}
+EXPORT_SYMBOL(drm_gem_put_page_range);
+
+/**
+ * drm_gem_get_page_range - helper to populate a range of pages on a GEM
+ * @obj: Object this request applies to.
+ * @pa: Page array to populate.
+ * @start: The first page to populate.
+ * @npages: The number of pages to populate.
+ * @page_gfp: GFP flags to use for page allocations.
+ * @other_gfp: GFP flags to use for other allocations, like extending the xarray.
+ *
+ * Partially or fully populate a page xarray backing a GEM object. The
+ * requested range must lie within the object, otherwise -EINVAL is
+ * returned.
+ *
+ * There is no optimization to avoid repopulating already populated ranges,
+ * nor is this case rejected. As soon as one page is populated, the entire
+ * mapping is flagged unevictable, meaning that pages returned with
+ * drm_gem_put_page_range() won't be reclaimable until all pages have been
+ * returned.
+ *
+ * If the operation fails partway through, the pages acquired so far are left
+ * in the xarray. The caller should call drm_gem_put_page_range() explicitly
+ * to undo the partial population.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int drm_gem_get_page_range(struct drm_gem_object *obj, struct xarray *pa,
+			   pgoff_t start, unsigned int npages, gfp_t page_gfp,
+			   gfp_t other_gfp)
+{
+	struct address_space *mapping;
+	struct page *page;
+	unsigned long i;
+	int ret = 0;
+
+	if (WARN_ON(!obj->filp))
+		return -EINVAL;
+
+	if (start + npages < start)
+		return -EINVAL;
+
+	if (start + npages > obj->size >> PAGE_SHIFT)
+		return -EINVAL;
+
+	if (npages == 0)
+		return 0;
+
+	/* This is the shared memory object that backs the GEM resource */
+	mapping = obj->filp->f_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	mapping_set_unevictable(mapping);
+
+	for (i = 0; i < npages; i++) {
+		page = shmem_read_mapping_page_gfp(mapping, start + i, page_gfp);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		/* Add the page into the xarray */
+		ret = xa_err(xa_store(pa, start + i, page, other_gfp));
+		if (ret)
+			return ret;
+
+		xa_set_mark(pa, start + i, DRM_GEM_PAGE_USED);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_get_page_range);
+
 static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
 			  struct drm_gem_object **objs)
 {
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 9b71f7a9f3f8..9980c04355b6 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -39,11 +39,13 @@ 
 #include <linux/dma-resv.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
+#include <linux/xarray.h>
 
 #include <drm/drm_vma_manager.h>
 
 struct iosys_map;
 struct drm_gem_object;
+struct xarray;
 
 /**
  * enum drm_gem_object_status - bitmask of object state for fdinfo reporting
@@ -537,6 +539,18 @@  struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);
 
+/* drm_gem_{get,put}_page_range() use XA_MARK_1 to track which pages are
+ * currently used. Make sure you don't use this mark for anything else.
+ */
+#define DRM_GEM_PAGE_USED	XA_MARK_1
+
+int drm_gem_get_page_range(struct drm_gem_object *obj, struct xarray *pa,
+			   pgoff_t start, unsigned int npages,
+			   gfp_t page_gfp, gfp_t other_gfp);
+void drm_gem_put_page_range(struct drm_gem_object *obj, struct xarray *pa,
+			    pgoff_t start, unsigned int npages,
+			    bool dirty, bool accessed);
+
 void drm_gem_lock(struct drm_gem_object *obj);
 void drm_gem_unlock(struct drm_gem_object *obj);
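
As the drm_gem_get_page_range() kernel-doc above notes, a failure partway
through leaves the already-acquired pages in the xarray, so callers are
expected to undo the partial population themselves. A minimal sketch of that
pattern, reusing the hypothetical my_sparse_gem object from the earlier
example:

static int my_sparse_gem_populate(struct my_sparse_gem *bo,
				  pgoff_t start, unsigned int npages)
{
	int ret;

	ret = drm_gem_get_page_range(&bo->base, &bo->pages, start, npages,
				     GFP_KERNEL, GFP_KERNEL);
	if (ret) {
		/*
		 * Pages acquired before the failure are still tracked in the
		 * xarray: return the whole range so nothing is leaked. They
		 * are only actually released once no page of the object is
		 * marked used anymore.
		 */
		drm_gem_put_page_range(&bo->base, &bo->pages, start, npages,
				       false, false);
	}

	return ret;
}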