@@ -679,6 +679,38 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
}
EXPORT_SYMBOL(drm_gem_put_pages);
+/**
+ * drm_gem_put_sparse_xarray - release the pages of a sparse page xarray
+ * @pa: xarray holding the pages to release
+ * @idx: index of the first page to release
+ * @npages: number of pages expected from @idx onwards
+ * @dirty: flag whether the folios should be marked dirty before release
+ * @accessed: flag whether the folios should be marked accessed before release
+ *
+ * Drops the reference taken on each folio when the xarray was populated
+ * and erases the corresponding entries. There is refactorisation potential
+ * with drm_gem_put_pages(), but it is yet to be decided what this should
+ * look like.
+ */
+void drm_gem_put_sparse_xarray(struct xarray *pa, unsigned long idx,
+			       unsigned int npages, bool dirty, bool accessed)
+{
+	unsigned long start = idx;
+	struct folio_batch fbatch;
+	struct page *page;
+
+	folio_batch_init(&fbatch);
+
+	xa_for_each(pa, idx, page) {
+		struct folio *folio = page_folio(page);
+
+		if (dirty)
+			folio_mark_dirty(folio);
+		if (accessed)
+			folio_mark_accessed(folio);
+
+		/* Undo the reference we took when populating the table */
+		if (!folio_batch_add(&fbatch, folio))
+			drm_gem_check_release_batch(&fbatch);
+
+		xa_erase(pa, idx);
+
+		/*
+		 * Skip the remaining pages of a large folio.
+		 * NOTE(review): only the head index is erased here; if the
+		 * populate path stored one entry per tail page too, those
+		 * entries are left behind for xa_destroy() — confirm callers
+		 * always destroy the xarray right after.
+		 */
+		idx += folio_nr_pages(folio) - 1;
+	}
+
+	if (folio_batch_count(&fbatch))
+		drm_gem_check_release_batch(&fbatch);
+
+	/* Account relative to the starting index, not absolute index zero */
+	WARN_ON(idx - start + 1 != npages);
+}
+EXPORT_SYMBOL(drm_gem_put_sparse_xarray);
+
static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
struct drm_gem_object **objs)
{
@@ -10,7 +10,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <linux/xarray.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
@@ -161,6 +160,18 @@ struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_create_with_mnt);
+/*
+ * Release every resident page of a sparse shmem object.
+ *
+ * rss_size tracks the number of bytes currently backed by pages, so it
+ * must be page-aligned here; the whole xarray is drained from index 0.
+ */
+static void drm_gem_shmem_put_pages_sparse(struct drm_gem_shmem_object *shmem)
+{
+	unsigned int n_pages = shmem->rss_size / PAGE_SIZE;
+
+	drm_WARN_ON(shmem->base.dev, (shmem->rss_size & (PAGE_SIZE - 1)) != 0);
+	drm_WARN_ON(shmem->base.dev, !shmem->sparse);
+
+	drm_gem_put_sparse_xarray(&shmem->xapages, 0, n_pages,
+				  shmem->pages_mark_dirty_on_put,
+				  shmem->pages_mark_accessed_on_put);
+}
+
/**
* drm_gem_shmem_free - Free resources associated with a shmem GEM object
* @shmem: shmem GEM object to free
@@ -264,10 +275,15 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem)
set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
#endif
- drm_gem_put_pages(obj, shmem->pages,
- shmem->pages_mark_dirty_on_put,
- shmem->pages_mark_accessed_on_put);
- shmem->pages = NULL;
+ if (!shmem->sparse) {
+ drm_gem_put_pages(obj, shmem->pages,
+ shmem->pages_mark_dirty_on_put,
+ shmem->pages_mark_accessed_on_put);
+ shmem->pages = NULL;
+ } else {
+ drm_gem_shmem_put_pages_sparse(shmem);
+ xa_destroy(&shmem->xapages);
+ }
}
EXPORT_SYMBOL(drm_gem_shmem_put_pages);
@@ -765,6 +781,81 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_
return ERR_PTR(ret);
}
+/*
+ * Populate a subrange [page_offset, page_offset + n_pages) of a sparse shmem
+ * object, dma-map it and return a freshly allocated sg_table for it.
+ *
+ * Must be called with the object's reservation lock held. Returns -EEXIST if
+ * the first page of the range is already populated, or a negative errno on
+ * allocation/mapping failure; on failure all pages stored by this call are
+ * erased and released again.
+ */
+static struct sg_table *drm_gem_shmem_get_sparse_pages_locked(struct drm_gem_shmem_object *shmem,
+							      unsigned int n_pages,
+							      pgoff_t page_offset)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	/*
+	 * NOTE(review): GFP_NOWAIT is a subset of GFP_KERNEL, so this mask is
+	 * effectively GFP_KERNEL. Given the nonblocking shmem read below, the
+	 * intent may have been GFP_NOWAIT alone — confirm.
+	 */
+	gfp_t mask = GFP_KERNEL | GFP_NOWAIT;
+	size_t size = n_pages * PAGE_SIZE;
+	struct address_space *mapping;
+	struct sg_table *sgt;
+	struct page *page;
+	bool first_alloc;
+	int ret, i;
+
+	if (!shmem->sparse)
+		return ERR_PTR(-EINVAL);
+
+	/* The lookup below is only meaningful while the resv lock is held */
+	dma_resv_assert_held(shmem->base.resv);
+
+	/* If the mapping exists, then bail out immediately */
+	if (xa_load(&shmem->xapages, page_offset) != NULL)
+		return ERR_PTR(-EEXIST);
+
+	first_alloc = xa_empty(&shmem->xapages);
+
+	mapping = shmem->base.filp->f_mapping;
+	mapping_set_unevictable(mapping);
+
+	for (i = 0; i < n_pages; i++) {
+		page = shmem_read_mapping_page_nonblocking(mapping, page_offset + i);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			goto err_free_pages;
+		}
+
+		/* Add the page into the xarray */
+		ret = xa_err(xa_store(&shmem->xapages, page_offset + i, page, mask));
+		if (ret) {
+			put_page(page);
+			goto err_free_pages;
+		}
+	}
+
+	sgt = kzalloc(sizeof(*sgt), mask);
+	if (!sgt) {
+		ret = -ENOMEM;
+		goto err_free_pages;
+	}
+
+	ret = sg_alloc_table_from_page_xarray(sgt, &shmem->xapages, page_offset, n_pages, 0, size, mask);
+	if (ret)
+		goto err_free_sgtable;
+
+	ret = dma_map_sgtable(obj->dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+	if (ret)
+		goto err_free_sgtable;
+
+	if (first_alloc)
+		shmem->pages_use_count = 1;
+
+	shmem->rss_size += size;
+
+	return sgt;
+
+err_free_sgtable:
+	kfree(sgt);
+err_free_pages:
+	/*
+	 * Release indices [page_offset, page_offset + i). Using i-- (rather
+	 * than --i) also covers index 0 and is safe when the very first page
+	 * read failed with i == 0.
+	 */
+	while (i--) {
+		page = xa_erase(&shmem->xapages, page_offset + i);
+		if (drm_WARN_ON(obj->dev, !page))
+			continue;
+		put_page(page);
+	}
+	return ERR_PTR(ret);
+}
+
/**
* drm_gem_shmem_get_pages_sgt - Pin pages, dma map them, and return a
* scatter/gather table for a shmem GEM object.
@@ -796,6 +887,28 @@ struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem)
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_get_pages_sgt);
+/**
+ * drm_gem_shmem_get_sparse_pages_sgt - Populate a subrange of a sparse shmem
+ *                                      object and return its sg_table
+ * @shmem: shmem GEM object
+ * @n_pages: number of pages to populate
+ * @page_offset: page offset into the object at which the range starts
+ *
+ * Locked wrapper around drm_gem_shmem_get_sparse_pages_locked(): takes the
+ * object's reservation lock, populates and dma-maps the requested range,
+ * and releases the lock again.
+ *
+ * Returns: a new sg_table on success, or an ERR_PTR() on failure.
+ */
+struct sg_table *drm_gem_shmem_get_sparse_pages_sgt(struct drm_gem_shmem_object *shmem,
+						    unsigned int n_pages, pgoff_t page_offset)
+{
+	struct drm_gem_object *obj = &shmem->base;
+	struct sg_table *sgt;
+	int ret;
+
+	if (drm_WARN_ON(obj->dev, !shmem->sparse))
+		return ERR_PTR(-EINVAL);
+
+	ret = dma_resv_lock(shmem->base.resv, NULL);
+	if (ret)
+		return ERR_PTR(ret);
+
+	sgt = drm_gem_shmem_get_sparse_pages_locked(shmem, n_pages, page_offset);
+
+	dma_resv_unlock(shmem->base.resv);
+
+	return sgt;
+}
+EXPORT_SYMBOL_GPL(drm_gem_shmem_get_sparse_pages_sgt);
+
/**
* drm_gem_shmem_prime_import_sg_table - Produce a shmem GEM object from
* another driver's scatter/gather table of pinned pages
@@ -38,6 +38,7 @@
#include <linux/dma-resv.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/xarray.h>
#include <drm/drm_vma_manager.h>
@@ -532,6 +533,8 @@ int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
struct page **drm_gem_get_pages(struct drm_gem_object *obj);
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
bool dirty, bool accessed);
+void drm_gem_put_sparse_xarray(struct xarray *pa, unsigned long idx,
+ unsigned int npages, bool dirty, bool accessed);
void drm_gem_lock(struct drm_gem_object *obj);
void drm_gem_unlock(struct drm_gem_object *obj);
@@ -44,6 +44,14 @@ struct drm_gem_shmem_object {
*/
unsigned int pages_use_count;
+ /**
+ * @rss_size:
+ *
+	 * Size of the object's resident set (RSS), in bytes; may differ
+	 * from the base GEM object's virtual size over the object's lifetime.
+ */
+ size_t rss_size;
+
/**
* @madv: State for madvise
*
@@ -107,6 +115,7 @@ struct drm_gem_shmem_object {
container_of(obj, struct drm_gem_shmem_object, base)
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
+struct drm_gem_shmem_object *drm_gem_shmem_create_sparse(struct drm_device *dev, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
size_t size,
struct vfsmount *gemfs);
@@ -138,6 +147,9 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
+struct sg_table *drm_gem_shmem_get_sparse_pages_sgt(struct drm_gem_shmem_object *shmem,
+ unsigned int n_pages, pgoff_t page_offset);
+
void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem,
struct drm_printer *p, unsigned int indent);
Add a new function that lets drivers allocate pages for a subset of the shmem object's virtual address range. Expand the shmem object's definition to include an RSS field, since it's different from the base GEM object's virtual size. Add also new function for putting the pages of a sparse page array. There is refactorisation potential with drm_gem_put_pages, but it is yet to be decided what this should look like. Signed-off-by: Adrián Larumbe <adrian.larumbe@collabora.com> --- drivers/gpu/drm/drm_gem.c | 32 +++++++ drivers/gpu/drm/drm_gem_shmem_helper.c | 123 ++++++++++++++++++++++++- include/drm/drm_gem.h | 3 + include/drm/drm_gem_shmem_helper.h | 12 +++ 4 files changed, 165 insertions(+), 5 deletions(-)