[5/5] drm/gem: remove misleading gfp parameter to get_pages()

Message ID 1401021252-29006-5-git-send-email-dh.herrmann@gmail.com (mailing list archive)
State Accepted

Commit Message

David Herrmann May 25, 2014, 12:34 p.m. UTC
drm_gem_get_pages() currently takes a 'gfp' parameter that is combined
with mapping_gfp_mask() and then passed on to shmem. Given that the
default mapping_gfp_mask() is GFP_HIGHUSER, it is _very_ unlikely that
anyone will ever make use of that parameter. In fact, all drivers
currently pass redundant flags or 0.

This patch removes the 'gfp' parameter. The only reason to keep it would
be to remove flags like __GFP_WAIT. But in its current form, it can only
be used to add flags. So to remove __GFP_WAIT, you'd have to drop it from
mapping_gfp_mask() itself, which is not an option as shmem-core uses that
mask for its other allocations, too.
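
For reference, the combining logic being removed boils down to roughly
the following (simplified sketch of the old helper, loop and error
handling omitted):

    /* old drm_gem_get_pages(obj, gfpmask): flags can only be added */
    gfpmask |= mapping_gfp_mask(mapping);
    p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);

Since the caller's mask is OR'ed into mapping_gfp_mask(), there is no way
to clear a flag through this parameter.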

If any driver ever requires that parameter, we can introduce a new helper
that takes the raw 'gfp' parameter. The caller would then be responsible
for combining it with mapping_gfp_mask() in a suitable way. The current
drm_gem_get_pages() helper would simply use mapping_gfp_mask() and call
the new helper. This is exactly how shmem_read_mapping_page{,_gfp}() is
split up right now.
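
Such a helper might look roughly like the sketch below; note that
drm_gem_get_pages_gfp() is a made-up name used purely for illustration
and is not added by this patch:

    /* hypothetical helper, name invented for illustration only; the
     * caller must keep gfp_zone(gfp) equal to the mapping's zone */
    struct page **drm_gem_get_pages_gfp(struct drm_gem_object *obj, gfp_t gfp);

    /* drm_gem_get_pages() would then reduce to: */
    struct page **drm_gem_get_pages(struct drm_gem_object *obj)
    {
            struct address_space *mapping = file_inode(obj->filp)->i_mapping;

            return drm_gem_get_pages_gfp(obj, mapping_gfp_mask(mapping));
    }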

Moreover, the gfp-zone flag usage is not obvious: if you pass a modified
zone, shmem-core will WARN() or even BUG(). In other words, the following
must hold for the 'gfp' passed to shmem_read_mapping_page_gfp():
    gfp_zone(mapping_gfp_mask(mapping)) == gfp_zone(gfp)
Add a comment to drm_gem_get_pages() explaining that constraint.
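
A driver with zone constraints (e.g. the DMA32 requirement checked by the
BUG_ON() below) would instead set the mask once on the shmem mapping,
roughly like this (sketch only; the exact gfp flags are driver-specific):

    /* after drm_gem_object_init(): keep all backing pages in ZONE_DMA32;
     * shmem honours this for later allocations and swap-in as well */
    mapping_set_gfp_mask(file_inode(obj->filp)->i_mapping,
                         GFP_USER | __GFP_DMA32);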

Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
---
 drivers/gpu/drm/drm_gem.c          | 29 ++++++++++++++++++++---------
 drivers/gpu/drm/gma500/gtt.c       |  2 +-
 drivers/gpu/drm/msm/msm_gem.c      |  2 +-
 drivers/gpu/drm/omapdrm/omap_gem.c |  2 +-
 drivers/gpu/drm/udl/udl_gem.c      |  8 ++++----
 include/drm/drmP.h                 |  2 +-
 6 files changed, 28 insertions(+), 17 deletions(-)

Patch

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f7d7119..6adee4c 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -441,18 +441,31 @@  EXPORT_SYMBOL(drm_gem_create_mmap_offset);
  * drm_gem_get_pages - helper to allocate backing pages for a GEM object
  * from shmem
  * @obj: obj in question
- * @gfpmask: gfp mask of requested pages
+ *
+ * This reads the page-array of the shmem-backing storage of the given gem
+ * object. An array of pages is returned. If a page is not allocated or
+ * swapped-out, this will allocate/swap-in the required pages. Note that the
+ * whole object is covered by the page-array and pinned in memory.
+ *
+ * Use drm_gem_put_pages() to release the array and unpin all pages.
+ *
+ * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
+ * If you require other GFP-masks, you have to do those allocations yourself.
+ *
+ * Note that you are not allowed to change gfp-zones during runtime. That is,
+ * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
+ * set during initialization. If you have special zone constraints, set them
+ * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
+ * to keep pages in the required zone during swap-in.
  */
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+struct page **drm_gem_get_pages(struct drm_gem_object *obj)
 {
-	struct inode *inode;
 	struct address_space *mapping;
 	struct page *p, **pages;
 	int i, npages;
 
 	/* This is the shared memory object that backs the GEM resource */
-	inode = file_inode(obj->filp);
-	mapping = inode->i_mapping;
+	mapping = file_inode(obj->filp)->i_mapping;
 
 	/* We already BUG_ON() for non-page-aligned sizes in
 	 * drm_gem_object_init(), so we should never hit this unless
@@ -466,10 +479,8 @@  struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 	if (pages == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	gfpmask |= mapping_gfp_mask(mapping);
-
 	for (i = 0; i < npages; i++) {
-		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		p = shmem_read_mapping_page(mapping, i);
 		if (IS_ERR(p))
 			goto fail;
 		pages[i] = p;
@@ -479,7 +490,7 @@  struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
 		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
 		 * so shmem can relocate pages during swapin if required.
 		 */
-		BUG_ON((gfpmask & __GFP_DMA32) &&
+		BUG_ON((mapping_gfp_mask(mapping) & __GFP_DMA32) &&
 				(page_to_pfn(p) >= 0x00100000UL));
 	}
 
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 592d205..ce015db 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -206,7 +206,7 @@  static int psb_gtt_attach_pages(struct gtt_range *gt)
 
 	WARN_ON(gt->pages);
 
-	pages = drm_gem_get_pages(&gt->gem, 0);
+	pages = drm_gem_get_pages(&gt->gem);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026d..6866879 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -73,7 +73,7 @@  static struct page **get_pages(struct drm_gem_object *obj)
 		int npages = obj->size >> PAGE_SHIFT;
 
 		if (iommu_present(&platform_bus_type))
-			p = drm_gem_get_pages(obj, 0);
+			p = drm_gem_get_pages(obj);
 		else
 			p = get_pages_vram(obj, npages);
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 1331fd5..7c68c8a 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -233,7 +233,7 @@  static int omap_gem_attach_pages(struct drm_gem_object *obj)
 
 	WARN_ON(omap_obj->pages);
 
-	pages = drm_gem_get_pages(obj, GFP_KERNEL);
+	pages = drm_gem_get_pages(obj);
 	if (IS_ERR(pages)) {
 		dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
 		return PTR_ERR(pages);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index c041cd7..8044f5f 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -107,14 +107,14 @@  int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 }
 
-static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
+static int udl_gem_get_pages(struct udl_gem_object *obj)
 {
 	struct page **pages;
 
 	if (obj->pages)
 		return 0;
 
-	pages = drm_gem_get_pages(&obj->base, gfpmask);
+	pages = drm_gem_get_pages(&obj->base);
 	if (IS_ERR(pages))
 		return PTR_ERR(pages);
 
@@ -147,7 +147,7 @@  int udl_gem_vmap(struct udl_gem_object *obj)
 		return 0;
 	}
 		
-	ret = udl_gem_get_pages(obj, GFP_KERNEL);
+	ret = udl_gem_get_pages(obj);
 	if (ret)
 		return ret;
 
@@ -205,7 +205,7 @@  int udl_gem_mmap(struct drm_file *file, struct drm_device *dev,
 	}
 	gobj = to_udl_bo(obj);
 
-	ret = udl_gem_get_pages(gobj, GFP_KERNEL);
+	ret = udl_gem_get_pages(gobj);
 	if (ret)
 		goto out;
 	ret = drm_gem_create_mmap_offset(obj);
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 12f10bc..40b6e2f 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1572,7 +1572,7 @@  void drm_gem_free_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset(struct drm_gem_object *obj);
 int drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
 
-struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+struct page **drm_gem_get_pages(struct drm_gem_object *obj);
 void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
 		bool dirty, bool accessed);
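
With the 'gfp' parameter gone, a typical driver pin/unpin path using these
helpers looks roughly like this (minimal sketch, not taken from any
specific driver; obj is an initialized, shmem-backed drm_gem_object):

    struct page **pages;

    pages = drm_gem_get_pages(obj);             /* pin all backing pages */
    if (IS_ERR(pages))
            return PTR_ERR(pages);

    /* ... set up mappings, use the pages ... */

    drm_gem_put_pages(obj, pages, true, true);  /* dirty, accessed */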