
[v4,2/2] drm/i915: optimise i915_gem_object_map() for small objects

Message ID 1463489483-34903-2-git-send-email-david.s.gordon@intel.com (mailing list archive)
State New, archived

Commit Message

Dave Gordon May 17, 2016, 12:51 p.m. UTC
We're using this function for ringbuffers and other "small" objects, so
it's worth avoiding an extra malloc()/free() cycle if the page array is
small enough to put on the stack. Here we've chosen an arbitrary cutoff
of 32 pages (4 KiB each), which is big enough for a ringbuffer (4 pages) or a
context image (currently up to 22 pages).

v2:
    change name of local array [Chris Wilson]

Signed-off-by: Dave Gordon <david.s.gordon@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
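
For illustration only, here is a minimal standalone userspace sketch of the
same pattern: a temporary pointer array lives on the stack when the input is
small, the code falls back to a heap allocation only above a cutoff, and only
a heap-allocated array is freed. All names in it are hypothetical, and plain
malloc()/free() stand in for drm_malloc_gfp()/drm_free_large():

/*
 * Sketch of the small-array-on-stack pattern (hypothetical names,
 * not kernel API).
 */
#include <stdio.h>
#include <stdlib.h>

#define STACK_SLOTS 32	/* same arbitrary cutoff as the patch */

/* Sum an array of values via a temporary array of pointers to them,
 * loosely mirroring the page-pointer gather before vmap(). */
static long sum_via_pointers(int *values, size_t n)
{
	int *stack_ptrs[STACK_SLOTS];
	int **ptrs = stack_ptrs;
	long total = 0;
	size_t i;

	if (n > STACK_SLOTS) {
		/* Too big for stack -- allocate temporary array instead */
		ptrs = malloc(n * sizeof(*ptrs));
		if (ptrs == NULL)
			return -1;
	}

	/* Gather loop, standing in for for_each_sg_page() */
	for (i = 0; i < n; i++)
		ptrs[i] = &values[i];

	for (i = 0; i < n; i++)
		total += *ptrs[i];

	/* Only free the array if we fell back to the heap */
	if (ptrs != stack_ptrs)
		free(ptrs);

	return total;
}

int main(void)
{
	int small[4] = { 1, 2, 3, 4 };

	printf("%ld\n", sum_via_pointers(small, 4));	/* prints 10 */
	return 0;
}

The point of the final "pointer != stack array" check is that the common
(small) case pays neither an allocation nor a free, while the rare large
case behaves exactly as before.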

Patch

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 82a1bc4..dda7fe6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2404,7 +2404,8 @@  static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
 	struct sg_table *sgt = obj->pages;
 	struct sg_page_iter sg_iter;
-	struct page **pages;
+	struct page *stack_pages[32];
+	struct page **pages = stack_pages;
 	unsigned long i = 0;
 	void *addr;
 
@@ -2412,9 +2413,12 @@  static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 	if (n_pages == 1)
 		return kmap(sg_page(sgt->sgl));
 
-	pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
-	if (pages == NULL)
-		return NULL;
+	if (n_pages > ARRAY_SIZE(stack_pages)) {
+		/* Too big for stack -- allocate temporary array instead */
+		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+		if (pages == NULL)
+			return NULL;
+	}
 
 	for_each_sg_page(sgt->sgl, &sg_iter, sgt->nents, 0)
 		pages[i++] = sg_page_iter_page(&sg_iter);
@@ -2424,7 +2428,8 @@  static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
 
 	addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
 
-	drm_free_large(pages);
+	if (pages != stack_pages)
+		drm_free_large(pages);
 
 	return addr;
 }