From patchwork Thu Feb 18 18:31:37 2016
X-Patchwork-Submitter: yu.dai@intel.com
X-Patchwork-Id: 8353471
From: yu.dai@intel.com
To: intel-gfx@lists.freedesktop.org
Cc: daniel.vetter@ffwll.ch
Date: Thu, 18 Feb 2016 10:31:37 -0800
Message-Id: <1455820298-5463-2-git-send-email-yu.dai@intel.com>
In-Reply-To: <1455820298-5463-1-git-send-email-yu.dai@intel.com>
References: <1455820298-5463-1-git-send-email-yu.dai@intel.com>
Subject: [Intel-gfx] [PATCH v2 1/2] drm/i915: Add i915_gem_object_vmap to map GEM object to virtual space

From: Alex Dai

There are several places inside the driver where a GEM object is mapped
into kernel virtual space, either for the whole object or for a certain
page range of it. This patch introduces a function, i915_gem_object_vmap(),
to do that job.

v2: Use obj->pages->nents for iteration within i915_gem_object_vmap; break
once all desired pages have been gathered. The caller needs to pass in the
actual page count.
(Tvrtko Ursulin)

Signed-off-by: Alex Dai
Cc: Dave Gordon
Cc: Daniel Vetter
Cc: Tvrtko Ursulin
Cc: Chris Wilson
Signed-off-by: Alex Dai
Reviewed-by: Tvrtko Ursulin
---
 drivers/gpu/drm/i915/i915_cmd_parser.c  | 28 +-------------------
 drivers/gpu/drm/i915/i915_drv.h         |  3 +++
 drivers/gpu/drm/i915/i915_gem.c         | 47 +++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_dmabuf.c  | 16 +++--------
 drivers/gpu/drm/i915/intel_ringbuffer.c | 24 ++---------------
 5 files changed, 56 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 814d894..915e8c1 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -863,37 +863,11 @@ find_reg(const struct drm_i915_reg_descriptor *table,
 static u32 *vmap_batch(struct drm_i915_gem_object *obj,
                        unsigned start, unsigned len)
 {
-        int i;
-        void *addr = NULL;
-        struct sg_page_iter sg_iter;
         int first_page = start >> PAGE_SHIFT;
         int last_page = (len + start + 4095) >> PAGE_SHIFT;
         int npages = last_page - first_page;
-        struct page **pages;
-
-        pages = drm_malloc_ab(npages, sizeof(*pages));
-        if (pages == NULL) {
-                DRM_DEBUG_DRIVER("Failed to get space for pages\n");
-                goto finish;
-        }
-
-        i = 0;
-        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) {
-                pages[i++] = sg_page_iter_page(&sg_iter);
-                if (i == npages)
-                        break;
-        }
-
-        addr = vmap(pages, i, 0, PAGE_KERNEL);
-        if (addr == NULL) {
-                DRM_DEBUG_DRIVER("Failed to vmap pages\n");
-                goto finish;
-        }
-finish:
-        if (pages)
-                drm_free_large(pages);
-        return (u32*)addr;
+        return (u32*)i915_gem_object_vmap(obj, first_page, npages);
 }
 
 /* Returns a vmap'd pointer to dest_obj, which the caller must unmap */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6644c2e..5b00a6a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2899,6 +2899,9 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
                 struct drm_device *dev, const void *data, size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
+void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
+                           unsigned int first,
+                           unsigned int npages);
 
 /* Flags used by pin/bind&friends. */
 #define PIN_MAPPABLE  (1<<0)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f68f346..4bc0ce7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5356,3 +5356,50 @@ fail:
         drm_gem_object_unreference(&obj->base);
         return ERR_PTR(ret);
 }
+
+/**
+ * i915_gem_object_vmap - map a GEM obj into kernel virtual space
+ * @obj: the GEM obj to be mapped
+ * @first: index of the first page where mapping starts
+ * @npages: how many pages to be mapped, starting from first page
+ *
+ * Map a given page range of GEM obj into kernel virtual space. The caller must
+ * make sure the associated pages are gathered and pinned before calling this
+ * function. vunmap should be called after use.
+ *
+ * NULL will be returned if fails.
+ */
+void *i915_gem_object_vmap(struct drm_i915_gem_object *obj,
+                           unsigned int first,
+                           unsigned int npages)
+{
+        struct sg_page_iter sg_iter;
+        struct page **pages;
+        void *addr;
+        int i;
+
+        if (first + npages > obj->pages->nents) {
+                DRM_DEBUG_DRIVER("Invalid page count\n");
+                return NULL;
+        }
+
+        pages = drm_malloc_ab(npages, sizeof(*pages));
+        if (pages == NULL) {
+                DRM_DEBUG_DRIVER("Failed to get space for pages\n");
+                return NULL;
+        }
+
+        i = 0;
+        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first) {
+                pages[i++] = sg_page_iter_page(&sg_iter);
+                if (i == npages)
+                        break;
+        }
+
+        addr = vmap(pages, npages, 0, PAGE_KERNEL);
+        if (addr == NULL)
+                DRM_DEBUG_DRIVER("Failed to vmap pages\n");
+        drm_free_large(pages);
+
+        return addr;
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 1f3eef6..6133036 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -110,9 +110,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
         struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
         struct drm_device *dev = obj->base.dev;
-        struct sg_page_iter sg_iter;
-        struct page **pages;
-        int ret, i;
+        int ret;
 
         ret = i915_mutex_lock_interruptible(dev);
         if (ret)
@@ -131,16 +129,8 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
         ret = -ENOMEM;
 
-        pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-        if (pages == NULL)
-                goto err_unpin;
-
-        i = 0;
-        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-                pages[i++] = sg_page_iter_page(&sg_iter);
-
-        obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
-        drm_free_large(pages);
+        obj->dma_buf_vmapping = i915_gem_object_vmap(obj, 0,
+                                        dma_buf->size >> PAGE_SHIFT);
 
         if (!obj->dma_buf_vmapping)
                 goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 45ce45a..93666e9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2064,27 +2064,6 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
         i915_gem_object_ggtt_unpin(ringbuf->obj);
 }
 
-static u32 *vmap_obj(struct drm_i915_gem_object *obj)
-{
-        struct sg_page_iter sg_iter;
-        struct page **pages;
-        void *addr;
-        int i;
-
-        pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
-        if (pages == NULL)
-                return NULL;
-
-        i = 0;
-        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
-                pages[i++] = sg_page_iter_page(&sg_iter);
-
-        addr = vmap(pages, i, 0, PAGE_KERNEL);
-        drm_free_large(pages);
-
-        return addr;
-}
-
 int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                      struct intel_ringbuffer *ringbuf)
 {
@@ -2103,7 +2082,8 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                 return ret;
         }
 
-        ringbuf->virtual_start = vmap_obj(obj);
+        ringbuf->virtual_start = i915_gem_object_vmap(obj, 0,
+                                        ringbuf->size >> PAGE_SHIFT);
         if (ringbuf->virtual_start == NULL) {
                 i915_gem_object_ggtt_unpin(obj);
                 return -ENOMEM;
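
[Editor's usage note, not part of the patch: below is a minimal sketch of how a caller
is expected to use the new helper, following the kernel-doc contract above: gather and
pin the object's backing pages first, then vunmap() the returned address after use. The
wrapper name example_map_whole_object() is hypothetical, and the
i915_gem_object_get_pages()/i915_gem_object_pin_pages() calls are assumptions about the
surrounding i915 API of this era, shown for illustration only.]

/* Illustrative sketch only -- not part of the patch above. */
static void *example_map_whole_object(struct drm_i915_gem_object *obj)
{
        unsigned int npages = obj->base.size >> PAGE_SHIFT;
        void *vaddr;
        int ret;

        /* The helper requires the backing pages to be gathered and pinned. */
        ret = i915_gem_object_get_pages(obj);
        if (ret)
                return NULL;
        i915_gem_object_pin_pages(obj);

        /* Map every page of the object, starting at page index 0. */
        vaddr = i915_gem_object_vmap(obj, 0, npages);
        if (vaddr == NULL)
                i915_gem_object_unpin_pages(obj);

        /* Caller must vunmap(vaddr) and unpin the pages when finished. */
        return vaddr;
}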