From patchwork Mon Feb 18 17:28:02 2013
X-Patchwork-Submitter: Imre Deak <imre.deak@intel.com>
X-Patchwork-Id: 2159701
From: Imre Deak <imre.deak@intel.com>
To: intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Date: Mon, 18 Feb 2013 19:28:02 +0200
Message-Id: <1361208484-27039-3-git-send-email-imre.deak@intel.com>
X-Mailer: git-send-email 1.7.10.4
In-Reply-To: <1361208484-27039-1-git-send-email-imre.deak@intel.com>
References: <1361208484-27039-1-git-send-email-imre.deak@intel.com>
In-Reply-To: <1360423656-10816-1-git-send-email-imre.deak@intel.com>
References: <1360423656-10816-1-git-send-email-imre.deak@intel.com>
Cc: Daniel Vetter, Rahul Sharma
Subject: [Intel-gfx] [PATCH v2 2/4] drm/i915: handle walking compact dma scatter lists

So far the assumption was that each dma scatter list entry contains only
a single page. This might not hold in the future, when we introduce
compact scatter lists, so prepare for this everywhere in the i915 code
where we walk such a list.

We'll fix the place _creating_ these lists separately in the next patch,
to help reviewability and bisectability.

Reference: http://www.spinics.net/lists/dri-devel/msg33917.html
Signed-off-by: Imre Deak <imre.deak@intel.com>
---
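For reviewers not familiar with the sg_page_iter interface this patch builds
on, here is a minimal, illustrative sketch (not part of the patch) contrasting
the two walking styles. The walk_per_entry()/walk_per_page() helpers are made
up for illustration only, and the sketch assumes the struct sg_page_iter
layout used at the time of this posting, which exposes the current page as a
.page member (as the diff below does as well):

#include <linux/scatterlist.h>

/*
 * Old style: one iteration per sg entry.  sg_page() only yields the
 * first page of an entry, so this is correct only while every entry
 * is known to cover exactly one page.
 */
static void walk_per_entry(struct sg_table *st)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(st->sgl, sg, st->nents, i) {
		struct page *page = sg_page(sg);
		/* ... use page ... */
	}
}

/*
 * New style: one iteration per page, no matter how many pages a single
 * (compact) sg entry spans.  The last argument is the page offset into
 * the list at which to start walking.
 */
static void walk_per_page(struct sg_table *st)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		struct page *page = sg_iter.page;
		/* ... use page ... */
	}
}

The start-offset argument of for_each_sg_page() is what lets the shmem
pread/pwrite paths below begin the walk directly at args->offset (passed as
offset >> PAGE_SHIFT) instead of skipping entries by hand.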
 drivers/gpu/drm/i915/i915_drv.h        | 17 ++++++-----------
 drivers/gpu/drm/i915/i915_gem.c        | 24 ++++++++----------------
 drivers/gpu/drm/i915/i915_gem_dmabuf.c | 13 +++++++------
 drivers/gpu/drm/i915/i915_gem_tiling.c | 18 ++++++++++--------
 4 files changed, 31 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e95337c..2c03636 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1528,17 +1528,12 @@ void i915_gem_lastclose(struct drm_device *dev);
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
-	struct scatterlist *sg = obj->pages->sgl;
-	int nents = obj->pages->nents;
-	while (nents > SG_MAX_SINGLE_ALLOC) {
-		if (n < SG_MAX_SINGLE_ALLOC - 1)
-			break;
-
-		sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
-		n -= SG_MAX_SINGLE_ALLOC - 1;
-		nents -= SG_MAX_SINGLE_ALLOC - 1;
-	}
-	return sg_page(sg+n);
+	struct sg_page_iter sg_iter;
+
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
+		return sg_iter.page;
+
+	return NULL;
 }
 static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8413ffc..03037cf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -411,8 +411,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int prefaulted = 0;
 	int needs_clflush = 0;
-	struct scatterlist *sg;
-	int i;
+	struct sg_page_iter sg_iter;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -441,11 +440,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
 
 	offset = args->offset;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_iter.page;
 
 		if (remain <= 0)
 			break;
@@ -460,7 +457,6 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
 			page_length = PAGE_SIZE - shmem_page_offset;
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
@@ -732,8 +728,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
 	int needs_clflush_before = 0;
-	int i;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	remain = args->size;
@@ -768,13 +763,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	offset = args->offset;
 	obj->dirty = 1;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
+			 offset >> PAGE_SHIFT) {
+		struct page *page = sg_iter.page;
 		int partial_cacheline_write;
 
-		if (i < offset >> PAGE_SHIFT)
-			continue;
-
 		if (remain <= 0)
 			break;
 
@@ -796,7 +789,6 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 			((shmem_page_offset | page_length)
 				& (boot_cpu_data.x86_clflush_size - 1));
 
-		page = sg_page(sg);
 		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
 			(page_to_phys(page) & (1 << 17)) != 0;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6a5af68..898615d 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -62,7 +62,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	src = obj->pages->sgl;
 	dst = st->sgl;
 	for (i = 0; i < obj->pages->nents; i++) {
-		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
+		sg_set_page(dst, sg_page(src), src->length, 0);
 		dst = sg_next(dst);
 		src = sg_next(src);
 	}
@@ -105,7 +105,7 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 {
 	struct drm_i915_gem_object *obj = dma_buf->priv;
 	struct drm_device *dev = obj->base.dev;
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	struct page **pages;
 	int ret, i;
 
@@ -124,14 +124,15 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 
 	ret = -ENOMEM;
 
-	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
+	pages = drm_malloc_ab(obj->base.size >> PAGE_SHIFT, sizeof(*pages));
 	if (pages == NULL)
 		goto error;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
-		pages[i] = sg_page(sg);
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0)
+		pages[i++] = sg_iter.page;
 
-	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
+	obj->dma_buf_vmapping = vmap(pages, i, 0, PAGE_KERNEL);
 	drm_free_large(pages);
 
 	if (!obj->dma_buf_vmapping)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index abcba2f..f799708 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -473,28 +473,29 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
-	int page_count = obj->base.size >> PAGE_SHIFT;
+	struct sg_page_iter sg_iter;
 	int i;
 
 	if (obj->bit_17 == NULL)
 		return;
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		struct page *page = sg_iter.page;
 		char new_bit_17 = page_to_phys(page) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj->bit_17) != 0)) {
 			i915_gem_swizzle_page(page);
 			set_page_dirty(page);
 		}
+		i++;
 	}
 }
 
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct scatterlist *sg;
+	struct sg_page_iter sg_iter;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
@@ -508,11 +509,12 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 		}
 	}
 
-	for_each_sg(obj->pages->sgl, sg, page_count, i) {
-		struct page *page = sg_page(sg);
-		if (page_to_phys(page) & (1 << 17))
+	i = 0;
+	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
+		if (page_to_phys(sg_iter.page) & (1 << 17))
 			__set_bit(i, obj->bit_17);
 		else
 			__clear_bit(i, obj->bit_17);
+		i++;
 	}
 }