From patchwork Thu Nov 15 11:32:24 2012
From: Chris Wilson <chris@chris-wilson.co.uk>
X-Patchwork-Id: 1748571
To: intel-gfx@lists.freedesktop.org
Date: Thu, 15 Nov 2012 11:32:24 +0000
Message-Id: <1352979151-9934-10-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1352979151-9934-1-git-send-email-chris@chris-wilson.co.uk>
References: <1352979151-9934-1-git-send-email-chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 09/16] drm/i915: Handle stolen objects in pwrite

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 159 ++++++++++++++++++++++++---------------
 1 file changed, 100 insertions(+), 59 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6a2893..9e66e29 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -663,19 +663,17 @@ out:
  * needs_clflush_before is set and flushes out any written cachelines after
  * writing if needs_clflush is set. */
 static int
-shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_fast(char *vaddr, int shmem_page_offset, int page_length,
 		  char __user *user_data,
 		  bool page_do_bit17_swizzling,
 		  bool needs_clflush_before,
 		  bool needs_clflush_after)
 {
-	char *vaddr;
 	int ret;
 
 	if (unlikely(page_do_bit17_swizzling))
 		return -EINVAL;
 
-	vaddr = kmap_atomic(page);
 	if (needs_clflush_before)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
@@ -685,7 +683,6 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
 	if (needs_clflush_after)
 		drm_clflush_virt_range(vaddr + shmem_page_offset,
 				       page_length);
-	kunmap_atomic(vaddr);
 
 	return ret ? -EFAULT : 0;
 }
 
@@ -693,16 +690,14 @@ shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
 /* Only difference to the fast-path function is that this can handle bit17
  * and uses non-atomic copy and kmap functions. */
 static int
-shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
+shmem_pwrite_slow(char *vaddr, int shmem_page_offset, int page_length,
 		  char __user *user_data,
 		  bool page_do_bit17_swizzling,
 		  bool needs_clflush_before,
 		  bool needs_clflush_after)
 {
-	char *vaddr;
 	int ret;
 
-	vaddr = kmap(page);
 	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 					     page_length,
@@ -719,7 +714,6 @@ shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
 		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
 					     page_length,
 					     page_do_bit17_swizzling);
-	kunmap(page);
 
 	return ret ? -EFAULT : 0;
 }
 
@@ -730,10 +724,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		      struct drm_i915_gem_pwrite *args,
 		      struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t offset;
 	char __user *user_data;
-	int shmem_page_offset, page_length, ret = 0;
+	int page_length, ret = 0;
 	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
 	int hit_slowpath = 0;
 	int needs_clflush_after = 0;
@@ -769,74 +764,120 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	i915_gem_object_pin_pages(obj);
-
 	offset = args->offset;
 	obj->dirty = 1;
 
-	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
-		struct page *page;
-		int partial_cacheline_write;
+	if (obj->stolen) {
+		char *vaddr;
 
-		if (i < offset >> PAGE_SHIFT)
-			continue;
+		vaddr = (char *)dev_priv->mm.stolen_base;
+		vaddr += obj->stolen->start + (offset & PAGE_MASK);
 
-		if (remain <= 0)
-			break;
+		offset = offset_in_page(offset);
+		while (remain > 0) {
+			int partial_cacheline_write;
 
-		/* Operation in this page
-		 *
-		 * shmem_page_offset = offset within page in shmem file
-		 * page_length = bytes to copy for this page
-		 */
-		shmem_page_offset = offset_in_page(offset);
+			page_length = remain;
+			if ((offset + page_length) > PAGE_SIZE)
+				page_length = PAGE_SIZE - offset;
 
-		page_length = remain;
-		if ((shmem_page_offset + page_length) > PAGE_SIZE)
-			page_length = PAGE_SIZE - shmem_page_offset;
+			/* If we don't overwrite a cacheline completely we need to be
+			 * careful to have up-to-date data by first clflushing. Don't
+			 * overcomplicate things and flush the entire patch. */
+			partial_cacheline_write = needs_clflush_before &&
+				((offset | page_length) & (boot_cpu_data.x86_clflush_size - 1));
 
-		/* If we don't overwrite a cacheline completely we need to be
-		 * careful to have up-to-date data by first clflushing. Don't
-		 * overcomplicate things and flush the entire patch. */
-		partial_cacheline_write = needs_clflush_before &&
-			((shmem_page_offset | page_length)
-				& (boot_cpu_data.x86_clflush_size - 1));
+			page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+				((uintptr_t)vaddr & (1 << 17)) != 0;
 
-		page = sg_page(sg);
-		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
-			(page_to_phys(page) & (1 << 17)) != 0;
+			ret = shmem_pwrite_fast(vaddr, offset, page_length,
+						user_data, page_do_bit17_swizzling,
+						partial_cacheline_write,
+						needs_clflush_after);
 
-		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
-					user_data, page_do_bit17_swizzling,
-					partial_cacheline_write,
-					needs_clflush_after);
-		if (ret == 0)
-			goto next_page;
+			if (ret == 0)
+				goto next_stolen;
 
-		hit_slowpath = 1;
-		mutex_unlock(&dev->struct_mutex);
-		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
-					user_data, page_do_bit17_swizzling,
-					partial_cacheline_write,
-					needs_clflush_after);
+			hit_slowpath = 1;
+			mutex_unlock(&dev->struct_mutex);
 
-		mutex_lock(&dev->struct_mutex);
+			ret = shmem_pwrite_slow(vaddr, offset, page_length,
+						user_data, page_do_bit17_swizzling,
+						partial_cacheline_write,
+						needs_clflush_after);
 
-next_page:
-		set_page_dirty(page);
-		mark_page_accessed(page);
+			mutex_lock(&dev->struct_mutex);
+			if (ret)
+				goto out;
 
-		if (ret)
-			goto out;
+next_stolen:
+			remain -= page_length;
+			user_data += page_length;
+			offset = 0;
+		}
+	} else {
+		i915_gem_object_pin_pages(obj);
 
-		remain -= page_length;
-		user_data += page_length;
-		offset += page_length;
+		offset = offset_in_page(offset);
+		for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
+			struct page *page;
+			char *vaddr;
+			int partial_cacheline_write;
+
+			if (i < args->offset >> PAGE_SHIFT)
+				continue;
+
+			if (remain <= 0)
+				break;
+
+			page_length = remain;
+			if ((offset + page_length) > PAGE_SIZE)
+				page_length = PAGE_SIZE - offset;
+
+			/* If we don't overwrite a cacheline completely we need to be
+			 * careful to have up-to-date data by first clflushing. Don't
+			 * overcomplicate things and flush the entire patch. */
+			partial_cacheline_write = needs_clflush_before &&
+				((offset | page_length) & (boot_cpu_data.x86_clflush_size - 1));
+
+			page = sg_page(sg);
+			page_do_bit17_swizzling = obj_do_bit17_swizzling &&
+				(page_to_phys(page) & (1 << 17)) != 0;
+
+			vaddr = kmap_atomic(page);
+			ret = shmem_pwrite_fast(vaddr, offset, page_length,
+						user_data, page_do_bit17_swizzling,
+						partial_cacheline_write,
+						needs_clflush_after);
+			kunmap_atomic(vaddr);
+
+			if (ret == 0)
+				goto next_page;
+
+			hit_slowpath = 1;
+			mutex_unlock(&dev->struct_mutex);
+
+			vaddr = kmap(page);
+			ret = shmem_pwrite_slow(vaddr, offset, page_length,
						user_data, page_do_bit17_swizzling,
+						partial_cacheline_write,
+						needs_clflush_after);
+			kunmap(page);
+
+			mutex_lock(&dev->struct_mutex);
+			if (ret)
+				goto out_unpin;
+
+next_page:
+			remain -= page_length;
+			user_data += page_length;
+			offset = 0;
+		}
+out_unpin:
+		i915_gem_object_unpin_pages(obj);
 	}
 
 out:
-	i915_gem_object_unpin_pages(obj);
-
 	if (hit_slowpath) {
 		/* Fixup: Kill any reinstated backing storage pages */
 		if (obj->madv == __I915_MADV_PURGED)
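
[Editor's illustration; not part of the posted patch.]

The patch splits i915_gem_shmem_pwrite() into two loops: stolen objects have no
struct pages backing them, so they are written through the kernel's linear view
of stolen memory (dev_priv->mm.stolen_base + obj->stolen->start), while shmem
objects keep the per-page kmap_atomic()/kmap() step, now hoisted into the
caller so shmem_pwrite_fast()/shmem_pwrite_slow() can take a plain vaddr and
serve both paths. Below is a minimal, self-contained userspace sketch of the
page-chunked copy the stolen branch performs; pwrite_chunk(), pwrite_linear()
and EXAMPLE_PAGE_SIZE are invented names for illustration, not driver API. One
difference: the sketch advances vaddr to the next page each iteration, which
the posted loop does not appear to do (vaddr is computed once before the while
loop), so this reflects the presumed intent rather than the literal hunk.

#include <stddef.h>
#include <string.h>

#define EXAMPLE_PAGE_SIZE 4096u

/* Hypothetical stand-in for shmem_pwrite_fast(): copy one chunk that is
 * guaranteed not to cross a page boundary. The real helper also performs
 * the clflushes and can fail on a faulting user pointer; both are elided. */
static int pwrite_chunk(char *vaddr, size_t offset, size_t len,
			const char *user_data)
{
	memcpy(vaddr + offset, user_data, len);
	return 0;
}

/* Page-chunked write into a linearly mapped buffer: 'base' plays the role
 * of dev_priv->mm.stolen_base + obj->stolen->start in the patch. */
static int pwrite_linear(char *base, size_t offset, size_t remain,
			 const char *user_data)
{
	/* Start at the page containing 'offset' (the offset & PAGE_MASK). */
	char *vaddr = base + (offset & ~(size_t)(EXAMPLE_PAGE_SIZE - 1));

	offset &= EXAMPLE_PAGE_SIZE - 1;	/* offset_in_page(offset) */
	while (remain > 0) {
		size_t page_length = remain;

		/* Clamp the chunk so it never crosses a page boundary. */
		if (offset + page_length > EXAMPLE_PAGE_SIZE)
			page_length = EXAMPLE_PAGE_SIZE - offset;

		if (pwrite_chunk(vaddr, offset, page_length, user_data))
			return -1;

		remain -= page_length;
		user_data += page_length;
		vaddr += EXAMPLE_PAGE_SIZE;	/* advance to the next page */
		offset = 0;	/* later chunks start page-aligned */
	}
	return 0;
}

int main(void)
{
	static char buf[3 * EXAMPLE_PAGE_SIZE];
	static const char msg[] = "write spanning a page boundary";

	/* Start 8 bytes before the end of the first page so the copy is
	 * split into two chunks. */
	return pwrite_linear(buf, EXAMPLE_PAGE_SIZE - 8, sizeof(msg), msg);
}

The chunking mirrors the patch: only the first chunk may start mid-page, each
chunk is clamped at the page boundary, and offset resets to 0 after the first
page, exactly as the "offset = 0;" at the bottom of both loops above.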