From patchwork Tue May 25 05:06:50 2010 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: "Xiang, Haihao" X-Patchwork-Id: 102078 Received: from gabe.freedesktop.org (gabe.freedesktop.org [131.252.210.177]) by demeter.kernel.org (8.14.3/8.14.3) with ESMTP id o4P56UOm017142 for ; Tue, 25 May 2010 05:07:05 GMT Received: from gabe.freedesktop.org (localhost [127.0.0.1]) by gabe.freedesktop.org (Postfix) with ESMTP id 863F69E9EB for ; Mon, 24 May 2010 22:06:29 -0700 (PDT) X-Original-To: intel-gfx@lists.freedesktop.org Delivered-To: intel-gfx@lists.freedesktop.org Received: from mga01.intel.com (mga01.intel.com [192.55.52.88]) by gabe.freedesktop.org (Postfix) with ESMTP id 83AB99E78C for ; Mon, 24 May 2010 22:06:18 -0700 (PDT) Received: from fmsmga001.fm.intel.com ([10.253.24.23]) by fmsmga101.fm.intel.com with ESMTP; 24 May 2010 22:02:55 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="4.53,296,1272870000"; d="scan'208";a="801574998" Received: from xhh-ilk32.sh.intel.com (HELO localhost.localdomain) ([10.239.36.137]) by fmsmga001.fm.intel.com with ESMTP; 24 May 2010 22:06:00 -0700 From: "Xiang, Haihao" To: intel-gfx@lists.freedesktop.org Date: Tue, 25 May 2010 13:06:50 +0800 Message-Id: <1274764010-6660-1-git-send-email-haihao.xiang@intel.com> X-Mailer: git-send-email 1.6.3.3 Subject: [Intel-gfx] [intel-gfx][PATCH] intel: add a new interface drm_intel_bo_alloc_direct X-BeenThere: intel-gfx@lists.freedesktop.org X-Mailman-Version: 2.1.11 Precedence: list List-Id: Intel graphics driver community testing & development List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , MIME-Version: 1.0 Sender: intel-gfx-bounces+patchwork-intel-gfx=patchwork.kernel.org@lists.freedesktop.org Errors-To: intel-gfx-bounces+patchwork-intel-gfx=patchwork.kernel.org@lists.freedesktop.org X-Greylist: IP, sender and recipient auto-whitelisted, not delayed by milter-greylist-4.2.3 (demeter.kernel.org 
[140.211.167.41]); Tue, 25 May 2010 05:07:05 +0000 (UTC) diff --git a/intel/intel_bufmgr.c b/intel/intel_bufmgr.c index 9144fdd..1188253 100644 --- a/intel/intel_bufmgr.c +++ b/intel/intel_bufmgr.c @@ -50,6 +50,15 @@ drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, return bufmgr->bo_alloc(bufmgr, name, size, alignment); } +drm_intel_bo *drm_intel_bo_alloc_direct(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment) +{ + if (bufmgr->bo_alloc_direct) + return bufmgr->bo_alloc_direct(bufmgr, name, size, alignment); + + return NULL; +} + drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, diff --git a/intel/intel_bufmgr.h b/intel/intel_bufmgr.h index cbcddb6..9dd0443 100644 --- a/intel/intel_bufmgr.h +++ b/intel/intel_bufmgr.h @@ -78,9 +78,12 @@ struct _drm_intel_bo { }; #define BO_ALLOC_FOR_RENDER (1<<0) +#define BO_ALLOC_DIRECT (1<<1) drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, unsigned int alignment); +drm_intel_bo *drm_intel_bo_alloc_direct(drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment); drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name, unsigned long size, diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c index b76fd7e..a16cd16 100644 --- a/intel/intel_bufmgr_gem.c +++ b/intel/intel_bufmgr_gem.c @@ -556,17 +556,22 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, drm_intel_bo_gem *bo_gem; unsigned int page_size = getpagesize(); int ret; - struct drm_intel_gem_bo_bucket *bucket; + struct drm_intel_gem_bo_bucket *bucket = NULL; int alloc_from_cache; unsigned long bo_size; int for_render = 0; + int reusable = 1; if (flags & BO_ALLOC_FOR_RENDER) for_render = 1; /* Round the allocated size up to a power of two number of pages. 
*/ - bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size); + if (!(flags & BO_ALLOC_DIRECT)) + bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size); + if (bucket == NULL) + reusable = 0; + /* If we don't have caching at this size, don't actually round the * allocation up. */ @@ -653,7 +658,7 @@ retry: bo_gem->has_error = 0; bo_gem->tiling_mode = I915_TILING_NONE; bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; - bo_gem->reusable = 1; + bo_gem->reusable = reusable; drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem); @@ -683,6 +688,15 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, } static drm_intel_bo * +drm_intel_gem_bo_alloc_direct(drm_intel_bufmgr *bufmgr, + const char *name, + unsigned long size, + unsigned int alignment) +{ + return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, BO_ALLOC_DIRECT); +} + +static drm_intel_bo * drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name, int x, int y, int cpp, uint32_t *tiling_mode, unsigned long *pitch, unsigned long flags) @@ -2058,6 +2072,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size) bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc; + bufmgr_gem->bufmgr.bo_alloc_direct = drm_intel_gem_bo_alloc_direct; bufmgr_gem->bufmgr.bo_alloc_for_render = drm_intel_gem_bo_alloc_for_render; bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled; diff --git a/intel/intel_bufmgr_priv.h b/intel/intel_bufmgr_priv.h index f987d97..47277f4 100644 --- a/intel/intel_bufmgr_priv.h +++ b/intel/intel_bufmgr_priv.h @@ -51,6 +51,14 @@ struct _drm_intel_bufmgr { unsigned long size, unsigned int alignment); /** + * Allocate a buffer object. 
+ * + * This is the same as bo_alloc, except that it bypasses the cache buckets; the resulting buffer object is never marked reusable. + */ + drm_intel_bo *(*bo_alloc_direct) (drm_intel_bufmgr *bufmgr, const char *name, + unsigned long size, unsigned int alignment); + + /** * Allocate a buffer object, hinting that it will be used as a * render target. *