@@ -50,6 +50,15 @@ drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
+drm_intel_bo *drm_intel_bo_alloc_direct(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment)
+{
+ if (bufmgr->bo_alloc_direct)
+ return bufmgr->bo_alloc_direct(bufmgr, name, size, alignment);
+
+ return NULL;
+}
+
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
@@ -78,9 +78,12 @@ struct _drm_intel_bo {
};
#define BO_ALLOC_FOR_RENDER (1<<0)
+#define BO_ALLOC_DIRECT (1<<1)
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
+drm_intel_bo *drm_intel_bo_alloc_direct(drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
@@ -556,17 +556,22 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
drm_intel_bo_gem *bo_gem;
unsigned int page_size = getpagesize();
int ret;
- struct drm_intel_gem_bo_bucket *bucket;
+ struct drm_intel_gem_bo_bucket *bucket = NULL;
int alloc_from_cache;
unsigned long bo_size;
int for_render = 0;
+ int reusable = 1;
if (flags & BO_ALLOC_FOR_RENDER)
for_render = 1;
/* Round the allocated size up to a power of two number of pages. */
- bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
+ if (!(flags & BO_ALLOC_DIRECT))
+ bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
+ if (bucket == NULL)
+ reusable = 0;
+
/* If we don't have caching at this size, don't actually round the
* allocation up.
*/
@@ -653,7 +658,7 @@ retry:
bo_gem->has_error = 0;
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
- bo_gem->reusable = 1;
+ bo_gem->reusable = reusable;
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
@@ -683,6 +688,15 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
}
static drm_intel_bo *
+drm_intel_gem_bo_alloc_direct(drm_intel_bufmgr *bufmgr,
+ const char *name,
+ unsigned long size,
+ unsigned int alignment)
+{
+ return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, BO_ALLOC_DIRECT);
+}
+
+static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
@@ -2058,6 +2072,7 @@ drm_intel_bufmgr_gem_init(int fd, int batch_size)
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
+ bufmgr_gem->bufmgr.bo_alloc_direct = drm_intel_gem_bo_alloc_direct;
bufmgr_gem->bufmgr.bo_alloc_for_render =
drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
@@ -51,6 +51,14 @@ struct _drm_intel_bufmgr {
unsigned long size, unsigned int alignment);
/**
+ * Allocate a buffer object.
+ *
+ * This is the same as bo_alloc, except that it bypasses the BO bucket cache: the buffer is allocated exactly-sized and marked non-reusable, so it is never returned to the cache on free.
+ */
+ drm_intel_bo *(*bo_alloc_direct) (drm_intel_bufmgr *bufmgr, const char *name,
+ unsigned long size, unsigned int alignment);
+
+ /**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*