--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1641,6 +1641,15 @@ struct i915_workarounds {
u32 count;
};
+struct eb_vmas {
+ struct list_head vmas;
+ int and;
+ union {
+ struct i915_vma *lut[0];
+ struct hlist_head buckets[0];
+ };
+};
+
struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
@@ -1896,7 +1905,7 @@ struct drm_i915_private {
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
+ struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
int (*init_rings)(struct drm_device *dev);
@@ -2626,7 +2635,7 @@ int i915_gem_ringbuffer_submission(struct drm_device *dev,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
+ struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -41,15 +41,6 @@
#define BATCH_OFFSET_BIAS (256*1024)
-struct eb_vmas {
- struct list_head vmas;
- int and;
- union {
- struct i915_vma *lut[0];
- struct hlist_head buckets[0];
- };
-};
-
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
@@ -617,10 +608,11 @@ eb_vma_misplaced(struct i915_vma *vma)
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
- struct list_head *vmas,
+ struct eb_vmas *eb,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
+ struct list_head *vmas = &eb->vmas;
struct i915_vma *vma;
struct i915_address_space *vm;
struct list_head ordered_vmas;
@@ -803,7 +795,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
goto err;
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, eb, &need_relocs);
if (ret)
goto err;
@@ -829,8 +821,9 @@ err:
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
- struct list_head *vmas)
+ struct eb_vmas *eb)
{
+ struct list_head *vmas = &eb->vmas;
struct i915_vma *vma;
uint32_t flush_domains = 0;
bool flush_chipset = false;
@@ -1136,12 +1129,13 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
+ struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
{
struct drm_clip_rect *cliprects = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct list_head *vmas = &eb->vmas;
u64 exec_len;
int instp_mode;
u32 instp_mask;
@@ -1190,7 +1184,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
}
}
- ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
+ ret = i915_gem_execbuffer_move_to_gpu(ring, eb);
if (ret)
goto error;
@@ -1463,7 +1457,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Move the objects en-masse into the GTT, evicting if necessary. */
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
- ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
+ ret = i915_gem_execbuffer_reserve(ring, eb, &need_relocs);
if (ret)
goto err;
@@ -1527,7 +1521,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_start += i915_gem_obj_offset(batch_obj, vm);
ret = dev_priv->gt.do_execbuf(dev, file, ring, ctx, args,
- &eb->vmas, batch_obj, exec_start, flags);
+ eb, batch_obj, exec_start, flags);
/*
* FIXME: We crucially rely upon the active tracking for the (ppgtt)
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -571,9 +571,10 @@ static int logical_ring_invalidate_all_caches(struct intel_ringbuffer *ringbuf,
static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
struct intel_context *ctx,
- struct list_head *vmas)
+ struct eb_vmas *eb)
{
struct intel_engine_cs *ring = ringbuf->ring;
+ struct list_head *vmas = &eb->vmas;
struct i915_vma *vma;
uint32_t flush_domains = 0;
int ret;
@@ -621,12 +622,13 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
+ struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+ struct list_head *vmas = &eb->vmas;
int instp_mode;
u32 instp_mask;
int ret;
@@ -677,7 +679,7 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
return -EINVAL;
}
- ret = execlists_move_to_gpu(ringbuf, ctx, vmas);
+ ret = execlists_move_to_gpu(ringbuf, ctx, eb);
if (ret)
return ret;
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -80,11 +80,12 @@ void intel_lr_context_unpin(struct intel_engine_cs *ring,
/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+struct eb_vmas;
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
struct intel_engine_cs *ring,
struct intel_context *ctx,
struct drm_i915_gem_execbuffer2 *args,
- struct list_head *vmas,
+ struct eb_vmas *eb,
struct drm_i915_gem_object *batch_obj,
u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
The role of eb_vmas continues to grow here as it becomes the proper
encapsulation for the data passed to the various execution functions.
The next patch makes use of it...

This patch was initially part of the next patch, but got split out
after I found a bug that convinced me the two should be separate.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_drv.h            | 13 +++++++++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 26 ++++++++++----------------
 drivers/gpu/drm/i915/intel_lrc.c           |  8 +++++---
 drivers/gpu/drm/i915/intel_lrc.h           |  3 ++-
 4 files changed, 28 insertions(+), 22 deletions(-)
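One detail worth noting: intel_lrc.h only ever passes struct eb_vmas by
pointer, so the bare "struct eb_vmas;" forward declaration added there is
sufficient. The full definition is needed only by code that dereferences
the pointer (for example, taking &eb->vmas), which is why the struct body
moves into i915_drv.h rather than into every header.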
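For readers unfamiliar with the structure being moved: eb_vmas keeps one
allocation whose tail is either a direct handle-indexed lookup table (in the
kernel, selected by the I915_EXEC_HANDLE_LUT flag) or a hash table, with the
sign of 'and' picking the mode. Below is a minimal stand-alone sketch of that
pattern in ordinary user-space C; it is illustrative only, not the kernel
code. The real buckets[] chains collisions through hlist_head, and every name
here apart from lut/buckets/and is hypothetical:

	/* Stand-alone sketch of the eb_vmas lookup pattern; not kernel code. */
	#include <stdlib.h>

	struct vma { unsigned handle; };        /* stand-in for struct i915_vma */

	struct eb_sketch {
		int and;                        /* < 0: direct LUT of -and slots */
						/* >= 0: hash mask for buckets[] */
		union {                         /* zero-length arrays: GNU C,    */
			struct vma *lut[0];     /* matching the kernel original; */
			struct vma *buckets[0]; /* kernel uses hlist_head here   */
		};
	};

	/* One allocation covers the header plus whichever tail array is used. */
	static struct eb_sketch *eb_sketch_create(unsigned count, int use_lut)
	{
		unsigned slots = use_lut ? count : 64; /* 64: arbitrary power of 2 */
		struct eb_sketch *eb;

		eb = calloc(1, sizeof(*eb) + slots * sizeof(struct vma *));
		if (!eb)
			return NULL;
		eb->and = use_lut ? -(int)count : (int)(slots - 1);
		return eb;
	}

	static struct vma *eb_sketch_get(struct eb_sketch *eb, unsigned handle)
	{
		if (eb->and < 0) {              /* direct-index mode */
			if (handle >= (unsigned)-eb->and)
				return NULL;
			return eb->lut[handle];
		}
		/* Hash mode; the real code walks an hlist chain per bucket. */
		return eb->buckets[handle & eb->and];
	}

With the definition visible in i915_drv.h, the do_execbuf implementations can
take the whole eb_vmas rather than just its embedded list, which is exactly
what the signature changes above do.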