@@ -399,6 +399,11 @@ static void __destroy_hw_context(struct i915_gem_context *ctx,
context_close(ctx);
}
+struct i915_address_space *i915_gem_context_vm(struct i915_gem_context *ctx)
+{
+ return ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
+}
+
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *dev_priv,
struct drm_i915_file_private *file_priv)
@@ -39,6 +39,7 @@ struct drm_file;
struct drm_i915_private;
struct drm_i915_file_private;
+struct i915_address_space;
struct i915_hw_ppgtt;
struct i915_request;
struct i915_vma;
@@ -351,6 +352,8 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
struct i915_gem_context *
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio);
+struct i915_address_space *i915_gem_context_vm(struct i915_gem_context *ctx);
+
static inline struct i915_gem_context *
i915_gem_context_get(struct i915_gem_context *ctx)
{
@@ -740,12 +740,9 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT;
eb->ctx = ctx;
- if (ctx->ppgtt) {
- eb->vm = &ctx->ppgtt->vm;
+ eb->vm = i915_gem_context_vm(ctx);
+ if (ctx->ppgtt)
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
- } else {
- eb->vm = &eb->i915->ggtt.vm;
- }
eb->context_flags = 0;
if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
@@ -1416,7 +1416,6 @@ capture_object(struct drm_i915_private *dev_priv,
static void gem_record_rings(struct i915_gpu_state *error)
{
struct drm_i915_private *i915 = error->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
int i;
for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -1440,7 +1439,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring;
- ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
+ ee->vm = i915_gem_context_vm(ctx);
record_context(&ee->context, ctx);
@@ -1053,9 +1053,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
@@ -1108,8 +1106,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine;
I915_RND_STATE(prng);
@@ -1543,8 +1540,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1600,8 +1596,7 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
@@ -310,9 +310,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
struct intel_engine_cs *engine,
unsigned int dw)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -491,8 +489,7 @@ create_test_object(struct i915_gem_context *ctx,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
u64 size;
int err;
@@ -561,8 +561,7 @@ static int live_empty_request(void *arg)
static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{
struct i915_gem_context *ctx = i915->kernel_context;
- struct i915_address_space *vm =
- ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(ctx);
struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915);
struct i915_vma *vma;
@@ -106,10 +106,7 @@ static int emit_recurse_batch(struct hang *h,
struct i915_request *rq)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm =
- rq->gem_context->ppgtt ?
- &rq->gem_context->ppgtt->vm :
- &i915->ggtt.vm;
+ struct i915_address_space *vm = i915_gem_context_vm(rq->gem_context);
struct i915_vma *hws, *vma;
unsigned int flags;
u32 *batch;
Throughout the kernel it's a pretty common pattern to do: vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm; in order to determine the correct vm, so why not roll it into a helper. Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Signed-off-by: Matthew Auld <matthew.auld@intel.com> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Chris Wilson <chris@chris-wilson.co.uk> --- drivers/gpu/drm/i915/i915_gem_context.c | 5 +++++ drivers/gpu/drm/i915/i915_gem_context.h | 3 +++ drivers/gpu/drm/i915/i915_gem_execbuffer.c | 7 ++----- drivers/gpu/drm/i915/i915_gpu_error.c | 3 +-- drivers/gpu/drm/i915/selftests/huge_pages.c | 13 ++++--------- drivers/gpu/drm/i915/selftests/i915_gem_context.c | 7 ++----- drivers/gpu/drm/i915/selftests/i915_request.c | 3 +-- drivers/gpu/drm/i915/selftests/intel_hangcheck.c | 5 +---- 8 files changed, 19 insertions(+), 27 deletions(-)