@@ -1039,8 +1039,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u64 size, u64 offset,
u32 dword, u32 val)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma;
int err;
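Note the ctx->vm ?: &engine->gt->ggtt->vm idiom above: a ?: b is GCC's
conditional-with-omitted-operand extension, used throughout the kernel. It
yields a when a is non-NULL (evaluating it only once) and b otherwise, so a
context with a private VM keeps it and everything else falls back to the
global GTT. A minimal user-space sketch of that selection, with illustrative
stand-in types rather than the real driver structs:

#include <stdio.h>

struct address_space { const char *name; };

/* GNU extension: x ?: y is x ? x : y, except x is evaluated once. */
static struct address_space *pick_vm(struct address_space *ctx_vm,
				     struct address_space *ggtt_vm)
{
	return ctx_vm ?: ggtt_vm;
}

int main(void)
{
	struct address_space ggtt = { "ggtt" }, ppgtt = { "ppgtt" };

	printf("%s\n", pick_vm(&ppgtt, &ggtt)->name); /* "ppgtt" */
	printf("%s\n", pick_vm(NULL, &ggtt)->name);   /* "ggtt" */
	return 0;
}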
@@ -241,8 +241,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
struct intel_engine_cs *engine,
unsigned int dw)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
- struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq;
struct i915_vma *vma;
struct i915_vma *batch;
@@ -580,7 +580,7 @@ static int init_status_page(struct intel_engine_cs *engine)
i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto err;
@@ -1879,7 +1879,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj))
return PTR_ERR(obj);
- vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err;
@@ -2970,7 +2970,7 @@ static int execlists_context_deferred_alloc(struct intel_context *ce,
if (IS_ERR(ctx_obj))
return PTR_ERR(ctx_obj);
- vma = i915_vma_instance(ctx_obj, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto error_deref_obj;
@@ -1405,7 +1405,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_unpin_map(obj);
}
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1369,7 +1369,7 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count)
return 0;
- vma = create_scratch(&ce->engine->i915->ggtt.vm, wal->count);
+ vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -130,7 +130,7 @@ static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = h->i915;
- struct i915_address_space *vm = h->ctx->vm ?: &i915->ggtt.vm;
+ struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
unsigned int flags;
@@ -143,12 +143,12 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
struct drm_i915_gem_object *obj;
void *vaddr;
- obj = i915_gem_object_create_internal(h->i915, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
vaddr = i915_gem_object_pin_map(obj,
- i915_coherent_map_type(h->i915));
+ i915_coherent_map_type(i915));
if (IS_ERR(vaddr)) {
i915_gem_object_put(obj);
return ERR_CAST(vaddr);
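The hang_create_request() hunks above just normalize on the i915 local
already in scope instead of re-reading h->i915. For reference,
i915_coherent_map_type() in this era reduces to choosing a write-back CPU
mapping when CPU and GPU share an LLC (the mapping stays coherent) and
write-combining otherwise (so the GPU never sees stale cachelines); a
stand-alone sketch of that logic, not the driver's actual definition:

#include <stdbool.h>
#include <stdio.h>

enum i915_map_type { I915_MAP_WB, I915_MAP_WC };

/* Approximates i915_coherent_map_type(): HAS_LLC(i915) selects a
 * cacheable write-back mapping; otherwise fall back to write-combine. */
static enum i915_map_type coherent_map_type(bool has_llc)
{
	return has_llc ? I915_MAP_WB : I915_MAP_WC;
}

int main(void)
{
	printf("llc:    %s\n", coherent_map_type(true) == I915_MAP_WB ? "WB" : "WC");
	printf("no llc: %s\n", coherent_map_type(false) == I915_MAP_WB ? "WB" : "WC");
	return 0;
}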
@@ -255,7 +255,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
}
flags = 0;
- if (INTEL_GEN(vm->i915) <= 5)
+ if (INTEL_GEN(i915) <= 5)
flags |= I915_DISPATCH_SECURE;
err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
@@ -103,7 +103,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
i915_gem_object_flush_map(result);
i915_gem_object_unpin_map(result);
- vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
+ vma = i915_vma_instance(result, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
goto err_obj;
@@ -1873,7 +1873,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
if (flush) {
mark_tlbs_dirty(&ppgtt->base);
- gen6_ggtt_invalidate(&vm->i915->ggtt);
+ gen6_ggtt_invalidate(vm->gt->ggtt);
}
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
@@ -2059,7 +2059,7 @@ static const struct i915_vma_ops pd_vma_ops = {
static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
{
struct drm_i915_private *i915 = ppgtt->base.vm.i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
+ struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
struct i915_vma *vma;
GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj))
return PTR_ERR(so.obj);
- so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
+ so.vma = i915_vma_instance(so.obj, &engine->gt->ggtt->vm, NULL);
if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma);
goto err_obj;
@@ -1409,12 +1409,12 @@ capture_object(struct drm_i915_private *dev_priv,
static void gem_record_rings(struct i915_gpu_state *error)
{
struct drm_i915_private *i915 = error->i915;
- struct i915_ggtt *ggtt = &i915->ggtt;
int i;
for (i = 0; i < I915_NUM_ENGINES; i++) {
struct intel_engine_cs *engine = i915->engine[i];
struct drm_i915_error_engine *ee = &error->engine[i];
+ struct i915_ggtt *ggtt;
struct i915_request *request;
ee->engine_id = -1;
if (!engine)
continue;
+ ggtt = engine->gt->ggtt;
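(The assignment is deferred until after the NULL check, since i915->engine[i]
may be absent and initializing the pointer at its declaration would
dereference a NULL engine.)

Every hunk above applies the same re-plumbing: GGTT lookups that used to
reach back out to the top-level drm_i915_private (via engine->i915 or
vm->i915) now stay on the owning intel_gt, leaving the callers agnostic
about how many GTs a device might expose. A self-contained model of the
before/after pointer paths, using deliberately stripped-down stand-ins for
the driver structs (the real ones carry far more state):

#include <assert.h>

struct i915_ggtt { int id; };
struct intel_gt;

struct drm_i915_private {
	struct i915_ggtt ggtt;	/* old path: GGTT embedded in the device */
	struct intel_gt *gt;
};

struct intel_gt {
	struct drm_i915_private *i915;
	struct i915_ggtt *ggtt;	/* new path: each GT points at its GGTT */
};

struct intel_engine_cs {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
};

struct i915_address_space {
	struct drm_i915_private *i915;
	struct intel_gt *gt;
};

int main(void)
{
	struct drm_i915_private i915 = { .ggtt = { 0 } };
	struct intel_gt gt = { .i915 = &i915, .ggtt = &i915.ggtt };
	struct intel_engine_cs engine = { .i915 = &i915, .gt = &gt };
	struct i915_address_space vm = { .i915 = &i915, .gt = &gt };

	i915.gt = &gt;

	/* The old detour through the device and the new GT-local lookup
	 * land on the same object on a single-GT part. */
	assert(&engine.i915->ggtt == engine.gt->ggtt);
	assert(&vm.i915->ggtt == vm.gt->ggtt);
	return 0;
}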