@@ -183,13 +183,11 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
struct i915_page_directory * const pd = ppgtt->base.pd;
struct i915_page_table *pt, *alloc = NULL;
- intel_wakeref_t wakeref;
+ bool flush = false;
u64 from = start;
unsigned int pde;
int ret = 0;

- wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
-
spin_lock(&pd->lock);
gen6_for_each_pde(pt, pd, start, length, pde) {
const unsigned int count = gen6_pte_count(start, length);
@@ -214,14 +212,20 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
alloc = pt;
pt = pd->entry[pde];
}
+
+ flush = true;
}

atomic_add(count, &pt->used);
}
spin_unlock(&pd->lock);

- if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
- gen6_flush_pd(ppgtt, from, start);
+ if (flush && i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(&vm->i915->runtime_pm, wakeref)
+ gen6_flush_pd(ppgtt, from, start);
+ }

goto out;

@@ -230,7 +234,6 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
out:
if (alloc)
free_px(vm, alloc);
- intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
return ret;
}
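A note on the pattern above (not part of the patch itself): the explicit intel_runtime_pm_get()/intel_runtime_pm_put() pair that used to bracket the whole function is replaced by with_intel_runtime_pm(), which holds the wakeref only across the statement that follows it, and only when a newly allocated page table actually needs flushing into a globally bound PD. The sketch below is a self-contained illustration of that scoped acquire/release shape; every identifier in it (fake_rpm_get, fake_rpm_put, with_fake_rpm) is a hypothetical stand-in, not i915 code.

/*
 * Minimal sketch of a "with"-style scoping macro: acquire in the for-init,
 * run the attached statement once while the cookie is non-zero, release in
 * the for-increment. All names here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

typedef int wakeref_t;

static wakeref_t fake_rpm_get(void)
{
	printf("get wakeref\n");
	return 1;	/* non-zero cookie meaning "device held awake" */
}

static void fake_rpm_put(wakeref_t wf)
{
	printf("put wakeref %d\n", wf);
}

/* Runs the following statement exactly once with the wakeref held, then
 * releases it; falling out of the body always reaches the put. */
#define with_fake_rpm(wf) \
	for ((wf) = fake_rpm_get(); (wf); fake_rpm_put(wf), (wf) = 0)

int main(void)
{
	wakeref_t wf;
	bool flush = true;	/* stands in for "a new page table was allocated" */

	if (flush)
		with_fake_rpm(wf)
			printf("flush page directory\n");

	return 0;
}

Bounding the wakeref to the flush in this way keeps the common path, where no new page tables are allocated or the PPGTT is not globally bound, free of runtime-PM get/put traffic.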