@@ -1966,7 +1966,6 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
- i915_vma_unpin(ppgtt->vma);
i915_vma_destroy(ppgtt->vma);
gen6_ppgtt_free_pd(ppgtt);
@@ -2060,10 +2059,19 @@ static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
return vma;
}
-static int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+ /*
+ * Work around the limited maximum vma->pin_count and the aliasing_ppgtt,
+ * which will be pinned into every active context.
+ * (When vma->pin_count becomes atomic, I expect we will naturally
+ * need a larger, unpacked type and kill this redundancy.)
+ */
+ if (ppgtt->pin_count++)
+ return 0;
+
/*
* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
* allocator works in address space sizes, so it's multiplied by page
@@ -2074,6 +2082,17 @@ static int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
PIN_GLOBAL | PIN_HIGH);
}
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
+{
+ struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
+
+ GEM_BUG_ON(!ppgtt->pin_count);
+ if (--ppgtt->pin_count)
+ return;
+
+ i915_vma_unpin(ppgtt->vma);
+}
+
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
struct i915_ggtt * const ggtt = &i915->ggtt;
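[Sketch: minimal lifecycle of the refcounted interface added above. The
caller and error handling are illustrative only, not part of the patch,
and it assumes the usual struct_mutex serialisation around pin/unpin.]

	err = gen6_ppgtt_pin(&ppgtt->base);	/* pin_count 0 -> 1: binds the pd vma via i915_vma_pin() */
	if (err)
		return err;

	err = gen6_ppgtt_pin(&ppgtt->base);	/* pin_count 1 -> 2: refcount only, no GGTT work */

	gen6_ppgtt_unpin(&ppgtt->base);		/* pin_count 2 -> 1 */
	gen6_ppgtt_unpin(&ppgtt->base);		/* pin_count 1 -> 0: i915_vma_unpin() */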
@@ -2121,21 +2140,8 @@ static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
if (err)
goto err_vma;
- err = gen6_ppgtt_pin(&ppgtt->base);
- if (err)
- goto err_pd;
-
- DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
- ppgtt->vma->node.size >> 20,
- ppgtt->vma->node.start / PAGE_SIZE);
-
- DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
- ppgtt->base.pd.base.ggtt_offset << 10);
-
return &ppgtt->base;
-err_pd:
- gen6_ppgtt_free_pd(ppgtt);
err_vma:
i915_vma_destroy(ppgtt->vma);
err_scratch:
@@ -413,6 +413,8 @@ struct gen6_hw_ppgtt {
struct i915_vma *vma;
gen6_pte_t __iomem *pd_addr;
+ unsigned int pin_count;
+
int (*switch_mm)(struct gen6_hw_ppgtt *ppgtt, struct i915_request *rq);
};
@@ -627,6 +629,9 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
kref_put(&ppgtt->ref, i915_ppgtt_release);
}
+int gen6_ppgtt_pin(struct i915_hw_ppgtt *base);
+void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base);
+
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);
@@ -1195,6 +1195,27 @@ static void intel_ring_context_destroy(struct intel_context *ce)
__i915_gem_object_release_unless_active(ce->state->obj);
}
+static int __context_pin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+ int err = 0;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ err = gen6_ppgtt_pin(ppgtt);
+
+ return err;
+}
+
+static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt;
+
+ ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
+ if (ppgtt)
+ gen6_ppgtt_unpin(ppgtt);
+}
+
static int __context_pin(struct intel_context *ce)
{
struct i915_vma *vma;
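[Sketch: why the separate pin_count matters for these helpers. With an
aliasing ppgtt, every pinned context resolves to the same struct, so
without the refcount each pinned context would consume one of the few
bits packed into vma->pin_count. ctx_a/ctx_b are hypothetical contexts.]

	__context_pin_ppgtt(ctx_a);	/* shared ppgtt: pin_count 0 -> 1, vma pinned once */
	__context_pin_ppgtt(ctx_b);	/* pin_count 1 -> 2, vma->pin_count untouched */

	__context_unpin_ppgtt(ctx_b);	/* pin_count 2 -> 1 */
	__context_unpin_ppgtt(ctx_a);	/* pin_count 1 -> 0, vma unpinned */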
@@ -1243,6 +1264,7 @@ static void __context_unpin(struct intel_context *ce)
static void intel_ring_context_unpin(struct intel_context *ce)
{
+ __context_unpin_ppgtt(ce->gem_context);
__context_unpin(ce);
i915_gem_context_put(ce->gem_context);
@@ -1340,6 +1362,10 @@ __ring_context_pin(struct intel_engine_cs *engine,
if (err)
goto err;
+ err = __context_pin_ppgtt(ce->gem_context);
+ if (err)
+ goto err_unpin;
+
i915_gem_context_get(ctx);
/* One ringbuffer to rule them all */
@@ -1348,6 +1374,8 @@ __ring_context_pin(struct intel_engine_cs *engine,
return ce;
+err_unpin:
+ __context_unpin(ce);
err:
ce->pin_count = 0;
return ERR_PTR(err);
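[Sketch: the resulting acquire/release ordering, condensed for reference.
Release unwinds in LIFO order both on the error path above and in
intel_ring_context_unpin().]

	err = __context_pin(ce);			/* 1: context state into the GGTT */
	if (err == 0)
		err = __context_pin_ppgtt(ce->gem_context);	/* 2: gen6 page directory */

	/* teardown: reverse order */
	__context_unpin_ppgtt(ce->gem_context);		/* 2 released first */
	__context_unpin(ce);				/* then 1 */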