@@ -133,6 +133,57 @@ static int get_context_size(struct drm_device *dev)
return ret;
}
 
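+/*
+ * Worker that unbinds any inactive VMAs still left in a ppgtt whose
+ * context has been destroyed, then drops the reference taken on the
+ * ppgtt when the work was scheduled.
+ */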
+static void
+i915_gem_context_cleanup_worker(struct work_struct *work)
+{
+ struct i915_hw_ppgtt *ppgtt = container_of(work, typeof(*ppgtt),
+ cleanup_work);
+ struct drm_device *dev = ppgtt->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct i915_vma *vma, *next;
+ bool was_interruptible;
+
+ mutex_lock(&dev->struct_mutex);
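+ /* Prevent the unbinds below from being interrupted by signals. */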
+ was_interruptible = dev_priv->mm.interruptible;
+ dev_priv->mm.interruptible = false;
+
+ WARN_ON(!list_empty(&ppgtt->base.active_list));
+
+ list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
+ mm_list) {
+ if (WARN_ON(i915_vma_unbind(vma)))
+ break;
+ }
+
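+ /* Drop the reference taken when this work was scheduled. */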
+ i915_ppgtt_put(ppgtt);
+
+ dev_priv->mm.interruptible = was_interruptible;
+ mutex_unlock(&dev->struct_mutex);
+}
+
+static void i915_gem_context_clean(struct intel_context *ctx)
+{
+ struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
+
+ if (!ppgtt)
+ return;
+
+ /*
+ * Unbind all inactive VMAs for this VM, but do it asynchronously.
+ */
+ INIT_WORK(&ppgtt->cleanup_work, i915_gem_context_cleanup_worker);
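+ /* The worker holds its own reference and drops it when done. */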
+ i915_ppgtt_get(ppgtt);
+ schedule_work(&ppgtt->cleanup_work);
+}
+
void i915_gem_context_free(struct kref *ctx_ref)
{
struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -142,6 +193,13 @@ void i915_gem_context_free(struct kref *ctx_ref)
if (i915.enable_execlists)
intel_lr_context_free(ctx);
 
+ /*
+ * This context is going away and we need to remove all VMAs still
+ * around. This is to handle imported shared objects for which the
+ * destructor did not run when their handles were closed.
+ */
+ i915_gem_context_clean(ctx);
+
i915_ppgtt_put(ctx->ppgtt);
 
if (ctx->legacy_hw_ctx.rcs_state)
@@ -375,6 +375,9 @@ struct i915_hw_ppgtt {
 
struct drm_i915_file_private *file_priv;
 
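+ /* Deferred cleanup of inactive VMAs when the owning context is freed. */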
+ struct work_struct cleanup_work;
+
gen6_pte_t __iomem *pd_addr;
 
int (*enable)(struct i915_hw_ppgtt *ppgtt);