@@ -8,6 +8,7 @@
#include <linux/bitops.h>
#include <linux/list.h>
+#include <linux/llist.h>
struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -32,6 +33,15 @@ struct i915_buddy_block {
*/
struct list_head link;
struct list_head tmp_link;
+
+ /*
+ * XXX: consider moving this somewhere specific to the pd stuff. In an
+ * ideal world we would like to keep i915_buddy as non-i915 specific as
+ * possible and in this case the delayed freeing is only required for
+ * our pd handling, which is only one part of our overall i915_buddy
+ * use.
+ */
+ struct llist_node freed;
};
#define I915_BUDDY_MAX_ORDER I915_BUDDY_HEADER_ORDER
@@ -84,14 +84,29 @@ __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
mutex_unlock(&mem->mm_lock);
}
-void
-__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+static void __intel_memory_region_put_block_work(struct work_struct *work)
{
+ struct intel_memory_region *mem =
+ container_of(work, struct intel_memory_region, pd_put.work);
+ struct llist_node *freed = llist_del_all(&mem->pd_put.blocks);
+ struct i915_buddy_block *block;
struct list_head blocks;
INIT_LIST_HEAD(&blocks);
- list_add(&block->link, &blocks);
- __intel_memory_region_put_pages_buddy(block->private, &blocks);
+
+ llist_for_each_entry(block, freed, freed)
+ list_add(&block->link, &blocks);
+
+ __intel_memory_region_put_pages_buddy(mem, &blocks);
+}
+
+void
+__intel_memory_region_put_block_buddy(struct i915_buddy_block *block)
+{
+ struct intel_memory_region *mem = block->private;
+
+ if (llist_add(&block->freed, &mem->pd_put.blocks))
+ queue_work(mem->i915->wq, &mem->pd_put.work);
}
int
@@ -224,6 +239,8 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->total = size;
mem->avail = mem->total;
+ INIT_WORK(&mem->pd_put.work, __intel_memory_region_put_block_work);
+
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
INIT_LIST_HEAD(&mem->objects.purgeable);
@@ -260,6 +277,9 @@ static void __intel_memory_region_destroy(struct kref *kref)
struct intel_memory_region *mem =
container_of(kref, typeof(*mem), kref);
+ /* Flush any pending work items that might still free blocks for this region */
+ flush_workqueue(mem->i915->wq);
+
if (mem->ops->release)
mem->ops->release(mem);
@@ -83,6 +83,11 @@ struct intel_memory_region {
struct i915_buddy_mm mm;
struct mutex mm_lock;
+ struct {
+ struct work_struct work;
+ struct llist_head blocks;
+ } pd_put;
+
struct kref kref;
resource_size_t io_start;
@@ -69,11 +69,12 @@ static void mock_device_release(struct drm_device *dev)
i915_gem_drain_freed_objects(i915);
mock_fini_ggtt(&i915->ggtt);
- destroy_workqueue(i915->wq);
intel_gt_driver_late_release(&i915->gt);
intel_memory_regions_driver_release(i915);
+ destroy_workqueue(i915->wq);
+
drm_mode_config_cleanup(&i915->drm);
out: