[RFC,20/42] drm/i915/lmem: support pread

Message ID 20190214145740.14521-21-matthew.auld@intel.com (mailing list archive)
State New, archived
Series Introduce memory region concept (including device local memory)

Commit Message

Matthew Auld Feb. 14, 2019, 2:57 p.m. UTC
We need to add support for pread'ing an LMEM object.

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_region_lmem.c | 73 ++++++++++++++++++++++++
 1 file changed, 73 insertions(+)
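
As a rough illustration of what this means for userspace, the snippet below is a minimal sketch (not part of this series) that reads an object back through the existing I915_GEM_PREAD ioctl; for an LMEM-backed handle this would end up in the region_lmem_pread() added by this patch. The helper name is made up, and how the LMEM object itself gets created is outside the scope of this patch.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Read 'size' bytes from the start of an (assumed) LMEM-backed object
 * into 'dst'. The handle is created elsewhere. */
static int lmem_read_back(int drm_fd, uint32_t handle, void *dst, uint64_t size)
{
	struct drm_i915_gem_pread pread;

	memset(&pread, 0, sizeof(pread));
	pread.handle = handle;
	pread.offset = 0;				/* byte offset into the object */
	pread.size = size;				/* bytes to copy out */
	pread.data_ptr = (uint64_t)(uintptr_t)dst;	/* destination buffer */

	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
}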

Comments

Chris Wilson Feb. 14, 2019, 3:50 p.m. UTC | #1
Quoting Matthew Auld (2019-02-14 14:57:18)
> +       ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
> +       if (ret)
> +               goto out_unpin;
> +
> +       wakeref = intel_runtime_pm_get(i915);

But why wakeref in the middle?
> +
> +       ret = i915_gem_object_set_to_wc_domain(obj, false);
> +       mutex_unlock(&i915->drm.struct_mutex);

Anyway, this is a disaster that needs fixing before we make it worse.
-Chris
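
For readers following the wakeref question above: one way to avoid taking the runtime-PM reference in the middle of the sequence would be to grab it before struct_mutex and hold it across both the domain change and the copy loop. The fragment below is only a sketch of that reordering, reusing the helpers already used by the patch; it is not a fix proposed in this thread.

	wakeref = intel_runtime_pm_get(i915);

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		goto out_pm;

	ret = i915_gem_object_set_to_wc_domain(obj, false);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		goto out_pm;

	/* ... per-page copy loop unchanged from the patch ... */

out_pm:
	intel_runtime_pm_put(i915, wakeref);
out_unpin:
	i915_gem_object_unpin_pages(obj);

	return ret;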

Patch

diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index c8bee0c18c88..67dc50be2e6b 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -26,10 +26,83 @@ 
 #include "intel_memory_region.h"
 #include "intel_region_lmem.h"
 
+static int region_lmem_pread(struct drm_i915_gem_object *obj,
+			     const struct drm_i915_gem_pread *args)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	intel_wakeref_t wakeref;
+	char __user *user_data;
+	unsigned int offset;
+	unsigned long idx;
+	u64 remain;
+	int ret;
+
+	ret = i915_gem_object_wait(obj,
+				   I915_WAIT_INTERRUPTIBLE,
+				   MAX_SCHEDULE_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_pin_pages(obj);
+	if (ret)
+		return ret;
+
+	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
+	if (ret)
+		goto out_unpin;
+
+	wakeref = intel_runtime_pm_get(i915);
+
+	ret = i915_gem_object_set_to_wc_domain(obj, false);
+	mutex_unlock(&i915->drm.struct_mutex);
+	if (ret)
+		goto out_put;
+
+	remain = args->size;
+	user_data = u64_to_user_ptr(args->data_ptr);
+	offset = offset_in_page(args->offset);
+	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
+		unsigned long unwritten;
+		void __iomem *vaddr;
+		int length;
+
+		length = remain;
+		if (offset + length > PAGE_SIZE)
+			length = PAGE_SIZE - offset;
+
+		vaddr = i915_gem_object_lmem_io_map_page(obj, idx);
+		if (!vaddr) {
+			ret = -ENOMEM;
+			goto out_put;
+		}
+
+		unwritten = copy_to_user(user_data,
+					 (void __force *)vaddr + offset,
+					 length);
+		io_mapping_unmap_atomic(vaddr);
+		if (unwritten) {
+			ret = -EFAULT;
+			goto out_put;
+		}
+
+		remain -= length;
+		user_data += length;
+		offset = 0;
+	}
+
+out_put:
+	intel_runtime_pm_put(i915, wakeref);
+out_unpin:
+	i915_gem_object_unpin_pages(obj);
+
+	return ret;
+}
+
 static const struct drm_i915_gem_object_ops region_lmem_obj_ops = {
 	.get_pages = i915_memory_region_get_pages_buddy,
 	.put_pages = i915_memory_region_put_pages_buddy,
 	.release = i915_gem_object_release_memory_region,
+	.pread = region_lmem_pread,
 };
 
 static struct drm_i915_gem_object *
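
For context, the .pread hook registered in region_lmem_obj_ops above is expected to be called from the common pread path roughly as sketched below; the actual dispatch is added earlier in this series, and the function name here is illustrative only.

static int i915_gem_object_pread(struct drm_i915_gem_object *obj,
				 const struct drm_i915_gem_pread *args)
{
	/* Backends such as LMEM can provide their own pread implementation. */
	if (obj->ops->pread)
		return obj->ops->pread(obj, args);

	/* Placeholder: the real code falls back to the shmem/GTT paths here. */
	return -ENODEV;
}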