
[03/10] drm/i915: Clearing buffer objects via CPU/GTT

Message ID 1450765253-32104-4-git-send-email-ankitprasad.r.sharma@intel.com (mailing list archive)
State New, archived

Commit Message

ankitprasad.r.sharma@intel.com Dec. 22, 2015, 6:20 a.m. UTC
From: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>

This patch adds support for clearing buffer objects via the CPU/GTT. This
is particularly useful for clearing out non-shmem-backed objects. We
currently intend to use this only for buffers allocated from the stolen
region.

v2: Added kernel doc for i915_gem_clear_object(), corrected/removed
variable assignments (Tvrtko)

v3: Map the object page by page to the gtt if pinning the whole object
to the ggtt fails; corrected function name (Chris)

v4: Clear the buffer page by page instead of mapping the whole object into
the gtt aperture. Use the i915 wrapper function in place of
drm_mm_insert_node_in_range.

v5: Use renamed wrapper function for drm_mm_insert_node_in_range,
updated barrier positioning (Chris)

v6: Use PAGE_SIZE instead of 4096, use get_pages call before pinning pages
(Tvrtko)

Testcase: igt/gem_stolen

Signed-off-by: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h |  1 +
 drivers/gpu/drm/i915/i915_gem.c | 49 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 50 insertions(+)
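
For reference, the new function relies on the insert_mappable_node() and
remove_mappable_node() helpers introduced earlier in this series as thin
wrappers around the drm_mm allocator. A minimal sketch of what such wrappers
might look like is given below; the exact arguments and flags are assumptions
rather than part of this patch, the point being only that a single page-sized
slot is reserved in the CPU-mappable portion of the global GTT and released
again afterwards.

/*
 * Sketch only: assumed shape of the helpers called by
 * i915_gem_object_clear().  They reserve (and later release) one
 * page-sized node in the mappable part of the global GTT.
 */
static int insert_mappable_node(struct drm_i915_private *i915,
				struct drm_mm_node *node)
{
	return drm_mm_insert_node_in_range_generic(&i915->gtt.base.mm, node,
						   PAGE_SIZE, 0, 0,
						   0, i915->gtt.mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}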

Comments

Tvrtko Ursulin Dec. 22, 2015, 11:09 a.m. UTC | #1
On 22/12/15 06:20, ankitprasad.r.sharma@intel.com wrote:
> From: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>
>
> This patch adds support for clearing buffer objects via the CPU/GTT. This
> is particularly useful for clearing out non-shmem-backed objects. We
> currently intend to use this only for buffers allocated from the stolen
> region.
>
> v2: Added kernel doc for i915_gem_clear_object(), corrected/removed
> variable assignments (Tvrtko)
>
> v3: Map the object page by page to the gtt if pinning the whole object
> to the ggtt fails; corrected function name (Chris)
>
> v4: Clear the buffer page by page instead of mapping the whole object into
> the gtt aperture. Use the i915 wrapper function in place of
> drm_mm_insert_node_in_range.
>
> v5: Use renamed wrapper function for drm_mm_insert_node_in_range,
> updated barrier positioning (Chris)
>
> v6: Use PAGE_SIZE instead of 4096, use get_pages call before pinning pages
> (Tvrtko)
>
> Testcase: igt/gem_stolen
>
> Signed-off-by: Ankitprasad Sharma <ankitprasad.r.sharma@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_drv.h |  1 +
>   drivers/gpu/drm/i915/i915_gem.c | 49 +++++++++++++++++++++++++++++++++++++++++
>   2 files changed, 50 insertions(+)
>
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index a10b866..e195fee 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -2897,6 +2897,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
>   				    int *needs_clflush);
>
>   int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
> +int i915_gem_object_clear(struct drm_i915_gem_object *obj);
>
>   static inline int __sg_page_count(struct scatterlist *sg)
>   {
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f11ec89..8f637a2 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -5301,3 +5301,52 @@ fail:
>   	drm_gem_object_unreference(&obj->base);
>   	return ERR_PTR(ret);
>   }
> +
> +/**
> + * i915_gem_object_clear() - Clear buffer object via CPU/GTT
> + * @obj: Buffer object to be cleared
> + *
> + * Return: 0 - success, non-zero - failure
> + */
> +int i915_gem_object_clear(struct drm_i915_gem_object *obj)
> +{
> +	int ret, i;
> +	char __iomem *base;
> +	size_t size = obj->base.size;
> +	struct drm_i915_private *i915 = to_i915(obj->base.dev);
> +	struct drm_mm_node node;
> +
> +	lockdep_assert_held(&obj->base.dev->struct_mutex);
> +	memset(&node, 0, sizeof(node));
> +	ret = insert_mappable_node(i915, &node);
> +	if (ret)
> +		goto out;
> +
> +	ret = i915_gem_object_get_pages(obj);
> +	if (ret) {
> +		remove_mappable_node(&node);
> +		goto out;
> +	}
> +
> +	i915_gem_object_pin_pages(obj);
> +	base = io_mapping_map_wc(i915->gtt.mappable, node.start);
> +	for (i = 0; i < size/PAGE_SIZE; i++) {
> +		i915->gtt.base.insert_page(&i915->gtt.base,
> +					   i915_gem_object_get_dma_address(obj, i),
> +					   node.start,
> +					   I915_CACHE_NONE, 0);
> +		wmb();
> +		memset_io(base, 0, PAGE_SIZE);
> +		wmb();
> +	}
> +
> +	io_mapping_unmap(base);
> +	i915->gtt.base.clear_range(&i915->gtt.base,
> +			node.start, node.size,
> +			true);
> +	remove_mappable_node(&node);
> +	i915_gem_object_unpin_pages(obj);
> +	i915_gem_object_put_pages(obj);
> +out:
> +	return ret;
> +}
>

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

Patch

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a10b866..e195fee 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2897,6 +2897,7 @@  int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 				    int *needs_clflush);
 
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
+int i915_gem_object_clear(struct drm_i915_gem_object *obj);
 
 static inline int __sg_page_count(struct scatterlist *sg)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f11ec89..8f637a2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5301,3 +5301,52 @@  fail:
 	drm_gem_object_unreference(&obj->base);
 	return ERR_PTR(ret);
 }
+
+/**
+ * i915_gem_object_clear() - Clear buffer object via CPU/GTT
+ * @obj: Buffer object to be cleared
+ *
+ * Return: 0 - success, non-zero - failure
+ */
+int i915_gem_object_clear(struct drm_i915_gem_object *obj)
+{
+	int ret, i;
+	char __iomem *base;
+	size_t size = obj->base.size;
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct drm_mm_node node;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+	memset(&node, 0, sizeof(node));
+	ret = insert_mappable_node(i915, &node);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_get_pages(obj);
+	if (ret) {
+		remove_mappable_node(&node);
+		goto out;
+	}
+
+	i915_gem_object_pin_pages(obj);
+	base = io_mapping_map_wc(i915->gtt.mappable, node.start);
+	for (i = 0; i < size/PAGE_SIZE; i++) {
+		i915->gtt.base.insert_page(&i915->gtt.base,
+					   i915_gem_object_get_dma_address(obj, i),
+					   node.start,
+					   I915_CACHE_NONE, 0);
+		wmb();
+		memset_io(base, 0, PAGE_SIZE);
+		wmb();
+	}
+
+	io_mapping_unmap(base);
+	i915->gtt.base.clear_range(&i915->gtt.base,
+			node.start, node.size,
+			true);
+	remove_mappable_node(&node);
+	i915_gem_object_unpin_pages(obj);
+	i915_gem_object_put_pages(obj);
+out:
+	return ret;
+}
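
For reference, i915_gem_object_clear() is intended to be called with
struct_mutex held, before a stolen-backed object is handed to userspace,
since stolen memory may contain stale data. The call site below is
hypothetical and sketched for illustration only; i915_gem_object_create_stolen()
is the existing stolen allocator, while the helper name and the exact error
handling are assumptions.

/*
 * Hypothetical call site (not part of this patch): scrub a newly
 * created stolen-backed object before handing it out.  Caller must
 * hold dev->struct_mutex.
 */
static struct drm_i915_gem_object *
create_cleared_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_object_clear(obj);
	if (ret) {
		drm_gem_object_unreference(&obj->base);
		return ERR_PTR(ret);
	}

	return obj;
}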