
[6/9] drm/exynos: switch to new buffer allocation

Message ID 1444719654-10639-7-git-send-email-jy0922.shim@samsung.com (mailing list archive)
State New, archived

Commit Message

Joonyoung Shim Oct. 13, 2015, 7 a.m. UTC
Buffer allocation via the DMA mapping API cannot provide non-contiguous
buffers on non-IOMMU systems, nor cachable buffers, so switch to a new
allocation scheme based on drm_gem_get/put_pages(). The DMA mapping API
is then used for mmap only for physically contiguous buffers allocated
on non-IOMMU systems.

Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
---
 drivers/gpu/drm/exynos/exynos_drm_gem.c | 90 +++++++++++----------------------
 1 file changed, 29 insertions(+), 61 deletions(-)
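
Note that the diff below only switches exynos_drm_gem_mmap() to VM_MIXEDMAP;
the actual per-page insertion would happen in the GEM fault handler, which is
not shown on this page. A minimal sketch of how such a fault handler typically
looks for drm_gem_get_pages()-backed objects, assuming the 4.3-era fault API
(an illustration, not necessarily the series' exact code):

static int exynos_drm_gem_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct exynos_drm_gem *exynos_gem = to_exynos_gem(obj);
	pgoff_t page_offset;
	int ret;

	/* Which page of the object is being touched? */
	page_offset = ((unsigned long)vmf->virtual_address -
		       vma->vm_start) >> PAGE_SHIFT;

	if (page_offset >= (exynos_gem->size >> PAGE_SHIFT))
		return VM_FAULT_SIGBUS;

	/* VM_MIXEDMAP allows inserting struct-page-backed pfns here. */
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
			      page_to_pfn(exynos_gem->pages[page_offset]));

	switch (ret) {
	case 0:
	case -EBUSY:	/* raced with another fault; mapping is in place */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}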

Comments

Inki Dae Oct. 19, 2015, 12:20 p.m. UTC | #1
Hi,

How about combining patch 5 and 6?

Patch 5 just introduces a new internal API, but that API isn't used
anywhere in patch 5.

Thanks,
Inki Dae

On Oct. 13, 2015 at 16:00, Joonyoung Shim wrote:
> Buffer allocation via the DMA mapping API cannot provide non-contiguous
> buffers on non-IOMMU systems, nor cachable buffers, so switch to a new
> allocation scheme based on drm_gem_get/put_pages(). The DMA mapping API
> is then used for mmap only for physically contiguous buffers allocated
> on non-IOMMU systems.
>
> Signed-off-by: Joonyoung Shim <jy0922.shim@samsung.com>
Joonyoung Shim Oct. 20, 2015, 6:03 a.m. UTC | #2
On 10/19/2015 09:20 PM, Inki Dae wrote:
> Hi,
> 
> How about combining patch 5 and 6?
> 
> Patch 5 just introduces a new internal API, but that API isn't used anywhere in patch 5.
> 

I split them to make the code changes easier to follow in the patch
files. I have no objection to combining them.

Anyway, because this patchset introduces new userspace interfaces and
there is no real userspace using them yet, I'm not sure whether it's
better to keep going with this patchset as-is or to drop the patch that
introduces the new userspace interfaces.

Thanks.
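
The helpers exynos_drm_get_pages()/exynos_drm_put_pages() used by the patch
below come from patch 5 and are not shown on this page. A plausible minimal
shape for them, assuming they simply wrap drm_gem_get_pages() and
drm_gem_put_pages() as they would live in exynos_drm_gem.c (an illustrative
sketch, not the actual patch 5 code):

static int exynos_drm_get_pages(struct exynos_drm_gem *exynos_gem)
{
	struct page **pages;

	/* Pin shmem-backed pages for the whole object. */
	pages = drm_gem_get_pages(&exynos_gem->base);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	exynos_gem->pages = pages;

	return 0;
}

static void exynos_drm_put_pages(struct exynos_drm_gem *exynos_gem)
{
	/* Unpin the pages; mark them dirty so CPU writes are not lost. */
	drm_gem_put_pages(&exynos_gem->base, exynos_gem->pages, true, false);
	exynos_gem->pages = NULL;
}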

Patch

diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index d982d46b04da..163d113df1ab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -77,10 +77,7 @@  static int exynos_drm_alloc_dma(struct exynos_drm_gem *exynos_gem)
 
 	init_dma_attrs(&exynos_gem->dma_attrs);
 
-	if (exynos_gem->flags & EXYNOS_BO_WC ||
-			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
-		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &exynos_gem->dma_attrs);
-
+	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &exynos_gem->dma_attrs);
 	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
 
 	nr_pages = exynos_gem->size >> PAGE_SHIFT;
@@ -128,51 +125,21 @@  static void exynos_drm_free_dma(struct exynos_drm_gem *exynos_gem)
 static int exynos_drm_alloc_buf(struct exynos_drm_gem *exynos_gem)
 {
 	struct drm_device *dev = exynos_gem->base.dev;
-	enum dma_attr attr;
-	unsigned int nr_pages;
+	int ret;
 
 	if (exynos_gem->dma_addr) {
 		DRM_DEBUG_KMS("already allocated.\n");
 		return 0;
 	}
 
-	if (!is_drm_iommu_supported(dev))
-		return exynos_drm_alloc_dma(exynos_gem);
-
-	init_dma_attrs(&exynos_gem->dma_attrs);
-
-	/*
-	 * if EXYNOS_BO_CONTIG, fully physically contiguous memory
-	 * region will be allocated else physically contiguous
-	 * as possible.
-	 */
-	if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
-		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &exynos_gem->dma_attrs);
-
-	/* if EXYNOS_BO_WC or EXYNOS_BO_NONCACHABLE, writecombine mapping */
-	if (exynos_gem->flags & EXYNOS_BO_WC ||
-			!(exynos_gem->flags & EXYNOS_BO_CACHABLE))
-		attr = DMA_ATTR_WRITE_COMBINE;
-
-	dma_set_attr(attr, &exynos_gem->dma_attrs);
-	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &exynos_gem->dma_attrs);
-
-	nr_pages = exynos_gem->size >> PAGE_SHIFT;
-
-	exynos_gem->cookie = dma_alloc_attrs(dev->dev, exynos_gem->size,
-					     &exynos_gem->dma_addr, GFP_KERNEL,
-					     &exynos_gem->dma_attrs);
-	if (!exynos_gem->cookie) {
-		DRM_ERROR("failed to allocate buffer.\n");
-		if (exynos_gem->pages)
-			drm_free_large(exynos_gem->pages);
-		return -ENOMEM;
+	if (!is_drm_iommu_supported(dev)) {
+		if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
+			return exynos_drm_alloc_dma(exynos_gem);
 	}
 
-	exynos_gem->pages = exynos_gem->cookie;
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
+	ret = exynos_drm_get_pages(exynos_gem);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
@@ -186,15 +153,12 @@  static void exynos_drm_free_buf(struct exynos_drm_gem *exynos_gem)
 		return;
 	}
 
-	if (!is_drm_iommu_supported(dev))
-		return exynos_drm_free_dma(exynos_gem);
-
-	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
-			(unsigned long)exynos_gem->dma_addr, exynos_gem->size);
+	if (!is_drm_iommu_supported(dev)) {
+		if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG))
+			return exynos_drm_free_dma(exynos_gem);
+	}
 
-	dma_free_attrs(dev->dev, exynos_gem->size, exynos_gem->cookie,
-			(dma_addr_t)exynos_gem->dma_addr,
-			&exynos_gem->dma_attrs);
+	exynos_drm_put_pages(exynos_gem);
 }
 
 static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
@@ -400,8 +364,8 @@  void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
 	drm_gem_object_unreference_unlocked(obj);
 }
 
-static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
-				      struct vm_area_struct *vma)
+static int exynos_drm_gem_mmap_dma(struct exynos_drm_gem *exynos_gem,
+				   struct vm_area_struct *vma)
 {
 	struct drm_device *drm_dev = exynos_gem->base.dev;
 	unsigned long vm_size;
@@ -579,6 +543,19 @@  int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	DRM_DEBUG_KMS("flags = 0x%x\n", exynos_gem->flags);
 
+	if (!is_drm_iommu_supported(obj->dev)) {
+		if (!(exynos_gem->flags & EXYNOS_BO_NONCONTIG)) {
+			ret = exynos_drm_gem_mmap_dma(exynos_gem, vma);
+			if (ret < 0)
+				drm_gem_vm_close(vma);
+
+			return ret;
+		}
+	}
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
 	/* non-cachable as default. */
 	if (exynos_gem->flags & EXYNOS_BO_CACHABLE)
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
@@ -589,16 +566,7 @@  int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		vma->vm_page_prot =
 			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
 
-	ret = exynos_drm_gem_mmap_buffer(exynos_gem, vma);
-	if (ret)
-		goto err_close_vm;
-
-	return ret;
-
-err_close_vm:
-	drm_gem_vm_close(vma);
-
-	return ret;
+	return 0;
 }
 
 /* low-level interface prime helpers */
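
For reference, the userspace side of the case this patch addresses: a
cachable, non-contiguous buffer, which the DMA-API-only path could not
provide on non-IOMMU hardware. A minimal sketch using the long-standing
exynos GEM create ioctl (error handling trimmed; this is illustrative and
not the new interfaces the discussion above refers to):

#include <sys/ioctl.h>
#include <xf86drm.h>            /* drmIoctl() from libdrm */
#include <drm/exynos_drm.h>     /* exynos GEM uapi */

/* Allocate a cachable, non-contiguous GEM buffer on an exynos DRM fd. */
static int create_cached_bo(int fd, unsigned long size, unsigned int *handle)
{
	struct drm_exynos_gem_create req = {
		.size  = size,
		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE,
	};

	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req))
		return -1;

	*handle = req.handle;
	return 0;
}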