--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -10,6 +10,12 @@ config DRM_EXYNOS
Choose this option if you have a Samsung SoC EXYNOS chipset.
If M is selected the module will be called exynosdrm.
+config DRM_EXYNOS_IOMMU
+ bool "EXYNOS DRM IOMMU Support"
+ depends on DRM_EXYNOS && EXYNOS_IOMMU && ARM_DMA_USE_IOMMU
+ help
+	  Choose this option if you want to use the IOMMU feature with Exynos DRM.
+
config DRM_EXYNOS_DMABUF
bool "EXYNOS DRM DMABUF"
depends on DRM_EXYNOS
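
All three symbols on the new "depends on" line must already be enabled before
the option becomes visible. As a rough illustration, a config fragment along
these lines would exercise it (note that on most trees ARM_DMA_USE_IOMMU is a
non-visible symbol selected by the IOMMU driver rather than set by hand):

        CONFIG_DRM_EXYNOS=y
        CONFIG_EXYNOS_IOMMU=y
        CONFIG_ARM_DMA_USE_IOMMU=y
        CONFIG_DRM_EXYNOS_IOMMU=y
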
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -8,6 +8,7 @@ exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
exynos_drm_plane.o
+exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
exynosdrm-$(CONFIG_DRM_EXYNOS_DMABUF) += exynos_drm_dmabuf.o
exynosdrm-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
exynosdrm-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o \
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -33,71 +33,58 @@
static int lowlevel_buffer_allocate(struct drm_device *dev,
unsigned int flags, struct exynos_drm_gem_buf *buf)
{
- dma_addr_t start_addr;
+ int ret = 0;
unsigned int npages, i = 0;
struct scatterlist *sgl;
- int ret = 0;
+ enum dma_attr attr = DMA_ATTR_FORCE_CONTIGUOUS;
DRM_DEBUG_KMS("%s\n", __FILE__);
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return -EINVAL;
- }
-
if (buf->dma_addr) {
DRM_DEBUG_KMS("already allocated.\n");
return 0;
}
- if (buf->size >= SZ_1M) {
- npages = buf->size >> SECTION_SHIFT;
- buf->page_size = SECTION_SIZE;
- } else if (buf->size >= SZ_64K) {
- npages = buf->size >> 16;
- buf->page_size = SZ_64K;
- } else {
- npages = buf->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
+ init_dma_attrs(&buf->dma_attrs);
+
+ if (flags & EXYNOS_BO_NONCONTIG)
+ attr = DMA_ATTR_WRITE_COMBINE;
+
+ dma_set_attr(attr, &buf->dma_attrs);
+
+ buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
+ &buf->dma_addr, GFP_KERNEL, &buf->dma_attrs);
+ if (!buf->kvaddr) {
+ DRM_ERROR("failed to allocate buffer.\n");
+ return -ENOMEM;
}
buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!buf->sgt) {
DRM_ERROR("failed to allocate sg table.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_attrs;
}
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
+ ret = dma_get_sgtable(dev->dev, buf->sgt, buf->kvaddr, buf->dma_addr,
+ buf->size);
if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- kfree(buf->sgt);
- buf->sgt = NULL;
- return -ENOMEM;
+ DRM_ERROR("failed to get sgtable.\n");
+ goto err_free_sgt;
}
- buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
- &buf->dma_addr, GFP_KERNEL);
- if (!buf->kvaddr) {
- DRM_ERROR("failed to allocate buffer.\n");
- ret = -ENOMEM;
- goto err1;
- }
+ npages = buf->sgt->nents;
buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
if (!buf->pages) {
DRM_ERROR("failed to allocate pages.\n");
ret = -ENOMEM;
- goto err2;
+ goto err_free_table;
}
sgl = buf->sgt->sgl;
- start_addr = buf->dma_addr;
-
while (i < npages) {
- buf->pages[i] = phys_to_page(start_addr);
- sg_set_page(sgl, buf->pages[i], buf->page_size, 0);
- sg_dma_address(sgl) = start_addr;
- start_addr += buf->page_size;
+ buf->pages[i] = sg_page(sgl);
sgl = sg_next(sgl);
i++;
}
@@ -108,14 +95,16 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
buf->size);
return ret;
-err2:
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
- buf->dma_addr = (dma_addr_t)NULL;
-err1:
+
+err_free_table:
sg_free_table(buf->sgt);
+err_free_sgt:
kfree(buf->sgt);
buf->sgt = NULL;
+err_free_attrs:
+ dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
+ buf->dma_addr = (dma_addr_t)NULL;
return ret;
}
@@ -125,16 +114,6 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
{
DRM_DEBUG_KMS("%s.\n", __FILE__);
- /*
- * release only physically continuous memory and
- * non-continuous memory would be released by exynos
- * gem framework.
- */
- if (IS_NONCONTIG_BUFFER(flags)) {
- DRM_DEBUG_KMS("not support allocation type.\n");
- return;
- }
-
if (!buf->dma_addr) {
DRM_DEBUG_KMS("dma_addr is invalid.\n");
return;
@@ -150,11 +129,8 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
kfree(buf->sgt);
buf->sgt = NULL;
- kfree(buf->pages);
- buf->pages = NULL;
-
- dma_free_writecombine(dev->dev, buf->size, buf->kvaddr,
- (dma_addr_t)buf->dma_addr);
+ dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
+ (dma_addr_t)buf->dma_addr, &buf->dma_attrs);
buf->dma_addr = (dma_addr_t)NULL;
}
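
For reference, here is a condensed, self-contained sketch of the allocation
pattern the hunks above switch to. example_alloc_buf() is a hypothetical
helper written against the struct dma_attrs flavour of the DMA API used in
this era; it is a sketch of the technique, not the driver's actual code:

        #include <linux/device.h>
        #include <linux/dma-attrs.h>
        #include <linux/dma-mapping.h>
        #include <linux/err.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        static void *example_alloc_buf(struct device *dev, size_t size,
                                       bool noncontig, dma_addr_t *dma_addr,
                                       struct sg_table **sgtp,
                                       struct dma_attrs *attrs)
        {
                struct sg_table *sgt;
                void *kvaddr;
                int ret;

                init_dma_attrs(attrs);
                /*
                 * A NONCONTIG buffer only has to be contiguous in *device*
                 * address space, which the IOMMU guarantees; otherwise ask
                 * the DMA layer for physically contiguous pages.
                 */
                dma_set_attr(noncontig ? DMA_ATTR_WRITE_COMBINE :
                                         DMA_ATTR_FORCE_CONTIGUOUS, attrs);

                kvaddr = dma_alloc_attrs(dev, size, dma_addr, GFP_KERNEL,
                                         attrs);
                if (!kvaddr)
                        return ERR_PTR(-ENOMEM);

                sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
                if (!sgt) {
                        ret = -ENOMEM;
                        goto err_free;
                }

                /* recover the page list backing the allocation */
                ret = dma_get_sgtable(dev, sgt, kvaddr, *dma_addr, size);
                if (ret < 0) {
                        kfree(sgt);
                        goto err_free;
                }

                *sgtp = sgt;
                return kvaddr;

        err_free:
                dma_free_attrs(dev, size, kvaddr, *dma_addr, attrs);
                return ERR_PTR(ret);
        }
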
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -30,29 +30,31 @@
#include <linux/dma-buf.h>
-static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
- unsigned int page_size)
+static struct sg_table *exynos_get_sgt(struct drm_device *drm_dev,
+ struct exynos_drm_gem_buf *buf)
{
struct sg_table *sgt = NULL;
- struct scatterlist *sgl;
- int i, ret;
+ int ret;
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
goto out;
- ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
+ ret = sg_alloc_table(sgt, buf->sgt->nents, GFP_KERNEL);
if (ret)
goto err_free_sgt;
- if (page_size < PAGE_SIZE)
- page_size = PAGE_SIZE;
-
- for_each_sg(sgt->sgl, sgl, nr_pages, i)
- sg_set_page(sgl, pages[i], page_size, 0);
+ ret = dma_get_sgtable(drm_dev->dev, sgt, buf->kvaddr,
+ buf->dma_addr, buf->size);
+ if (ret < 0) {
+ DRM_ERROR("failed to get sgtable.\n");
+ goto err_free_table;
+ }
return sgt;
+err_free_table:
+ sg_free_table(sgt);
err_free_sgt:
kfree(sgt);
sgt = NULL;
@@ -68,32 +70,31 @@ static struct sg_table *
struct drm_device *dev = gem_obj->base.dev;
struct exynos_drm_gem_buf *buf;
struct sg_table *sgt = NULL;
- unsigned int npages;
int nents;
DRM_DEBUG_PRIME("%s\n", __FILE__);
- mutex_lock(&dev->struct_mutex);
-
buf = gem_obj->buffer;
-
- /* there should always be pages allocated. */
- if (!buf->pages) {
- DRM_ERROR("pages is null.\n");
- goto err_unlock;
+ if (!buf) {
+ DRM_ERROR("buffer is null.\n");
+ return sgt;
}
- npages = buf->size / buf->page_size;
+ mutex_lock(&dev->struct_mutex);
- sgt = exynos_pages_to_sg(buf->pages, npages, buf->page_size);
- if (!sgt) {
- DRM_DEBUG_PRIME("exynos_pages_to_sg returned NULL!\n");
+ sgt = exynos_get_sgt(dev, buf);
+ if (!sgt)
goto err_unlock;
- }
+
nents = dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents) {
+ DRM_ERROR("failed to map sgl with iommu.\n");
+ sgt = NULL;
+ goto err_unlock;
+ }
- DRM_DEBUG_PRIME("npages = %d buffer size = 0x%lx page_size = 0x%lx\n",
- npages, buf->size, buf->page_size);
+ DRM_DEBUG_PRIME("buffer size = 0x%lx page_size = 0x%lx\n",
+ buf->size, buf->page_size);
err_unlock:
mutex_unlock(&dev->struct_mutex);
@@ -105,6 +106,7 @@ static void exynos_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir)
{
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
+
sg_free_table(sgt);
kfree(sgt);
sgt = NULL;
@@ -196,7 +198,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
struct scatterlist *sgl;
struct exynos_drm_gem_obj *exynos_gem_obj;
struct exynos_drm_gem_buf *buffer;
- struct page *page;
int ret;
DRM_DEBUG_PRIME("%s\n", __FILE__);
@@ -233,38 +234,27 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
goto err_unmap_attach;
}
- buffer->pages = kzalloc(sizeof(*page) * sgt->nents, GFP_KERNEL);
- if (!buffer->pages) {
- DRM_ERROR("failed to allocate pages.\n");
- ret = -ENOMEM;
- goto err_free_buffer;
- }
-
exynos_gem_obj = exynos_drm_gem_init(drm_dev, dma_buf->size);
if (!exynos_gem_obj) {
ret = -ENOMEM;
- goto err_free_pages;
+ goto err_free_buffer;
}
sgl = sgt->sgl;
- if (sgt->nents == 1) {
- buffer->dma_addr = sg_dma_address(sgt->sgl);
- buffer->size = sg_dma_len(sgt->sgl);
+ buffer->size = dma_buf->size;
+ buffer->dma_addr = sg_dma_address(sgl);
+ if (sgt->nents == 1) {
/* always physically continuous memory if sgt->nents is 1. */
exynos_gem_obj->flags |= EXYNOS_BO_CONTIG;
} else {
- unsigned int i = 0;
-
- buffer->dma_addr = sg_dma_address(sgl);
- while (i < sgt->nents) {
- buffer->pages[i] = sg_page(sgl);
- buffer->size += sg_dma_len(sgl);
- sgl = sg_next(sgl);
- i++;
- }
-
+ /*
+ * this case could be CONTIG or NONCONTIG type but for now
+ * assume NONCONTIG.
+ * TODO: find a way for the exporter to notify the importer
+ * of its buffer type.
+ */
exynos_gem_obj->flags |= EXYNOS_BO_NONCONTIG;
}
@@ -277,9 +267,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
return &exynos_gem_obj->base;
-err_free_pages:
- kfree(buffer->pages);
- buffer->pages = NULL;
err_free_buffer:
kfree(buffer);
buffer = NULL;
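
Putting the export path together: the map_dma_buf callback above first
rebuilds an sg_table from the backing allocation and then maps it for the
attaching device. A minimal sketch under the same assumptions follows —
exynos_get_sgt() is the helper added by this patch, and unlike the code
above, the sketch frees the table itself when the mapping fails:

        #include <drm/drmP.h>
        #include <linux/dma-buf.h>
        #include <linux/dma-mapping.h>
        #include <linux/scatterlist.h>
        #include <linux/slab.h>

        static struct sg_table *
        example_map_dma_buf(struct dma_buf_attachment *attach,
                            struct drm_device *dev,
                            struct exynos_drm_gem_buf *buf,
                            enum dma_data_direction dir)
        {
                /* clone of buf->sgt, rebuilt via dma_get_sgtable() */
                struct sg_table *sgt = exynos_get_sgt(dev, buf);

                if (!sgt)
                        return NULL;

                /*
                 * map the pages into the importer's (possibly IOMMU)
                 * address space; zero mapped entries means failure.
                 */
                if (!dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir)) {
                        sg_free_table(sgt);
                        kfree(sgt);
                        return NULL;
                }

                return sgt;
        }
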
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -40,6 +40,7 @@
#include "exynos_drm_vidi.h"
#include "exynos_drm_dmabuf.h"
#include "exynos_drm_g2d.h"
+#include "exynos_drm_iommu.h"
#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
@@ -69,6 +70,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
INIT_LIST_HEAD(&private->pageflip_event_list);
dev->dev_private = (void *)private;
+ /*
+ * create a mapping to manage the iommu table and store a pointer
+ * to the iommu mapping structure in the device's archdata. this
+ * mapping is also used to check whether iommu is supported.
+ */
+ ret = drm_create_iommu_mapping(dev);
+ if (ret < 0) {
+ DRM_ERROR("failed to create iommu mapping.\n");
+ goto err_crtc;
+ }
+
drm_mode_config_init(dev);
/* init kms poll for handling hpd */
@@ -83,7 +96,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
for (nr = 0; nr < MAX_CRTC; nr++) {
ret = exynos_drm_crtc_create(dev, nr);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
for (nr = 0; nr < MAX_PLANE; nr++) {
@@ -92,12 +105,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
plane = exynos_plane_init(dev, possible_crtcs, false);
if (!plane)
- goto err_crtc;
+ goto err_release_iommu_mapping;
}
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
- goto err_crtc;
+ goto err_release_iommu_mapping;
/*
* probe sub drivers such as display controller and hdmi driver,
@@ -129,6 +142,8 @@ err_drm_device:
exynos_drm_device_unregister(dev);
err_vblank:
drm_vblank_cleanup(dev);
+err_release_iommu_mapping:
+ drm_release_iommu_mapping(dev);
err_crtc:
drm_mode_config_cleanup(dev);
kfree(private);
@@ -145,6 +160,8 @@ static int exynos_drm_unload(struct drm_device *dev)
drm_vblank_cleanup(dev);
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
+
+ drm_release_iommu_mapping(dev);
kfree(dev->dev_private);
dev->dev_private = NULL;
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -241,6 +241,13 @@ struct drm_exynos_file_private {
/*
* Exynos drm private structure.
+ *
+ * @da_start: start address of the device address space.
+ * with iommu, the device address space starts from this address;
+ * if 0, a default is used.
+ * @da_space_size: size of the device address space.
+ * if 0, a default value is used.
+ * @da_space_order: order of the device address space.
*/
struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
@@ -255,6 +262,10 @@ struct exynos_drm_private {
struct drm_crtc *crtc[MAX_CRTC];
struct drm_property *plane_zpos_property;
struct drm_property *crtc_mode_property;
+
+ unsigned long da_start;
+ unsigned long da_space_size;
+ unsigned long da_space_order;
};
/*
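
Nothing in this patch sets the three new fields from a board file, so they
are zero after kzalloc() and drm_create_iommu_mapping() (added below) falls
back to the EXYNOS_DEV_ADDR_* defaults. A purely hypothetical sketch of how
a driver could pre-seed them before load:

        #include <linux/sizes.h>

        /* hypothetical tuning helper: must run before
         * drm_create_iommu_mapping() is called */
        static void example_tune_da_space(struct exynos_drm_private *priv)
        {
                priv->da_start = 0x20000000;    /* keep the default start */
                priv->da_space_size = SZ_256M;  /* shrink the 1GiB default */
                priv->da_space_order = 4;       /* keep the default order */
        }
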
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -30,10 +30,12 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
+#include <uapi/drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_iommu.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -50,6 +52,32 @@ struct exynos_drm_fb {
struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
+static int check_fb_gem_memory_type(struct drm_device *drm_dev,
+ struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ unsigned int flags;
+
+ /*
+ * if the exynos drm driver supports iommu then a framebuffer can
+ * use all buffer types.
+ */
+ if (is_drm_iommu_supported(drm_dev))
+ return 0;
+
+ flags = exynos_gem_obj->flags;
+
+ /*
+ * without iommu support, physically non-contiguous memory cannot
+ * be used for a framebuffer.
+ */
+ if (IS_NONCONTIG_BUFFER(flags)) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
@@ -128,14 +156,25 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
int ret;
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ return ERR_PTR(-EINVAL);
+ }
+
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
+ exynos_fb->exynos_gem_obj[0] = exynos_gem_obj;
+
ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
DRM_ERROR("failed to initialize framebuffer\n");
@@ -143,7 +182,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
}
drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
- exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
return &exynos_fb->fb;
}
@@ -214,6 +252,9 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
for (i = 1; i < exynos_fb->buf_cnt; i++) {
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
+
obj = drm_gem_object_lookup(dev, file_priv,
mode_cmd->handles[i]);
if (!obj) {
@@ -222,6 +263,15 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
return ERR_PTR(-ENOENT);
}
+ exynos_gem_obj = to_exynos_gem_obj(obj);
+
+ ret = check_fb_gem_memory_type(dev, exynos_gem_obj);
+ if (ret < 0) {
+ DRM_ERROR("cannot use this gem memory type for fb.\n");
+ exynos_drm_fb_destroy(fb);
+ return ERR_PTR(ret);
+ }
+
exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
}
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -83,61 +83,12 @@ static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
- if (!IS_NONCONTIG_BUFFER(flags)) {
- if (size >= SZ_1M)
- return roundup(size, SECTION_SIZE);
- else if (size >= SZ_64K)
- return roundup(size, SZ_64K);
- else
- goto out;
- }
-out:
- return roundup(size, PAGE_SIZE);
-}
-
-struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct page *p, **pages;
- int i, npages;
-
- npages = obj->size >> PAGE_SHIFT;
-
- pages = drm_malloc_ab(npages, sizeof(struct page *));
- if (pages == NULL)
- return ERR_PTR(-ENOMEM);
-
- for (i = 0; i < npages; i++) {
- p = alloc_page(gfpmask);
- if (IS_ERR(p))
- goto fail;
- pages[i] = p;
- }
-
- return pages;
-
-fail:
- while (--i)
- __free_page(pages[i]);
-
- drm_free_large(pages);
- return ERR_CAST(p);
-}
-
-static void exynos_gem_put_pages(struct drm_gem_object *obj,
- struct page **pages)
-{
- int npages;
+ /* TODO */
- npages = obj->size >> PAGE_SHIFT;
-
- while (--npages >= 0)
- __free_page(pages[npages]);
-
- drm_free_large(pages);
+ return roundup(size, PAGE_SIZE);
}
-static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
+static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
struct vm_area_struct *vma,
unsigned long f_vaddr,
pgoff_t page_offset)
@@ -157,85 +108,6 @@ static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
return vm_insert_mixed(vma, f_vaddr, pfn);
}
-static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
- struct scatterlist *sgl;
- struct page **pages;
- unsigned int npages, i = 0;
- int ret;
-
- if (buf->pages) {
- DRM_DEBUG_KMS("already allocated.\n");
- return -EINVAL;
- }
-
- pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
- if (IS_ERR(pages)) {
- DRM_ERROR("failed to get pages.\n");
- return PTR_ERR(pages);
- }
-
- npages = obj->size >> PAGE_SHIFT;
- buf->page_size = PAGE_SIZE;
-
- buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
- if (!buf->sgt) {
- DRM_ERROR("failed to allocate sg table.\n");
- ret = -ENOMEM;
- goto err;
- }
-
- ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
- if (ret < 0) {
- DRM_ERROR("failed to initialize sg table.\n");
- ret = -EFAULT;
- goto err1;
- }
-
- sgl = buf->sgt->sgl;
-
- /* set all pages to sg list. */
- while (i < npages) {
- sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
- sg_dma_address(sgl) = page_to_phys(pages[i]);
- i++;
- sgl = sg_next(sgl);
- }
-
- /* add some codes for UNCACHED type here. TODO */
-
- buf->pages = pages;
- return ret;
-err1:
- kfree(buf->sgt);
- buf->sgt = NULL;
-err:
- exynos_gem_put_pages(obj, pages);
- return ret;
-
-}
-
-static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
-{
- struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
- struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
-
- /*
- * if buffer typs is EXYNOS_BO_NONCONTIG then release all pages
- * allocated at gem fault handler.
- */
- sg_free_table(buf->sgt);
- kfree(buf->sgt);
- buf->sgt = NULL;
-
- exynos_gem_put_pages(obj, buf->pages);
- buf->pages = NULL;
-
- /* add some codes for UNCACHED type here. TODO */
-}
-
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
struct drm_file *file_priv,
unsigned int *handle)
@@ -270,9 +142,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
- if (!buf->pages)
- return;
-
/*
* do not release memory region from exporter.
*
@@ -282,10 +151,7 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
if (obj->import_attach)
goto out;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
- exynos_drm_gem_put_pages(obj);
- else
- exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
+ exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);
out:
exynos_drm_fini_buf(obj->dev, buf);
@@ -364,22 +230,10 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
/* set memory type and cache attribute from user side. */
exynos_gem_obj->flags = flags;
- /*
- * allocate all pages as desired size if user wants to allocate
- * physically non-continuous memory.
- */
- if (flags & EXYNOS_BO_NONCONTIG) {
- ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
- } else {
- ret = exynos_drm_alloc_buf(dev, buf, flags);
- if (ret < 0) {
- drm_gem_object_release(&exynos_gem_obj->base);
- goto err_fini_buf;
- }
+ ret = exynos_drm_alloc_buf(dev, buf, flags);
+ if (ret < 0) {
+ drm_gem_object_release(&exynos_gem_obj->base);
+ goto err_fini_buf;
}
return exynos_gem_obj;
@@ -495,8 +349,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
struct exynos_drm_gem_buf *buffer;
- unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
- int ret;
+ unsigned long vm_size;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -504,7 +357,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
update_vm_cache_attr(exynos_gem_obj, vma);
- vm_size = usize = vma->vm_end - vma->vm_start;
+ vm_size = vma->vm_end - vma->vm_start;
/*
* a buffer contains information to physically continuous memory
@@ -516,42 +369,9 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
if (vm_size > buffer->size)
return -EINVAL;
- if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
- int i = 0;
-
- if (!buffer->pages)
- return -EINVAL;
-
- vma->vm_flags |= VM_MIXEDMAP;
-
- do {
- ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
- if (ret) {
- DRM_ERROR("failed to remap user space.\n");
- return ret;
- }
-
- uaddr += PAGE_SIZE;
- usize -= PAGE_SIZE;
- } while (usize > 0);
- } else {
- /*
- * get page frame number to physical memory to be mapped
- * to user space.
- */
- pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
- PAGE_SHIFT;
-
- DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
-
- if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
- vma->vm_page_prot)) {
- DRM_ERROR("failed to remap pfn range.\n");
- return -EAGAIN;
- }
- }
-
- return 0;
+ return dma_mmap_attrs(obj->dev->dev, vma, buffer->kvaddr,
+ buffer->dma_addr, buffer->size,
+ &buffer->dma_attrs);
}
static const struct file_operations exynos_drm_gem_fops = {
@@ -753,9 +573,9 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
mutex_lock(&dev->struct_mutex);
- ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
+ ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
if (ret < 0)
- DRM_ERROR("failed to map pages.\n");
+ DRM_ERROR("failed to map a buffer with user.\n");
mutex_unlock(&dev->struct_mutex);
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -46,6 +46,7 @@
struct exynos_drm_gem_buf {
void __iomem *kvaddr;
dma_addr_t dma_addr;
+ struct dma_attrs dma_attrs;
struct sg_table *sgt;
struct page **pages;
unsigned long page_size;
new file mode 100644
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -0,0 +1,150 @@
+/* exynos_drm_iommu.c
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <drmP.h>
+#include <drm/exynos_drm.h>
+
+#include <linux/dma-mapping.h>
+#include <linux/iommu.h>
+#include <linux/kref.h>
+
+#include <asm/dma-iommu.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_iommu.h"
+
+/*
+ * drm_create_iommu_mapping - create a mapping structure
+ *
+ * @drm_dev: DRM device
+ */
+int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct dma_iommu_mapping *mapping = NULL;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct device *dev = drm_dev->dev;
+
+ if (!priv->da_start)
+ priv->da_start = EXYNOS_DEV_ADDR_START;
+ if (!priv->da_space_size)
+ priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
+ if (!priv->da_space_order)
+ priv->da_space_order = EXYNOS_DEV_ADDR_ORDER;
+
+ mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
+ priv->da_space_size,
+ priv->da_space_order);
+ if (!mapping)
+ return -ENOMEM;
+
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(dev, 0xffffffffu);
+ dev->archdata.mapping = mapping;
+
+ return 0;
+}
+
+/*
+ * drm_release_iommu_mapping - release iommu mapping structure
+ *
+ * @drm_dev: DRM device
+ *
+ * once mapping->kref drops to 0, everything related to the iommu
+ * mapping is released.
+ */
+void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+ struct device *dev = drm_dev->dev;
+
+ arm_iommu_release_mapping(dev->archdata.mapping);
+}
+
+/*
+ * drm_iommu_attach_device - attach device to iommu mapping
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be attached
+ *
+ * This function should be called by sub drivers to attach themselves to
+ * the iommu mapping.
+ */
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ int ret;
+
+ if (!dev->archdata.mapping) {
+ DRM_ERROR("iommu_mapping is null.\n");
+ return -EFAULT;
+ }
+
+ subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev,
+ sizeof(*subdrv_dev->dma_parms),
+ GFP_KERNEL);
+ dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
+
+ ret = arm_iommu_attach_device(subdrv_dev, dev->archdata.mapping);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("failed iommu attach.\n");
+ return ret;
+ }
+
+ /*
+ * Set dma_ops on the drm_device just once.
+ *
+ * The dma mapping api needs a device object; it is used to
+ * allocate physical memory and map it through the iommu table.
+ * Once the iommu attach has succeeded, the sub driver has the
+ * iommu dma_ops, and all sub drivers share the same dma_ops.
+ */
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = subdrv_dev->archdata.dma_ops;
+
+ return 0;
+}
+
+/*
+ * drm_iommu_detach_device - detach device address space mapping from device
+ *
+ * @drm_dev: DRM device
+ * @subdrv_dev: device to be detached
+ *
+ * This function should be called by sub drivers to detach themselves from
+ * the iommu mapping.
+ */
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ struct device *dev = drm_dev->dev;
+ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+ if (!mapping || !mapping->domain)
+ return;
+
+ iommu_detach_device(mapping->domain, subdrv_dev);
+ drm_release_iommu_mapping(drm_dev);
+}
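
To make the calling convention concrete, here is a minimal sketch of how a
KMS sub driver (FIMD, HDMI, and so on) would be expected to use these
helpers from its probe/remove callbacks. The example_* names and the
fallback behaviour are assumptions for illustration, not code from this
patch:

        #include <drm/drmP.h>

        #include "exynos_drm_iommu.h"

        static int example_subdrv_probe(struct drm_device *drm_dev,
                                        struct device *dev)
        {
                int ret;

                /* without iommu support the sub driver simply skips the
                 * attach and stays limited to contiguous buffers */
                if (!is_drm_iommu_supported(drm_dev))
                        return 0;

                ret = drm_iommu_attach_device(drm_dev, dev);
                if (ret)
                        DRM_ERROR("failed to attach %s to iommu mapping.\n",
                                        dev_name(dev));

                return ret;
        }

        static void example_subdrv_remove(struct drm_device *drm_dev,
                                          struct device *dev)
        {
                if (is_drm_iommu_supported(drm_dev))
                        drm_iommu_detach_device(drm_dev, dev);
        }
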
new file mode 100644
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -0,0 +1,85 @@
+/* exynos_drm_iommu.h
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_IOMMU_H_
+#define _EXYNOS_DRM_IOMMU_H_
+
+#define EXYNOS_DEV_ADDR_START 0x20000000
+#define EXYNOS_DEV_ADDR_SIZE 0x40000000
+#define EXYNOS_DEV_ADDR_ORDER 0x4
+
+#ifdef CONFIG_DRM_EXYNOS_IOMMU
+
+int drm_create_iommu_mapping(struct drm_device *drm_dev);
+
+void drm_release_iommu_mapping(struct drm_device *drm_dev);
+
+int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev);
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+#ifdef CONFIG_ARM_DMA_USE_IOMMU
+ struct device *dev = drm_dev->dev;
+
+ return dev->archdata.mapping ? true : false;
+#else
+ return false;
+#endif
+}
+
+#else
+
+struct dma_iommu_mapping;
+static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
+{
+ return 0;
+}
+
+static inline void drm_release_iommu_mapping(struct drm_device *drm_dev)
+{
+}
+
+static inline int drm_iommu_attach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+ return 0;
+}
+
+static inline void drm_iommu_detach_device(struct drm_device *drm_dev,
+ struct device *subdrv_dev)
+{
+}
+
+static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
+{
+ return false;
+}
+
+#endif
+#endif