From patchwork Thu Dec  6 18:07:48 2012
From: Aaron Plattner <aplattner@nvidia.com>
To: dri-devel@lists.freedesktop.org, Jerome Glisse, David Airlie
Subject: [PATCH 1/4] drm: add prime helpers
Date: Thu, 6 Dec 2012 10:07:48 -0800
Message-ID: <1354817271-5121-2-git-send-email-aplattner@nvidia.com>
X-Mailer: git-send-email 1.7.12
In-Reply-To: <1354817271-5121-1-git-send-email-aplattner@nvidia.com>
References: <1354817271-5121-1-git-send-email-aplattner@nvidia.com>

Instead of reimplementing all of the dma_buf functionality in every driver,
create helpers drm_gem_prime_import and drm_gem_prime_export that implement it
in terms of new, lower-level hook functions:

  gem_prime_pin: called when a buffer is first exported, typically used to
      pin the buffer into the GTT
  gem_prime_get_pages: convert a drm_gem_object to an sg_table for export
  gem_prime_import_sg: convert an sg_table into a drm_gem_object
  gem_prime_vmap, gem_prime_vunmap: map and unmap an object

These hooks are optional; drivers opt in by using drm_gem_prime_import and
drm_gem_prime_export as the .gem_prime_import and .gem_prime_export fields
of struct drm_driver.

Signed-off-by: Aaron Plattner <aplattner@nvidia.com>
---
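For illustration only (not part of the patch): a driver that opts in ends up
wiring its struct drm_driver roughly as below. The foo_* hooks are
hypothetical driver internals; rough sketches of them follow the diff at the
end of this mail.

static struct drm_driver foo_driver = {
	/* PRIME needs GEM objects and the DRIVER_PRIME feature bit */
	.driver_features	= DRIVER_GEM | DRIVER_PRIME,
	/* generic entry points provided by this patch */
	.gem_prime_export	= drm_gem_prime_export,
	.gem_prime_import	= drm_gem_prime_import,
	/* low-level hooks the helpers call back into */
	.gem_prime_pin		= foo_gem_prime_pin,       /* optional */
	.gem_prime_get_pages	= foo_gem_prime_get_pages, /* export path */
	.gem_prime_import_sg	= foo_gem_prime_import_sg, /* import path */
	.gem_prime_vmap		= foo_gem_prime_vmap,      /* optional */
	.gem_prime_vunmap	= foo_gem_prime_vunmap,    /* optional */
	/* ... remaining driver fields as usual ... */
};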
 drivers/gpu/drm/drm_prime.c | 190 +++++++++++++++++++++++++++++++++++++++++++-
 include/drm/drmP.h          |  15 ++++
 2 files changed, 204 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 7f12573..566c2ac 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -53,7 +53,8 @@
  * Self-importing: if userspace is using PRIME as a replacement for flink
  * then it will get a fd->handle request for a GEM object that it created.
  * Drivers should detect this situation and return back the gem object
- * from the dma-buf private.
+ * from the dma-buf private. Prime will do this automatically for drivers that
+ * use the drm_gem_prime_{import,export} helpers.
  */

 struct drm_prime_member {
@@ -62,6 +63,146 @@ struct drm_prime_member {
 	uint32_t handle;
 };

+static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
+		enum dma_data_direction dir)
+{
+	struct drm_gem_object *obj = attach->dmabuf->priv;
+	struct sg_table *st;
+
+	mutex_lock(&obj->dev->struct_mutex);
+
+	st = obj->dev->driver->gem_prime_get_pages(obj);
+
+	if (!IS_ERR_OR_NULL(st))
+		dma_map_sg(attach->dev, st->sgl, st->nents, dir);
+
+	mutex_unlock(&obj->dev->struct_mutex);
+	return st;
+}
+
+static void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+		struct sg_table *sg, enum dma_data_direction dir)
+{
+	dma_unmap_sg(attach->dev, sg->sgl, sg->nents, dir);
+	sg_free_table(sg);
+	kfree(sg);
+}
+
+static void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+
+	if (obj->export_dma_buf == dma_buf) {
+		/* drop the reference the export fd holds */
+		obj->export_dma_buf = NULL;
+		drm_gem_object_unreference_unlocked(obj);
+	}
+}
+
+static void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+	void *virtual = ERR_PTR(-EINVAL);
+
+	if (!dev->driver->gem_prime_vmap)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&dev->struct_mutex);
+	if (obj->vmapping_count) {
+		obj->vmapping_count++;
+		virtual = obj->vmapping_ptr;
+		goto out_unlock;
+	}
+
+	virtual = dev->driver->gem_prime_vmap(obj);
+	if (IS_ERR(virtual)) {
+		mutex_unlock(&dev->struct_mutex);
+		return virtual;
+	}
+	obj->vmapping_count = 1;
+	obj->vmapping_ptr = virtual;
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	return obj->vmapping_ptr;
+}
+
+static void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	obj->vmapping_count--;
+	if (obj->vmapping_count == 0) {
+		dev->driver->gem_prime_vunmap(obj);
+		obj->vmapping_ptr = NULL;
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+
+static void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+static void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf,
+		unsigned long page_num)
+{
+	return NULL;
+}
+
+static void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf,
+		unsigned long page_num, void *addr)
+{
+
+}
+
+static int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+		struct vm_area_struct *vma)
+{
+	return -EINVAL;
+}
+
+static int drm_gem_begin_cpu_access(struct dma_buf *dma_buf,
+		size_t start, size_t length, enum dma_data_direction direction)
+{
+	return -EINVAL;
+}
+
+static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
+	.map_dma_buf = drm_gem_map_dma_buf,
+	.unmap_dma_buf = drm_gem_unmap_dma_buf,
+	.release = drm_gem_dmabuf_release,
+	.kmap = drm_gem_dmabuf_kmap,
+	.kmap_atomic = drm_gem_dmabuf_kmap_atomic,
+	.kunmap = drm_gem_dmabuf_kunmap,
+	.kunmap_atomic = drm_gem_dmabuf_kunmap_atomic,
+	.mmap = drm_gem_dmabuf_mmap,
+	.vmap = drm_gem_dmabuf_vmap,
+	.vunmap = drm_gem_dmabuf_vunmap,
+	.begin_cpu_access = drm_gem_begin_cpu_access,
+};
+
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+		struct drm_gem_object *obj, int flags)
+{
+	if (dev->driver->gem_prime_pin) {
+		int ret = dev->driver->gem_prime_pin(obj);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
+			0600);
+}
+EXPORT_SYMBOL(drm_gem_prime_export);
+
 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 		int *prime_fd)
@@ -117,6 +258,53 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+		struct dma_buf *dma_buf)
+{
+	struct dma_buf_attachment *attach;
+	struct sg_table *sg;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!dev->driver->gem_prime_import_sg)
+		return ERR_PTR(-EINVAL);
+
+	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
+		obj = dma_buf->priv;
+		if (obj->dev == dev) {
+			drm_gem_object_reference(obj);
+			return obj;
+		}
+	}
+
+	attach = dma_buf_attach(dma_buf, dev->dev);
+	if (IS_ERR(attach))
+		return ERR_CAST(attach);
+
+	sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+	if (IS_ERR_OR_NULL(sg)) {
+		ret = PTR_ERR(sg);
+		goto fail_detach;
+	}
+
+	obj = dev->driver->gem_prime_import_sg(dev, dma_buf->size, sg);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto fail_unmap;
+	}
+
+	obj->import_attach = attach;
+
+	return obj;
+
+fail_unmap:
+	dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
+fail_detach:
+	dma_buf_detach(dma_buf, attach);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(drm_gem_prime_import);
+
 int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 		struct drm_file *file_priv, int prime_fd, uint32_t *handle)
 {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fad21c9..f65cae9 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -674,6 +674,10 @@ struct drm_gem_object {

 	/* dma buf attachment backing this object */
 	struct dma_buf_attachment *import_attach;
+
+	/* vmap information */
+	int vmapping_count;
+	void *vmapping_ptr;
 };

 #include <drm/drm_crtc.h>
@@ -919,6 +923,13 @@ struct drm_driver {
 	/* import dmabuf -> GEM */
 	struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
 				struct dma_buf *dma_buf);
+	/* low-level interface used by drm_gem_prime_{import,export} */
+	int (*gem_prime_pin)(struct drm_gem_object *obj);
+	struct sg_table *(*gem_prime_get_pages)(struct drm_gem_object *obj);
+	struct drm_gem_object *(*gem_prime_import_sg)(struct drm_device *dev,
+				size_t size, struct sg_table *sg);
+	void *(*gem_prime_vmap)(struct drm_gem_object *obj);
+	void (*gem_prime_vunmap)(struct drm_gem_object *obj);

 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1540,9 +1551,13 @@ extern int drm_clients_info(struct seq_file *m, void* data);
 extern int drm_gem_name_info(struct seq_file *m, void *data);


+extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+		struct drm_gem_object *obj, int flags);
 extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 		struct drm_file *file_priv, uint32_t handle, uint32_t flags,
 		int *prime_fd);
+extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+		struct dma_buf *dma_buf);
 extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 		struct drm_file *file_priv, int prime_fd, uint32_t *handle);
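As promised above, rough sketches of the hypothetical foo_* hooks. This
assumes a driver whose objects are backed by a plain struct page array, and
it reuses the existing drm_prime_pages_to_sg() and
drm_gem_private_object_init() kernel helpers; everything named foo_* is
invented for illustration and real drivers will differ in how they track
backing storage.

/* hypothetical driver object: a GEM object plus its backing pages */
struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* populated when the object is created */
	struct sg_table *sg;	/* set when the object is imported */
};

#define to_foo_obj(gobj) container_of(gobj, struct foo_gem_object, base)

static int foo_gem_prime_pin(struct drm_gem_object *obj)
{
	/* keep the backing pages fixed while the buffer is shared; a
	 * driver with movable buffers would pin them into the GTT here */
	return 0;
}

static struct sg_table *foo_gem_prime_get_pages(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = to_foo_obj(obj);

	/* describe the backing pages to the importing device */
	return drm_prime_pages_to_sg(bo->pages, obj->size >> PAGE_SHIFT);
}

static struct drm_gem_object *foo_gem_prime_import_sg(struct drm_device *dev,
		size_t size, struct sg_table *sg)
{
	struct foo_gem_object *bo;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* no shmem backing of our own: the pages live in the exporter */
	ret = drm_gem_private_object_init(dev, &bo->base, size);
	if (ret) {
		kfree(bo);
		return ERR_PTR(ret);
	}

	bo->sg = sg;	/* the prime core sets obj->import_attach for us */
	return &bo->base;
}

The gem_prime_vmap/gem_prime_vunmap hooks would similarly create and tear
down a kernel mapping of the object's pages, for example via vmap() and
vunmap().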