From patchwork Wed Mar 13 12:36:17 2013
X-Patchwork-Submitter: Terje Bergstrom
X-Patchwork-Id: 2263331
From: Terje Bergstrom
Subject: [PATCHv7 01/10] gpu: drm: Support CMA object preallocation
Date: Wed, 13 Mar 2013 14:36:17 +0200
Message-ID: <1363178186-2017-2-git-send-email-tbergstrom@nvidia.com>
X-Mailer: git-send-email 1.7.9.5
In-Reply-To: <1363178186-2017-1-git-send-email-tbergstrom@nvidia.com>
References: <1363178186-2017-1-git-send-email-tbergstrom@nvidia.com>
Cc: linux-kernel@vger.kernel.org, amerilainen@nvidia.com
List-Id: Direct Rendering Infrastructure - Development

From: Arto Merilainen

This patch adds the helper functions drm_gem_cma_object_init() and
drm_gem_cma_object_deinit() for handling CMA objects whose
drm_gem_cma_object structure has already been allocated. This allows
embedding the CMA object inside other, driver-specific structures.

Signed-off-by: Arto Merilainen

---
 drivers/gpu/drm/drm_gem_cma_helper.c | 78 ++++++++++++++++++++++++----------
 include/drm/drm_gem_cma_helper.h     |  9 ++++
 2 files changed, 64 insertions(+), 23 deletions(-)
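To illustrate the embedding pattern these helpers enable, a driver could
wrap the CMA object in its own buffer structure roughly as follows. This
is a sketch only, not part of the patch; struct example_bo and the
example_bo_* functions are hypothetical names, and only the two helpers
added below are assumed from this series:

/* hypothetical driver structure embedding the CMA object (sketch only) */
struct example_bo {
	struct drm_gem_cma_object cma;	/* embedded CMA/gem object */
	u32 flags;			/* driver-private state */
};

static struct example_bo *example_bo_create(struct drm_device *drm,
		unsigned int size)
{
	struct example_bo *bo;
	int err;

	/* the driver, not the helper, allocates the containing structure */
	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	/* allocate the DMA buffer and initialize the embedded gem object */
	err = drm_gem_cma_object_init(drm, &bo->cma, size);
	if (err) {
		kfree(bo);
		return ERR_PTR(err);
	}

	return bo;
}

/* counterpart, e.g. as the driver's gem_free_object callback: tear down
 * the gem object and buffer, then free the whole container */
static void example_bo_free(struct drm_gem_object *gem_obj)
{
	struct example_bo *bo = container_of(to_drm_gem_cma_obj(gem_obj),
			struct example_bo, cma);

	drm_gem_cma_object_deinit(gem_obj);
	kfree(bo);
}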
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0a7e011..3b14280 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -2,6 +2,7 @@
  * drm gem CMA (contiguous memory allocator) helper functions
  *
  * Copyright (C) 2012 Sascha Hauer, Pengutronix
+ * Copyright (C) 2013 NVIDIA CORPORATION, All rights reserved.
  *
  * Based on Samsung Exynos code
  *
@@ -40,30 +41,25 @@ static void drm_gem_cma_buf_destroy(struct drm_device *drm,
 }
 
 /*
- * drm_gem_cma_create - allocate an object with the given size
+ * drm_gem_cma_object_init - allocate buffer and initialize given cma object
  *
- * returns a struct drm_gem_cma_object* on success or ERR_PTR values
- * on failure.
+ * this function allocates memory for a cma buffer and initializes the given
+ * cma object to use the allocated buffer.
  */
-struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
-		unsigned int size)
+
+int drm_gem_cma_object_init(struct drm_device *drm,
+		struct drm_gem_cma_object *cma_obj, unsigned int size)
 {
-	struct drm_gem_cma_object *cma_obj;
 	struct drm_gem_object *gem_obj;
 	int ret;
 
 	size = round_up(size, PAGE_SIZE);
 
-	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
-	if (!cma_obj)
-		return ERR_PTR(-ENOMEM);
-
 	cma_obj->vaddr = dma_alloc_writecombine(drm->dev, size,
 			&cma_obj->paddr, GFP_KERNEL | __GFP_NOWARN);
 	if (!cma_obj->vaddr) {
 		dev_err(drm->dev, "failed to allocate buffer with size %d\n",
 			size);
-		ret = -ENOMEM;
-		goto err_dma_alloc;
+		return -ENOMEM;
 	}
 
 	gem_obj = &cma_obj->base;
@@ -76,7 +72,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 	if (ret)
 		goto err_create_mmap_offset;
 
-	return cma_obj;
+	return 0;
 
 err_create_mmap_offset:
 	drm_gem_object_release(gem_obj);
@@ -84,10 +80,36 @@ err_create_mmap_offset:
 err_obj_init:
 	drm_gem_cma_buf_destroy(drm, cma_obj);
 
-err_dma_alloc:
-	kfree(cma_obj);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_object_init);
+
+/*
+ * drm_gem_cma_create - allocate an object with the given size
+ *
+ * returns a struct drm_gem_cma_object* on success or ERR_PTR values
+ * on failure.
+ */
+struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
+		unsigned int size)
+{
+	struct drm_gem_cma_object *cma_obj;
+	int ret;
+
+	size = round_up(size, PAGE_SIZE);
+
+	cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+	if (!cma_obj)
+		return ERR_PTR(-ENOMEM);
+
+	ret = drm_gem_cma_object_init(drm, cma_obj, size);
+	if (ret) {
+		kfree(cma_obj);
+		return ERR_PTR(ret);
+	}
+
+	return cma_obj;
 
-	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
@@ -133,22 +155,32 @@ err_handle_create:
 }
 
 /*
- * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
- * function
+ * drm_gem_cma_object_deinit - deinitialize cma object
+ *
+ * this function deinitializes the given cma object without releasing the
+ * object memory. this function is a counterpart to the function
+ * drm_gem_cma_object_init().
  */
-void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+void drm_gem_cma_object_deinit(struct drm_gem_object *gem_obj)
 {
-	struct drm_gem_cma_object *cma_obj;
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
 
 	if (gem_obj->map_list.map)
 		drm_gem_free_mmap_offset(gem_obj);
 
 	drm_gem_object_release(gem_obj);
 
-	cma_obj = to_drm_gem_cma_obj(gem_obj);
 	drm_gem_cma_buf_destroy(gem_obj->dev, cma_obj);
+}
+EXPORT_SYMBOL_GPL(drm_gem_cma_object_deinit);
 
+/*
+ * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback
+ * function
+ */
+void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
+{
+	struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem_obj);
+
+	drm_gem_cma_object_deinit(gem_obj);
 	kfree(cma_obj);
 }
 EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 63397ce..5fdccb3 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -13,6 +13,10 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
 	return container_of(gem_obj, struct drm_gem_cma_object, base);
 }
 
+/* deinitialize gem object and release the buffer. this variant does not
+ * release the cma object memory */
+void drm_gem_cma_object_deinit(struct drm_gem_object *gem_obj);
+
 /* free gem object. */
 void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
 
@@ -35,6 +39,11 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
 int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
 		struct drm_device *drm, unsigned int handle);
 
+/* allocate physical memory for a buffer and initialize an already allocated
+ * cma object to use the buffer */
+int drm_gem_cma_object_init(struct drm_device *drm,
+		struct drm_gem_cma_object *cma_obj, unsigned int size);
+
 /* allocate physical memory. */
 struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
 		unsigned int size);