@@ -65,11 +65,11 @@ struct ioctl_hyper_dmabuf_export_fd {
uint32_t fd;
};
-#define IOCTL_HYPER_DMABUF_DESTROY \
-_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_destroy))
-struct ioctl_hyper_dmabuf_destroy {
+#define IOCTL_HYPER_DMABUF_UNEXPORT \
+_IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_unexport))
+struct ioctl_hyper_dmabuf_unexport {
/* IN parameters */
- /* hyper dmabuf id to be destroyed */
+ /* hyper dmabuf id to be unexported */
uint32_t hyper_dmabuf_id;
/* OUT parameters */
/* Status of request */
@@ -104,7 +104,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret) {
- kfree(sgt);
+ sg_free_table(sgt);
return NULL;
}
@@ -125,6 +125,12 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
return sgt;
}
+/* free sg_table */
+void hyper_dmabuf_free_sgt(struct sg_table* sgt)
+{
+ sg_free_table(sgt);
+}
+
/*
* Creates 2 level page directory structure for referencing shared pages.
* Top level page is a single page that contains up to 1024 refids that
@@ -512,6 +518,92 @@ struct sg_table* hyper_dmabuf_map_pages(grant_ref_t top_level_gref, int frst_ofs
return st;
}
+int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force)
+{
+ struct sgt_list *sgtl;
+ struct attachment_list *attachl;
+ struct kmap_vaddr_list *va_kmapl;
+ struct vmap_vaddr_list *va_vmapl;
+
+ if (!sgt_info) {
+ printk("invalid hyper_dmabuf_id\n");
+ return -EINVAL;
+ }
+
+ /* if force != 1, sgt_info can be released only if
+ * there's no activity on exported dma-buf on importer
+ * side.
+ */
+ if (!force &&
+ (!list_empty(&sgt_info->va_kmapped->list) ||
+ !list_empty(&sgt_info->va_vmapped->list) ||
+ !list_empty(&sgt_info->active_sgts->list) ||
+ !list_empty(&sgt_info->active_attached->list))) {
+ printk("dma-buf is used by importer\n");
+ return -EPERM;
+ }
+
+ while (!list_empty(&sgt_info->va_kmapped->list)) {
+ va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
+ struct kmap_vaddr_list, list);
+
+ dma_buf_kunmap(sgt_info->dma_buf, 1, va_kmapl->vaddr);
+ list_del(&va_kmapl->list);
+ kfree(va_kmapl);
+ }
+
+ while (!list_empty(&sgt_info->va_vmapped->list)) {
+ va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
+ struct vmap_vaddr_list, list);
+
+ dma_buf_vunmap(sgt_info->dma_buf, va_vmapl->vaddr);
+ list_del(&va_vmapl->list);
+ kfree(va_vmapl);
+ }
+
+ while (!list_empty(&sgt_info->active_sgts->list)) {
+ attachl = list_first_entry(&sgt_info->active_attached->list,
+ struct attachment_list, list);
+
+ sgtl = list_first_entry(&sgt_info->active_sgts->list,
+ struct sgt_list, list);
+
+ dma_buf_unmap_attachment(attachl->attach, sgtl->sgt,
+ DMA_BIDIRECTIONAL);
+ list_del(&sgtl->list);
+ kfree(sgtl);
+ }
+
+ while (!list_empty(&sgt_info->active_sgts->list)) {
+ attachl = list_first_entry(&sgt_info->active_attached->list,
+ struct attachment_list, list);
+
+ dma_buf_detach(sgt_info->dma_buf, attachl->attach);
+ list_del(&attachl->list);
+ kfree(attachl);
+ }
+
+ /* unmap dma-buf */
+ dma_buf_unmap_attachment(sgt_info->active_attached->attach,
+ sgt_info->active_sgts->sgt,
+ DMA_BIDIRECTIONAL);
+
+	/* detach dma-buf */
+ dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
+
+ /* close connection to dma-buf completely */
+ dma_buf_put(sgt_info->dma_buf);
+
+ hyper_dmabuf_cleanup_gref_table(sgt_info);
+
+ kfree(sgt_info->active_sgts);
+ kfree(sgt_info->active_attached);
+ kfree(sgt_info->va_kmapped);
+ kfree(sgt_info->va_vmapped);
+
+ return 0;
+}
+
inline int hyper_dmabuf_sync_request_and_wait(int id, int ops)
{
struct hyper_dmabuf_ring_rq *req;
@@ -24,6 +24,10 @@ grant_ref_t *hyper_dmabuf_map_gref_table(grant_ref_t *gref_table, int n_pages_ta
struct sg_table* hyper_dmabuf_map_pages(grant_ref_t gref, int frst_ofst, int last_len, int nents, int sdomain,
struct hyper_dmabuf_shared_pages_info *shared_pages_info);
+int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int force);
+
+void hyper_dmabuf_free_sgt(struct sg_table *sgt);
+
int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags);
struct dma_buf* hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo);
@@ -20,7 +20,7 @@ extern struct hyper_dmabuf_private hyper_dmabuf_private;
static uint32_t hyper_dmabuf_id_gen(void) {
/* TODO: add proper implementation */
- static uint32_t id = 0;
+ static uint32_t id = 1000;
static int32_t domid = -1;
if (domid == -1) {
domid = hyper_dmabuf_get_domid();
@@ -259,12 +259,12 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
return ret;
}
-/* removing dmabuf from the database and send int req to the source domain
+/* unexport dmabuf from the database and send int req to the source domain
* to unmap it.
*/
-static int hyper_dmabuf_destroy(void *data)
+static int hyper_dmabuf_unexport(void *data)
{
- struct ioctl_hyper_dmabuf_destroy *destroy_attr;
+ struct ioctl_hyper_dmabuf_unexport *unexport_attr;
struct hyper_dmabuf_sgt_info *sgt_info;
struct hyper_dmabuf_ring_rq *req;
int ret;
@@ -274,20 +274,20 @@ static int hyper_dmabuf_destroy(void *data)
return -EINVAL;
}
- destroy_attr = (struct ioctl_hyper_dmabuf_destroy *)data;
+ unexport_attr = (struct ioctl_hyper_dmabuf_unexport *)data;
/* find dmabuf in export list */
- sgt_info = hyper_dmabuf_find_exported(destroy_attr->hyper_dmabuf_id);
+ sgt_info = hyper_dmabuf_find_exported(unexport_attr->hyper_dmabuf_id);
/* failed to find corresponding entry in export list */
if (sgt_info == NULL) {
- destroy_attr->status = -EINVAL;
+ unexport_attr->status = -EINVAL;
return -EFAULT;
}
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
- hyper_dmabuf_create_request(req, HYPER_DMABUF_DESTROY, &destroy_attr->hyper_dmabuf_id);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &unexport_attr->hyper_dmabuf_id);
/* now send destroy request to remote domain
* currently assuming there's only one importer exist
@@ -300,7 +300,7 @@ static int hyper_dmabuf_destroy(void *data)
/* free msg */
kfree(req);
- destroy_attr->status = ret;
+ unexport_attr->status = ret;
/* Rest of cleanup will follow when importer will free it's buffer,
* current implementation assumes that there is only one importer
@@ -386,7 +386,7 @@ static const struct hyper_dmabuf_ioctl_desc hyper_dmabuf_ioctls[] = {
HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_IMPORTER_RING_SETUP, hyper_dmabuf_importer_ring_setup, 0),
HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_REMOTE, hyper_dmabuf_export_remote, 0),
HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_EXPORT_FD, hyper_dmabuf_export_fd_ioctl, 0),
- HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_DESTROY, hyper_dmabuf_destroy, 0),
+ HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_UNEXPORT, hyper_dmabuf_unexport, 0),
HYPER_DMABUF_IOCTL_DEF(IOCTL_HYPER_DMABUF_QUERY, hyper_dmabuf_query, 0),
};
@@ -12,6 +12,8 @@
#include "hyper_dmabuf_msg.h"
#include "hyper_dmabuf_list.h"
+#define FORCED_UNEXPORTING 0
+
extern struct hyper_dmabuf_private hyper_dmabuf_private;
struct cmd_process {
@@ -45,7 +47,7 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
request->operands[i] = operands[i];
break;
- case HYPER_DMABUF_DESTROY:
+ case HYPER_DMABUF_NOTIFY_UNEXPORT:
/* destroy sg_list for hyper_dmabuf_id on remote side */
/* command : DMABUF_DESTROY,
* operands0 : hyper_dmabuf_id
@@ -83,7 +85,7 @@ void cmd_process_work(struct work_struct *work)
struct cmd_process *proc = container_of(work, struct cmd_process, work);
struct hyper_dmabuf_ring_rq *req;
int domid;
- int i;
+ int i, ret;
req = proc->rq;
domid = proc->domid;
@@ -99,7 +101,7 @@ void cmd_process_work(struct work_struct *work)
* operands4 : top-level reference number for shared pages
* operands5~8 : Driver-specific private data (e.g. graphic buffer's meta info)
*/
- imported_sgt_info = (struct hyper_dmabuf_imported_sgt_info*)kcalloc(1, sizeof(*imported_sgt_info), GFP_KERNEL);
+ imported_sgt_info = kcalloc(1, sizeof(*imported_sgt_info), GFP_KERNEL);
imported_sgt_info->hyper_dmabuf_id = req->operands[0];
imported_sgt_info->frst_ofst = req->operands[2];
imported_sgt_info->last_len = req->operands[3];
@@ -119,7 +121,7 @@ void cmd_process_work(struct work_struct *work)
hyper_dmabuf_register_imported(imported_sgt_info);
break;
- case HYPER_DMABUF_DESTROY_FINISH:
+ case HYPER_DMABUF_UNEXPORT_FINISH:
/* destroy sg_list for hyper_dmabuf_id on local side */
/* command : DMABUF_DESTROY_FINISH,
* operands0 : hyper_dmabuf_id
@@ -128,21 +130,16 @@ void cmd_process_work(struct work_struct *work)
/* TODO: that should be done on workqueue, when received ack from
* all importers that buffer is no longer used
*/
- sgt_info =
- hyper_dmabuf_find_exported(req->operands[0]);
-
- if (sgt_info) {
- hyper_dmabuf_cleanup_gref_table(sgt_info);
-
- /* unmap dmabuf */
- dma_buf_unmap_attachment(sgt_info->active_attached->attach,
- sgt_info->active_sgts->sgt,
- DMA_BIDIRECTIONAL);
- dma_buf_detach(sgt_info->dma_buf, sgt_info->active_attached->attach);
- dma_buf_put(sgt_info->dma_buf);
-
- /* TODO: Rest of cleanup, sgt cleanup etc */
- }
+ sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
+ hyper_dmabuf_remove_exported(req->operands[0]);
+ if (!sgt_info)
+ printk("sgt_info does not exist in the list\n");
+
+ ret = hyper_dmabuf_cleanup_sgt_info(sgt_info, FORCED_UNEXPORTING);
+ if (!ret)
+ kfree(sgt_info);
+ else
+ printk("failed to clean up sgt_info\n");
break;
@@ -184,30 +181,30 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
/* HYPER_DMABUF_DESTROY requires immediate
* follow up so can't be processed in workqueue
*/
- if (req->command == HYPER_DMABUF_DESTROY) {
+ if (req->command == HYPER_DMABUF_NOTIFY_UNEXPORT) {
/* destroy sg_list for hyper_dmabuf_id on remote side */
- /* command : DMABUF_DESTROY,
+ /* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
* operands0 : hyper_dmabuf_id
*/
+
imported_sgt_info =
hyper_dmabuf_find_imported(req->operands[0]);
if (imported_sgt_info) {
- hyper_dmabuf_cleanup_imported_pages(imported_sgt_info);
+ hyper_dmabuf_free_sgt(imported_sgt_info->sgt);
+ hyper_dmabuf_cleanup_imported_pages(imported_sgt_info);
hyper_dmabuf_remove_imported(req->operands[0]);
- /* TODO: cleanup sgt on importer side etc */
+ /* Notify exporter that buffer is freed and it can
+ * cleanup it
+ */
+ req->status = HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP;
+ req->command = HYPER_DMABUF_UNEXPORT_FINISH;
+ } else {
+ req->status = HYPER_DMABUF_REQ_ERROR;
}
- /* Notify exporter that buffer is freed and it can cleanup it */
- req->status = HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP;
- req->command = HYPER_DMABUF_DESTROY_FINISH;
-
-#if 0 /* function is not implemented yet */
-
- ret = hyper_dmabuf_destroy_sgt(req->hyper_dmabuf_id);
-#endif
return req->command;
}
@@ -233,8 +230,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
memcpy(temp_req, req, sizeof(*temp_req));
- proc = (struct cmd_process *) kcalloc(1, sizeof(struct cmd_process),
- GFP_KERNEL);
+ proc = kcalloc(1, sizeof(struct cmd_process), GFP_KERNEL);
proc->rq = temp_req;
proc->domid = domid;
@@ -3,8 +3,8 @@
enum hyper_dmabuf_command {
HYPER_DMABUF_EXPORT = 0x10,
- HYPER_DMABUF_DESTROY,
- HYPER_DMABUF_DESTROY_FINISH,
+ HYPER_DMABUF_NOTIFY_UNEXPORT,
+ HYPER_DMABUF_UNEXPORT_FINISH,
HYPER_DMABUF_OPS_TO_REMOTE,
HYPER_DMABUF_OPS_TO_SOURCE,
};
Unexporting on the exporter's side now has two options: one is to simply remove and free everything so as to literally "disconnect" from the importer; the other is to return failure if any application running on the importer is still attached or DMAing. Currently, whether unexporting is forced or not is determined by how "FORCED_UNEXPORTING" is defined. Also, the word "destroy" in IOCTL commands and several functions has been changed to "unexport", which sounds more reasonable. Signed-off-by: Dongwon Kim <dongwon.kim@intel.com> --- drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h | 8 +-- drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c | 94 ++++++++++++++++++++++++++- drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.h | 4 ++ drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 20 +++--- drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c | 62 +++++++++--------- drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h | 4 +- 6 files changed, 142 insertions(+), 50 deletions(-)