@@ -9,6 +9,7 @@
#include "hyper_dmabuf_imp.h"
#include "xen/hyper_dmabuf_xen_comm.h"
#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_list.h"
#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
@@ -104,7 +105,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
if (ret) {
- sg_free_table(sgt);
+ hyper_dmabuf_free_sgt(sgt);
return NULL;
}
@@ -129,6 +130,7 @@ struct sg_table* hyper_dmabuf_create_sgt(struct page **pages,
void hyper_dmabuf_free_sgt(struct sg_table* sgt)
{
sg_free_table(sgt);
+ kfree(sgt);
}
/*
@@ -583,6 +585,9 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
kfree(attachl);
}
+ /* Start cleanup of buffer in reverse order to exporting */
+ hyper_dmabuf_cleanup_gref_table(sgt_info);
+
/* unmap dma-buf */
dma_buf_unmap_attachment(sgt_info->active_attached->attach,
sgt_info->active_sgts->sgt,
@@ -594,8 +599,6 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
/* close connection to dma-buf completely */
dma_buf_put(sgt_info->dma_buf);
- hyper_dmabuf_cleanup_gref_table(sgt_info);
-
kfree(sgt_info->active_sgts);
kfree(sgt_info->active_attached);
kfree(sgt_info->va_kmapped);
@@ -694,6 +697,9 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_MAP);
+ kfree(page_info->pages);
+ kfree(page_info);
+
if (ret < 0) {
printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
@@ -741,12 +747,34 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dmabuf)
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+ if (sgt_info) {
+ /* dmabuf fd is being released - decrease refcount */
+ sgt_info->ref_count--;
+
+ /* if no one else in that domain is using that buffer, unmap it for now */
+ if (sgt_info->ref_count == 0) {
+ hyper_dmabuf_cleanup_imported_pages(sgt_info);
+ hyper_dmabuf_free_sgt(sgt_info->sgt);
+ sgt_info->sgt = NULL;
+ }
+ }
+
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_RELEASE);
if (ret < 0) {
printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
+
+ /*
+ * Check if the buffer is still valid and, if not, remove it from the
+ * imported list. That has to be done after sending the sync request.
+ */
+ if (sgt_info && sgt_info->ref_count == 0 &&
+ sgt_info->flags == HYPER_DMABUF_SGT_INVALID) {
+ hyper_dmabuf_remove_imported(sgt_info->hyper_dmabuf_id);
+ kfree(sgt_info);
+ }
}
static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction dir)
@@ -944,6 +972,9 @@ int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int fla
fd = dma_buf_fd(dmabuf, flags);
+ /* dmabuf fd is exported for the given buffer - increase its ref count */
+ dinfo->ref_count++;
+
return fd;
}
@@ -135,6 +135,7 @@ static int hyper_dmabuf_export_remote(void *data)
/* TODO: We might need to consider using port number on event channel? */
sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
sgt_info->dma_buf = dma_buf;
+ sgt_info->flags = 0;
sgt_info->active_sgts = kcalloc(1, sizeof(struct sgt_list), GFP_KERNEL);
sgt_info->active_attached = kcalloc(1, sizeof(struct attachment_list), GFP_KERNEL);
@@ -233,6 +234,15 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (imported_sgt_info == NULL) /* can't find sgt from the table */
return -1;
+ /*
+ * Check if buffer was not unexported by exporter.
+ * In such case the exporter is waiting for the importer to finish using
+ * that buffer, so do not allow exporting an fd for it anymore.
+ */
+ if (imported_sgt_info->flags == HYPER_DMABUF_SGT_INVALID) {
+ return -EINVAL;
+ }
+
printk("%s Found buffer gref %d off %d last len %d nents %d domain %d\n", __func__,
imported_sgt_info->gref, imported_sgt_info->frst_ofst,
imported_sgt_info->last_len, imported_sgt_info->nents,
@@ -289,9 +299,7 @@ static int hyper_dmabuf_unexport(void *data)
hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &unexport_attr->hyper_dmabuf_id);
- /* now send destroy request to remote domain
- * currently assuming there's only one importer exist
- */
+ /* Now send unexport request to remote domain, marking that buffer should not be used anymore */
ret = hyper_dmabuf_send_request(sgt_info->hyper_dmabuf_rdomain, req, true);
if (ret < 0) {
kfree(req);
@@ -300,11 +308,23 @@ static int hyper_dmabuf_unexport(void *data)
/* free msg */
kfree(req);
- unexport_attr->status = ret;
- /* Rest of cleanup will follow when importer will free it's buffer,
- * current implementation assumes that there is only one importer
- */
+ /*
+ * Check if any importer is still using the buffer; if not, clean it up
+ * completely, otherwise mark the buffer as unexported and postpone its
+ * cleanup until the importer finishes using it.
+ */
+ if (list_empty(&sgt_info->active_sgts->list) &&
+ list_empty(&sgt_info->active_attached->list)) {
+ hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
+ hyper_dmabuf_remove_exported(unexport_attr->hyper_dmabuf_id);
+ kfree(sgt_info);
+ } else {
+ sgt_info->flags = HYPER_DMABUF_SGT_UNEXPORTED;
+ }
+
+ /* TODO: should we mark here whether the buffer was destroyed immediately or its cleanup was postponed? */
+ unexport_attr->status = ret;
return ret;
}
@@ -81,11 +81,10 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
void cmd_process_work(struct work_struct *work)
{
struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
- struct hyper_dmabuf_sgt_info *sgt_info;
struct cmd_process *proc = container_of(work, struct cmd_process, work);
struct hyper_dmabuf_ring_rq *req;
int domid;
- int i, ret;
+ int i;
req = proc->rq;
domid = proc->domid;
@@ -118,31 +117,11 @@ void cmd_process_work(struct work_struct *work)
for (i=0; i<4; i++)
imported_sgt_info->private[i] = req->operands[5+i];
+ imported_sgt_info->flags = 0;
+ imported_sgt_info->ref_count = 0;
hyper_dmabuf_register_imported(imported_sgt_info);
break;
- case HYPER_DMABUF_UNEXPORT_FINISH:
- /* destroy sg_list for hyper_dmabuf_id on local side */
- /* command : DMABUF_DESTROY_FINISH,
- * operands0 : hyper_dmabuf_id
- */
-
- /* TODO: that should be done on workqueue, when received ack from
- * all importers that buffer is no longer used
- */
- sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
- hyper_dmabuf_remove_exported(req->operands[0]);
- if (!sgt_info)
- printk("sgt_info does not exist in the list\n");
-
- ret = hyper_dmabuf_cleanup_sgt_info(sgt_info, FORCED_UNEXPORTING);
- if (!ret)
- kfree(sgt_info);
- else
- printk("failed to clean up sgt_info\n");
-
- break;
-
case HYPER_DMABUF_OPS_TO_REMOTE:
/* notifying dmabuf map/unmap to importer (probably not needed) */
/* for dmabuf synchronization */
@@ -191,16 +170,18 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
hyper_dmabuf_find_imported(req->operands[0]);
if (imported_sgt_info) {
- hyper_dmabuf_free_sgt(imported_sgt_info->sgt);
-
- hyper_dmabuf_cleanup_imported_pages(imported_sgt_info);
- hyper_dmabuf_remove_imported(req->operands[0]);
-
- /* Notify exporter that buffer is freed and it can
- * cleanup it
- */
- req->status = HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP;
- req->command = HYPER_DMABUF_UNEXPORT_FINISH;
+ /* check if buffer is still mapped and in use */
+ if (imported_sgt_info->sgt) {
+ /*
+ * Buffer is still in use, just mark that it should
+ * not be allowed to export its fd anymore.
+ */
+ imported_sgt_info->flags = HYPER_DMABUF_SGT_INVALID;
+ } else {
+ /* No one is using buffer, remove it from imported list */
+ hyper_dmabuf_remove_imported(req->operands[0]);
+ kfree(imported_sgt_info);
+ }
} else {
req->status = HYPER_DMABUF_REQ_ERROR;
}
@@ -4,7 +4,6 @@
enum hyper_dmabuf_command {
HYPER_DMABUF_EXPORT = 0x10,
HYPER_DMABUF_NOTIFY_UNEXPORT,
- HYPER_DMABUF_UNEXPORT_FINISH,
HYPER_DMABUF_OPS_TO_REMOTE,
HYPER_DMABUF_OPS_TO_SOURCE,
};
@@ -8,6 +8,7 @@
#include "hyper_dmabuf_drv.h"
#include "xen/hyper_dmabuf_xen_comm.h"
#include "hyper_dmabuf_msg.h"
+#include "hyper_dmabuf_imp.h"
extern struct hyper_dmabuf_private hyper_dmabuf_private;
@@ -114,10 +115,17 @@ int hyper_dmabuf_remote_sync(int id, int ops)
break;
case HYPER_DMABUF_OPS_RELEASE:
- /* remote importer shouldn't release dma_buf because
- * exporter will hold handle to the dma_buf as
- * far as dma_buf is shared with other domains.
+ /*
+ * Importer just released buffer fd, check if there is any other importer still using it.
+ * If not and buffer was unexported, clean up shared data and remove that buffer.
*/
+ if (list_empty(&sgt_info->active_sgts->list) && list_empty(&sgt_info->active_attached->list) &&
+ sgt_info->flags == HYPER_DMABUF_SGT_UNEXPORTED) {
+ hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
+ hyper_dmabuf_remove_exported(id);
+ kfree(sgt_info);
+ }
+
break;
case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
@@ -18,6 +18,11 @@
* frame buffer) */
#define MAX_ALLOWED_NUM_PAGES_FOR_GREF_NUM_ARRAYS 4
+enum hyper_dmabuf_sgt_flags {
+ HYPER_DMABUF_SGT_INVALID = 0x10,
+ HYPER_DMABUF_SGT_UNEXPORTED,
+};
+
/* stack of mapped sgts */
struct sgt_list {
struct sg_table *sgt;
@@ -76,7 +81,7 @@ struct hyper_dmabuf_sgt_info {
struct attachment_list *active_attached;
struct kmap_vaddr_list *va_kmapped;
struct vmap_vaddr_list *va_vmapped;
-
+ int flags;
struct hyper_dmabuf_shared_pages_info shared_pages_info;
int private[4]; /* device specific info (e.g. image's meta info?) */
};
@@ -92,6 +97,8 @@ struct hyper_dmabuf_imported_sgt_info {
grant_ref_t gref; /* reference number of top level addressing page of shared pages */
struct sg_table *sgt; /* sgt pointer after importing buffer */
struct hyper_dmabuf_shared_pages_info shared_pages_info;
+ int flags;
+ int ref_count;
int private[4]; /* device specific info (e.g. image's meta info?) */
};