@@ -33,6 +33,7 @@ static int hyper_dmabuf_drv_init(void)
/* device structure initialization */
/* currently only does work-queue initialization */
hyper_dmabuf_private.work_queue = create_workqueue("hyper_dmabuf_wqueue");
+ hyper_dmabuf_private.domid = hyper_dmabuf_get_domid();
ret = hyper_dmabuf_table_init();
if (ret < 0) {
@@ -3,6 +3,7 @@
struct hyper_dmabuf_private {
struct device *device;
+ int domid;
struct workqueue_struct *work_queue;
};
@@ -13,6 +13,14 @@
#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+int dmabuf_refcount(struct dma_buf *dma_buf)
+{
+ if ((dma_buf != NULL) && (dma_buf->file != NULL))
+ return file_count(dma_buf->file);
+
+ return -1;
+}
+
/* return total number of pages referecned by a sgt
* for pre-calculation of # of pages behind a given sgt
*/
@@ -368,8 +376,8 @@ int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info) {
struct hyper_dmabuf_shared_pages_info *shared_pages_info = &sgt_info->shared_pages_info;
grant_ref_t *ref = shared_pages_info->top_level_page;
- int n_2nd_level_pages = (sgt_info->active_sgts->sgt->nents/REFS_PER_PAGE +
- ((sgt_info->active_sgts->sgt->nents % REFS_PER_PAGE) ? 1: 0));
+ int n_2nd_level_pages = (sgt_info->nents/REFS_PER_PAGE +
+ ((sgt_info->nents % REFS_PER_PAGE) ? 1: 0));
if (shared_pages_info->data_refs == NULL ||
@@ -388,26 +396,28 @@ int hyper_dmabuf_cleanup_gref_table(struct hyper_dmabuf_sgt_info *sgt_info) {
if (!gnttab_end_foreign_access_ref(ref[i], 1)) {
printk("refid still in use!!!\n");
}
+ gnttab_free_grant_reference(ref[i]);
i++;
}
free_pages((unsigned long)shared_pages_info->addr_pages, i);
+
/* End foreign access for top level addressing page */
if (gnttab_query_foreign_access(shared_pages_info->top_level_ref)) {
printk("refid not shared !!\n");
}
- if (!gnttab_end_foreign_access_ref(shared_pages_info->top_level_ref, 1)) {
- printk("refid still in use!!!\n");
- }
gnttab_end_foreign_access_ref(shared_pages_info->top_level_ref, 1);
+ gnttab_free_grant_reference(shared_pages_info->top_level_ref);
+
free_pages((unsigned long)shared_pages_info->top_level_page, 1);
/* End foreign access for data pages, but do not free them */
- for (i = 0; i < sgt_info->active_sgts->sgt->nents; i++) {
+ for (i = 0; i < sgt_info->nents; i++) {
if (gnttab_query_foreign_access(shared_pages_info->data_refs[i])) {
printk("refid not shared !!\n");
}
gnttab_end_foreign_access_ref(shared_pages_info->data_refs[i], 0);
+ gnttab_free_grant_reference(shared_pages_info->data_refs[i]);
}
kfree(shared_pages_info->data_refs);
@@ -545,6 +555,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
return -EPERM;
}
+ /* force == 1 is not recommended */
while (!list_empty(&sgt_info->va_kmapped->list)) {
va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
struct kmap_vaddr_list, list);
@@ -598,6 +609,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
/* close connection to dma-buf completely */
dma_buf_put(sgt_info->dma_buf);
+ sgt_info->dma_buf = NULL;
kfree(sgt_info->active_sgts);
kfree(sgt_info->active_attached);
@@ -621,7 +633,7 @@ inline int hyper_dmabuf_sync_request_and_wait(int id, int ops)
hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);
/* send request and wait for a response */
- ret = hyper_dmabuf_send_request(HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(id), req, true);
+ ret = hyper_dmabuf_send_request(HYPER_DMABUF_DOM_ID(id), req, true);
kfree(req);
@@ -737,30 +749,33 @@ static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
}
}
-static void hyper_dmabuf_ops_release(struct dma_buf *dmabuf)
+static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
{
struct hyper_dmabuf_imported_sgt_info *sgt_info;
int ret;
+ int final_release;
- if (!dmabuf->priv)
+ if (!dma_buf->priv)
return;
- sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
+ sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dma_buf->priv;
- if (sgt_info) {
- /* dmabuf fd is being released - decrease refcount */
- sgt_info->ref_count--;
+ final_release = sgt_info && !sgt_info->valid &&
+ !dmabuf_refcount(sgt_info->dma_buf);
- /* if no one else in that domain is using that buffer, unmap it for now */
- if (sgt_info->ref_count == 0) {
- hyper_dmabuf_cleanup_imported_pages(sgt_info);
- hyper_dmabuf_free_sgt(sgt_info->sgt);
- sgt_info->sgt = NULL;
- }
+ if (!dmabuf_refcount(sgt_info->dma_buf)) {
+ sgt_info->dma_buf = NULL;
}
- ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
- HYPER_DMABUF_OPS_RELEASE);
+ if (final_release) {
+ hyper_dmabuf_cleanup_imported_pages(sgt_info);
+ hyper_dmabuf_free_sgt(sgt_info->sgt);
+ ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
+ HYPER_DMABUF_OPS_RELEASE_FINAL);
+ } else {
+ ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
+ HYPER_DMABUF_OPS_RELEASE);
+ }
if (ret < 0) {
printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
@@ -770,8 +785,7 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dmabuf)
* Check if buffer is still valid and if not remove it from imported list.
* That has to be done after sending sync request
*/
- if (sgt_info && sgt_info->ref_count == 0 &&
- sgt_info->flags == HYPER_DMABUF_SGT_INVALID) {
+ if (final_release) {
hyper_dmabuf_remove_imported(sgt_info->hyper_dmabuf_id);
kfree(sgt_info);
}
@@ -962,23 +976,21 @@ static const struct dma_buf_ops hyper_dmabuf_ops = {
/* exporting dmabuf as fd */
int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags)
{
- int fd;
- struct dma_buf* dmabuf;
+ int fd = -1;
/* call hyper_dmabuf_export_dmabuf and create
* and bind a handle for it then release
*/
- dmabuf = hyper_dmabuf_export_dma_buf(dinfo);
-
- fd = dma_buf_fd(dmabuf, flags);
+ hyper_dmabuf_export_dma_buf(dinfo);
- /* dmabuf fd is exported for given bufer - increase its ref count */
- dinfo->ref_count++;
+ if (dinfo->dma_buf) {
+ fd = dma_buf_fd(dinfo->dma_buf, flags);
+ }
return fd;
}
-struct dma_buf* hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo)
+void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo)
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -989,5 +1001,5 @@ struct dma_buf* hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_inf
exp_info.flags = /* not sure about flag */0;
exp_info.priv = dinfo;
- return dma_buf_export(&exp_info);
+ dinfo->dma_buf = dma_buf_export(&exp_info);
}
@@ -1,6 +1,7 @@
#ifndef __HYPER_DMABUF_IMP_H__
#define __HYPER_DMABUF_IMP_H__
+#include <linux/fs.h>
#include "hyper_dmabuf_struct.h"
/* extract pages directly from struct sg_table */
@@ -30,6 +31,8 @@ void hyper_dmabuf_free_sgt(struct sg_table *sgt);
int hyper_dmabuf_export_fd(struct hyper_dmabuf_imported_sgt_info *dinfo, int flags);
-struct dma_buf* hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo);
+void hyper_dmabuf_export_dma_buf(struct hyper_dmabuf_imported_sgt_info *dinfo);
+
+int dmabuf_refcount(struct dma_buf *dma_buf);
#endif /* __HYPER_DMABUF_IMP_H__ */
@@ -107,10 +107,12 @@ static int hyper_dmabuf_export_remote(void *data)
}
/* we check if this specific attachment was already exported
- * to the same domain and if yes, it returns hyper_dmabuf_id
- * of pre-exported sgt */
- ret = hyper_dmabuf_find_id(dma_buf, export_remote_attr->remote_domain);
- if (ret != -1) {
+ * to the same domain and if yes and it's valid sgt_info,
+ * it returns hyper_dmabuf_id of pre-exported sgt_info
+ */
+ ret = hyper_dmabuf_find_id_exported(dma_buf, export_remote_attr->remote_domain);
+ sgt_info = hyper_dmabuf_find_exported(ret);
+ if (ret != -1 && sgt_info->valid) {
dma_buf_put(dma_buf);
export_remote_attr->hyper_dmabuf_id = ret;
return 0;
@@ -135,12 +137,13 @@ static int hyper_dmabuf_export_remote(void *data)
/* TODO: We might need to consider using port number on event channel? */
sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
sgt_info->dma_buf = dma_buf;
- sgt_info->flags = 0;
+ sgt_info->valid = 1;
+ sgt_info->importer_exported = 0;
- sgt_info->active_sgts = kcalloc(1, sizeof(struct sgt_list), GFP_KERNEL);
- sgt_info->active_attached = kcalloc(1, sizeof(struct attachment_list), GFP_KERNEL);
- sgt_info->va_kmapped = kcalloc(1, sizeof(struct kmap_vaddr_list), GFP_KERNEL);
- sgt_info->va_vmapped = kcalloc(1, sizeof(struct vmap_vaddr_list), GFP_KERNEL);
+ sgt_info->active_sgts = kmalloc(sizeof(struct sgt_list), GFP_KERNEL);
+ sgt_info->active_attached = kmalloc(sizeof(struct attachment_list), GFP_KERNEL);
+ sgt_info->va_kmapped = kmalloc(sizeof(struct kmap_vaddr_list), GFP_KERNEL);
+ sgt_info->va_vmapped = kmalloc(sizeof(struct vmap_vaddr_list), GFP_KERNEL);
sgt_info->active_sgts->sgt = sgt;
sgt_info->active_attached->attach = attachment;
@@ -159,6 +162,8 @@ static int hyper_dmabuf_export_remote(void *data)
if (page_info == NULL)
goto fail_export;
+ sgt_info->nents = page_info->nents;
+
/* now register it to export list */
hyper_dmabuf_register_exported(sgt_info);
@@ -220,6 +225,8 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
{
struct ioctl_hyper_dmabuf_export_fd *export_fd_attr;
struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+ struct hyper_dmabuf_ring_rq *req;
+ int operand;
int ret = 0;
if (!data) {
@@ -234,35 +241,38 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (imported_sgt_info == NULL) /* can't find sgt from the table */
return -1;
- /*
- * Check if buffer was not unexported by exporter.
- * In such exporter is waiting for importer to finish using that buffer,
- * so do not allow export fd of such buffer anymore.
- */
- if (imported_sgt_info->flags == HYPER_DMABUF_SGT_INVALID) {
- return -EINVAL;
- }
-
printk("%s Found buffer gref %d off %d last len %d nents %d domain %d\n", __func__,
imported_sgt_info->gref, imported_sgt_info->frst_ofst,
imported_sgt_info->last_len, imported_sgt_info->nents,
- HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(imported_sgt_info->hyper_dmabuf_id));
+ HYPER_DMABUF_DOM_ID(imported_sgt_info->hyper_dmabuf_id));
if (!imported_sgt_info->sgt) {
imported_sgt_info->sgt = hyper_dmabuf_map_pages(imported_sgt_info->gref,
imported_sgt_info->frst_ofst,
imported_sgt_info->last_len,
imported_sgt_info->nents,
- HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(imported_sgt_info->hyper_dmabuf_id),
+ HYPER_DMABUF_DOM_ID(imported_sgt_info->hyper_dmabuf_id),
&imported_sgt_info->shared_pages_info);
- if (!imported_sgt_info->sgt) {
- printk("Failed to create sgt\n");
+
+ /* send notification for first export_fd to exporter */
+ operand = imported_sgt_info->hyper_dmabuf_id;
+ req = kcalloc(1, sizeof(*req), GFP_KERNEL);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_FIRST_EXPORT, &operand);
+
+ ret = hyper_dmabuf_send_request(HYPER_DMABUF_DOM_ID(operand), req, false);
+
+ if (!imported_sgt_info->sgt || ret) {
+ kfree(req);
+ printk("Failed to create sgt or notify exporter\n");
return -EINVAL;
}
+ kfree(req);
}
export_fd_attr->fd = hyper_dmabuf_export_fd(imported_sgt_info, export_fd_attr->flags);
- if (export_fd_attr < 0) {
+
+ if (export_fd_attr->fd < 0) {
+ /* fail to get fd */
ret = export_fd_attr->fd;
}
@@ -309,23 +319,23 @@ static int hyper_dmabuf_unexport(void *data)
/* free msg */
kfree(req);
+ /* no longer valid */
+ sgt_info->valid = 0;
+
/*
- * Check if any importer is still using buffer, if not clean it up completly,
- * otherwise mark buffer as unexported and postpone its cleanup to time when
- * importer will finish using it.
+ * Immediately clean-up if it has never been exported by importer
+ * (so no SGT is constructed on importer).
+ * Clean it up later in remote sync when the final release ops
+ * is called (importer does this only when there is
+ * no consumer of locally exported FDs)
*/
- if (list_empty(&sgt_info->active_sgts->list) &&
- list_empty(&sgt_info->active_attached->list)) {
+ printk("before claning up buffer completly\n");
+ if (!sgt_info->importer_exported) {
hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
hyper_dmabuf_remove_exported(unexport_attr->hyper_dmabuf_id);
kfree(sgt_info);
- } else {
- sgt_info->flags = HYPER_DMABUF_SGT_UNEXPORTED;
}
- /* TODO: should we mark here that buffer was destroyed immiedetaly or that was postponed ? */
- unexport_attr->status = ret;
-
return ret;
}
@@ -369,7 +379,7 @@ static int hyper_dmabuf_query(void *data)
if (sgt_info) {
query_attr->info = 0xFFFFFFFF; /* myself */
} else {
- query_attr->info = (HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(imported_sgt_info->hyper_dmabuf_id));
+ query_attr->info = (HYPER_DMABUF_DOM_ID(imported_sgt_info->hyper_dmabuf_id));
}
break;
@@ -65,7 +65,7 @@ struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(int id)
}
/* search for pre-exported sgt and return id of it if it exist */
-int hyper_dmabuf_find_id(struct dma_buf *dmabuf, int domid)
+int hyper_dmabuf_find_id_exported(struct dma_buf *dmabuf, int domid)
{
struct hyper_dmabuf_info_entry_exported *info_entry;
int bkt;
@@ -25,7 +25,7 @@ int hyper_dmabuf_table_destroy(void);
int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info);
/* search for pre-exported sgt and return id of it if it exist */
-int hyper_dmabuf_find_id(struct dma_buf *dmabuf, int domid);
+int hyper_dmabuf_find_id_exported(struct dma_buf *dmabuf, int domid);
int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info);
@@ -55,6 +55,14 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
request->operands[0] = operands[0];
break;
+ case HYPER_DMABUF_FIRST_EXPORT:
+ /* dmabuf fd is being created on the importer's side for the first time */
+ /* command : HYPER_DMABUF_FIRST_EXPORT,
+ * operands0 : hyper_dmabuf_id
+ */
+ request->operands[0] = operands[0];
+ break;
+
case HYPER_DMABUF_OPS_TO_REMOTE:
/* notifying dmabuf map/unmap to importer (probably not needed) */
/* for dmabuf synchronization */
@@ -81,6 +89,7 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_ring_rq *request,
void cmd_process_work(struct work_struct *work)
{
struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+ struct hyper_dmabuf_sgt_info *sgt_info;
struct cmd_process *proc = container_of(work, struct cmd_process, work);
struct hyper_dmabuf_ring_rq *req;
int domid;
@@ -117,11 +126,25 @@ void cmd_process_work(struct work_struct *work)
for (i=0; i<4; i++)
imported_sgt_info->private[i] = req->operands[5+i];
- imported_sgt_info->flags = 0;
- imported_sgt_info->ref_count = 0;
+ imported_sgt_info->valid = 1;
hyper_dmabuf_register_imported(imported_sgt_info);
break;
+ case HYPER_DMABUF_FIRST_EXPORT:
+ /* find a corresponding SGT for the id */
+ sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
+
+ if (!sgt_info) {
+ printk("critical err: requested sgt_info can't be found %d\n", req->operands[0]);
+ break;
+ }
+
+ if (sgt_info->importer_exported)
+ printk("warning: exported flag is not supposed to be 1 already\n");
+
+ sgt_info->importer_exported = 1;
+ break;
+
case HYPER_DMABUF_OPS_TO_REMOTE:
/* notifying dmabuf map/unmap to importer (probably not needed) */
/* for dmabuf synchronization */
@@ -170,13 +193,14 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_ring_rq *req)
hyper_dmabuf_find_imported(req->operands[0]);
if (imported_sgt_info) {
- /* check if buffer is still mapped and in use */
- if (imported_sgt_info->sgt) {
+ /* if anything is still using dma_buf */
+ if (imported_sgt_info->dma_buf &&
+ dmabuf_refcount(imported_sgt_info->dma_buf) > 0) {
/*
* Buffer is still in use, just mark that it should
* not be allowed to export its fd anymore.
*/
- imported_sgt_info->flags = HYPER_DMABUF_SGT_INVALID;
+ imported_sgt_info->valid = 0;
} else {
/* No one is using buffer, remove it from imported list */
hyper_dmabuf_remove_imported(req->operands[0]);
@@ -3,6 +3,7 @@
enum hyper_dmabuf_command {
HYPER_DMABUF_EXPORT = 0x10,
+ HYPER_DMABUF_FIRST_EXPORT,
HYPER_DMABUF_NOTIFY_UNEXPORT,
HYPER_DMABUF_OPS_TO_REMOTE,
HYPER_DMABUF_OPS_TO_SOURCE,
@@ -14,6 +15,7 @@ enum hyper_dmabuf_ops {
HYPER_DMABUF_OPS_MAP,
HYPER_DMABUF_OPS_UNMAP,
HYPER_DMABUF_OPS_RELEASE,
+ HYPER_DMABUF_OPS_RELEASE_FINAL,
HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS,
HYPER_DMABUF_OPS_END_CPU_ACCESS,
HYPER_DMABUF_OPS_KMAP_ATOMIC,
@@ -114,13 +114,13 @@ int hyper_dmabuf_remote_sync(int id, int ops)
kfree(sgtl);
break;
- case HYPER_DMABUF_OPS_RELEASE:
+ case HYPER_DMABUF_OPS_RELEASE_FINAL:
/*
* Importer just released buffer fd, check if there is any other importer still using it.
* If not and buffer was unexported, clean up shared data and remove that buffer.
*/
- if (list_empty(&sgt_info->active_sgts->list) && list_empty(&sgt_info->active_attached->list) &&
- sgt_info->flags == HYPER_DMABUF_SGT_UNEXPORTED) {
+ if (list_empty(&sgt_info->active_attached->list) &&
+ !sgt_info->valid) {
hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
hyper_dmabuf_remove_exported(id);
kfree(sgt_info);
@@ -128,6 +128,10 @@ int hyper_dmabuf_remote_sync(int id, int ops)
break;
+ case HYPER_DMABUF_OPS_RELEASE:
+ /* place holder */
+ break;
+
case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
ret = dma_buf_begin_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
if (!ret) {
@@ -6,10 +6,10 @@
/* Importer combine source domain id with given hyper_dmabuf_id
* to make it unique in case there are multiple exporters */
-#define HYPER_DMABUF_ID_IMPORTER(sdomain, id) \
- ((((sdomain) & 0xFF) << 24) | ((id) & 0xFFFFFF))
+#define HYPER_DMABUF_ID_IMPORTER(domid, id) \
+ ((((domid) & 0xFF) << 24) | ((id) & 0xFFFFFF))
-#define HYPER_DMABUF_ID_IMPORTER_GET_SDOMAIN_ID(id) \
+#define HYPER_DMABUF_DOM_ID(id) \
(((id) >> 24) & 0xFF)
/* each grant_ref_t is 4 bytes, so total 4096 grant_ref_t can be
@@ -18,11 +18,6 @@
* frame buffer) */
#define MAX_ALLOWED_NUM_PAGES_FOR_GREF_NUM_ARRAYS 4
-enum hyper_dmabuf_sgt_flags {
- HYPER_DMABUF_SGT_INVALID = 0x10,
- HYPER_DMABUF_SGT_UNEXPORTED,
-};
-
/* stack of mapped sgts */
struct sgt_list {
struct sg_table *sgt;
@@ -77,11 +72,13 @@ struct hyper_dmabuf_sgt_info {
int hyper_dmabuf_rdomain; /* domain importing this sgt */
struct dma_buf *dma_buf; /* needed to store this for freeing it later */
+ int nents; /* number of pages, which may be different than sgt->nents */
struct sgt_list *active_sgts;
struct attachment_list *active_attached;
struct kmap_vaddr_list *va_kmapped;
struct vmap_vaddr_list *va_vmapped;
- int flags;
+ bool valid;
+ bool importer_exported; /* exported locally on importer's side */
struct hyper_dmabuf_shared_pages_info shared_pages_info;
int private[4]; /* device specific info (e.g. image's meta info?) */
};
@@ -95,10 +92,10 @@ struct hyper_dmabuf_imported_sgt_info {
int last_len; /* length of data in the last shared page */
int nents; /* number of pages to be shared */
grant_ref_t gref; /* reference number of top level addressing page of shared pages */
+ struct dma_buf *dma_buf;
struct sg_table *sgt; /* sgt pointer after importing buffer */
struct hyper_dmabuf_shared_pages_info shared_pages_info;
- int flags;
- int ref_count;
+ bool valid;
int private[4]; /* device specific info (e.g. image's meta info?) */
};
@@ -456,13 +456,12 @@ static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info)
do {
rc = ring->req_cons;
rp = ring->sring->req_prod;
-
+ more_to_do = 0;
while (rc != rp) {
if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
break;
memcpy(&req, RING_GET_REQUEST(ring, rc), sizeof(req));
- printk("Got request\n");
ring->req_cons = ++rc;
ret = hyper_dmabuf_msg_parse(ring_info->sdomain, &req);
@@ -479,13 +478,11 @@ static irqreturn_t hyper_dmabuf_back_ring_isr(int irq, void *info)
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
if (notify) {
- printk("Notyfing\n");
notify_remote_via_irq(ring_info->irq);
}
}
RING_FINAL_CHECK_FOR_REQUESTS(ring, more_to_do);
- printk("Final check for requests %d\n", more_to_do);
}
} while (more_to_do);
@@ -541,7 +538,6 @@ static irqreturn_t hyper_dmabuf_front_ring_isr(int irq, void *info)
if (i != ring->req_prod_pvt) {
RING_FINAL_CHECK_FOR_RESPONSES(ring, more_to_do);
- printk("more to do %d\n", more_to_do);
} else {
ring->sring->rsp_event = i+1;
}