@@ -30,9 +30,9 @@
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/device.h>
+#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_conf.h"
#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_list.h"
#include "hyper_dmabuf_id.h"
@@ -26,11 +26,12 @@
#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
#include <linux/device.h>
+#include <xen/hyper_dmabuf.h>
struct hyper_dmabuf_req;
struct list_reusable_id {
- int id;
+ hyper_dmabuf_id_t hid;
struct list_head list;
};
@@ -28,13 +28,14 @@
#include <linux/list.h>
#include <linux/slab.h>
-#include "hyper_dmabuf_msg.h"
+#include <linux/random.h>
#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_msg.h"
extern struct hyper_dmabuf_private hyper_dmabuf_private;
-void store_reusable_id(int id)
+void store_reusable_hid(hyper_dmabuf_id_t hid)
{
struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
struct list_reusable_id *new_reusable;
@@ -47,15 +48,15 @@ void store_reusable_id(int id)
return;
}
- new_reusable->id = id;
+ new_reusable->hid = hid;
list_add(&new_reusable->list, &reusable_head->list);
}
-static int retrieve_reusable_id(void)
+static hyper_dmabuf_id_t retrieve_reusable_hid(void)
{
struct list_reusable_id *reusable_head = hyper_dmabuf_private.id_queue;
- int id;
+ hyper_dmabuf_id_t hid = {-1, {0,0,0}};
/* check there is reusable id */
if (!list_empty(&reusable_head->list)) {
@@ -64,12 +65,11 @@ static int retrieve_reusable_id(void)
list);
list_del(&reusable_head->list);
- id = reusable_head->id;
+ hid = reusable_head->hid;
kfree(reusable_head);
- return id;
}
- return -ENOENT;
+ return hid;
}
void destroy_reusable_list(void)
@@ -92,31 +92,50 @@ void destroy_reusable_list(void)
}
}
-int hyper_dmabuf_get_id(void)
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void)
{
- static int id = 0;
+ static int count = 0;
+ hyper_dmabuf_id_t hid;
struct list_reusable_id *reusable_head;
- int ret;
- /* first cla to hyper_dmabuf_get_id */
- if (id == 0) {
+ /* first call to hyper_dmabuf_get_hid */
+ if (count == 0) {
reusable_head = kmalloc(sizeof(*reusable_head), GFP_KERNEL);
if (!reusable_head) {
dev_err(hyper_dmabuf_private.device,
"No memory left to be allocated\n");
- return -ENOMEM;
+ return (hyper_dmabuf_id_t){-1, {0,0,0}};
}
- reusable_head->id = -1; /* list head have invalid id */
+ reusable_head->hid.id = -1; /* list head has an invalid id */
INIT_LIST_HEAD(&reusable_head->list);
hyper_dmabuf_private.id_queue = reusable_head;
}
- ret = retrieve_reusable_id();
+ hid = retrieve_reusable_hid();
- if (ret < 0 && id < HYPER_DMABUF_ID_MAX)
- return HYPER_DMABUF_ID_CREATE(hyper_dmabuf_private.domid, id++);
+ /* create a new HID only if nothing is in the reusable id queue
+ * and count is less than the maximum allowed
+ */
+ if (hid.id == -1 && count < HYPER_DMABUF_ID_MAX) {
+ hid.id = HYPER_DMABUF_ID_CREATE(hyper_dmabuf_private.domid, count++);
+ /* random data embedded in the id for security */
+ get_random_bytes(&hid.rng_key[0], 12);
+ }
+
+ return hid;
+}
+
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2)
+{
+ int i;
+
+ /* compare keys */
+ for (i=0; i<3; i++) {
+ if (hid1.rng_key[i] != hid2.rng_key[i])
+ return false;
+ }
- return ret;
+ return true;
}
@@ -25,24 +25,23 @@
#ifndef __HYPER_DMABUF_ID_H__
#define __HYPER_DMABUF_ID_H__
-/* Importer combine source domain id with given hyper_dmabuf_id
- * to make it unique in case there are multiple exporters */
+#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
+ ((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
-#define HYPER_DMABUF_ID_CREATE(domid, id) \
- ((((domid) & 0xFF) << 24) | ((id) & 0xFFFFFF))
-
-#define HYPER_DMABUF_DOM_ID(id) \
- (((id) >> 24) & 0xFF)
+#define HYPER_DMABUF_DOM_ID(hid) \
+ (((hid.id) >> 24) & 0xFF)
/* currently maximum number of buffers shared
* at any given moment is limited to 1000
*/
#define HYPER_DMABUF_ID_MAX 1000
-void store_reusable_id(int id);
+void store_reusable_hid(hyper_dmabuf_id_t hid);
void destroy_reusable_list(void);
-int hyper_dmabuf_get_id(void);
+hyper_dmabuf_id_t hyper_dmabuf_get_hid(void);
+
+bool hyper_dmabuf_hid_keycomp(hyper_dmabuf_id_t hid1, hyper_dmabuf_id_t hid2);
#endif /*__HYPER_DMABUF_ID_H*/
@@ -33,11 +33,11 @@
#include <linux/dma-buf.h>
#include <xen/grant_table.h>
#include <asm/xen/page.h>
+#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_struct.h"
#include "hyper_dmabuf_imp.h"
#include "hyper_dmabuf_id.h"
#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_list.h"
extern struct hyper_dmabuf_private hyper_dmabuf_private;
@@ -258,15 +258,20 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
#define WAIT_AFTER_SYNC_REQ 0
-inline int hyper_dmabuf_sync_request(int id, int dmabuf_ops)
+inline int hyper_dmabuf_sync_request(hyper_dmabuf_id_t hid, int dmabuf_ops)
{
struct hyper_dmabuf_req *req;
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
- int operands[2];
+ int operands[5];
+ int i;
int ret;
- operands[0] = id;
- operands[1] = dmabuf_ops;
+ operands[0] = hid.id;
+
+ for (i=0; i<3; i++)
+ operands[i+1] = hid.rng_key[i];
+
+ operands[4] = dmabuf_ops;
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
@@ -279,7 +284,7 @@ inline int hyper_dmabuf_sync_request(int id, int dmabuf_ops)
hyper_dmabuf_create_request(req, HYPER_DMABUF_OPS_TO_SOURCE, &operands[0]);
/* send request and wait for a response */
- ret = ops->send_req(HYPER_DMABUF_DOM_ID(id), req, WAIT_AFTER_SYNC_REQ);
+ ret = ops->send_req(HYPER_DMABUF_DOM_ID(hid), req, WAIT_AFTER_SYNC_REQ);
kfree(req);
@@ -297,7 +302,7 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, struct device* dev,
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_ATTACH);
if (ret < 0) {
@@ -319,7 +324,7 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct dma_buf_attac
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)attach->dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_DETACH);
if (ret < 0) {
@@ -358,7 +363,7 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
goto err_free_sg;
}
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_MAP);
kfree(page_info->pages);
@@ -381,8 +386,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
}
static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
- struct sg_table *sg,
- enum dma_data_direction dir)
+ struct sg_table *sg,
+ enum dma_data_direction dir)
{
struct hyper_dmabuf_imported_sgt_info *sgt_info;
int ret;
@@ -397,7 +402,7 @@ static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
sg_free_table(sg);
kfree(sg);
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_UNMAP);
if (ret < 0) {
@@ -437,7 +442,7 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
final_release = sgt_info && !sgt_info->valid &&
!sgt_info->num_importers;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_RELEASE);
if (ret < 0) {
dev_warn(hyper_dmabuf_private.device,
@@ -449,7 +454,7 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
* That has to be done after sending sync request
*/
if (final_release) {
- hyper_dmabuf_remove_imported(sgt_info->hyper_dmabuf_id);
+ hyper_dmabuf_remove_imported(sgt_info->hid);
kfree(sgt_info);
}
}
@@ -464,7 +469,7 @@ static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_da
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -484,7 +489,7 @@ static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_END_CPU_ACCESS);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -504,7 +509,7 @@ static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_KMAP_ATOMIC);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -524,7 +529,7 @@ static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -542,7 +547,7 @@ static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_KMAP);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -562,7 +567,7 @@ static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_KUNMAP);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -580,7 +585,7 @@ static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_MMAP);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -600,7 +605,7 @@ static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_VMAP);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -620,7 +625,7 @@ static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
sgt_info = (struct hyper_dmabuf_imported_sgt_info *)dmabuf->priv;
- ret = hyper_dmabuf_sync_request(sgt_info->hyper_dmabuf_id,
+ ret = hyper_dmabuf_sync_request(sgt_info->hid,
HYPER_DMABUF_OPS_VUNMAP);
if (ret < 0) {
dev_err(hyper_dmabuf_private.device,
@@ -35,13 +35,12 @@
#include <linux/dma-buf.h>
#include <linux/delay.h>
#include <linux/list.h>
-#include <xen/hyper_dmabuf.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_id.h"
#include "hyper_dmabuf_struct.h"
#include "hyper_dmabuf_ioctl.h"
#include "hyper_dmabuf_list.h"
#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_drv.h"
-#include "hyper_dmabuf_id.h"
#include "hyper_dmabuf_imp.h"
#include "hyper_dmabuf_query.h"
@@ -93,6 +92,8 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
struct hyper_dmabuf_sgt_info *sgt_info;
struct hyper_dmabuf_req *req;
int operands[MAX_NUMBER_OF_OPERANDS];
+ hyper_dmabuf_id_t hid;
+ int i;
int ret = 0;
if (!data) {
@@ -113,25 +114,27 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
* to the same domain and if yes and it's valid sgt_info,
* it returns hyper_dmabuf_id of pre-exported sgt_info
*/
- ret = hyper_dmabuf_find_id_exported(dma_buf, export_remote_attr->remote_domain);
- sgt_info = hyper_dmabuf_find_exported(ret);
- if (ret != -ENOENT && sgt_info != NULL) {
- if (sgt_info->valid) {
- /*
- * Check if unexport is already scheduled for that buffer,
- * if so try to cancel it. If that will fail, buffer needs
- * to be reexport once again.
- */
- if (sgt_info->unexport_scheduled) {
- if (!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
- dma_buf_put(dma_buf);
- goto reexport;
+ hid = hyper_dmabuf_find_hid_exported(dma_buf, export_remote_attr->remote_domain);
+ if (hid.id != -1) {
+ sgt_info = hyper_dmabuf_find_exported(hid);
+ if (sgt_info != NULL) {
+ if (sgt_info->valid) {
+ /*
+ * Check if unexport is already scheduled for that buffer,
+ * if so try to cancel it. If that fails, the buffer needs
+ * to be re-exported once again.
+ */
+ if (sgt_info->unexport_scheduled) {
+ if (!cancel_delayed_work_sync(&sgt_info->unexport_work)) {
+ dma_buf_put(dma_buf);
+ goto reexport;
+ }
+ sgt_info->unexport_scheduled = 0;
}
- sgt_info->unexport_scheduled = 0;
+ dma_buf_put(dma_buf);
+ export_remote_attr->hid = hid;
+ return 0;
}
- dma_buf_put(dma_buf);
- export_remote_attr->hyper_dmabuf_id = ret;
- return 0;
}
}
@@ -142,11 +145,6 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
return PTR_ERR(attachment);
}
- /* Clear ret, as that will cause whole ioctl to return failure
- * to userspace, which is not true
- */
- ret = 0;
-
sgt = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
@@ -161,7 +159,15 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
return -ENOMEM;
}
- sgt_info->hyper_dmabuf_id = hyper_dmabuf_get_id();
+ sgt_info->hid = hyper_dmabuf_get_hid();
+
+ /* no more exported dmabuf allowed */
+ if (sgt_info->hid.id == -1) {
+ dev_err(hyper_dmabuf_private.device,
+ "exceeded the maximum number of dmabufs that can be exported\n");
+ /* TODO: Cleanup sgt */
+ return -ENOMEM;
+ }
/* TODO: We might need to consider using port number on event channel? */
sgt_info->hyper_dmabuf_rdomain = export_remote_attr->remote_domain;
@@ -198,8 +204,8 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
sgt_info->active_sgts->sgt = sgt;
sgt_info->active_attached->attach = attachment;
- sgt_info->va_kmapped->vaddr = NULL; /* first vaddr is NULL */
- sgt_info->va_vmapped->vaddr = NULL; /* first vaddr is NULL */
+ sgt_info->va_kmapped->vaddr = NULL;
+ sgt_info->va_vmapped->vaddr = NULL;
/* initialize list of sgt, attachment and vaddr for dmabuf sync
* via shadow dma-buf
@@ -221,23 +227,27 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
hyper_dmabuf_register_exported(sgt_info);
page_info->hyper_dmabuf_rdomain = sgt_info->hyper_dmabuf_rdomain;
- page_info->hyper_dmabuf_id = sgt_info->hyper_dmabuf_id; /* may not be needed */
+ page_info->hid = sgt_info->hid; /* may not be needed */
- export_remote_attr->hyper_dmabuf_id = sgt_info->hyper_dmabuf_id;
+ export_remote_attr->hid = sgt_info->hid;
/* now create request for importer via ring */
- operands[0] = page_info->hyper_dmabuf_id;
- operands[1] = page_info->nents;
- operands[2] = page_info->frst_ofst;
- operands[3] = page_info->last_len;
- operands[4] = ops->share_pages (page_info->pages, export_remote_attr->remote_domain,
+ operands[0] = page_info->hid.id;
+
+ for (i=0; i<3; i++)
+ operands[i+1] = page_info->hid.rng_key[i];
+
+ operands[4] = page_info->nents;
+ operands[5] = page_info->frst_ofst;
+ operands[6] = page_info->last_len;
+ operands[7] = ops->share_pages (page_info->pages, export_remote_attr->remote_domain,
page_info->nents, &sgt_info->refs_info);
- /* driver/application specific private info, max 32 bytes */
- operands[5] = export_remote_attr->private[0];
- operands[6] = export_remote_attr->private[1];
- operands[7] = export_remote_attr->private[2];
- operands[8] = export_remote_attr->private[3];
+ /* driver/application specific private info, max 4x4 bytes */
+ operands[8] = export_remote_attr->private[0];
+ operands[9] = export_remote_attr->private[1];
+ operands[10] = export_remote_attr->private[2];
+ operands[11] = export_remote_attr->private[3];
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
@@ -270,7 +280,7 @@ static int hyper_dmabuf_export_remote(struct file *filp, void *data)
kfree(req);
fail_map_req:
- hyper_dmabuf_remove_exported(sgt_info->hyper_dmabuf_id);
+ hyper_dmabuf_remove_exported(sgt_info->hid);
fail_export:
dma_buf_unmap_attachment(sgt_info->active_attached->attach,
@@ -298,7 +308,8 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
struct hyper_dmabuf_imported_sgt_info *sgt_info;
struct hyper_dmabuf_req *req;
struct page **data_pages;
- int operand;
+ int operands[4];
+ int i;
int ret = 0;
dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
@@ -311,7 +322,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
export_fd_attr = (struct ioctl_hyper_dmabuf_export_fd *)data;
/* look for dmabuf for the id */
- sgt_info = hyper_dmabuf_find_imported(export_fd_attr->hyper_dmabuf_id);
+ sgt_info = hyper_dmabuf_find_imported(export_fd_attr->hid);
/* can't find sgt from the table */
if (!sgt_info) {
@@ -324,9 +335,14 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
sgt_info->num_importers++;
/* send notification for export_fd to exporter */
- operand = sgt_info->hyper_dmabuf_id;
+ operands[0] = sgt_info->hid.id;
- dev_dbg(hyper_dmabuf_private.device, "Exporting fd of buffer %d\n", operand);
+ for (i=0; i<3; i++)
+ operands[i+1] = sgt_info->hid.rng_key[i];
+
+ dev_dbg(hyper_dmabuf_private.device, "Exporting fd of buffer {id:%d key:%d %d %d}\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
@@ -336,30 +352,37 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
return -ENOMEM;
}
- hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD, &operand);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD, &operands[0]);
- ret = ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, true);
+ ret = ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, true);
if (ret < 0) {
/* in case of timeout other end eventually will receive request, so we need to undo it */
- hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operand);
- ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, false);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operands[0]);
+ ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, false);
kfree(req);
dev_err(hyper_dmabuf_private.device, "Failed to create sgt or notify exporter\n");
sgt_info->num_importers--;
mutex_unlock(&hyper_dmabuf_private.lock);
return ret;
}
+
kfree(req);
if (ret == HYPER_DMABUF_REQ_ERROR) {
dev_err(hyper_dmabuf_private.device,
- "Buffer invalid %d, cannot import\n", operand);
+ "Buffer invalid {id:%d key:%d %d %d}, cannot import\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
+
sgt_info->num_importers--;
mutex_unlock(&hyper_dmabuf_private.lock);
return -EINVAL;
} else {
- dev_dbg(hyper_dmabuf_private.device, "Can import buffer %d\n", operand);
+ dev_dbg(hyper_dmabuf_private.device, "Can import buffer {id:%d key:%d %d %d}\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
+
ret = 0;
}
@@ -367,22 +390,29 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
"%s Found buffer gref %d off %d last len %d nents %d domain %d\n", __func__,
sgt_info->ref_handle, sgt_info->frst_ofst,
sgt_info->last_len, sgt_info->nents,
- HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id));
+ HYPER_DMABUF_DOM_ID(sgt_info->hid));
if (!sgt_info->sgt) {
dev_dbg(hyper_dmabuf_private.device,
- "%s buffer %d pages not mapped yet\n", __func__,sgt_info->hyper_dmabuf_id);
+ "%s buffer {id:%d key:%d %d %d} pages not mapped yet\n", __func__,
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
+
data_pages = ops->map_shared_pages(sgt_info->ref_handle,
- HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id),
+ HYPER_DMABUF_DOM_ID(sgt_info->hid),
sgt_info->nents,
&sgt_info->refs_info);
if (!data_pages) {
- dev_err(hyper_dmabuf_private.device, "Cannot map pages of buffer %d\n", operand);
+ dev_err(hyper_dmabuf_private.device,
+ "Cannot map pages of buffer {id:%d key:%d %d %d}\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
+
sgt_info->num_importers--;
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
- hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operand);
- ops->send_req(HYPER_DMABUF_DOM_ID(operand), req, false);
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT_FD_FAILED, &operands[0]);
+ ops->send_req(HYPER_DMABUF_DOM_ID(sgt_info->hid), req, false);
kfree(req);
mutex_unlock(&hyper_dmabuf_private.lock);
return -EINVAL;
@@ -401,6 +431,7 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
}
mutex_unlock(&hyper_dmabuf_private.lock);
+
dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return ret;
}
@@ -411,8 +442,8 @@ static int hyper_dmabuf_export_fd_ioctl(struct file *filp, void *data)
static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
{
struct hyper_dmabuf_req *req;
- int hyper_dmabuf_id;
- int ret;
+ int i, ret;
+ int operands[4];
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
struct hyper_dmabuf_sgt_info *sgt_info =
container_of(work, struct hyper_dmabuf_sgt_info, unexport_work.work);
@@ -420,10 +451,11 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
if (!sgt_info)
return;
- hyper_dmabuf_id = sgt_info->hyper_dmabuf_id;
-
dev_dbg(hyper_dmabuf_private.device,
- "Marking buffer %d as invalid\n", hyper_dmabuf_id);
+ "Marking buffer {id:%d key:%d %d %d} as invalid\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
+
/* no longer valid */
sgt_info->valid = 0;
@@ -435,12 +467,20 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
return;
}
- hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &hyper_dmabuf_id);
+ operands[0] = sgt_info->hid.id;
+
+ for (i=0; i<3; i++)
+ operands[i+1] = sgt_info->hid.rng_key[i];
+
+ hyper_dmabuf_create_request(req, HYPER_DMABUF_NOTIFY_UNEXPORT, &operands[0]);
/* Now send unexport request to remote domain, marking that buffer should not be used anymore */
ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, true);
if (ret < 0) {
- dev_err(hyper_dmabuf_private.device, "unexport message for buffer %d failed\n", hyper_dmabuf_id);
+ dev_err(hyper_dmabuf_private.device,
+ "unexport message for buffer {id:%d key:%d %d %d} failed\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
}
/* free msg */
@@ -456,12 +496,15 @@ static void hyper_dmabuf_delayed_unexport(struct work_struct *work)
*/
if (!sgt_info->importer_exported) {
dev_dbg(hyper_dmabuf_private.device,
- "claning up buffer %d completly\n", hyper_dmabuf_id);
+ "claning up buffer {id:%d key:%d %d %d} completly\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
+
hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
- hyper_dmabuf_remove_exported(hyper_dmabuf_id);
- kfree(sgt_info);
+ hyper_dmabuf_remove_exported(sgt_info->hid);
/* register hyper_dmabuf_id to the list for reuse */
- store_reusable_id(hyper_dmabuf_id);
+ store_reusable_hid(sgt_info->hid);
+ kfree(sgt_info);
}
}
@@ -482,9 +525,12 @@ static int hyper_dmabuf_unexport(struct file *filp, void *data)
unexport_attr = (struct ioctl_hyper_dmabuf_unexport *)data;
/* find dmabuf in export list */
- sgt_info = hyper_dmabuf_find_exported(unexport_attr->hyper_dmabuf_id);
+ sgt_info = hyper_dmabuf_find_exported(unexport_attr->hid);
- dev_dbg(hyper_dmabuf_private.device, "scheduling unexport of buffer %d\n", unexport_attr->hyper_dmabuf_id);
+ dev_dbg(hyper_dmabuf_private.device,
+ "scheduling unexport of buffer {id:%d key:%d %d %d}\n",
+ unexport_attr->hid.id, unexport_attr->hid.rng_key[0],
+ unexport_attr->hid.rng_key[1], unexport_attr->hid.rng_key[2]);
/* failed to find corresponding entry in export list */
if (sgt_info == NULL) {
@@ -518,8 +564,8 @@ static int hyper_dmabuf_query(struct file *filp, void *data)
query_attr = (struct ioctl_hyper_dmabuf_query *)data;
- sgt_info = hyper_dmabuf_find_exported(query_attr->hyper_dmabuf_id);
- imported_sgt_info = hyper_dmabuf_find_imported(query_attr->hyper_dmabuf_id);
+ sgt_info = hyper_dmabuf_find_exported(query_attr->hid);
+ imported_sgt_info = hyper_dmabuf_find_imported(query_attr->hid);
/* if dmabuf can't be found in both lists, return */
if (!(sgt_info && imported_sgt_info)) {
@@ -544,7 +590,7 @@ static int hyper_dmabuf_query(struct file *filp, void *data)
if (sgt_info) {
query_attr->info = 0xFFFFFFFF; /* myself */
} else {
- query_attr->info = (HYPER_DMABUF_DOM_ID(imported_sgt_info->hyper_dmabuf_id));
+ query_attr->info = HYPER_DMABUF_DOM_ID(imported_sgt_info->hid);
}
break;
@@ -674,10 +720,11 @@ static void hyper_dmabuf_emergency_release(struct hyper_dmabuf_sgt_info* sgt_inf
if (sgt_info->filp == filp) {
dev_dbg(hyper_dmabuf_private.device,
- "Executing emergency release of buffer %d\n",
- sgt_info->hyper_dmabuf_id);
+ "Executing emergency release of buffer {id:%d key:%d %d %d}\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0],
+ sgt_info->hid.rng_key[1], sgt_info->hid.rng_key[2]);
- unexport_attr.hyper_dmabuf_id = sgt_info->hyper_dmabuf_id;
+ unexport_attr.hid = sgt_info->hid;
unexport_attr.delay_ms = 0;
hyper_dmabuf_unexport(filp, &unexport_attr);
@@ -36,6 +36,7 @@
#include <linux/dma-buf.h>
#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_id.h"
extern struct hyper_dmabuf_private hyper_dmabuf_private;
@@ -51,13 +52,15 @@ static ssize_t hyper_dmabuf_imported_show(struct device *drv, struct device_attr
size_t total = 0;
hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node) {
- int id = info_entry->info->hyper_dmabuf_id;
+ hyper_dmabuf_id_t hid = info_entry->info->hid;
int nents = info_entry->info->nents;
bool valid = info_entry->info->valid;
int num_importers = info_entry->info->num_importers;
total += nents;
- count += scnprintf(buf + count, PAGE_SIZE - count, "id:%d, nents:%d, v:%c, numi:%d\n",
- id, nents, (valid ? 't' : 'f'), num_importers);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "hid:{id:%d keys:%d %d %d}, nents:%d, v:%c, numi:%d\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2],
+ nents, (valid ? 't' : 'f'), num_importers);
}
count += scnprintf(buf + count, PAGE_SIZE - count, "total nents: %lu\n",
total);
@@ -73,13 +76,15 @@ static ssize_t hyper_dmabuf_exported_show(struct device *drv, struct device_attr
size_t total = 0;
hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node) {
- int id = info_entry->info->hyper_dmabuf_id;
+ hyper_dmabuf_id_t hid = info_entry->info->hid;
int nents = info_entry->info->nents;
bool valid = info_entry->info->valid;
int importer_exported = info_entry->info->importer_exported;
total += nents;
- count += scnprintf(buf + count, PAGE_SIZE - count, "id:%d, nents:%d, v:%c, ie:%d\n",
- id, nents, (valid ? 't' : 'f'), importer_exported);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "hid:{hid:%d keys:%d %d %d}, nents:%d, v:%c, ie:%d\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2],
+ nents, (valid ? 't' : 'f'), importer_exported);
}
count += scnprintf(buf + count, PAGE_SIZE - count, "total nents: %lu\n",
total);
@@ -144,7 +149,7 @@ int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info)
info_entry->info = info;
hash_add(hyper_dmabuf_hash_exported, &info_entry->node,
- info_entry->info->hyper_dmabuf_id);
+ info_entry->info->hid.id);
return 0;
}
@@ -164,74 +169,102 @@ int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info)
info_entry->info = info;
hash_add(hyper_dmabuf_hash_imported, &info_entry->node,
- info_entry->info->hyper_dmabuf_id);
+ info_entry->info->hid.id);
return 0;
}
-struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(int id)
+struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid)
{
struct hyper_dmabuf_info_entry_exported *info_entry;
int bkt;
hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
- if(info_entry->info->hyper_dmabuf_id == id)
- return info_entry->info;
+ /* checking hid.id first */
+ if(info_entry->info->hid.id == hid.id) {
+ /* then key is compared */
+ if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid))
+ return info_entry->info;
+ /* if the keys don't match, the given HID is invalid, so return NULL */
+ else
+ break;
+ }
return NULL;
}
/* search for pre-exported sgt and return id of it if it exist */
-int hyper_dmabuf_find_id_exported(struct dma_buf *dmabuf, int domid)
+hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, int domid)
{
struct hyper_dmabuf_info_entry_exported *info_entry;
+ hyper_dmabuf_id_t hid = {-1, {0, 0, 0}};
int bkt;
hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
if(info_entry->info->dma_buf == dmabuf &&
info_entry->info->hyper_dmabuf_rdomain == domid)
- return info_entry->info->hyper_dmabuf_id;
+ return info_entry->info->hid;
- return -ENOENT;
+ return hid;
}
-struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(int id)
+struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid)
{
struct hyper_dmabuf_info_entry_imported *info_entry;
int bkt;
hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
- if(info_entry->info->hyper_dmabuf_id == id)
- return info_entry->info;
+ /* checking hid.id first */
+ if(info_entry->info->hid.id == hid.id) {
+ /* then key is compared */
+ if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid))
+ return info_entry->info;
+ /* if the keys don't match, the given HID is invalid, so return NULL */
+ else {
+ break;
+ }
+ }
return NULL;
}
-int hyper_dmabuf_remove_exported(int id)
+int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid)
{
struct hyper_dmabuf_info_entry_exported *info_entry;
int bkt;
hash_for_each(hyper_dmabuf_hash_exported, bkt, info_entry, node)
- if(info_entry->info->hyper_dmabuf_id == id) {
- hash_del(&info_entry->node);
- kfree(info_entry);
- return 0;
+ /* checking hid.id first */
+ if(info_entry->info->hid.id == hid.id) {
+ /* then key is compared */
+ if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid)) {
+ hash_del(&info_entry->node);
+ kfree(info_entry);
+ return 0;
+ } else {
+ break;
+ }
}
return -ENOENT;
}
-int hyper_dmabuf_remove_imported(int id)
+int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid)
{
struct hyper_dmabuf_info_entry_imported *info_entry;
int bkt;
hash_for_each(hyper_dmabuf_hash_imported, bkt, info_entry, node)
- if(info_entry->info->hyper_dmabuf_id == id) {
- hash_del(&info_entry->node);
- kfree(info_entry);
- return 0;
+ /* checking hid.id first */
+ if(info_entry->info->hid.id == hid.id) {
+ /* then key is compared */
+ if(hyper_dmabuf_hid_keycomp(info_entry->info->hid, hid)) {
+ hash_del(&info_entry->node);
+ kfree(info_entry);
+ return 0;
+ } else {
+ break;
+ }
}
return -ENOENT;
@@ -49,17 +49,17 @@ int hyper_dmabuf_table_destroy(void);
int hyper_dmabuf_register_exported(struct hyper_dmabuf_sgt_info *info);
/* search for pre-exported sgt and return id of it if it exist */
-int hyper_dmabuf_find_id_exported(struct dma_buf *dmabuf, int domid);
+hyper_dmabuf_id_t hyper_dmabuf_find_hid_exported(struct dma_buf *dmabuf, int domid);
int hyper_dmabuf_register_imported(struct hyper_dmabuf_imported_sgt_info* info);
-struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(int id);
+struct hyper_dmabuf_sgt_info *hyper_dmabuf_find_exported(hyper_dmabuf_id_t hid);
-struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(int id);
+struct hyper_dmabuf_imported_sgt_info *hyper_dmabuf_find_imported(hyper_dmabuf_id_t hid);
-int hyper_dmabuf_remove_exported(int id);
+int hyper_dmabuf_remove_exported(hyper_dmabuf_id_t hid);
-int hyper_dmabuf_remove_imported(int id);
+int hyper_dmabuf_remove_imported(hyper_dmabuf_id_t hid);
void hyper_dmabuf_foreach_exported(
void (*func)(struct hyper_dmabuf_sgt_info *, void *attr),
@@ -60,32 +60,36 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
case HYPER_DMABUF_EXPORT:
/* exporting pages for dmabuf */
/* command : HYPER_DMABUF_EXPORT,
- * operands0 : hyper_dmabuf_id
- * operands1 : number of pages to be shared
- * operands2 : offset of data in the first page
- * operands3 : length of data in the last page
- * operands4 : top-level reference number for shared pages
- * operands5~8 : Driver-specific private data (e.g. graphic buffer's meta info)
+ * operands0~3 : hyper_dmabuf_id
+ * operands4 : number of pages to be shared
+ * operands5 : offset of data in the first page
+ * operands6 : length of data in the last page
+ * operands7 : top-level reference number for shared pages
+ * operands8~11 : Driver-specific private data (e.g. graphic buffer's meta info)
*/
- for (i=0; i < 8; i++)
+ for (i=0; i < 12; i++)
req->operands[i] = operands[i];
break;
case HYPER_DMABUF_NOTIFY_UNEXPORT:
/* destroy sg_list for hyper_dmabuf_id on remote side */
/* command : DMABUF_DESTROY,
- * operands0 : hyper_dmabuf_id
+ * operands0~3 : hyper_dmabuf_id_t hid
*/
- req->operands[0] = operands[0];
+
+ for (i=0; i < 4; i++)
+ req->operands[i] = operands[i];
break;
case HYPER_DMABUF_EXPORT_FD:
case HYPER_DMABUF_EXPORT_FD_FAILED:
/* dmabuf fd is being created on imported side or importing failed */
/* command : HYPER_DMABUF_EXPORT_FD or HYPER_DMABUF_EXPORT_FD_FAILED,
- * operands0 : hyper_dmabuf_id
+ * operands0~3 : hyper_dmabuf_id
*/
- req->operands[0] = operands[0];
+
+ for (i=0; i < 4; i++)
+ req->operands[i] = operands[i];
break;
case HYPER_DMABUF_OPS_TO_REMOTE:
@@ -98,10 +102,10 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
/* notifying dmabuf map/unmap to exporter, map will make the driver to do shadow mapping
* or unmapping for synchronization with original exporter (e.g. i915) */
/* command : DMABUF_OPS_TO_SOURCE.
- * operands0 : hyper_dmabuf_id
- * operands1 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
+ * operands0~3 : hyper_dmabuf_id
+ * operands4 : map(=1)/unmap(=2)/attach(=3)/detach(=4)
*/
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 5; i++)
req->operands[i] = operands[i];
break;
@@ -126,12 +130,12 @@ void cmd_process_work(struct work_struct *work)
case HYPER_DMABUF_EXPORT:
/* exporting pages for dmabuf */
/* command : HYPER_DMABUF_EXPORT,
- * operands0 : hyper_dmabuf_id
- * operands1 : number of pages to be shared
- * operands2 : offset of data in the first page
- * operands3 : length of data in the last page
- * operands4 : top-level reference number for shared pages
- * operands5~8 : Driver-specific private data (e.g. graphic buffer's meta info)
+ * operands0~3 : hyper_dmabuf_id
+ * operands4 : number of pages to be shared
+ * operands5 : offset of data in the first page
+ * operands6 : length of data in the last page
+ * operands7 : top-level reference number for shared pages
+ * operands8~11 : Driver-specific private data (e.g. graphic buffer's meta info)
*/
imported_sgt_info = kcalloc(1, sizeof(*imported_sgt_info), GFP_KERNEL);
@@ -141,25 +145,31 @@ void cmd_process_work(struct work_struct *work)
break;
}
- imported_sgt_info->hyper_dmabuf_id = req->operands[0];
- imported_sgt_info->frst_ofst = req->operands[2];
- imported_sgt_info->last_len = req->operands[3];
- imported_sgt_info->nents = req->operands[1];
- imported_sgt_info->ref_handle = req->operands[4];
+ imported_sgt_info->hid.id = req->operands[0];
+
+ for (i=0; i<3; i++)
+ imported_sgt_info->hid.rng_key[i] = req->operands[i+1];
+
+ imported_sgt_info->nents = req->operands[4];
+ imported_sgt_info->frst_ofst = req->operands[5];
+ imported_sgt_info->last_len = req->operands[6];
+ imported_sgt_info->ref_handle = req->operands[7];
dev_dbg(hyper_dmabuf_private.device, "DMABUF was exported\n");
- dev_dbg(hyper_dmabuf_private.device, "\thyper_dmabuf_id %d\n", req->operands[0]);
- dev_dbg(hyper_dmabuf_private.device, "\tnents %d\n", req->operands[1]);
- dev_dbg(hyper_dmabuf_private.device, "\tfirst offset %d\n", req->operands[2]);
- dev_dbg(hyper_dmabuf_private.device, "\tlast len %d\n", req->operands[3]);
- dev_dbg(hyper_dmabuf_private.device, "\tgrefid %d\n", req->operands[4]);
+ dev_dbg(hyper_dmabuf_private.device, "\thid{id:%d key:%d %d %d}\n",
+ req->operands[0], req->operands[1], req->operands[2],
+ req->operands[3]);
+ dev_dbg(hyper_dmabuf_private.device, "\tnents %d\n", req->operands[4]);
+ dev_dbg(hyper_dmabuf_private.device, "\tfirst offset %d\n", req->operands[5]);
+ dev_dbg(hyper_dmabuf_private.device, "\tlast len %d\n", req->operands[6]);
+ dev_dbg(hyper_dmabuf_private.device, "\tgrefid %d\n", req->operands[7]);
for (i=0; i<4; i++)
- imported_sgt_info->private[i] = req->operands[5+i];
+ imported_sgt_info->private[i] = req->operands[8+i];
imported_sgt_info->valid = 1;
hyper_dmabuf_register_imported(imported_sgt_info);
- break;
+ break;
case HYPER_DMABUF_OPS_TO_REMOTE:
/* notifying dmabuf map/unmap to importer (probably not needed) */
@@ -182,6 +192,8 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
struct hyper_dmabuf_req *temp_req;
struct hyper_dmabuf_imported_sgt_info *sgt_info;
struct hyper_dmabuf_sgt_info *exp_sgt_info;
+ hyper_dmabuf_id_t hid = {req->operands[0], /* hid.id */
+ {req->operands[1], req->operands[2], req->operands[3]}}; /* hid.rng_key */
int ret;
if (!req) {
@@ -203,12 +215,12 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
if (req->command == HYPER_DMABUF_NOTIFY_UNEXPORT) {
/* destroy sg_list for hyper_dmabuf_id on remote side */
/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
- * operands0 : hyper_dmabuf_id
+ * operands0~3 : hyper_dmabuf_id
*/
dev_dbg(hyper_dmabuf_private.device,
"%s: processing HYPER_DMABUF_NOTIFY_UNEXPORT\n", __func__);
- sgt_info = hyper_dmabuf_find_imported(req->operands[0]);
+ sgt_info = hyper_dmabuf_find_imported(hid);
if (sgt_info) {
/* if anything is still using dma_buf */
@@ -220,7 +232,7 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
sgt_info->valid = 0;
} else {
/* No one is using buffer, remove it from imported list */
- hyper_dmabuf_remove_imported(req->operands[0]);
+ hyper_dmabuf_remove_imported(hid);
kfree(sgt_info);
}
} else {
@@ -236,13 +248,14 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
* or unmapping for synchronization with original exporter (e.g. i915) */
/* command : DMABUF_OPS_TO_SOURCE.
- * operands0 : hyper_dmabuf_id
+ * operands0~3 : hyper_dmabuf_id
- * operands1 : enum hyper_dmabuf_ops {....}
+ * operands4 : enum hyper_dmabuf_ops {....}
*/
dev_dbg(hyper_dmabuf_private.device,
"%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
- ret = hyper_dmabuf_remote_sync(req->operands[0], req->operands[1]);
+ ret = hyper_dmabuf_remote_sync(hid, req->operands[4]);
+
if (ret)
req->status = HYPER_DMABUF_REQ_ERROR;
else
@@ -255,20 +268,28 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
if (req->command == HYPER_DMABUF_EXPORT_FD) {
/* find a corresponding SGT for the id */
dev_dbg(hyper_dmabuf_private.device,
- "Processing HYPER_DMABUF_EXPORT_FD %d\n", req->operands[0]);
- exp_sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
+ "Processing HYPER_DMABUF_EXPORT_FD for buffer {id:%d key:%d %d %d}\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
+ exp_sgt_info = hyper_dmabuf_find_exported(hid);
if (!exp_sgt_info) {
dev_err(hyper_dmabuf_private.device,
- "critical err: requested sgt_info can't be found %d\n", req->operands[0]);
+ "critical err: requested sgt_info can't be found for buffer {id:%d key:%d %d %d}\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
req->status = HYPER_DMABUF_REQ_ERROR;
} else if (!exp_sgt_info->valid) {
dev_dbg(hyper_dmabuf_private.device,
- "Buffer no longer valid - cannot export fd %d\n", req->operands[0]);
+ "Buffer no longer valid - cannot export fd for buffer {id:%d key:%d %d %d}\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
req->status = HYPER_DMABUF_REQ_ERROR;
} else {
dev_dbg(hyper_dmabuf_private.device,
- "Buffer still valid - can export fd%d\n", req->operands[0]);
+ "Buffer still valid - can export fd for buffer {id:%d key:%d %d %d}\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
exp_sgt_info->importer_exported++;
req->status = HYPER_DMABUF_REQ_PROCESSED;
}
@@ -277,12 +298,16 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
if (req->command == HYPER_DMABUF_EXPORT_FD_FAILED) {
dev_dbg(hyper_dmabuf_private.device,
- "Processing HYPER_DMABUF_EXPORT_FD_FAILED %d\n", req->operands[0]);
- exp_sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
+ "Processing HYPER_DMABUF_EXPORT_FD_FAILED for buffer {id:%d key:%d %d %d}\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
+ exp_sgt_info = hyper_dmabuf_find_exported(hid);
if (!exp_sgt_info) {
dev_err(hyper_dmabuf_private.device,
- "critical err: requested sgt_info can't be found %d\n", req->operands[0]);
+ "critical err: requested sgt_info can't be found for buffer {id:%d key:%d %d %d}\n",
+ hid.id, hid.rng_key[0], hid.rng_key[1], hid.rng_key[2]);
+
req->status = HYPER_DMABUF_REQ_ERROR;
} else {
exp_sgt_info->importer_exported--;
@@ -25,7 +25,7 @@
#ifndef __HYPER_DMABUF_MSG_H__
#define __HYPER_DMABUF_MSG_H__
-#define MAX_NUMBER_OF_OPERANDS 9
+#define MAX_NUMBER_OF_OPERANDS 13
struct hyper_dmabuf_req {
unsigned int request_id;
@@ -31,10 +31,10 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_struct.h"
#include "hyper_dmabuf_list.h"
#include "hyper_dmabuf_msg.h"
-#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_id.h"
#include "hyper_dmabuf_msg.h"
#include "hyper_dmabuf_imp.h"
@@ -56,7 +56,7 @@ extern struct hyper_dmabuf_private hyper_dmabuf_private;
* is what is created when initial exporting is issued so it
* should not be modified or released by this fuction.
*/
-int hyper_dmabuf_remote_sync(int id, int ops)
+int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops)
{
struct hyper_dmabuf_sgt_info *sgt_info;
struct sgt_list *sgtl;
@@ -66,7 +66,7 @@ int hyper_dmabuf_remote_sync(int id, int ops)
int ret;
/* find a coresponding SGT for the id */
- sgt_info = hyper_dmabuf_find_exported(id);
+ sgt_info = hyper_dmabuf_find_exported(hid);
if (!sgt_info) {
dev_err(hyper_dmabuf_private.device,
@@ -167,9 +167,10 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_RELEASE:
dev_dbg(hyper_dmabuf_private.device,
- "Buffer %d released, references left: %d\n",
- sgt_info->hyper_dmabuf_id,
- sgt_info->importer_exported -1);
+ "Buffer {id:%d key:%d %d %d} released, references left: %d\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2], sgt_info->importer_exported -1);
+
sgt_info->importer_exported--;
/* If there are still importers just break, if no then continue with final cleanup */
if (sgt_info->importer_exported)
@@ -180,15 +181,17 @@ int hyper_dmabuf_remote_sync(int id, int ops)
* If not and buffer was unexported, clean up shared data and remove that buffer.
*/
dev_dbg(hyper_dmabuf_private.device,
- "Buffer %d final released\n", sgt_info->hyper_dmabuf_id);
+ "Buffer {id:%d key:%d %d %d} final released\n",
+ sgt_info->hid.id, sgt_info->hid.rng_key[0], sgt_info->hid.rng_key[1],
+ sgt_info->hid.rng_key[2]);
if (!sgt_info->valid && !sgt_info->importer_exported &&
!sgt_info->unexport_scheduled) {
hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
- hyper_dmabuf_remove_exported(id);
+ hyper_dmabuf_remove_exported(hid);
kfree(sgt_info);
/* store hyper_dmabuf_id in the list for reuse */
- store_reusable_id(id);
+ store_reusable_hid(hid);
}
break;
@@ -25,6 +25,6 @@
#ifndef __HYPER_DMABUF_REMOTE_SYNC_H__
#define __HYPER_DMABUF_REMOTE_SYNC_H__
-int hyper_dmabuf_remote_sync(int id, int ops);
+int hyper_dmabuf_remote_sync(hyper_dmabuf_id_t hid, int ops);
#endif // __HYPER_DMABUF_REMOTE_SYNC_H__
@@ -51,7 +51,7 @@ struct vmap_vaddr_list {
/* Exporter builds pages_info before sharing pages */
struct hyper_dmabuf_pages_info {
- int hyper_dmabuf_id; /* unique id to reference dmabuf in source domain */
+ hyper_dmabuf_id_t hid; /* unique id to reference dmabuf in source domain */
int hyper_dmabuf_rdomain; /* currenting considering just one remote domain access it */
int frst_ofst; /* offset of data in the first page */
int last_len; /* length of data in the last page */
@@ -64,22 +64,27 @@ struct hyper_dmabuf_pages_info {
* Exporter stores references to sgt in a hash table
* Exporter keeps these references for synchronization and tracking purposes
*
- * Importer use this structure exporting to other drivers in the same domain */
+ * Importer uses this structure when exporting to other drivers in the same domain
+ */
struct hyper_dmabuf_sgt_info {
- int hyper_dmabuf_id; /* unique id to reference dmabuf in remote domain */
+ hyper_dmabuf_id_t hid; /* unique id to reference dmabuf in remote domain */
int hyper_dmabuf_rdomain; /* domain importing this sgt */
struct dma_buf *dma_buf; /* needed to store this for freeing it later */
int nents; /* number of pages, which may be different than sgt->nents */
+
+ /* list of remote activities on dma_buf */
struct sgt_list *active_sgts;
struct attachment_list *active_attached;
struct kmap_vaddr_list *va_kmapped;
struct vmap_vaddr_list *va_vmapped;
- bool valid;
+
+ bool valid; /* set to 0 once unexported. Needed to prevent further mapping by importer */
int importer_exported; /* exported locally on importer's side */
void *refs_info; /* hypervisor-specific info for the references */
struct delayed_work unexport_work;
bool unexport_scheduled;
+
/* owner of buffer
* TODO: that is naiive as buffer may be reused by
* another userspace app, so here list of struct file should be kept
@@ -94,13 +99,16 @@ struct hyper_dmabuf_sgt_info {
* Importer store these references in the table and map it in
* its own memory map once userspace asks for reference for the buffer */
struct hyper_dmabuf_imported_sgt_info {
- int hyper_dmabuf_id; /* unique id to reference dmabuf (HYPER_DMABUF_ID_IMPORTER(source domain id, exporter's hyper_dmabuf_id */
+ hyper_dmabuf_id_t hid; /* unique id of the imported dmabuf */
+
int ref_handle; /* reference number of top level addressing page of shared pages */
- int frst_ofst; /* start offset in shared page #1 */
+ int frst_ofst; /* start offset in first shared page */
int last_len; /* length of data in the last shared page */
int nents; /* number of pages to be shared */
+
struct dma_buf *dma_buf;
struct sg_table *sgt; /* sgt pointer after importing buffer */
+
void *refs_info;
bool valid;
int num_importers;
@@ -29,8 +29,6 @@
#include "xen/xenbus.h"
#include "../hyper_dmabuf_msg.h"
-#define MAX_NUMBER_OF_OPERANDS 9
-
DEFINE_RING_TYPES(xen_comm, struct hyper_dmabuf_req, struct hyper_dmabuf_resp);
struct xen_comm_tx_ring_info {
@@ -25,6 +25,11 @@
#ifndef __LINUX_PUBLIC_HYPER_DMABUF_H__
#define __LINUX_PUBLIC_HYPER_DMABUF_H__
+typedef struct {
+ int id;
+ int rng_key[3]; /* 12-byte (96-bit) random key */
+} hyper_dmabuf_id_t;
+
#define IOCTL_HYPER_DMABUF_TX_CH_SETUP \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_hyper_dmabuf_tx_ch_setup))
struct ioctl_hyper_dmabuf_tx_ch_setup {
@@ -50,7 +55,7 @@ struct ioctl_hyper_dmabuf_export_remote {
/* Domain id to which buffer should be exported */
int remote_domain;
/* exported dma buf id */
- int hyper_dmabuf_id;
+ hyper_dmabuf_id_t hid;
int private[4];
};
@@ -59,7 +64,7 @@ _IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_hyper_dmabuf_export_fd))
struct ioctl_hyper_dmabuf_export_fd {
/* IN parameters */
/* hyper dmabuf id to be imported */
- int hyper_dmabuf_id;
+ hyper_dmabuf_id_t hid;
/* flags */
int flags;
/* OUT parameters */
@@ -72,7 +77,7 @@ _IOC(_IOC_NONE, 'G', 4, sizeof(struct ioctl_hyper_dmabuf_unexport))
struct ioctl_hyper_dmabuf_unexport {
/* IN parameters */
/* hyper dmabuf id to be unexported */
- int hyper_dmabuf_id;
+ hyper_dmabuf_id_t hid;
/* delay in ms by which unexport processing will be postponed */
int delay_ms;
/* OUT parameters */
@@ -85,7 +90,7 @@ _IOC(_IOC_NONE, 'G', 5, sizeof(struct ioctl_hyper_dmabuf_query))
struct ioctl_hyper_dmabuf_query {
/* in parameters */
/* hyper dmabuf id to be queried */
- int hyper_dmabuf_id;
+ hyper_dmabuf_id_t hid;
/* item to be queried */
int item;
/* OUT parameters */
The length of hyper_dmabuf_id is increased to 128 bits by adding a random
key (96 bits) to the id. This is to prevent a possible leak of the id via
guessing on the importer VM (by an unauthorized application).

hyper_dmabuf_id_t is now defined as,

typedef struct {
	int id;
	int rng_key[3];
} hyper_dmabuf_id_t;

Signed-off-by: Dongwon Kim <dongwon.kim@intel.com>
---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c        |   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h        |   3 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.c         |  57 ++++--
 drivers/xen/hyper_dmabuf/hyper_dmabuf_id.h         |  17 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_imp.c        |  51 +++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c      | 199 +++++++++++++--------
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c       |  87 ++++++---
 drivers/xen/hyper_dmabuf/hyper_dmabuf_list.h       |  10 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c        | 115 +++++++-----
 drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h        |   2 +-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.c    |  21 ++-
 .../xen/hyper_dmabuf/hyper_dmabuf_remote_sync.h    |   2 +-
 drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h     |  20 ++-
 .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.h   |   2 -
 include/uapi/xen/hyper_dmabuf.h                    |  13 +-
 15 files changed, 372 insertions(+), 229 deletions(-)
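
For reviewers, a minimal stand-alone sketch (illustrative only, not part of
the patch) of how the new 128-bit handle is packed and checked. The macros
and the key comparison mirror HYPER_DMABUF_ID_CREATE()/HYPER_DMABUF_DOM_ID()
and hyper_dmabuf_hid_keycomp() from this series; fill_key() is a hypothetical
stand-in for the kernel's get_random_bytes():

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
	int id;		/* [31:24] exporter domid, [23:0] running count */
	int rng_key[3];	/* 96-bit random key */
} hyper_dmabuf_id_t;

#define HYPER_DMABUF_ID_CREATE(domid, cnt) \
	((((domid) & 0xFF) << 24) | ((cnt) & 0xFFFFFF))
#define HYPER_DMABUF_DOM_ID(hid) \
	(((hid).id >> 24) & 0xFF)

/* hypothetical stand-in for the kernel's get_random_bytes() */
static void fill_key(int key[3])
{
	int i;

	for (i = 0; i < 3; i++)
		key[i] = rand();
}

/* same logic as hyper_dmabuf_hid_keycomp(): all three key words must match */
static bool hid_keycomp(hyper_dmabuf_id_t a, hyper_dmabuf_id_t b)
{
	int i;

	for (i = 0; i < 3; i++)
		if (a.rng_key[i] != b.rng_key[i])
			return false;
	return true;
}

int main(void)
{
	/* export count 7 from domain 2, keys not yet assigned */
	hyper_dmabuf_id_t hid = { HYPER_DMABUF_ID_CREATE(2, 7), {0} };
	hyper_dmabuf_id_t guess = hid;	/* attacker knows only the 32-bit id */

	fill_key(hid.rng_key);

	printf("domid=%d count=%d keys match: %s\n",
	       HYPER_DMABUF_DOM_ID(hid), hid.id & 0xFFFFFF,
	       hid_keycomp(hid, guess) ? "yes" : "no");
	return 0;
}

The point of the 96-bit key is that an importer-side application which only
guesses the 32-bit id still fails the key comparison, so the exported/imported
list lookups (hyper_dmabuf_find_exported/imported) reject the request.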