@@ -19,4 +19,14 @@ config HYPER_DMABUF_SYSFS
Expose information about imported and exported buffers using
hyper_dmabuf driver
+config HYPER_DMABUF_EVENT_GEN
+ bool "Enable event-generation and polling operation"
+ default n
+ depends on HYPER_DMABUF
+ help
+	  With this config enabled, the hyper_dmabuf driver on the importer
+	  side generates an event and queues it up in the event list
+	  whenever a new shared DMA-BUF becomes available. Events in the
+	  list can be retrieved via the read operation.
+
endmenu
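For context, a minimal userspace consumer of the new event interface might
look like the sketch below. This is an assumption for illustration, not part
of the patch: the device node path (/dev/hyper_dmabuf) is hypothetical, and
per the read handler added here the caller needs CAP_DAC_OVERRIDE (typically
root). read() returns the number of event bytes consumed.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct pollfd fds;
	char buf[4096];
	ssize_t n;

	/* device node path is an assumption for illustration */
	fds.fd = open("/dev/hyper_dmabuf", O_RDONLY);
	if (fds.fd < 0)
		return 1;

	fds.events = POLLIN;

	/* block until the driver queues at least one import event */
	if (poll(&fds, 1, -1) > 0 && (fds.revents & POLLIN)) {
		n = read(fds.fd, buf, sizeof(buf));
		if (n > 0)
			printf("received %zd bytes of event data\n", n);
	}

	close(fds.fd);
	return 0;
}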
@@ -13,6 +13,7 @@ ifneq ($(KERNELRELEASE),)
hyper_dmabuf_id.o \
hyper_dmabuf_remote_sync.o \
hyper_dmabuf_query.o \
+ hyper_dmabuf_event.o \
ifeq ($(CONFIG_XEN), y)
$(TARGET_MODULE)-objs += xen/hyper_dmabuf_xen_comm.o \
@@ -30,7 +30,10 @@
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/workqueue.h>
+#include <linux/slab.h>
#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
#include <linux/dma-buf.h>
#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_conf.h"
@@ -38,6 +41,7 @@
#include "hyper_dmabuf_msg.h"
#include "hyper_dmabuf_list.h"
#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_event.h"
#ifdef CONFIG_HYPER_DMABUF_XEN
#include "xen/hyper_dmabuf_xen_drv.h"
@@ -64,7 +68,7 @@ int hyper_dmabuf_open(struct inode *inode, struct file *filp)
return -EBUSY;
/*
- * Initialize backend if neededm,
+ * Initialize backend if needed,
* use mutex to prevent race conditions when
* two userspace apps will open device at the same time
*/
@@ -91,6 +95,112 @@ int hyper_dmabuf_release(struct inode *inode, struct file *filp)
{
hyper_dmabuf_foreach_exported(hyper_dmabuf_emergency_release, filp);
+ /* clean up event queue */
+ hyper_dmabuf_events_release();
+
+ return 0;
+}
+
+unsigned int hyper_dmabuf_event_poll(struct file *filp,
+				     struct poll_table_struct *wait)
+{
+ unsigned int mask = 0;
+
+ poll_wait(filp, &hyper_dmabuf_private.event_wait, wait);
+
+ if (!list_empty(&hyper_dmabuf_private.event_list))
+ mask |= POLLIN | POLLRDNORM;
+
+ return mask;
+}
+
+ssize_t hyper_dmabuf_event_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset)
+{
+ int ret;
+
+	/* only root can read events */
+	if (!capable(CAP_DAC_OVERRIDE))
+		return -EPERM;
+
+ /* make sure user buffer can be written */
+ if (!access_ok(VERIFY_WRITE, buffer, count))
+ return -EFAULT;
+
+ ret = mutex_lock_interruptible(&hyper_dmabuf_private.event_read_lock);
+ if (ret)
+ return ret;
+
+ while (1) {
+ struct hyper_dmabuf_event *e = NULL;
+
+ spin_lock_irq(&hyper_dmabuf_private.event_lock);
+ if (!list_empty(&hyper_dmabuf_private.event_list)) {
+ e = list_first_entry(&hyper_dmabuf_private.event_list,
+ struct hyper_dmabuf_event, link);
+ list_del(&e->link);
+ }
+ spin_unlock_irq(&hyper_dmabuf_private.event_lock);
+
+ if (!e) {
+ if (ret)
+ break;
+ if (filp->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ mutex_unlock(&hyper_dmabuf_private.event_read_lock);
+ ret = wait_event_interruptible(hyper_dmabuf_private.event_wait,
+ !list_empty(&hyper_dmabuf_private.event_list));
+
+ if (ret >= 0)
+ ret = mutex_lock_interruptible(&hyper_dmabuf_private.event_read_lock);
+
+ if (ret)
+ return ret;
+ } else {
+			unsigned int length = sizeof(struct hyper_dmabuf_event_hdr) +
+					      e->event_data.hdr.size;
+
+ if (length > count - ret) {
+put_back_event:
+ spin_lock_irq(&hyper_dmabuf_private.event_lock);
+ list_add(&e->link, &hyper_dmabuf_private.event_list);
+ spin_unlock_irq(&hyper_dmabuf_private.event_lock);
+ break;
+ }
+
+ if (copy_to_user(buffer + ret, &e->event_data.hdr,
+ sizeof(struct hyper_dmabuf_event_hdr))) {
+ if (ret == 0)
+ ret = -EFAULT;
+
+ goto put_back_event;
+ }
+
+ ret += sizeof(struct hyper_dmabuf_event_hdr);
+
+			if (copy_to_user(buffer + ret, e->event_data.data,
+					 e->event_data.hdr.size)) {
+				/* error while copying void *data */
+				struct hyper_dmabuf_event_hdr dummy_hdr = {0};
+
+				ret -= sizeof(struct hyper_dmabuf_event_hdr);
+
+				/* nullifying hdr of the event in user buffer */
+				if (copy_to_user(buffer + ret, &dummy_hdr,
+						 sizeof(dummy_hdr)))
+					dev_err(hyper_dmabuf_private.device,
+						"failed to nullify invalid hdr already in userspace\n");
+
+				ret = -EFAULT;
+
+				goto put_back_event;
+			}
+
+ ret += e->event_data.hdr.size;
+ kfree(e);
+ }
+ }
+
+ mutex_unlock(&hyper_dmabuf_private.event_read_lock);
+
+	return ret;
}
@@ -99,6 +209,8 @@ static struct file_operations hyper_dmabuf_driver_fops =
.owner = THIS_MODULE,
.open = hyper_dmabuf_open,
.release = hyper_dmabuf_release,
+ .read = hyper_dmabuf_event_read,
+ .poll = hyper_dmabuf_event_poll,
.unlocked_ioctl = hyper_dmabuf_ioctl,
};
@@ -184,6 +296,12 @@ static int __init hyper_dmabuf_drv_init(void)
}
#endif
+	/* Initialize event queue */
+	INIT_LIST_HEAD(&hyper_dmabuf_private.event_list);
+	init_waitqueue_head(&hyper_dmabuf_private.event_wait);
+	spin_lock_init(&hyper_dmabuf_private.event_lock);
+	mutex_init(&hyper_dmabuf_private.event_read_lock);
+
+	hyper_dmabuf_private.curr_num_event = 0;
+
/* interrupt for comm should be registered here: */
return ret;
}
@@ -30,6 +30,42 @@
struct hyper_dmabuf_req;
+struct hyper_dmabuf_event {
+ struct hyper_dmabuf_event_data event_data;
+ struct list_head link;
+};
+
+struct hyper_dmabuf_private {
+ struct device *device;
+
+ /* VM(domain) id of current VM instance */
+ int domid;
+
+ /* workqueue dedicated to hyper_dmabuf driver */
+ struct workqueue_struct *work_queue;
+
+ /* list of reusable hyper_dmabuf_ids */
+ struct list_reusable_id *id_queue;
+
+ /* backend ops - hypervisor specific */
+ struct hyper_dmabuf_backend_ops *backend_ops;
+
+ /* device global lock */
+ /* TODO: might need a lock per resource (e.g. EXPORT LIST) */
+ struct mutex lock;
+
+ /* flag that shows whether backend is initialized */
+ bool backend_initialized;
+
+	/* event queue and the wait queue for polling/blocking readers */
+	wait_queue_head_t event_wait;
+	struct list_head event_list;
+
+	/* event_lock protects event_list; event_read_lock serializes readers */
+	spinlock_t event_lock;
+	struct mutex event_read_lock;
+
+	/* number of events currently queued in event_list */
+	int curr_num_event;
+};
+
struct list_reusable_id {
hyper_dmabuf_id_t hid;
struct list_head list;
@@ -69,16 +105,4 @@ struct hyper_dmabuf_backend_ops {
int (*send_req)(int, struct hyper_dmabuf_req *, int);
};
-struct hyper_dmabuf_private {
- struct device *device;
- int domid;
- struct workqueue_struct *work_queue;
- struct list_reusable_id *id_queue;
-
- /* backend ops - hypervisor specific */
- struct hyper_dmabuf_backend_ops *backend_ops;
- struct mutex lock;
- bool backend_initialized;
-};
-
#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
new file mode 100644
@@ -0,0 +1,125 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dongwon Kim <dongwon.kim@intel.com>
+ * Mateusz Polrola <mateuszx.potrola@intel.com>
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-buf.h>
+#include <xen/grant_table.h>
+#include <asm/xen/page.h>
+#include "hyper_dmabuf_drv.h"
+#include "hyper_dmabuf_struct.h"
+#include "hyper_dmabuf_list.h"
+#include "hyper_dmabuf_event.h"
+
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
+static void hyper_dmabuf_send_event_locked(struct hyper_dmabuf_event *e)
+{
+ struct hyper_dmabuf_event *oldest;
+
+ assert_spin_locked(&hyper_dmabuf_private.event_lock);
+
+	/* check current number of events; if it has hit the max allowed,
+	 * remove and free the oldest event in the list
+	 */
+	if (hyper_dmabuf_private.curr_num_event >= MAX_NUMBER_OF_EVENT) {
+		oldest = list_first_entry(&hyper_dmabuf_private.event_list,
+					  struct hyper_dmabuf_event, link);
+		list_del(&oldest->link);
+		hyper_dmabuf_private.curr_num_event--;
+		kfree(oldest);
+	}
+
+ list_add_tail(&e->link,
+ &hyper_dmabuf_private.event_list);
+
+ hyper_dmabuf_private.curr_num_event++;
+
+ wake_up_interruptible(&hyper_dmabuf_private.event_wait);
+}
+
+void hyper_dmabuf_events_release(void)
+{
+ struct hyper_dmabuf_event *e, *et;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&hyper_dmabuf_private.event_lock, irqflags);
+
+	list_for_each_entry_safe(e, et, &hyper_dmabuf_private.event_list,
+				 link) {
+		list_del(&e->link);
+		kfree(e);
+		hyper_dmabuf_private.curr_num_event--;
+	}
+
+ if (hyper_dmabuf_private.curr_num_event) {
+ dev_err(hyper_dmabuf_private.device,
+ "possible leak on event_list\n");
+ }
+
+ spin_unlock_irqrestore(&hyper_dmabuf_private.event_lock, irqflags);
+}
+
+int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid)
+{
+ struct hyper_dmabuf_event *e;
+ struct hyper_dmabuf_imported_sgt_info *imported_sgt_info;
+
+ unsigned long irqflags;
+
+ imported_sgt_info = hyper_dmabuf_find_imported(hid);
+
+ if (!imported_sgt_info) {
+ dev_err(hyper_dmabuf_private.device,
+ "can't find imported_sgt_info in the list\n");
+ return -EINVAL;
+ }
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+
+	if (!e)
+		return -ENOMEM;
+
+ e->event_data.hdr.event_type = HYPER_DMABUF_NEW_IMPORT;
+ e->event_data.hdr.hid = hid;
+	/* event payload points into imported_sgt_info->priv, so the
+	 * sgt_info must stay valid until the event is consumed
+	 */
+	e->event_data.data = (void *)&imported_sgt_info->priv[0];
+	e->event_data.hdr.size = sizeof(imported_sgt_info->priv); /* 128 bytes */
+
+ spin_lock_irqsave(&hyper_dmabuf_private.event_lock, irqflags);
+
+ hyper_dmabuf_send_event_locked(e);
+
+ spin_unlock_irqrestore(&hyper_dmabuf_private.event_lock, irqflags);
+
+	dev_dbg(hyper_dmabuf_private.device,
+		"event number = %d\n", hyper_dmabuf_private.curr_num_event);
+
+	dev_dbg(hyper_dmabuf_private.device,
+		"generated event for {%d, %d, %d, %d}\n",
+		imported_sgt_info->hid.id, imported_sgt_info->hid.rng_key[0],
+		imported_sgt_info->hid.rng_key[1], imported_sgt_info->hid.rng_key[2]);
+
+ return 0;
+}
new file mode 100644
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __HYPER_DMABUF_EVENT_H__
+#define __HYPER_DMABUF_EVENT_H__
+
+#define MAX_NUMBER_OF_EVENT 1024
+
+enum hyper_dmabuf_event_type {
+ HYPER_DMABUF_NEW_IMPORT = 0x10000,
+};
+
+void hyper_dmabuf_events_release(void);
+
+int hyper_dmabuf_import_event(hyper_dmabuf_id_t hid);
+
+#endif /* __HYPER_DMABUF_EVENT_H__ */
@@ -87,7 +87,7 @@ static int hyper_dmabuf_send_export_msg(struct hyper_dmabuf_sgt_info *sgt_info,
{
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
struct hyper_dmabuf_req *req;
-	int operands[12] = {0};
+	int operands[MAX_NUMBER_OF_OPERANDS] = {0};
int ret, i;
/* now create request for importer via ring */
@@ -109,7 +109,7 @@ static int hyper_dmabuf_send_export_msg(struct hyper_dmabuf_sgt_info *sgt_info,
}
-	/* driver/application specific private info, max 4x4 bytes */
-	memcpy(&operands[8], &sgt_info->priv[0], sizeof(unsigned int) * 4);
+	/* driver/application specific private info, max 32x4 bytes */
+	memcpy(&operands[8], &sgt_info->priv[0], sizeof(unsigned int) * 32);
req = kcalloc(1, sizeof(*req), GFP_KERNEL);
@@ -121,11 +121,7 @@ static int hyper_dmabuf_send_export_msg(struct hyper_dmabuf_sgt_info *sgt_info,
/* composing a message to the importer */
hyper_dmabuf_create_request(req, HYPER_DMABUF_EXPORT, &operands[0]);
- ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, false);
-
- if(ret) {
- dev_err(hyper_dmabuf_private.device, "error while communicating\n");
- }
+ ret = ops->send_req(sgt_info->hyper_dmabuf_rdomain, req, true);
kfree(req);
@@ -141,7 +137,6 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
struct hyper_dmabuf_pages_info *page_info;
struct hyper_dmabuf_sgt_info *sgt_info;
hyper_dmabuf_id_t hid;
- int i;
int ret = 0;
if (!data) {
@@ -187,10 +182,14 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
}
/* update private data in sgt_info with new ones */
- memcpy(&sgt_info->priv[0], &export_remote_attr->priv[0], sizeof(unsigned int) * 4);
+ memcpy(&sgt_info->priv[0], &export_remote_attr->priv[0], sizeof(unsigned int) * 32);
+
+ /* send an export msg for updating priv in importer */
+ ret = hyper_dmabuf_send_export_msg(sgt_info, NULL);
- /* TODO: need to send this private info to the importer so that those
- * on importer's side are also updated */
+	if (ret < 0)
+		dev_err(hyper_dmabuf_private.device,
+			"failed to send the new private data to the importer\n");
dma_buf_put(dma_buf);
export_remote_attr->hid = hid;
@@ -280,7 +279,7 @@ static int hyper_dmabuf_export_remote_ioctl(struct file *filp, void *data)
INIT_LIST_HEAD(&sgt_info->va_vmapped->list);
/* copy private data to sgt_info */
- memcpy(&sgt_info->priv[0], &export_remote_attr->priv[0], sizeof(unsigned int) * 4);
+ memcpy(&sgt_info->priv[0], &export_remote_attr->priv[0], sizeof(unsigned int) * 32);
page_info = hyper_dmabuf_ext_pgs(sgt);
if (!page_info) {
@@ -37,6 +37,7 @@
#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_list.h"
#include "hyper_dmabuf_id.h"
+#include "hyper_dmabuf_event.h"
extern struct hyper_dmabuf_private hyper_dmabuf_private;
@@ -36,6 +36,7 @@
#include "hyper_dmabuf_msg.h"
#include "hyper_dmabuf_drv.h"
#include "hyper_dmabuf_remote_sync.h"
+#include "hyper_dmabuf_event.h"
#include "hyper_dmabuf_list.h"
extern struct hyper_dmabuf_private hyper_dmabuf_private;
@@ -64,10 +65,10 @@ void hyper_dmabuf_create_request(struct hyper_dmabuf_req *req,
* operands5 : offset of data in the first page
* operands6 : length of data in the last page
* operands7 : top-level reference number for shared pages
- * operands8~11 : Driver-specific private data (e.g. graphic buffer's meta info)
+ * operands8~39 : Driver-specific private data (e.g. graphic buffer's meta info)
*/
- for (i=0; i < 11; i++)
- req->operands[i] = operands[i];
+
+ memcpy(&req->operands[0], &operands[0], 40 * sizeof(int));
break;
case HYPER_DMABUF_NOTIFY_UNEXPORT:
@@ -136,6 +137,32 @@ void cmd_process_work(struct work_struct *work)
* operands7 : top-level reference number for shared pages
-	 * operands8~11 : Driver-specific private data (e.g. graphic buffer's meta info)
+	 * operands8~39 : Driver-specific private data (e.g. graphic buffer's meta info)
*/
+
+		/* if nents == 0, this message is only for synchronizing the
+		 * priv data of an existing imported_sgt_info, so a new one
+		 * is not created
+		 */
+ if (req->operands[4] == 0) {
+ hyper_dmabuf_id_t exist = {req->operands[0],
+ {req->operands[1], req->operands[2],
+ req->operands[3]}};
+
+ imported_sgt_info = hyper_dmabuf_find_imported(exist);
+
+ if (!imported_sgt_info) {
+ dev_err(hyper_dmabuf_private.device,
+ "Can't find imported sgt_info from IMPORT_LIST\n");
+ break;
+ }
+			/* updating priv data */
+ memcpy(&imported_sgt_info->priv[0], &req->operands[8], 32 * sizeof(int));
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+ /* generating import event */
+ hyper_dmabuf_import_event(imported_sgt_info->hid);
+#endif
+
+ break;
+ }
+
imported_sgt_info = kcalloc(1, sizeof(*imported_sgt_info), GFP_KERNEL);
if (!imported_sgt_info) {
@@ -163,12 +190,17 @@ void cmd_process_work(struct work_struct *work)
dev_dbg(hyper_dmabuf_private.device, "\tlast len %d\n", req->operands[6]);
dev_dbg(hyper_dmabuf_private.device, "\tgrefid %d\n", req->operands[7]);
- for (i=0; i<4; i++)
- imported_sgt_info->private[i] = req->operands[8+i];
+ memcpy(&imported_sgt_info->priv[0], &req->operands[8], 32 * sizeof(int));
imported_sgt_info->valid = 1;
hyper_dmabuf_register_imported(imported_sgt_info);
- break;
+
+#ifdef CONFIG_HYPER_DMABUF_EVENT_GEN
+ /* generating import event */
+ hyper_dmabuf_import_event(imported_sgt_info->hid);
+#endif
+
+ break;
case HYPER_DMABUF_OPS_TO_REMOTE:
/* notifying dmabuf map/unmap to importer (probably not needed) */
@@ -25,7 +25,7 @@
#ifndef __HYPER_DMABUF_MSG_H__
#define __HYPER_DMABUF_MSG_H__
-#define MAX_NUMBER_OF_OPERANDS 13
+#define MAX_NUMBER_OF_OPERANDS 40
struct hyper_dmabuf_req {
unsigned int request_id;
@@ -90,7 +90,7 @@ struct hyper_dmabuf_sgt_info {
* uses releases hyper_dmabuf device
*/
struct file *filp;
- int priv[4]; /* device specific info (e.g. image's meta info?) */
+ int priv[32]; /* device specific info (e.g. image's meta info?) */
};
/* Importer store references (before mapping) on shared pages
@@ -110,7 +110,7 @@ struct hyper_dmabuf_imported_sgt_info {
void *refs_info;
bool valid;
int num_importers;
- int private[4]; /* device specific info (e.g. image's meta info?) */
+ int priv[32]; /* device specific info (e.g. image's meta info?) */
};
#endif /* __HYPER_DMABUF_STRUCT_H__ */
@@ -32,6 +32,7 @@
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
+#include <linux/time.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/xenbus.h>
@@ -474,6 +475,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
back_ring_isr, 0,
NULL, (void*)ring_info);
+ return ret;
+
fail_others:
kfree(map_ops);
@@ -545,6 +548,10 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
struct hyper_dmabuf_req *new_req;
struct xen_comm_tx_ring_info *ring_info;
int notify;
+	struct timeval tv_start, tv_end;
+	struct timeval tv_diff;
int timeout = 1000;
/* find a ring info for the channel */
@@ -559,7 +566,11 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
ring = &ring_info->ring_front;
+ do_gettimeofday(&tv_start);
+
while (RING_FULL(ring)) {
+ dev_dbg(hyper_dmabuf_private.device, "RING_FULL\n");
+
if (timeout == 0) {
dev_err(hyper_dmabuf_private.device,
"Timeout while waiting for an entry in the ring\n");
@@ -609,6 +620,21 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
}
mutex_unlock(&ring_info->lock);
+ do_gettimeofday(&tv_end);
+
+ /* checking time duration for round-trip of a request for debugging */
+ if (tv_end.tv_usec >= tv_start.tv_usec) {
+ tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec;
+ tv_diff.tv_usec = tv_end.tv_usec-tv_start.tv_usec;
+ } else {
+ tv_diff.tv_sec = tv_end.tv_sec-tv_start.tv_sec-1;
+ tv_diff.tv_usec = tv_end.tv_usec+1000000-tv_start.tv_usec;
+ }
+
+	if (tv_diff.tv_sec > 0 || tv_diff.tv_usec > 16000)
+		dev_dbg(hyper_dmabuf_private.device,
+			"send_req: time diff: %ld sec, %ld usec\n",
+			tv_diff.tv_sec, tv_diff.tv_usec);
+
return req_pending.status;
}
@@ -657,6 +683,10 @@ static irqreturn_t back_ring_isr(int irq, void *info)
sizeof(resp));
ring->rsp_prod_pvt++;
+ dev_dbg(hyper_dmabuf_private.device,
+ "sending response to exporter for request id:%d\n",
+ resp.response_id);
+
RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
if (notify) {
@@ -696,8 +726,13 @@ static irqreturn_t front_ring_isr(int irq, void *info)
/* update pending request's status with what is
* in the response
*/
+
+	dev_dbg(hyper_dmabuf_private.device,
+		"getting response from importer\n");
+
 	if (req_pending.request_id == resp->response_id)
 		req_pending.status = resp->status;
if (resp->status == HYPER_DMABUF_REQ_NEEDS_FOLLOW_UP) {
/* parsing response */
@@ -30,6 +30,17 @@ typedef struct {
int rng_key[3]; /* 12bytes long random number */
} hyper_dmabuf_id_t;
+struct hyper_dmabuf_event_hdr {
+ int event_type; /* one type only for now - new import */
+ hyper_dmabuf_id_t hid; /* hyper_dmabuf_id of specific hyper_dmabuf */
+ size_t size; /* size of data */
+};
+
+struct hyper_dmabuf_event_data {
+ struct hyper_dmabuf_event_hdr hdr;
+ void *data; /* private data */
+};
+
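Each read() on the device returns a packed stream of records: a
hyper_dmabuf_event_hdr immediately followed by hdr.size bytes of private
data. A hypothetical userspace parser for such a buffer (assuming this uapi
header is included as <xen/hyper_dmabuf.h> along with <stddef.h>):

static void parse_events(const char *buf, size_t len)
{
	size_t off = 0;

	while (off + sizeof(struct hyper_dmabuf_event_hdr) <= len) {
		const struct hyper_dmabuf_event_hdr *hdr =
			(const void *)(buf + off);

		off += sizeof(*hdr);
		if (off + hdr->size > len)
			break;	/* truncated record */

		/* hdr->hid identifies the newly imported buffer; the next
		 * hdr->size bytes are the exporter's private data */
		off += hdr->size;
	}
}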
#define IOCTL_HYPER_DMABUF_TX_CH_SETUP \
_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_hyper_dmabuf_tx_ch_setup))
struct ioctl_hyper_dmabuf_tx_ch_setup {
@@ -56,7 +67,7 @@ struct ioctl_hyper_dmabuf_export_remote {
int remote_domain;
/* exported dma buf id */
hyper_dmabuf_id_t hid;
- int priv[4];
+ int priv[32];
};
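On the exporter side, the enlarged priv[] array is the payload that
eventually reaches the importer as event data. A hypothetical usage sketch,
assuming <string.h>, <sys/ioctl.h> and the uapi header are included; the
ioctl name IOCTL_HYPER_DMABUF_EXPORT_REMOTE and any fields other than
remote_domain, hid and priv are assumptions, since they are elided from
this hunk:

int export_with_metadata(int dev_fd, int importer_domid, const int *metadata)
{
	struct ioctl_hyper_dmabuf_export_remote attr = {0};

	/* fields elided from this hunk (e.g. the dma-buf fd) would be
	 * filled in here as usual */
	attr.remote_domain = importer_domid;

	/* 32 * sizeof(int) = 128 bytes of originator-specific data; this is
	 * what the importer receives in the HYPER_DMABUF_NEW_IMPORT event */
	memcpy(attr.priv, metadata, sizeof(attr.priv));

	/* hypothetical ioctl name; on success attr.hid holds the new id */
	if (ioctl(dev_fd, IOCTL_HYPER_DMABUF_EXPORT_REMOTE, &attr) < 0)
		return -1;

	return 0;
}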
#define IOCTL_HYPER_DMABUF_EXPORT_FD \
hyper_dmabuf driver on importing domain now generates event every time new hyper_dmabuf is available (visible) to the importer. Each event comes with 128 byte private data, which can contain any meta data or user data specific to the originator of DMA BUF. Signed-off-by: Dongwon Kim <dongwon.kim@intel.com> --- drivers/xen/hyper_dmabuf/Kconfig | 10 ++ drivers/xen/hyper_dmabuf/Makefile | 1 + drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.c | 120 +++++++++++++++++++- drivers/xen/hyper_dmabuf/hyper_dmabuf_drv.h | 48 ++++++-- drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c | 125 +++++++++++++++++++++ drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h | 38 +++++++ drivers/xen/hyper_dmabuf/hyper_dmabuf_ioctl.c | 23 ++-- drivers/xen/hyper_dmabuf/hyper_dmabuf_list.c | 1 + drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.c | 44 +++++++- drivers/xen/hyper_dmabuf/hyper_dmabuf_msg.h | 2 +- drivers/xen/hyper_dmabuf/hyper_dmabuf_struct.h | 4 +- .../xen/hyper_dmabuf/xen/hyper_dmabuf_xen_comm.c | 37 +++++- include/uapi/xen/hyper_dmabuf.h | 13 ++- 13 files changed, 430 insertions(+), 36 deletions(-) create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.c create mode 100644 drivers/xen/hyper_dmabuf/hyper_dmabuf_event.h