@@ -56,6 +56,8 @@ static int __init hyper_dmabuf_drv_init(void)
printk( KERN_NOTICE "hyper_dmabuf_starting: Initialization started" );
+ mutex_init(&hyper_dmabuf_private.lock); /* must be usable before register_device() exposes the ioctls */
+
ret = register_device();
if (ret < 0) {
return -EINVAL;
@@ -76,6 +76,7 @@ struct hyper_dmabuf_private {
/* backend ops - hypervisor specific */
struct hyper_dmabuf_backend_ops *backend_ops;
+ struct mutex lock; /* serializes importer accounting in hyper_dmabuf_export_fd_ioctl() */
};
#endif /* __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__ */
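These two hunks belong together: the new mutex lives in the driver-wide private structure (the include guard suggests this is the driver's main header) and is initialized in hyper_dmabuf_drv_init() before register_device(), so the lock is ready by the time userspace can reach the ioctls. Because it is a single global lock, every importer serializes on it regardless of which buffer is involved. A finer-grained alternative, sketched below, would hang the lock off the per-buffer bookkeeping instead; every field name except num_importers and valid (both visible in the ioctl hunks) is invented for illustration:

	/* Hypothetical per-buffer locking, NOT what this patch does */
	struct imported_sgt_info {
		struct mutex lock;	/* protects only this buffer's state */
		int num_importers;	/* guarded by lock */
		bool valid;		/* guarded by lock */
		/* ... remaining per-buffer state ... */
	};

The global lock is the simpler and safer starting point; per-buffer locking only pays off once contention between unrelated buffers is actually measurable.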
@@ -260,6 +260,8 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (sgt_info == NULL || !sgt_info->valid) /* can't find sgt from the table */
return -1;
+ mutex_lock(&hyper_dmabuf_private.lock); /* held across the whole import path below */
+
sgt_info->num_importers++;
/* send notification for export_fd to exporter */
@@ -274,6 +276,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
kfree(req);
dev_err(hyper_dmabuf_private.device, "Failed to create sgt or notify exporter\n");
sgt_info->num_importers--;
+ mutex_unlock(&hyper_dmabuf_private.lock);
return -EINVAL;
}
kfree(req);
@@ -282,6 +285,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
dev_err(hyper_dmabuf_private.device,
"Buffer invalid\n");
sgt_info->num_importers--;
+ mutex_unlock(&hyper_dmabuf_private.lock);
return -1;
} else {
dev_dbg(hyper_dmabuf_private.device, "Can import buffer\n");
@@ -303,6 +307,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (!data_pages) {
sgt_info->num_importers--;
+ mutex_unlock(&hyper_dmabuf_private.lock);
return -EINVAL;
}
@@ -318,6 +323,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
ret = export_fd_attr->fd;
}
+ mutex_unlock(&hyper_dmabuf_private.lock);
dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return 0;
}
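The ioctl hunks above pair the single mutex_lock() with an explicit mutex_unlock() on each of the four return paths. That is correct as written, but it is the pattern most likely to break when someone adds a fifth early return. The shape below condenses the elided middle of the function into one stand-in step to show the usual kernel alternative, a single unlock before a single return; lookup_sgt() and do_export() are hypothetical stand-ins, not functions from this driver:

	static int hyper_dmabuf_export_fd_ioctl(void *data)
	{
		struct imported_sgt_info *sgt_info = lookup_sgt(data);	/* hypothetical */
		int ret;

		/* the NULL/validity check stays outside the lock, as in the patch */
		if (sgt_info == NULL || !sgt_info->valid)
			return -1;

		mutex_lock(&hyper_dmabuf_private.lock);
		sgt_info->num_importers++;

		ret = do_export(sgt_info);	/* stands in for the notify/map/fd steps */
		if (ret < 0)
			sgt_info->num_importers--;	/* every error leg undoes the count */

		mutex_unlock(&hyper_dmabuf_private.lock);
		return ret;
	}

With exactly one mutex_unlock(), a new failure case cannot leak the lock.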
@@ -278,6 +278,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
ring_info->irq = ret;
ring_info->port = alloc_unbound.port;
+ mutex_init(&ring_info->lock);
+
dev_dbg(hyper_dmabuf_private.device,
"%s: allocated eventchannel gref %d port: %d irq: %d\n",
__func__,
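Each TX ring, one per remote domain, now carries its own mutex, initialized only after the event-channel setup has succeeded, so senders to different domains never contend with each other. Reconstructed from the hyper_dmabuf_xen_send_req() hunks below, the critical section it protects looks roughly like this; only RING_GET_REQUEST() is visible in the hunks, and the memcpy/push/notify steps are assumed to follow the standard Xen front-ring sequence:

	mutex_lock(&ring_info->lock);
	ring = &ring_info->ring_front;
	if (!RING_FULL(ring)) {
		/* producer bookkeeping is read-modify-write: two senders
		 * interleaving here would corrupt req_prod_pvt */
		new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
		memcpy(new_req, req, sizeof(*new_req));
		ring->req_prod_pvt++;
		RING_PUSH_REQUESTS(ring);
		notify_remote_via_irq(ring_info->irq);
	}
	mutex_unlock(&ring_info->lock);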
@@ -512,6 +514,9 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
return -EINVAL;
}
+
+ mutex_lock(&ring_info->lock); /* one producer on this ring at a time */
+
ring = &ring_info->ring_front;
if (RING_FULL(ring))
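The body of the RING_FULL() branch falls outside this hunk. After this patch it must not return while holding the new lock, or the ring is wedged for every later sender to that domain. Assuming the pre-patch branch simply returned -EBUSY, it now needs to read:

	if (RING_FULL(ring)) {
		mutex_unlock(&ring_info->lock);	/* never return with the ring lock held */
		return -EBUSY;
	}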
@@ -519,6 +524,7 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
if (!new_req) {
+ mutex_unlock(&ring_info->lock);
dev_err(hyper_dmabuf_private.device,
"NULL REQUEST\n");
return -EIO;
@@ -548,13 +554,17 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
}
if (timeout < 0) {
+ mutex_unlock(&ring_info->lock);
dev_err(hyper_dmabuf_private.device, "request timed-out\n");
return -EBUSY;
}
+ mutex_unlock(&ring_info->lock);
return req_pending.status;
}
+ mutex_unlock(&ring_info->lock);
+
return 0;
}
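Note that the lock stays held across the response-wait loop (the timeout test above), so one unresponsive peer stalls every sender to that domain for the full timeout; that is a deliberate trade for a simple one-outstanding-request protocol, and other domains' rings are unaffected. These hunks also establish a nesting order between the two new locks, since hyper_dmabuf_export_fd_ioctl() sends its request, and therefore takes the ring lock, while still holding the driver-wide lock. That ordering is worth recording next to the lock declarations; the comment below is a reading of the hunks, not text from the driver:

	/*
	 * Lock ordering (outermost first):
	 *
	 *   hyper_dmabuf_private.lock          ioctl-level importer accounting
	 *     -> xen_comm_tx_ring_info.lock    per-domain request submission
	 *
	 * export_fd holds the private lock while sending its request,
	 * which takes the ring lock; never acquire them in the
	 * opposite order or an ABBA deadlock becomes possible.
	 */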
@@ -39,6 +39,7 @@ struct xen_comm_tx_ring_info {
int gref_ring;
int irq;
int port;
+ struct mutex lock; /* serializes request submission on this TX ring */
struct xenbus_watch watch;
};
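Finally, if helpers are ever factored out of hyper_dmabuf_xen_send_req(), the locking assumption is cheaper to assert than to re-document. A sketch using the standard lockdep helper; xen_comm_push_req() is hypothetical:

	#include <linux/lockdep.h>

	static void xen_comm_push_req(struct xen_comm_tx_ring_info *ring_info,
				      struct hyper_dmabuf_req *req)
	{
		lockdep_assert_held(&ring_info->lock);	/* caller holds the ring lock */
		/* ... produce req onto ring_info->ring_front ... */
	}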