@@ -1,6 +1,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
#include "hyper_dmabuf_conf.h"
#include "hyper_dmabuf_msg.h"
#include "hyper_dmabuf_drv.h"
@@ -36,7 +37,8 @@ static int hyper_dmabuf_drv_init(void)
hyper_dmabuf_private.backend_ops = &xen_backend_ops;
#endif
- printk( KERN_NOTICE "initializing database for imported/exported dmabufs\n");
+ dev_info(hyper_dmabuf_private.device,
+ "initializing database for imported/exported dmabufs\n");
/* device structure initialization */
/* currently only does work-queue initialization */
@@ -1,6 +1,10 @@
#ifndef __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
#define __LINUX_PUBLIC_HYPER_DMABUF_DRV_H__
+#include <linux/device.h>
+
+struct hyper_dmabuf_req;
+
struct list_reusable_id {
int id;
struct list_head list;
@@ -155,7 +155,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
struct hyper_dmabuf_backend_ops *ops = hyper_dmabuf_private.backend_ops;
if (!sgt_info) {
- printk("invalid hyper_dmabuf_id\n");
+ dev_err(hyper_dmabuf_private.device, "invalid hyper_dmabuf_id\n");
return -EINVAL;
}
@@ -168,7 +168,7 @@ int hyper_dmabuf_cleanup_sgt_info(struct hyper_dmabuf_sgt_info *sgt_info, int fo
!list_empty(&sgt_info->va_vmapped->list) ||
!list_empty(&sgt_info->active_sgts->list) ||
!list_empty(&sgt_info->active_attached->list))) {
- printk("dma-buf is used by importer\n");
+ dev_warn(hyper_dmabuf_private.device, "dma-buf is used by importer\n");
return -EPERM;
}
@@ -273,7 +273,8 @@ static int hyper_dmabuf_ops_attach(struct dma_buf* dmabuf, struct device* dev,
HYPER_DMABUF_OPS_ATTACH);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
return ret;
}
@@ -294,7 +295,8 @@ static void hyper_dmabuf_ops_detach(struct dma_buf* dmabuf, struct dma_buf_attac
HYPER_DMABUF_OPS_DETACH);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
}
@@ -331,7 +333,8 @@ static struct sg_table* hyper_dmabuf_ops_map(struct dma_buf_attachment *attachme
kfree(page_info);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return st;
@@ -363,7 +366,8 @@ static void hyper_dmabuf_ops_unmap(struct dma_buf_attachment *attachment,
HYPER_DMABUF_OPS_UNMAP);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
}
@@ -403,7 +407,8 @@ static void hyper_dmabuf_ops_release(struct dma_buf *dma_buf)
}
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
/*
@@ -429,7 +434,8 @@ static int hyper_dmabuf_ops_begin_cpu_access(struct dma_buf *dmabuf, enum dma_da
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return ret;
@@ -448,7 +454,8 @@ static int hyper_dmabuf_ops_end_cpu_access(struct dma_buf *dmabuf, enum dma_data
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_END_CPU_ACCESS);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return 0;
@@ -467,7 +474,8 @@ static void *hyper_dmabuf_ops_kmap_atomic(struct dma_buf *dmabuf, unsigned long
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_KMAP_ATOMIC);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return NULL; /* for now NULL.. need to return the address of mapped region */
@@ -486,7 +494,8 @@ static void hyper_dmabuf_ops_kunmap_atomic(struct dma_buf *dmabuf, unsigned long
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_KUNMAP_ATOMIC);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
}
@@ -503,7 +512,8 @@ static void *hyper_dmabuf_ops_kmap(struct dma_buf *dmabuf, unsigned long pgnum)
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_KMAP);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return NULL; /* for now NULL.. need to return the address of mapped region */
@@ -522,7 +532,8 @@ static void hyper_dmabuf_ops_kunmap(struct dma_buf *dmabuf, unsigned long pgnum,
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_KUNMAP);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
}
@@ -539,7 +550,8 @@ static int hyper_dmabuf_ops_mmap(struct dma_buf *dmabuf, struct vm_area_struct *
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_MMAP);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return ret;
@@ -558,7 +570,8 @@ static void *hyper_dmabuf_ops_vmap(struct dma_buf *dmabuf)
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_VMAP);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
return NULL;
@@ -577,7 +590,8 @@ static void hyper_dmabuf_ops_vunmap(struct dma_buf *dmabuf, void *vaddr)
ret = hyper_dmabuf_sync_request_and_wait(sgt_info->hyper_dmabuf_id,
HYPER_DMABUF_OPS_VUNMAP);
if (ret < 0) {
- printk("hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
+ dev_err(hyper_dmabuf_private.device,
+ "hyper_dmabuf::%s Error:send dmabuf sync request failed\n", __func__);
}
}
@@ -25,7 +25,7 @@ static int hyper_dmabuf_tx_ch_setup(void *data)
int ret = 0;
if (!data) {
- printk("user data is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
return -1;
}
tx_ch_attr = (struct ioctl_hyper_dmabuf_tx_ch_setup *)data;
@@ -42,7 +42,7 @@ static int hyper_dmabuf_rx_ch_setup(void *data)
int ret = 0;
if (!data) {
- printk("user data is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
return -1;
}
@@ -67,7 +67,7 @@ static int hyper_dmabuf_export_remote(void *data)
int ret = 0;
if (!data) {
- printk("user data is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
return -1;
}
@@ -76,7 +76,7 @@ static int hyper_dmabuf_export_remote(void *data)
dma_buf = dma_buf_get(export_remote_attr->dmabuf_fd);
if (!dma_buf) {
- printk("Cannot get dma buf\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot get dma buf\n");
return -1;
}
@@ -94,7 +94,7 @@ static int hyper_dmabuf_export_remote(void *data)
attachment = dma_buf_attach(dma_buf, hyper_dmabuf_private.device);
if (!attachment) {
- printk("Cannot get attachment\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot get attachment\n");
return -1;
}
@@ -206,8 +206,10 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
int operand;
int ret = 0;
+ dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+
if (!data) {
- printk("user data is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
return -EINVAL;
}
@@ -218,12 +220,15 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (sgt_info == NULL) /* can't find sgt from the table */
return -1;
- printk("%s Found buffer gref %d off %d last len %d nents %d domain %d\n", __func__,
- sgt_info->ref_handle, sgt_info->frst_ofst,
- sgt_info->last_len, sgt_info->nents,
- HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id));
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s Found buffer gref %d off %d last len %d nents %d domain %d\n", __func__,
+ sgt_info->ref_handle, sgt_info->frst_ofst,
+ sgt_info->last_len, sgt_info->nents,
+ HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id));
if (!sgt_info->sgt) {
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s buffer %d pages not mapped yet\n", __func__, sgt_info->hyper_dmabuf_id);
data_pages = ops->map_shared_pages(sgt_info->ref_handle,
HYPER_DMABUF_DOM_ID(sgt_info->hyper_dmabuf_id),
sgt_info->nents,
@@ -244,7 +249,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
if (!sgt_info->sgt || ret) {
kfree(req);
- printk("Failed to create sgt or notify exporter\n");
+ dev_err(hyper_dmabuf_private.device, "Failed to create sgt or notify exporter\n");
return -EINVAL;
}
kfree(req);
@@ -258,6 +263,7 @@ static int hyper_dmabuf_export_fd_ioctl(void *data)
sgt_info->num_importers++;
}
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return ret;
}
@@ -272,8 +278,10 @@ static int hyper_dmabuf_unexport(void *data)
struct hyper_dmabuf_req *req;
int ret;
+ dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+
if (!data) {
- printk("user data is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
return -EINVAL;
}
@@ -302,6 +310,8 @@ static int hyper_dmabuf_unexport(void *data)
/* free msg */
kfree(req);
+ dev_dbg(hyper_dmabuf_private.device,
+ "Marking buffer %d as invalid\n", unexport_attr->hyper_dmabuf_id);
/* no longer valid */
sgt_info->valid = 0;
@@ -312,8 +322,9 @@ static int hyper_dmabuf_unexport(void *data)
* is called (importer does this only when there's no
* no consumer of locally exported FDs)
*/
- printk("before claning up buffer completly\n");
if (!sgt_info->importer_exported) {
+ dev_dbg(hyper_dmabuf_private.device,
+ "cleaning up buffer %d completely\n", unexport_attr->hyper_dmabuf_id);
hyper_dmabuf_cleanup_sgt_info(sgt_info, false);
hyper_dmabuf_remove_exported(unexport_attr->hyper_dmabuf_id);
kfree(sgt_info);
@@ -321,6 +332,7 @@ static int hyper_dmabuf_unexport(void *data)
store_reusable_id(unexport_attr->hyper_dmabuf_id);
}
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return ret;
}
@@ -332,7 +344,7 @@ static int hyper_dmabuf_query(void *data)
int ret = 0;
if (!data) {
- printk("user data is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "user data is NULL\n");
return -EINVAL;
}
@@ -343,7 +355,7 @@ static int hyper_dmabuf_query(void *data)
/* if dmabuf can't be found in both lists, return */
if (!(sgt_info && imported_sgt_info)) {
- printk("can't find entry anywhere\n");
+ dev_err(hyper_dmabuf_private.device, "can't find entry anywhere\n");
return -EINVAL;
}
@@ -419,25 +431,25 @@ static long hyper_dmabuf_ioctl(struct file *filp,
func = ioctl->func;
if (unlikely(!func)) {
- printk("no function\n");
+ dev_err(hyper_dmabuf_private.device, "no function\n");
return -EINVAL;
}
kdata = kmalloc(_IOC_SIZE(cmd), GFP_KERNEL);
if (!kdata) {
- printk("no memory\n");
+ dev_err(hyper_dmabuf_private.device, "no memory\n");
return -ENOMEM;
}
if (copy_from_user(kdata, (void __user *)param, _IOC_SIZE(cmd)) != 0) {
- printk("failed to copy from user arguments\n");
+ dev_err(hyper_dmabuf_private.device, "failed to copy from user arguments\n");
return -EFAULT;
}
ret = func(kdata);
if (copy_to_user((void __user *)param, kdata, _IOC_SIZE(cmd)) != 0) {
- printk("failed to copy to user arguments\n");
+ dev_err(hyper_dmabuf_private.device, "failed to copy to user arguments\n");
return -EFAULT;
}
@@ -114,12 +114,12 @@ void cmd_process_work(struct work_struct *work)
imported_sgt_info->nents = req->operands[1];
imported_sgt_info->ref_handle = req->operands[4];
- printk("DMABUF was exported\n");
- printk("\thyper_dmabuf_id %d\n", req->operands[0]);
- printk("\tnents %d\n", req->operands[1]);
- printk("\tfirst offset %d\n", req->operands[2]);
- printk("\tlast len %d\n", req->operands[3]);
- printk("\tgrefid %d\n", req->operands[4]);
+ dev_dbg(hyper_dmabuf_private.device, "DMABUF was exported\n");
+ dev_dbg(hyper_dmabuf_private.device, "\thyper_dmabuf_id %d\n", req->operands[0]);
+ dev_dbg(hyper_dmabuf_private.device, "\tnents %d\n", req->operands[1]);
+ dev_dbg(hyper_dmabuf_private.device, "\tfirst offset %d\n", req->operands[2]);
+ dev_dbg(hyper_dmabuf_private.device, "\tlast len %d\n", req->operands[3]);
+ dev_dbg(hyper_dmabuf_private.device, "\tgrefid %d\n", req->operands[4]);
for (i=0; i<4; i++)
imported_sgt_info->private[i] = req->operands[5+i];
@@ -133,7 +133,8 @@ void cmd_process_work(struct work_struct *work)
sgt_info = hyper_dmabuf_find_exported(req->operands[0]);
if (!sgt_info) {
- printk("critical err: requested sgt_info can't be found %d\n", req->operands[0]);
+ dev_err(hyper_dmabuf_private.device,
+ "critical err: requested sgt_info can't be found %d\n", req->operands[0]);
break;
}
@@ -163,13 +164,13 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
int ret;
if (!req) {
- printk("request is NULL\n");
+ dev_err(hyper_dmabuf_private.device, "request is NULL\n");
return -EINVAL;
}
if ((req->command < HYPER_DMABUF_EXPORT) ||
(req->command > HYPER_DMABUF_OPS_TO_SOURCE)) {
- printk("invalid command\n");
+ dev_err(hyper_dmabuf_private.device, "invalid command\n");
return -EINVAL;
}
@@ -183,7 +184,8 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
/* command : HYPER_DMABUF_NOTIFY_UNEXPORT,
* operands0 : hyper_dmabuf_id
*/
-
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s: processing HYPER_DMABUF_NOTIFY_UNEXPORT\n", __func__);
sgt_info = hyper_dmabuf_find_imported(req->operands[0]);
if (sgt_info) {
@@ -216,6 +218,8 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
* operands0 : hyper_dmabuf_id
* operands1 : enum hyper_dmabuf_ops {....}
*/
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s: HYPER_DMABUF_OPS_TO_SOURCE\n", __func__);
ret = hyper_dmabuf_remote_sync(req->operands[0], req->operands[1]);
if (ret)
req->status = HYPER_DMABUF_REQ_ERROR;
@@ -225,6 +229,8 @@ int hyper_dmabuf_msg_parse(int domid, struct hyper_dmabuf_req *req)
return req->command;
}
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s: putting request to workqueue\n", __func__);
temp_req = kmalloc(sizeof(*temp_req), GFP_KERNEL);
memcpy(temp_req, req, sizeof(*temp_req));
@@ -41,7 +41,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
sgt_info = hyper_dmabuf_find_exported(id);
if (!sgt_info) {
- printk("dmabuf remote sync::can't find exported list\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::can't find exported list\n");
return -EINVAL;
}
@@ -54,7 +55,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
if (!attachl->attach) {
kfree(attachl);
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_ATTACH\n");
return -EINVAL;
}
@@ -63,8 +65,10 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_DETACH:
if (list_empty(&sgt_info->active_attached->list)) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_DETACH\n");
- printk("no more dmabuf attachment left to be detached\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_DETACH\n");
+ dev_err(hyper_dmabuf_private.device,
+ "no more dmabuf attachment left to be detached\n");
return -EINVAL;
}
@@ -78,8 +82,10 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_MAP:
if (list_empty(&sgt_info->active_attached->list)) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
- printk("no more dmabuf attachment left to be detached\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
+ dev_err(hyper_dmabuf_private.device,
+ "no more dmabuf attachment left to be detached\n");
return -EINVAL;
}
@@ -90,7 +96,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
sgtl->sgt = dma_buf_map_attachment(attachl->attach, DMA_BIDIRECTIONAL);
if (!sgtl->sgt) {
kfree(sgtl);
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_MAP\n");
return -EINVAL;
}
list_add(&sgtl->list, &sgt_info->active_sgts->list);
@@ -99,8 +106,10 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_UNMAP:
if (list_empty(&sgt_info->active_sgts->list) ||
list_empty(&sgt_info->active_attached->list)) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_UNMAP\n");
- printk("no more SGT or attachment left to be freed\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_UNMAP\n");
+ dev_err(hyper_dmabuf_private.device,
+ "no more SGT or attachment left to be freed\n");
return -EINVAL;
}
@@ -140,7 +149,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS:
ret = dma_buf_begin_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
if (!ret) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_BEGIN_CPU_ACCESS\n");
ret = -EINVAL;
}
break;
@@ -148,7 +158,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_END_CPU_ACCESS:
ret = dma_buf_end_cpu_access(sgt_info->dma_buf, DMA_BIDIRECTIONAL);
if (!ret) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_END_CPU_ACCESS\n");
ret = -EINVAL;
}
break;
@@ -165,7 +176,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
if (!va_kmapl->vaddr) {
kfree(va_kmapl);
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KMAP(_ATOMIC)\n");
return -EINVAL;
}
list_add(&va_kmapl->list, &sgt_info->va_kmapped->list);
@@ -174,15 +186,18 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_KUNMAP_ATOMIC:
case HYPER_DMABUF_OPS_KUNMAP:
if (list_empty(&sgt_info->va_kmapped->list)) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
- printk("no more dmabuf VA to be freed\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+ dev_err(hyper_dmabuf_private.device,
+ "no more dmabuf VA to be freed\n");
return -EINVAL;
}
va_kmapl = list_first_entry(&sgt_info->va_kmapped->list,
struct kmap_vaddr_list, list);
if (va_kmapl->vaddr == NULL) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_KUNMAP(_ATOMIC)\n");
return -EINVAL;
}
@@ -199,7 +214,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_MMAP:
/* currently not supported: looking for a way to create
* a dummy vma */
- printk("dmabuf remote sync::sychronized mmap is not supported\n");
+ dev_warn(hyper_dmabuf_private.device,
+ "dmabuf remote sync::synchronized mmap is not supported\n");
break;
case HYPER_DMABUF_OPS_VMAP:
@@ -210,7 +226,8 @@ int hyper_dmabuf_remote_sync(int id, int ops)
if (!va_vmapl->vaddr) {
kfree(va_vmapl);
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VMAP\n");
return -EINVAL;
}
list_add(&va_vmapl->list, &sgt_info->va_vmapped->list);
@@ -218,14 +235,17 @@ int hyper_dmabuf_remote_sync(int id, int ops)
case HYPER_DMABUF_OPS_VUNMAP:
if (list_empty(&sgt_info->va_vmapped->list)) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
- printk("no more dmabuf VA to be freed\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
+ dev_err(hyper_dmabuf_private.device,
+ "no more dmabuf VA to be freed\n");
return -EINVAL;
}
va_vmapl = list_first_entry(&sgt_info->va_vmapped->list,
struct vmap_vaddr_list, list);
if (!va_vmapl || va_vmapl->vaddr == NULL) {
- printk("dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
+ dev_err(hyper_dmabuf_private.device,
+ "dmabuf remote sync::error while processing HYPER_DMABUF_OPS_VUNMAP\n");
return -EINVAL;
}
@@ -10,11 +10,14 @@
#include <asm/xen/page.h>
#include "hyper_dmabuf_xen_comm.h"
#include "hyper_dmabuf_xen_comm_list.h"
+#include "../hyper_dmabuf_drv.h"
static int export_req_id = 0;
struct hyper_dmabuf_req req_pending = {0};
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
/* Creates entry in xen store that will keep details of all
* exporter rings created by this domain
*/
@@ -55,14 +58,16 @@ static int xen_comm_expose_ring_details(int domid, int rdomid,
ret = xenbus_printf(XBT_NIL, buf, "grefid", "%d", gref);
if (ret) {
- printk("Failed to write xenbus entry %s: %d\n", buf, ret);
+ dev_err(hyper_dmabuf_private.device,
+ "Failed to write xenbus entry %s: %d\n", buf, ret);
return ret;
}
ret = xenbus_printf(XBT_NIL, buf, "port", "%d", port);
if (ret) {
- printk("Failed to write xenbus entry %s: %d\n", buf, ret);
+ dev_err(hyper_dmabuf_private.device,
+ "Failed to write xenbus entry %s: %d\n", buf, ret);
return ret;
}
@@ -81,14 +86,16 @@ static int xen_comm_get_ring_details(int domid, int rdomid, int *grefid, int *po
ret = xenbus_scanf(XBT_NIL, buf, "grefid", "%d", grefid);
if (ret <= 0) {
- printk("Failed to read xenbus entry %s: %d\n", buf, ret);
+ dev_err(hyper_dmabuf_private.device,
+ "Failed to read xenbus entry %s: %d\n", buf, ret);
return ret;
}
ret = xenbus_scanf(XBT_NIL, buf, "port", "%d", port);
if (ret <= 0) {
- printk("Failed to read xenbus entry %s: %d\n", buf, ret);
+ dev_err(hyper_dmabuf_private.device,
+ "Failed to read xenbus entry %s: %d\n", buf, ret);
return ret;
}
@@ -161,10 +168,12 @@ static void remote_dom_exporter_watch_cb(struct xenbus_watch *watch,
&grefid, &port);
if (ring_info && ret != 0) {
- printk("Remote exporter closed, cleaninup importer\n");
+ dev_info(hyper_dmabuf_private.device,
+ "Remote exporter closed, cleaning up importer\n");
hyper_dmabuf_xen_cleanup_rx_rbuf(rdom);
} else if (!ring_info && ret == 0) {
- printk("Registering importer\n");
+ dev_info(hyper_dmabuf_private.device,
+ "Registering importer\n");
hyper_dmabuf_xen_init_rx_rbuf(rdom);
}
}
@@ -184,7 +193,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
ring_info = xen_comm_find_tx_ring(domid);
if (ring_info) {
- printk("tx ring ch to domid = %d already exist\ngref = %d, port = %d\n",
+ dev_info(hyper_dmabuf_private.device,
+ "tx ring ch to domid = %d already exist\ngref = %d, port = %d\n",
ring_info->rdomain, ring_info->gref_ring, ring_info->port);
return 0;
}
@@ -216,7 +226,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
ret = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
&alloc_unbound);
if (ret != 0) {
- printk("Cannot allocate event channel\n");
+ dev_err(hyper_dmabuf_private.device,
+ "Cannot allocate event channel\n");
return -EINVAL;
}
@@ -226,7 +237,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
NULL, (void*) ring_info);
if (ret < 0) {
- printk("Failed to setup event channel\n");
+ dev_err(hyper_dmabuf_private.device,
+ "Failed to setup event channel\n");
close.port = alloc_unbound.port;
HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
gnttab_end_foreign_access(ring_info->gref_ring, 0,
@@ -238,7 +250,8 @@ int hyper_dmabuf_xen_init_tx_rbuf(int domid)
ring_info->irq = ret;
ring_info->port = alloc_unbound.port;
- printk("%s: allocated eventchannel gref %d port: %d irq: %d\n",
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s: allocated eventchannel gref %d port: %d irq: %d\n",
__func__,
ring_info->gref_ring,
ring_info->port,
@@ -315,7 +328,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
ring_info = xen_comm_find_rx_ring(domid);
if (ring_info) {
- printk("rx ring ch from domid = %d already exist\n", ring_info->sdomain);
+ dev_info(hyper_dmabuf_private.device,
+ "rx ring ch from domid = %d already exist\n", ring_info->sdomain);
return 0;
}
@@ -323,7 +337,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
&rx_gref, &rx_port);
if (ret) {
- printk("Domain %d has not created exporter ring for current domain\n", domid);
+ dev_err(hyper_dmabuf_private.device,
+ "Domain %d has not created exporter ring for current domain\n", domid);
return ret;
}
@@ -346,12 +361,12 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
ret = gnttab_map_refs(map_ops, NULL, &shared_ring, 1);
if (ret < 0) {
- printk("Cannot map ring\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot map ring\n");
return -EINVAL;
}
if (map_ops[0].status) {
- printk("Ring mapping failed\n");
+ dev_err(hyper_dmabuf_private.device, "Ring mapping failed\n");
return -EINVAL;
} else {
ring_info->unmap_op.handle = map_ops[0].handle;
@@ -372,7 +387,8 @@ int hyper_dmabuf_xen_init_rx_rbuf(int domid)
ring_info->irq = ret;
- printk("%s: bound to eventchannel port: %d irq: %d\n", __func__,
+ dev_dbg(hyper_dmabuf_private.device,
+ "%s: bound to eventchannel port: %d irq: %d\n", __func__,
rx_port,
ring_info->irq);
@@ -445,7 +461,8 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
/* find a ring info for the channel */
ring_info = xen_comm_find_tx_ring(domid);
if (!ring_info) {
- printk("Can't find ring info for the channel\n");
+ dev_err(hyper_dmabuf_private.device,
+ "Can't find ring info for the channel\n");
return -EINVAL;
}
@@ -456,7 +473,8 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
new_req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
if (!new_req) {
- printk("NULL REQUEST\n");
+ dev_err(hyper_dmabuf_private.device,
+ "NULL REQUEST\n");
return -EIO;
}
@@ -484,7 +502,7 @@ int hyper_dmabuf_xen_send_req(int domid, struct hyper_dmabuf_req *req, int wait)
}
if (timeout < 0) {
- printk("request timed-out\n");
+ dev_err(hyper_dmabuf_private.device, "request timed-out\n");
return -EBUSY;
}
}
@@ -508,6 +526,8 @@ static irqreturn_t back_ring_isr(int irq, void *info)
ring_info = (struct xen_comm_rx_ring_info *)info;
ring = &ring_info->ring_back;
+ dev_dbg(hyper_dmabuf_private.device, "%s\n", __func__);
+
do {
rc = ring->req_cons;
rp = ring->sring->req_prod;
@@ -558,6 +578,8 @@ static irqreturn_t front_ring_isr(int irq, void *info)
ring_info = (struct xen_comm_tx_ring_info *)info;
ring = &ring_info->ring_front;
+ dev_dbg(hyper_dmabuf_private.device, "%s\n", __func__);
+
do {
more_to_do = 0;
rp = ring->sring->rsp_prod;
@@ -576,16 +598,21 @@ static irqreturn_t front_ring_isr(int irq, void *info)
(struct hyper_dmabuf_req *)resp);
if (ret < 0) {
- printk("getting error while parsing response\n");
+ dev_err(hyper_dmabuf_private.device,
+ "getting error while parsing response\n");
}
} else if (resp->status == HYPER_DMABUF_REQ_PROCESSED) {
/* for debugging dma_buf remote synchronization */
- printk("original request = 0x%x\n", resp->command);
- printk("Just got HYPER_DMABUF_REQ_PROCESSED\n");
+ dev_dbg(hyper_dmabuf_private.device,
+ "original request = 0x%x\n", resp->command);
+ dev_dbg(hyper_dmabuf_private.device,
+ "Just got HYPER_DMABUF_REQ_PROCESSED\n");
} else if (resp->status == HYPER_DMABUF_REQ_ERROR) {
/* for debugging dma_buf remote synchronization */
- printk("original request = 0x%x\n", resp->command);
- printk("Just got HYPER_DMABUF_REQ_ERROR\n");
+ dev_dbg(hyper_dmabuf_private.device,
+ "original request = 0x%x\n", resp->command);
+ dev_dbg(hyper_dmabuf_private.device,
+ "Just got HYPER_DMABUF_REQ_ERROR\n");
}
}
@@ -4,9 +4,12 @@
#include <xen/grant_table.h>
#include <asm/xen/page.h>
#include "hyper_dmabuf_xen_drv.h"
+#include "../hyper_dmabuf_drv.h"
#define REFS_PER_PAGE (PAGE_SIZE/sizeof(grant_ref_t))
+extern struct hyper_dmabuf_private hyper_dmabuf_private;
+
/*
* Creates 2 level page directory structure for referencing shared pages.
* Top level page is a single page that contains up to 1024 refids that
@@ -93,9 +96,11 @@ int hyper_dmabuf_xen_share_pages(struct page **pages, int domid, int nents,
/* Store lvl2_table pages to be freed later */
sh_pages_info->lvl2_table = lvl2_table;
+
/* Store exported pages refid to be unshared later */
sh_pages_info->lvl3_gref = lvl3_gref;
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return lvl3_gref;
}
@@ -104,19 +109,21 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
int n_lvl2_grefs = (nents/REFS_PER_PAGE + ((nents % REFS_PER_PAGE) ? 1: 0));
int i;
+ dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
if (sh_pages_info->lvl3_table == NULL ||
sh_pages_info->lvl2_table == NULL ||
sh_pages_info->lvl3_gref == -1) {
- printk("gref table for hyper_dmabuf already cleaned up\n");
+ dev_warn(hyper_dmabuf_private.device,
+ "gref table for hyper_dmabuf already cleaned up\n");
return 0;
}
/* End foreign access for data pages, but do not free them */
for (i = 0; i < nents; i++) {
if (gnttab_query_foreign_access(sh_pages_info->lvl2_table[i])) {
- printk("refid not shared !!\n");
+ dev_warn(hyper_dmabuf_private.device, "refid not shared !!\n");
}
gnttab_end_foreign_access_ref(sh_pages_info->lvl2_table[i], 0);
gnttab_free_grant_reference(sh_pages_info->lvl2_table[i]);
@@ -125,17 +132,17 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
/* End foreign access for 2nd level addressing pages */
for (i = 0; i < n_lvl2_grefs; i++) {
if (gnttab_query_foreign_access(sh_pages_info->lvl3_table[i])) {
- printk("refid not shared !!\n");
+ dev_warn(hyper_dmabuf_private.device, "refid not shared !!\n");
}
if (!gnttab_end_foreign_access_ref(sh_pages_info->lvl3_table[i], 1)) {
- printk("refid still in use!!!\n");
+ dev_warn(hyper_dmabuf_private.device, "refid still in use!!!\n");
}
gnttab_free_grant_reference(sh_pages_info->lvl3_table[i]);
}
/* End foreign access for top level addressing page */
if (gnttab_query_foreign_access(sh_pages_info->lvl3_gref)) {
- printk("gref not shared !!\n");
+ dev_warn(hyper_dmabuf_private.device, "gref not shared !!\n");
}
gnttab_end_foreign_access_ref(sh_pages_info->lvl3_gref, 1);
@@ -151,6 +158,7 @@ int hyper_dmabuf_xen_unshare_pages(void **refs_info, int nents) {
kfree(sh_pages_info);
sh_pages_info = NULL;
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return 0;
}
@@ -180,6 +188,8 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
int n_lvl2_grefs = (nents / REFS_PER_PAGE) + ((nents_last > 0) ? 1 : 0);
int i, j, k;
+ dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+
sh_pages_info = kmalloc(sizeof(*sh_pages_info), GFP_KERNEL);
*refs_info = (void *) sh_pages_info;
@@ -194,7 +204,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
/* Map top level addressing page */
if (gnttab_alloc_pages(1, &lvl3_table_page)) {
- printk("Cannot allocate pages\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
return NULL;
}
@@ -206,12 +216,12 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
gnttab_set_unmap_op(&lvl3_unmap_ops, (unsigned long)lvl3_table, GNTMAP_host_map | GNTMAP_readonly, -1);
if (gnttab_map_refs(&lvl3_map_ops, NULL, &lvl3_table_page, 1)) {
- printk("\nxen: dom0: HYPERVISOR map grant ref failed");
+ dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed\n");
return NULL;
}
if (lvl3_map_ops.status) {
- printk("\nxen: dom0: HYPERVISOR map grant ref failed status = %d",
+ dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed status = %d\n",
lvl3_map_ops.status);
return NULL;
} else {
@@ -220,7 +230,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
/* Map all second level pages */
if (gnttab_alloc_pages(n_lvl2_grefs, lvl2_table_pages)) {
- printk("Cannot allocate pages\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
return NULL;
}
@@ -233,19 +243,19 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
/* Unmap top level page, as it won't be needed any longer */
if (gnttab_unmap_refs(&lvl3_unmap_ops, NULL, &lvl3_table_page, 1)) {
- printk("\xen: cannot unmap top level page\n");
+ dev_err(hyper_dmabuf_private.device, "xen: cannot unmap top level page\n");
return NULL;
}
if (gnttab_map_refs(lvl2_map_ops, NULL, lvl2_table_pages, n_lvl2_grefs)) {
- printk("\nxen: dom0: HYPERVISOR map grant ref failed");
+ dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed\n");
return NULL;
}
/* Checks if pages were mapped correctly */
for (i = 0; i < n_lvl2_grefs; i++) {
if (lvl2_map_ops[i].status) {
- printk("\nxen: dom0: HYPERVISOR map grant ref failed status = %d",
+ dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed status = %d\n",
lvl2_map_ops[i].status);
return NULL;
} else {
@@ -254,7 +264,7 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
}
if (gnttab_alloc_pages(nents, data_pages)) {
- printk("Cannot allocate pages\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot allocate pages\n");
return NULL;
}
@@ -291,20 +301,20 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
}
if (gnttab_map_refs(data_map_ops, NULL, data_pages, nents)) {
- printk("\nxen: dom0: HYPERVISOR map grant ref failed\n");
+ dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed\n");
return NULL;
}
/* unmapping lvl2 table pages */
if (gnttab_unmap_refs(lvl2_unmap_ops, NULL, lvl2_table_pages,
n_lvl2_grefs)) {
- printk("Cannot unmap 2nd level refs\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot unmap 2nd level refs\n");
return NULL;
}
for (i = 0; i < nents; i++) {
if (data_map_ops[i].status) {
- printk("\nxen: dom0: HYPERVISOR map grant ref failed status = %d\n",
+ dev_err(hyper_dmabuf_private.device, "HYPERVISOR map grant ref failed status = %d\n",
data_map_ops[i].status);
return NULL;
} else {
@@ -323,23 +333,26 @@ struct page ** hyper_dmabuf_xen_map_shared_pages(int lvl3_gref, int domid, int n
kfree(lvl2_unmap_ops);
kfree(data_map_ops);
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return data_pages;
}
int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
struct xen_shared_pages_info *sh_pages_info;
+ dev_dbg(hyper_dmabuf_private.device, "%s entry\n", __func__);
+
sh_pages_info = (struct xen_shared_pages_info *)(*refs_info);
if (sh_pages_info->unmap_ops == NULL ||
sh_pages_info->data_pages == NULL) {
- printk("Imported pages already cleaned up or buffer was not imported yet\n");
+ dev_warn(hyper_dmabuf_private.device, "Imported pages already cleaned up or buffer was not imported yet\n");
return 0;
}
if (gnttab_unmap_refs(sh_pages_info->unmap_ops, NULL,
sh_pages_info->data_pages, nents) ) {
- printk("Cannot unmap data pages\n");
+ dev_err(hyper_dmabuf_private.device, "Cannot unmap data pages\n");
return -EINVAL;
}
@@ -352,5 +365,6 @@ int hyper_dmabuf_xen_unmap_shared_pages(void **refs_info, int nents) {
kfree(sh_pages_info);
sh_pages_info = NULL;
+ dev_dbg(hyper_dmabuf_private.device, "%s exit\n", __func__);
return 0;
}