@@ -40,6 +40,7 @@ struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
struct iommu_dma_cookie;
+struct iopf_queue;
#define IOMMU_FAULT_PERM_READ (1 << 0) /* read */
#define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */
@@ -479,21 +480,31 @@ struct iommu_fault_event {
* struct iommu_fault_param - per-device IOMMU fault data
* @handler: Callback function to handle IOMMU faults at device level
* @data: handler private data
- * @faults: holds the pending faults which needs response
* @lock: protect pending faults list
+ * @dev: the device that owns this param
+ * @queue: IOPF queue
+ * @queue_list: index into queue->devices
+ * @partial: faults that are part of a Page Request Group for which the last
+ * request hasn't been submitted yet.
+ * @faults: holds the pending faults which need a response
*/
struct iommu_fault_param {
iommu_dev_fault_handler_t handler;
void *data;
+ struct mutex lock;
+
+ struct device *dev;
+ struct iopf_queue *queue;
+ struct list_head queue_list;
+
+ struct list_head partial;
struct list_head faults;
- struct mutex lock;
};
/**
* struct dev_iommu - Collection of per-device IOMMU data
*
* @fault_param: IOMMU detected device fault reporting data
- * @iopf_param: I/O Page Fault queue and data
* @fwspec: IOMMU fwspec data
* @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
@@ -508,7 +519,6 @@ struct iommu_fault_param {
struct dev_iommu {
struct mutex lock;
struct iommu_fault_param *fault_param;
- struct iopf_device_param *iopf_param;
struct iommu_fwspec *fwspec;
struct iommu_device *iommu_dev;
void *priv;
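
With iopf_device_param folded into iommu_fault_param, every per-device fault-handling field now hangs off a single pointer, dev->iommu->fault_param, still protected by dev->iommu->lock. A minimal sketch of how a core path reaches the IOPF state after this change (illustration only, not from the patch):

    struct iommu_fault_param *fparam;

    mutex_lock(&dev->iommu->lock);
    fparam = dev->iommu->fault_param;   /* NULL until iopf_queue_add_device() */
    if (fparam && fparam->queue)
        pr_debug("%s is attached to an IOPF queue\n", dev_name(dev));
    mutex_unlock(&dev->iommu->lock);
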
@@ -25,21 +25,6 @@ struct iopf_queue {
struct mutex lock;
};
-/**
- * struct iopf_device_param - IO Page Fault data attached to a device
- * @dev: the device that owns this param
- * @queue: IOPF queue
- * @queue_list: index into queue->devices
- * @partial: faults that are part of a Page Request Group for which the last
- * request hasn't been submitted yet.
- */
-struct iopf_device_param {
- struct device *dev;
- struct iopf_queue *queue;
- struct list_head queue_list;
- struct list_head partial;
-};
-
struct iopf_fault {
struct iommu_fault fault;
struct list_head list;
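
struct iopf_fault simply pairs an iommu_fault with a list head, so the same entry can sit on the per-device partial list and later move onto a group's fault list. For reference, the group structure that collects these entries, defined earlier in this file and untouched by this patch, looks roughly like:

    struct iopf_group {
        struct iopf_fault last_fault;
        struct list_head faults;
        struct work_struct work;
        struct device *dev;
    };
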
@@ -144,7 +129,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
int ret;
struct iopf_group *group;
struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param *iopf_param;
struct device *dev = cookie;
struct dev_iommu *param = dev->iommu;
@@ -159,7 +144,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, void *cookie)
* As long as we're holding param->lock, the queue can't be unlinked
* from the device and therefore cannot disappear.
*/
- iopf_param = param->iopf_param;
+ iopf_param = param->fault_param;
if (!iopf_param)
return -ENODEV;
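
The rest of iommu_queue_iopf() between these hunks is where the @partial list documented above earns its keep. Roughly, with locking and error unwinding elided, it does the following once iopf_param has been looked up:

    if (!(fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
        /* Not the last request of the group: postpone it. */
        iopf = kzalloc(sizeof(*iopf), GFP_KERNEL);
        if (!iopf)
            return -ENOMEM;
        iopf->fault = *fault;
        list_add(&iopf->list, &iopf_param->partial);
        return 0;
    }

    /* Last request: gather the group's partial faults, hand off to the wq. */
    group = kzalloc(sizeof(*group), GFP_KERNEL);
    if (!group)
        return -ENOMEM;
    group->dev = dev;
    group->last_fault.fault = *fault;
    INIT_LIST_HEAD(&group->faults);
    INIT_WORK(&group->work, iopf_handler);  /* iopf_handler: this file's worker */
    list_add(&group->last_fault.list, &group->faults);

    list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) {
        if (iopf->fault.prm.grpid == fault->prm.grpid)
            list_move(&iopf->list, &group->faults);
    }

    queue_work(iopf_param->queue->wq, &group->work);
    return 0;
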
@@ -229,14 +214,14 @@ EXPORT_SYMBOL_GPL(iommu_queue_iopf);
int iopf_queue_flush_dev(struct device *dev)
{
int ret = 0;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param *iopf_param;
struct dev_iommu *param = dev->iommu;
if (!param)
return -ENODEV;
mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
+ iopf_param = param->fault_param;
if (iopf_param)
flush_workqueue(iopf_param->queue->wq);
else
@@ -260,7 +245,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_flush_dev);
int iopf_queue_discard_partial(struct iopf_queue *queue)
{
struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
+ struct iommu_fault_param *iopf_param;
if (!queue)
return -EINVAL;
@@ -287,34 +272,38 @@ EXPORT_SYMBOL_GPL(iopf_queue_discard_partial);
*/
int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EBUSY;
- struct iopf_device_param *iopf_param;
+ int ret = 0;
struct dev_iommu *param = dev->iommu;
-
- if (!param)
- return -ENODEV;
-
- iopf_param = kzalloc(sizeof(*iopf_param), GFP_KERNEL);
- if (!iopf_param)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&iopf_param->partial);
- iopf_param->queue = queue;
- iopf_param->dev = dev;
+ struct iommu_fault_param *fault_param;
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- if (!param->iopf_param) {
- list_add(&iopf_param->queue_list, &queue->devices);
- param->iopf_param = iopf_param;
- ret = 0;
+ if (param->fault_param) {
+ ret = -EBUSY;
+ goto done_unlock;
}
+
+ get_device(dev);
+ fault_param = kzalloc(sizeof(*fault_param), GFP_KERNEL);
+ if (!fault_param) {
+ put_device(dev);
+ ret = -ENOMEM;
+ goto done_unlock;
+ }
+
+ mutex_init(&fault_param->lock);
+ INIT_LIST_HEAD(&fault_param->faults);
+ INIT_LIST_HEAD(&fault_param->partial);
+ fault_param->dev = dev;
+ list_add(&fault_param->queue_list, &queue->devices);
+ fault_param->queue = queue;
+
+ param->fault_param = fault_param;
+
+done_unlock:
mutex_unlock(&param->lock);
mutex_unlock(&queue->lock);
- if (ret)
- kfree(iopf_param);
-
return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_add_device);
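
Because iopf_queue_add_device() now allocates the iommu_fault_param, initializes its lists, and takes a device reference, a driver's setup sequence collapses to something like the following (hypothetical probe-path sketch; the names q, err_remove and err_free are illustrative):

    struct iopf_queue *q;
    int ret;

    q = iopf_queue_alloc(dev_name(dev));
    if (!q)
        return -ENOMEM;

    ret = iopf_queue_add_device(q, dev);    /* allocates dev->iommu->fault_param */
    if (ret)
        goto err_free;

    /* Only installs the callback now; allocation already happened above. */
    ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
    if (ret)
        goto err_remove;

    return 0;

    err_remove:
        iopf_queue_remove_device(q, dev);
    err_free:
        iopf_queue_free(q);
        return ret;

This mirrors how the SMMUv3 driver wires up recoverable faults: the handler it registers is iommu_queue_iopf itself, with the device as the cookie.
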
@@ -330,34 +319,42 @@ EXPORT_SYMBOL_GPL(iopf_queue_add_device);
*/
int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
{
- int ret = -EINVAL;
+ int ret = 0;
struct iopf_fault *iopf, *next;
- struct iopf_device_param *iopf_param;
struct dev_iommu *param = dev->iommu;
-
- if (!param || !queue)
- return -EINVAL;
+ struct iommu_fault_param *fault_param = param->fault_param;
mutex_lock(&queue->lock);
mutex_lock(&param->lock);
- iopf_param = param->iopf_param;
- if (iopf_param && iopf_param->queue == queue) {
- list_del(&iopf_param->queue_list);
- param->iopf_param = NULL;
- ret = 0;
+ if (!fault_param) {
+ ret = -ENODEV;
+ goto unlock;
}
- mutex_unlock(&param->lock);
- mutex_unlock(&queue->lock);
- if (ret)
- return ret;
+
+ if (fault_param->queue != queue) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ if (!list_empty(&fault_param->faults)) {
+ ret = -EBUSY;
+ goto unlock;
+ }
+
+ list_del(&fault_param->queue_list);
/* Just in case some faults are still stuck */
- list_for_each_entry_safe(iopf, next, &iopf_param->partial, list)
+ list_for_each_entry_safe(iopf, next, &fault_param->partial, list)
kfree(iopf);
- kfree(iopf_param);
+ param->fault_param = NULL;
+ kfree(fault_param);
+ put_device(dev);
+unlock:
+ mutex_unlock(&param->lock);
+ mutex_unlock(&queue->lock);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
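
The pending-fault check that used to live in iommu_unregister_device_fault_handler() now gates removal instead, and removal frees the structure and drops the device reference taken at add time. A plausible teardown order under the new scheme (sketch, assuming the setup above):

    iommu_unregister_device_fault_handler(dev);     /* just clears handler/data */
    iopf_queue_flush_dev(dev);                      /* drain outstanding work */
    WARN_ON(iopf_queue_remove_device(q, dev));      /* -EBUSY if faults pend */
    iopf_queue_free(q);
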
@@ -403,7 +400,7 @@ EXPORT_SYMBOL_GPL(iopf_queue_alloc);
*/
void iopf_queue_free(struct iopf_queue *queue)
{
- struct iopf_device_param *iopf_param, *next;
+ struct iommu_fault_param *iopf_param, *next;
if (!queue)
return;
@@ -1325,27 +1325,18 @@ int iommu_register_device_fault_handler(struct device *dev,
struct dev_iommu *param = dev->iommu;
int ret = 0;
- if (!param)
+ if (!param || !param->fault_param)
return -EINVAL;
mutex_lock(&param->lock);
/* Only allow one fault handler registered for each device */
- if (param->fault_param) {
+ if (param->fault_param->handler) {
ret = -EBUSY;
goto done_unlock;
}
- get_device(dev);
- param->fault_param = kzalloc(sizeof(*param->fault_param), GFP_KERNEL);
- if (!param->fault_param) {
- put_device(dev);
- ret = -ENOMEM;
- goto done_unlock;
- }
param->fault_param->handler = handler;
param->fault_param->data = data;
- mutex_init(&param->fault_param->lock);
- INIT_LIST_HEAD(&param->fault_param->faults);
done_unlock:
mutex_unlock(&param->lock);
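
Registration is now allocation-free: it only installs the callback in the fault_param that iopf_queue_add_device() created, which is why the entry check tightened from !param to !param || !param->fault_param. A caller that skips the queue step fails up front (my_handler and my_data are hypothetical names):

    /* -EINVAL unless iopf_queue_add_device() has run for this device. */
    ret = iommu_register_device_fault_handler(dev, my_handler, my_data);
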
@@ -1366,29 +1357,16 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler);
int iommu_unregister_device_fault_handler(struct device *dev)
{
struct dev_iommu *param = dev->iommu;
- int ret = 0;
- if (!param)
+ if (!param || !param->fault_param)
return -EINVAL;
mutex_lock(&param->lock);
-
- if (!param->fault_param)
- goto unlock;
-
- /* we cannot unregister handler if there are pending faults */
- if (!list_empty(&param->fault_param->faults)) {
- ret = -EBUSY;
- goto unlock;
- }
-
- kfree(param->fault_param);
- param->fault_param = NULL;
- put_device(dev);
-unlock:
+ param->fault_param->handler = NULL;
+ param->fault_param->data = NULL;
mutex_unlock(&param->lock);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler);
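
Unregistration is symmetric: it clears the callback and data but no longer frees anything, since the fault_param now lives until iopf_queue_remove_device(). With the -EBUSY path moved into removal, it succeeds whenever a fault_param exists:

    /* No pending-fault check here anymore; cannot return -EBUSY. */
    iommu_unregister_device_fault_handler(dev);
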