@@ -482,7 +482,8 @@ static int arm_smmu_master_sva_enable_iopf(struct arm_smmu_master *master)
if (ret)
return ret;
- ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf, dev);
+ ret = iommu_register_device_fault_handler(dev, iommu_queue_iopf,
+ FAULT_REPORT_FLAT, dev);
if (ret) {
iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
return ret;
@@ -1448,10 +1448,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
return -EOPNOTSUPP;
}
- /* Stage-2 is always pinned at the moment */
- if (evt[1] & EVTQ_1_S2)
- return -EFAULT;
-
if (evt[1] & EVTQ_1_RnW)
perm |= IOMMU_FAULT_PERM_READ;
else
@@ -1469,26 +1465,38 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
 			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
 			.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
 			.perm = perm,
-			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
 		};
 		if (ssid_valid) {
 			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
 			flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
 		}
+
+		if (evt[1] & EVTQ_1_S2) {
+			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_L2;
+			flt->prm.addr = FIELD_GET(EVTQ_3_IPA, evt[3]);
+		} else {
+			flt->prm.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]);
+		}
 	} else {
 		flt->type = IOMMU_FAULT_DMA_UNRECOV;
 		flt->event = (struct iommu_fault_unrecoverable) {
 			.reason = reason,
 			.flags = IOMMU_FAULT_UNRECOV_ADDR_VALID,
 			.perm = perm,
-			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
 		};
 		if (ssid_valid) {
 			flt->event.flags |= IOMMU_FAULT_UNRECOV_PASID_VALID;
 			flt->event.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
 		}
+
+		if (evt[1] & EVTQ_1_S2) {
+			flt->event.flags |= IOMMU_FAULT_UNRECOV_L2;
+			flt->event.addr = FIELD_GET(EVTQ_3_IPA, evt[3]);
+		} else {
+			flt->event.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]);
+		}
 	}
mutex_lock(&smmu->streams_mutex);
@@ -1056,6 +1056,40 @@ int iommu_group_unregister_notifier(struct iommu_group *group,
}
EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
+/*
+ * iommu_update_device_fault_handler - Update the device fault handler via flags
+ * @dev: the device
+ * @mask: bit mask of flags to preserve (bits cleared in @mask are cleared)
+ * @set: bits to set
+ *
+ * Update the device fault handler installed by
+ * iommu_register_device_fault_handler().
+ *
+ * Return 0 on success, or an error.
+ */
+int iommu_update_device_fault_handler(struct device *dev, u32 mask, u32 set)
+{
+ struct dev_iommu *param = dev->iommu;
+ int ret = 0;
+
+ if (!param)
+ return -EINVAL;
+
+	mutex_lock(&param->lock);
+
+	if (!param->fault_param) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ param->fault_param->flags = (param->fault_param->flags & mask) | set;
+
+out_unlock:
+	mutex_unlock(&param->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_update_device_fault_handler);
+
/**
* iommu_register_device_fault_handler() - Register a device fault handler
* @dev: the device
@@ -1076,11 +1110,16 @@ EXPORT_SYMBOL_GPL(iommu_group_unregister_notifier);
*/
int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
- void *data)
+ u32 flags, void *data)
{
struct dev_iommu *param = dev->iommu;
int ret = 0;
+	/* FLAT is mutually exclusive with the NESTED flags. */
+	if ((flags & FAULT_REPORT_FLAT) &&
+	    (flags & (FAULT_REPORT_NESTED_L1 | FAULT_REPORT_NESTED_L2)))
+		return -EINVAL;
+
if (!param)
return -EINVAL;
@@ -1099,6 +1138,7 @@ int iommu_register_device_fault_handler(struct device *dev,
goto done_unlock;
}
param->fault_param->handler = handler;
+ param->fault_param->flags = flags;
param->fault_param->data = data;
 	mutex_init(&param->fault_param->lock);
 	INIT_LIST_HEAD(&param->fault_param->faults);
@@ -1177,6 +1217,24 @@ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt)
 		goto done_unlock;
 	}
+	if (!(fparam->flags & FAULT_REPORT_FLAT)) {
+		bool l2 = false;
+
+		if (evt->fault.type == IOMMU_FAULT_PAGE_REQ)
+			l2 = evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_L2;
+		else if (evt->fault.type == IOMMU_FAULT_DMA_UNRECOV)
+			l2 = evt->fault.event.flags & IOMMU_FAULT_UNRECOV_L2;
+
+		if (l2 && !(fparam->flags & FAULT_REPORT_NESTED_L2)) {
+			ret = -EOPNOTSUPP;
+			goto done_unlock;
+		}
+		if (!l2 && !(fparam->flags & FAULT_REPORT_NESTED_L1)) {
+			ret = -EOPNOTSUPP;
+			goto done_unlock;
+		}
+	}
+
if (evt->fault.type == IOMMU_FAULT_PAGE_REQ &&
(evt->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) {
evt_pending = kmemdup(evt, sizeof(struct iommu_fault_event),
@@ -352,12 +352,19 @@ struct iommu_fault_event {
/**
* struct iommu_fault_param - per-device IOMMU fault data
* @handler: Callback function to handle IOMMU faults at device level
+ * @flags: FAULT_REPORT_ indicates the fault reporting capability under
+ * a specific configuration (1st/2nd-level-only(FLAT), or nested).
+ * Nested mode needs to specify which level/stage is concerned.
* @data: handler private data
* @faults: holds the pending faults which needs response
* @lock: protect pending faults list
*/
struct iommu_fault_param {
iommu_dev_fault_handler_t handler;
+#define FAULT_REPORT_FLAT (1 << 0)
+#define FAULT_REPORT_NESTED_L1 (1 << 1)
+#define FAULT_REPORT_NESTED_L2 (1 << 2)
+ u32 flags;
void *data;
struct list_head faults;
struct mutex lock;
@@ -509,9 +516,11 @@ extern int iommu_group_register_notifier(struct iommu_group *group,
struct notifier_block *nb);
extern int iommu_group_unregister_notifier(struct iommu_group *group,
struct notifier_block *nb);
+extern int iommu_update_device_fault_handler(struct device *dev,
+ u32 mask, u32 set);
extern int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
- void *data);
+ u32 flags, void *data);
extern int iommu_unregister_device_fault_handler(struct device *dev);
@@ -873,10 +882,16 @@ static inline int iommu_group_unregister_notifier(struct iommu_group *group,
return 0;
}
+static inline int iommu_update_device_fault_handler(struct device *dev,
+ u32 mask, u32 set)
+{
+ return -ENODEV;
+}
+
static inline
int iommu_register_device_fault_handler(struct device *dev,
iommu_dev_fault_handler_t handler,
- void *data)
+ u32 flags, void *data)
{
return -ENODEV;
}
@@ -71,6 +71,7 @@ struct iommu_fault_unrecoverable {
#define IOMMU_FAULT_UNRECOV_PASID_VALID (1 << 0)
#define IOMMU_FAULT_UNRECOV_ADDR_VALID (1 << 1)
#define IOMMU_FAULT_UNRECOV_FETCH_ADDR_VALID (1 << 2)
+#define IOMMU_FAULT_UNRECOV_L2 (1 << 3)
__u32 flags;
__u32 pasid;
__u32 perm;
@@ -85,6 +86,8 @@ struct iommu_fault_unrecoverable {
* When IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID is set, the page response
* must have the same PASID value as the page request. When it is clear,
* the page response should not have a PASID.
+ * If IOMMU_FAULT_PAGE_REQUEST_L2 is set, the fault occurred at the
+ * second level/stage, otherwise, occurred at the first level.
* @pasid: Process Address Space ID
* @grpid: Page Request Group Index
* @perm: requested page permissions (IOMMU_FAULT_PERM_* values)
@@ -96,6 +99,7 @@ struct iommu_fault_page_request {
#define IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE (1 << 1)
#define IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA (1 << 2)
#define IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID (1 << 3)
+#define IOMMU_FAULT_PAGE_REQUEST_L2 (1 << 4)
__u32 flags;
__u32 pasid;
__u32 grpid;
This patch follows the discussion here: https://lore.kernel.org/linux-acpi/YAaxjmJW+ZMvrhac@myrica/ Besides SVA/vSVA, such as VFIO may also enable (2nd level) IOPF to remove pinning restriction. In order to better support more scenarios of using device faults, we extend iommu_register_fault_handler() with flags and introduce FAULT_REPORT_ to describe the device fault reporting capability under a specific configuration. Note that we don't further distinguish recoverable and unrecoverable faults by flags in the fault reporting cap, having PAGE_FAULT_REPORT_ + UNRECOV_FAULT_REPORT_ seems not a clean way. In addition, still take VFIO as an example, in nested mode, the 1st level and 2nd level fault reporting may be configured separately and currently each device can only register one iommu dev fault handler, so we add a handler update interface for this. Signed-off-by: Shenming Lu <lushenming@huawei.com> --- .../iommu/arm/arm-smmu-v3/arm-smmu-v3-sva.c | 3 +- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 18 ++++-- drivers/iommu/iommu.c | 56 ++++++++++++++++++- include/linux/iommu.h | 19 ++++++- include/uapi/linux/iommu.h | 4 ++ 5 files changed, 90 insertions(+), 10 deletions(-)