
[RFC,v1,11/11] iommu/virtio: Add support to send page response

Message ID 20210423095147.27922-12-vivek.gautam@arm.com (mailing list archive)
State New, archived
Series iommu/virtio: vSVA support with Arm

Commit Message

Vivek Kumar Gautam April 23, 2021, 9:51 a.m. UTC
Add the page_response iommu ops callback to send a page response to the
device that generated the I/O page fault. (An illustrative sketch of the
caller side follows the diffstat below.)

Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
---
 drivers/iommu/virtio-iommu.c | 47 ++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)
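
For context before the patch body: the callback added here is what the IOMMU
core's iommu_page_response() ends up calling for a virtio-iommu endpoint. The
sketch below shows, roughly, the consumer side that produces such a response.
It is an illustration, not part of the patch: my_fault_handler() and my_dev are
hypothetical names, the actual fault-servicing step is elided, and only
iommu_register_device_fault_handler() and iommu_page_response() are the real
kernel APIs of this era that the series builds on.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical device-driver fault handler: once a recoverable I/O page
 * fault has been serviced, report the outcome through the IOMMU core,
 * which in turn invokes ops->page_response() -- viommu_page_response()
 * in this patch -- for the endpoint that faulted.
 */
static int my_fault_handler(struct iommu_fault *fault, void *data)
{
	struct device *dev = data;
	struct iommu_page_response resp = {
		.version = IOMMU_PAGE_RESP_VERSION_1,
		.code	 = IOMMU_PAGE_RESP_SUCCESS,
	};

	if (fault->type != IOMMU_FAULT_PAGE_REQ)
		return -EOPNOTSUPP;

	/* ... service the fault (make the page resident) before replying ... */

	resp.grpid = fault->prm.grpid;
	if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) {
		resp.flags |= IOMMU_PAGE_RESP_PASID_VALID;
		resp.pasid  = fault->prm.pasid;
	}

	/* Lands in viommu_page_response() when dev sits behind a virtio-iommu. */
	return iommu_page_response(dev, &resp);
}

/* Registration at probe time (error handling omitted):
 *	iommu_register_device_fault_handler(my_dev, my_fault_handler, my_dev);
 */

In viommu_page_response() the response is then packed into a
VIRTIO_IOMMU_T_PAGE_RESP request and sent to the host synchronously via
viommu_send_req_sync().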

Patch

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 08f1294baeab..6d62d9eae452 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -1778,6 +1778,52 @@  static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
 	return iommu_fwspec_add_ids(dev, args->args, 1);
 }
 
+static int viommu_page_response(struct device *dev,
+				struct iommu_fault_event *evt,
+				struct iommu_page_response *resp)
+{
+	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+	struct viommu_domain *vdomain = to_viommu_domain(domain);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
+	struct viommu_dev *viommu = vdev->viommu;
+	bool pasid_valid = resp->flags & IOMMU_PAGE_RESP_PASID_VALID;
+	struct virtio_iommu_req_page_resp req = {
+		.head.type	= VIRTIO_IOMMU_T_PAGE_RESP,
+		.domain		= cpu_to_le32(vdomain->id),
+		.endpoint	= cpu_to_le32(fwspec->ids[0]),
+	};
+
+	if (vdev->pri_supported) {
+		bool needs_pasid = (evt->fault.prm.flags &
+				    IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID);
+
+		req.pasid_valid	= needs_pasid && pasid_valid;
+		req.flags	= cpu_to_le32((needs_pasid && pasid_valid) ?
+				   VIRTIO_IOMMU_PAGE_RESP_PASID_VALID : 0);
+		req.pasid	= cpu_to_le32(resp->pasid);
+		req.grpid	= cpu_to_le32(resp->grpid);
+
+		switch (resp->code) {
+		case IOMMU_PAGE_RESP_FAILURE:
+			req.resp_code = cpu_to_le16(VIRTIO_IOMMU_PAGE_RESP_FAILURE);
+			break;
+		case IOMMU_PAGE_RESP_INVALID:
+			req.resp_code = cpu_to_le16(VIRTIO_IOMMU_PAGE_RESP_INVALID);
+			break;
+		case IOMMU_PAGE_RESP_SUCCESS:
+			req.resp_code = cpu_to_le16(VIRTIO_IOMMU_PAGE_RESP_SUCCESS);
+			break;
+		default:
+			return -EINVAL;
+		}
+	} else {
+		return -ENODEV;
+	}
+
+	return viommu_send_req_sync(viommu, &req, sizeof(req));
+}
+
 static u32 viommu_sva_get_pasid(struct iommu_sva *handle)
 {
 	struct viommu_sva_bond *bond = sva_to_viommu_bond(handle);
@@ -2155,6 +2201,7 @@  static struct iommu_ops viommu_ops = {
 	.sva_bind		= viommu_sva_bind,
 	.sva_unbind		= viommu_sva_unbind,
 	.sva_get_pasid		= viommu_sva_get_pasid,
+	.page_response		= viommu_page_response,
 };
 
 static int viommu_init_vqs(struct viommu_dev *viommu)