
[v2,1/6] iommu: Add iommu page fault cookie helpers

Message ID 20231026024930.382898-2-baolu.lu@linux.intel.com (mailing list archive)
State New
Series IOMMUFD: Deliver IO page faults to user space

Commit Message

Baolu Lu Oct. 26, 2023, 2:49 a.m. UTC
Add an xarray in iommu_fault_param as a placeholder for the per-{device,
pasid} fault cookie. The iommufd will use it to store iommufd device
pointers. This allows the iommufd to quickly retrieve the device object ID
for a given {device, pasid} pair on the hot path of I/O page fault
delivery.

Otherwise, the iommufd would have to maintain its own data structures to
map {device, pasid} pairs to object IDs and then look up the object ID on
the critical path, which is not performance friendly.

The iommufd is expected to set the cookie when a fault-capable domain is
attached to the physical device or pasid, and to clear the cookie when the
domain is removed.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 include/linux/iommu.h      |  3 +++
 drivers/iommu/iommu-priv.h | 15 ++++++++++++
 drivers/iommu/io-pgfault.c | 50 ++++++++++++++++++++++++++++++++++++++
 3 files changed, 68 insertions(+)
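
For illustration, here is a minimal sketch (not part of this patch) of how an
iommufd-style consumer could set the cookie at domain attach and clear it at
detach. struct my_idev and the attach/detach wrappers below are hypothetical
stand-ins; only the iopf_pasid_cookie_set() helper comes from this series.

#include <linux/iommu.h>
#include "iommu-priv.h"

/* Hypothetical consumer object; stands in for the iommufd device. */
struct my_idev {
	struct device *dev;
	u32 obj_id;		/* object ID reported back to user space */
};

static int my_attach_fault_domain(struct my_idev *idev, ioasid_t pasid)
{
	void *old;

	/* Remember which user object owns this {device, pasid}. */
	old = iopf_pasid_cookie_set(idev->dev, pasid, idev);
	if (IS_ERR(old))
		return PTR_ERR(old);

	return 0;
}

static void my_detach_fault_domain(struct my_idev *idev, ioasid_t pasid)
{
	/* Clear the cookie when the fault-capable domain is removed. */
	iopf_pasid_cookie_set(idev->dev, pasid, NULL);
}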

Comments

Jason Gunthorpe Dec. 1, 2023, 2:38 p.m. UTC | #1
On Thu, Oct 26, 2023 at 10:49:25AM +0800, Lu Baolu wrote:

> +void *iopf_pasid_cookie_get(struct device *dev, ioasid_t pasid)
> +{
> +	struct iommu_fault_param *iopf_param = iopf_get_dev_fault_param(dev);
> +	void *curr;
> +
> +	if (!iopf_param)
> +		return ERR_PTR(-ENODEV);
> +
> +	xa_lock(&iopf_param->pasid_cookie);
> +	curr = xa_load(&iopf_param->pasid_cookie, pasid);
> +	xa_unlock(&iopf_param->pasid_cookie);

No need for this locking, the caller has to provide some kind of
locking to protect the returned pointer.

I'm not sure how this can work really..

What iommufd wants is to increment the device object refcount under
this xa_lock.

I'm not sure this is the right arrangement: Basically you want to
have a cookie per domain attachment for iopf domains that is forwarded
to the handler.

So maybe this entire thing is not quite right, instead of having a
generic iopf attached to the domain the iopf should be supplied at
domain attach time? Something like:

iommu_domain_attach_iopf(struct iommu_domain *, struct device *,
                         ioasid_t pasid, struct iopf *, void *cookie);

The per-attach cookie would be passed to the iopf function
automatically by the infrastructure.

Detach would have the necessary locking to ensure that no handler is
running across detach

Then the cookie is logically placed in the API and properly protected
with natural locking we already need.

Jason
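
For clarity, a rough sketch of what the suggested per-attachment interface
might look like. Nothing below exists in the kernel; the names simply mirror
the proposal above.

struct iopf {
	/* Called for each fault on this attachment; cookie comes from attach. */
	int (*handler)(struct iommu_fault *fault, void *cookie);
};

int iommu_domain_attach_iopf(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iopf *iopf, void *cookie);

/*
 * The matching detach would serialize against any running handler before
 * returning, so the caller can free the cookie safely afterwards.
 */
void iommu_domain_detach_iopf(struct iommu_domain *domain, struct device *dev,
			      ioasid_t pasid);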
Baolu Lu Dec. 8, 2023, 6:24 a.m. UTC | #2
On 12/1/23 10:38 PM, Jason Gunthorpe wrote:
> On Thu, Oct 26, 2023 at 10:49:25AM +0800, Lu Baolu wrote:
> 
>> +void *iopf_pasid_cookie_get(struct device *dev, ioasid_t pasid)
>> +{
>> +	struct iommu_fault_param *iopf_param = iopf_get_dev_fault_param(dev);
>> +	void *curr;
>> +
>> +	if (!iopf_param)
>> +		return ERR_PTR(-ENODEV);
>> +
>> +	xa_lock(&iopf_param->pasid_cookie);
>> +	curr = xa_load(&iopf_param->pasid_cookie, pasid);
>> +	xa_unlock(&iopf_param->pasid_cookie);
> No need for this locking, the caller has to provide some kind of
> locking to protect the returned pointer.
> 
> I'm not sure how this can work really..
> 
> What iommufd wants is to increment the device object refcount under
> this xa_lock.
> 
> I'm not sure this is the right arrangement: Basically you want to
> have a cookie per domain attachment for iopf domains that is forwarded
> to the handler.
> 
> So maybe this entire thing is not quite right, instead of having a
> generic iopf attached to the domain the iopf should be supplied at
> domain attach time? Something like:
> 
> iommu_domain_attach_iopf(struct iommu_domain *, struct device *,
>                           ioasid_t pasid, struct iopf *, void *cookie);
> 
> The per-attach cookie would be passed to the iopf function
> automatically by the infrastructure.
> 
> Detach would have the necessary locking to ensure that no handler is
> running across detach
> 
> Then the cookie is logically placed in the API and properly protected
> with natural locking we already need.

Great idea! In a subsequent series, we could move the enabling and
disabling of IOPF into this API, thereby eliminating the calls to
iommu_dev_enable/disable_feature(dev, IOMMU_DEV_FEAT_IOPF) from the
device drivers.

Best regards,
baolu
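
As a purely illustrative sketch of that follow-up idea, assuming the
hypothetical attach interface proposed above (__iommu_attach_iopf() is a
made-up internal helper), the infrastructure itself could toggle the IOPF
feature instead of the device drivers:

int iommu_domain_attach_iopf(struct iommu_domain *domain, struct device *dev,
			     ioasid_t pasid, struct iopf *iopf, void *cookie)
{
	int ret;

	/* Enable IOPF here rather than in each device driver. */
	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = __iommu_attach_iopf(domain, dev, pasid, iopf, cookie);
	if (ret)
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}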

Patch

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2ca3a3eda2e4..615d8a5f9dee 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -608,6 +608,8 @@  struct iommu_device {
  * @dev: the device that owns this param
  * @queue: IOPF queue
  * @queue_list: index into queue->devices
+ * @pasid_cookie: per-pasid fault cookie used by fault message consumers.
+ *                This array is self-protected by xa_lock().
  * @partial: faults that are part of a Page Request Group for which the last
  *           request hasn't been submitted yet.
  * @faults: holds the pending faults which needs response
@@ -619,6 +621,7 @@  struct iommu_fault_param {
 	struct device *dev;
 	struct iopf_queue *queue;
 	struct list_head queue_list;
+	struct xarray pasid_cookie;
 
 	struct list_head partial;
 	struct list_head faults;
diff --git a/drivers/iommu/iommu-priv.h b/drivers/iommu/iommu-priv.h
index 2024a2313348..0dc5ad81cbb6 100644
--- a/drivers/iommu/iommu-priv.h
+++ b/drivers/iommu/iommu-priv.h
@@ -27,4 +27,19 @@  void iommu_device_unregister_bus(struct iommu_device *iommu,
 				 struct bus_type *bus,
 				 struct notifier_block *nb);
 
+#ifdef CONFIG_IOMMU_IOPF
+void *iopf_pasid_cookie_set(struct device *dev, ioasid_t pasid, void *cookie);
+void *iopf_pasid_cookie_get(struct device *dev, ioasid_t pasid);
+#else
+static inline void *iopf_pasid_cookie_set(struct device *dev, ioasid_t pasid, void *cookie)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void *iopf_pasid_cookie_get(struct device *dev, ioasid_t pasid)
+{
+	return ERR_PTR(-ENODEV);
+}
+#endif /* CONFIG_IOMMU_IOPF */
+
 #endif /* __LINUX_IOMMU_PRIV_H */
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index b288c73f2b22..6fa029538deb 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -454,6 +454,7 @@  int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
 	mutex_init(&fault_param->lock);
 	INIT_LIST_HEAD(&fault_param->faults);
 	INIT_LIST_HEAD(&fault_param->partial);
+	xa_init(&fault_param->pasid_cookie);
 	fault_param->dev = dev;
 	fault_param->users = 1;
 	list_add(&fault_param->queue_list, &queue->devices);
@@ -575,3 +576,52 @@  void iopf_queue_free(struct iopf_queue *queue)
 	kfree(queue);
 }
 EXPORT_SYMBOL_GPL(iopf_queue_free);
+
+/**
+ * iopf_pasid_cookie_set - Set a fault cookie for per-{device, pasid}
+ * @dev: the device to set the cookie
+ * @pasid: the pasid on this device
+ * @cookie: the opaque data
+ *
+ * Return the old cookie on success, or ERR_PTR on failure.
+ */
+void *iopf_pasid_cookie_set(struct device *dev, ioasid_t pasid, void *cookie)
+{
+	struct iommu_fault_param *iopf_param = iopf_get_dev_fault_param(dev);
+	void *curr;
+
+	if (!iopf_param)
+		return ERR_PTR(-ENODEV);
+
+	curr = xa_store(&iopf_param->pasid_cookie, pasid, cookie, GFP_KERNEL);
+	iopf_put_dev_fault_param(iopf_param);
+
+	return xa_is_err(curr) ? ERR_PTR(xa_err(curr)) : curr;
+}
+EXPORT_SYMBOL_NS_GPL(iopf_pasid_cookie_set, IOMMUFD_INTERNAL);
+
+/**
+ * iopf_pasid_cookie_get - Get the fault cookie for {device, pasid}
+ * @dev: the device where the cookie was set
+ * @pasid: the pasid on this device
+ *
+ * Return the cookie on success, or ERR_PTR on failure. Note that NULL is
+ * also a successful return.
+ */
+void *iopf_pasid_cookie_get(struct device *dev, ioasid_t pasid)
+{
+	struct iommu_fault_param *iopf_param = iopf_get_dev_fault_param(dev);
+	void *curr;
+
+	if (!iopf_param)
+		return ERR_PTR(-ENODEV);
+
+	xa_lock(&iopf_param->pasid_cookie);
+	curr = xa_load(&iopf_param->pasid_cookie, pasid);
+	xa_unlock(&iopf_param->pasid_cookie);
+
+	iopf_put_dev_fault_param(iopf_param);
+
+	return curr;
+}
+EXPORT_SYMBOL_NS_GPL(iopf_pasid_cookie_get, IOMMUFD_INTERNAL);
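
For reference, a minimal sketch (not part of the patch) of how a consumer
might use iopf_pasid_cookie_get() on the fault delivery hot path.
my_deliver_to_user() is hypothetical, and, as the review above notes, the
caller would need its own locking or refcounting to keep the cookie valid.

static int my_fault_handler(struct device *dev, struct iommu_fault *fault)
{
	void *cookie;

	/* Map the faulting {device, pasid} back to the consumer's object. */
	cookie = iopf_pasid_cookie_get(dev, fault->prm.pasid);
	if (IS_ERR(cookie) || !cookie)
		return -ENODEV;

	/*
	 * The cookie is only as stable as the caller's own synchronization;
	 * take a reference or hold a lock before using it past this point.
	 */
	return my_deliver_to_user(cookie, fault);
}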