
[v9,13/14] iommu: Improve iopf_queue_remove_device()

Message ID 20231220012332.168188-14-baolu.lu@linux.intel.com
State New, archived
Series iommu: Prepare to deliver page faults to user space

Commit Message

Baolu Lu Dec. 20, 2023, 1:23 a.m. UTC
Convert iopf_queue_remove_device() to return void instead of an error code,
as the return value is never used. This removal helper is designed to never
fail, so there is no need for error handling.

Ack all outstanding page requests from the device with the response code
IOMMU_PAGE_RESP_INVALID, indicating that the device should not attempt any
retry.

Add comments to this helper explaining the steps involved in removing a
device from the iopf queue and disabling its PRI.

Suggested-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
---
 include/linux/iommu.h       |  5 ++--
 drivers/iommu/intel/iommu.c |  7 +----
 drivers/iommu/io-pgfault.c  | 59 ++++++++++++++++++++++++-------------
 3 files changed, 41 insertions(+), 30 deletions(-)

Comments

Jason Gunthorpe Jan. 5, 2024, 4:25 p.m. UTC | #1
On Wed, Dec 20, 2023 at 09:23:31AM +0800, Lu Baolu wrote:
> -int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
> +void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
>  {
> -	int ret = 0;
>  	struct iopf_fault *iopf, *next;
> +	struct iommu_page_response resp;
>  	struct dev_iommu *param = dev->iommu;
>  	struct iommu_fault_param *fault_param;
> +	const struct iommu_ops *ops = dev_iommu_ops(dev);
>  
>  	mutex_lock(&queue->lock);
>  	mutex_lock(&param->lock);
>  	fault_param = rcu_dereference_check(param->fault_param,
>  					    lockdep_is_held(&param->lock));
> -	if (!fault_param) {
> -		ret = -ENODEV;
> -		goto unlock;
> -	}
> -
> -	if (fault_param->queue != queue) {
> -		ret = -EINVAL;
> -		goto unlock;
> -	}
>  
> -	if (!list_empty(&fault_param->faults)) {
> -		ret = -EBUSY;
> +	if (WARN_ON(!fault_param || fault_param->queue != queue))
>  		goto unlock;
> -	}
> -
> -	list_del(&fault_param->queue_list);
>  
> -	/* Just in case some faults are still stuck */
> +	mutex_lock(&fault_param->lock);
>  	list_for_each_entry_safe(iopf, next, &fault_param->partial, list)
>  		kfree(iopf);
>  
> +	list_for_each_entry_safe(iopf, next, &fault_param->faults, list) {
> +		memset(&resp, 0, sizeof(struct iommu_page_response));
> +		resp.pasid = iopf->fault.prm.pasid;
> +		resp.grpid = iopf->fault.prm.grpid;
> +		resp.code = IOMMU_PAGE_RESP_INVALID;

I would probably move the resp and iopf variables into here:

		struct iopf_fault *iopf = &group->last_fault;
		struct iommu_page_response resp = {
			.pasid = iopf->fault.prm.pasid,
			.grpid = iopf->fault.prm.grpid,
			.code = IOMMU_PAGE_RESP_INVALID
		};

(and call the other one partial_iopf)

But this looks fine either way

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Jason
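
[For reference, the loop with this suggestion applied would read roughly as
follows (a sketch against the v9 structure; the final version may also rename
the partial-list iterator to partial_iopf, as suggested above):]

	list_for_each_entry_safe(iopf, next, &fault_param->faults, list) {
		struct iommu_page_response resp = {
			.pasid = iopf->fault.prm.pasid,
			.grpid = iopf->fault.prm.grpid,
			.code = IOMMU_PAGE_RESP_INVALID,
		};

		if (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)
			resp.flags = IOMMU_PAGE_RESP_PASID_VALID;

		/* Ack the request so the device will not retry, then free it. */
		ops->page_response(dev, iopf, &resp);
		list_del(&iopf->list);
		kfree(iopf);
	}
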
Baolu Lu Jan. 9, 2024, 3:36 a.m. UTC | #2
On 1/6/24 12:25 AM, Jason Gunthorpe wrote:
> On Wed, Dec 20, 2023 at 09:23:31AM +0800, Lu Baolu wrote:
>> [...]
>> +	list_for_each_entry_safe(iopf, next, &fault_param->faults, list) {
>> +		memset(&resp, 0, sizeof(struct iommu_page_response));
>> +		resp.pasid = iopf->fault.prm.pasid;
>> +		resp.grpid = iopf->fault.prm.grpid;
>> +		resp.code = IOMMU_PAGE_RESP_INVALID;
> 
> I would probably move the resp and iopf variables into here:
> 
> 		struct iopf_fault *iopf = &group->last_fault;
> 		struct iommu_page_response resp = {
> 			.pasid = iopf->fault.prm.pasid,
> 			.grpid = iopf->fault.prm.grpid,
> 			.code = IOMMU_PAGE_RESP_INVALID
> 		};
> 
> (and call the other one partial_iopf)

Yours looks better. Done.

> 
> But this looks fine either way
> 
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>

Thank you very much!

Best regards,
baolu

Patch

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index c2416aa79922..d8d173309469 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -1465,7 +1465,7 @@  iommu_sva_domain_alloc(struct device *dev, struct mm_struct *mm)
 
 #ifdef CONFIG_IOMMU_IOPF
 int iopf_queue_add_device(struct iopf_queue *queue, struct device *dev);
-int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev);
 int iopf_queue_flush_dev(struct device *dev);
 struct iopf_queue *iopf_queue_alloc(const char *name);
 void iopf_queue_free(struct iopf_queue *queue);
@@ -1481,10 +1481,9 @@  iopf_queue_add_device(struct iopf_queue *queue, struct device *dev)
 	return -ENODEV;
 }
 
-static inline int
+static inline void
 iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
 {
-	return -ENODEV;
 }
 
 static inline int iopf_queue_flush_dev(struct device *dev)
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 29a12f289e2e..a81a2be9b870 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -4455,12 +4455,7 @@  static int intel_iommu_disable_iopf(struct device *dev)
 	 */
 	pci_disable_pri(to_pci_dev(dev));
 	info->pri_enabled = 0;
-
-	/*
-	 * With PRI disabled and outstanding PRQs drained, removing device
-	 * from iopf queue should never fail.
-	 */
-	WARN_ON(iopf_queue_remove_device(iommu->iopf_queue, dev));
+	iopf_queue_remove_device(iommu->iopf_queue, dev);
 
 	return 0;
 }
diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c
index 3a907bad2fcb..3221f6387beb 100644
--- a/drivers/iommu/io-pgfault.c
+++ b/drivers/iommu/io-pgfault.c
@@ -449,42 +449,61 @@  EXPORT_SYMBOL_GPL(iopf_queue_add_device);
  * @queue: IOPF queue
  * @dev: device to remove
  *
- * Caller makes sure that no more faults are reported for this device.
+ * Remove a device from an iopf queue. It is recommended to follow these
+ * steps when removing a device:
  *
- * Return: 0 on success and <0 on error.
+ * - Disable new PRI reception: Turn off PRI generation in the IOMMU hardware
+ *   and flush any hardware page request queues. This should be done before
+ *   calling into this helper.
+ * - Acknowledge all outstanding PRQs to the device: Respond to all outstanding
+ *   page requests with IOMMU_PAGE_RESP_INVALID, indicating the device should
+ *   not retry. This helper function handles this.
+ * - Disable PRI on the device: After calling this helper, the caller could
+ *   then disable PRI on the device.
+ * - Tear down the iopf infrastructure: Calling iopf_queue_remove_device()
+ *   essentially disassociates the device. The fault_param might still exist,
+ *   but iommu_page_response() will do nothing. The device fault parameter
+ *   reference count has been properly passed from iommu_report_device_fault()
+ *   to the fault handling work, and will eventually be released after
+ *   iommu_page_response().
  */
-int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
+void iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
 {
-	int ret = 0;
 	struct iopf_fault *iopf, *next;
+	struct iommu_page_response resp;
 	struct dev_iommu *param = dev->iommu;
 	struct iommu_fault_param *fault_param;
+	const struct iommu_ops *ops = dev_iommu_ops(dev);
 
 	mutex_lock(&queue->lock);
 	mutex_lock(&param->lock);
 	fault_param = rcu_dereference_check(param->fault_param,
 					    lockdep_is_held(&param->lock));
-	if (!fault_param) {
-		ret = -ENODEV;
-		goto unlock;
-	}
-
-	if (fault_param->queue != queue) {
-		ret = -EINVAL;
-		goto unlock;
-	}
 
-	if (!list_empty(&fault_param->faults)) {
-		ret = -EBUSY;
+	if (WARN_ON(!fault_param || fault_param->queue != queue))
 		goto unlock;
-	}
-
-	list_del(&fault_param->queue_list);
 
-	/* Just in case some faults are still stuck */
+	mutex_lock(&fault_param->lock);
 	list_for_each_entry_safe(iopf, next, &fault_param->partial, list)
 		kfree(iopf);
 
+	list_for_each_entry_safe(iopf, next, &fault_param->faults, list) {
+		memset(&resp, 0, sizeof(struct iommu_page_response));
+		resp.pasid = iopf->fault.prm.pasid;
+		resp.grpid = iopf->fault.prm.grpid;
+		resp.code = IOMMU_PAGE_RESP_INVALID;
+
+		if (iopf->fault.prm.flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID)
+			resp.flags = IOMMU_PAGE_RESP_PASID_VALID;
+
+		ops->page_response(dev, iopf, &resp);
+		list_del(&iopf->list);
+		kfree(iopf);
+	}
+	mutex_unlock(&fault_param->lock);
+
+	list_del(&fault_param->queue_list);
+
 	/* dec the ref owned by iopf_queue_add_device() */
 	rcu_assign_pointer(param->fault_param, NULL);
 	if (refcount_dec_and_test(&fault_param->users))
@@ -492,8 +511,6 @@  int iopf_queue_remove_device(struct iopf_queue *queue, struct device *dev)
 unlock:
 	mutex_unlock(&param->lock);
 	mutex_unlock(&queue->lock);
-
-	return ret;
 }
 EXPORT_SYMBOL_GPL(iopf_queue_remove_device);
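
[For callers, the kernel-doc above implies a teardown sequence along the
following lines. This is a hypothetical sketch: the my_-prefixed names are
illustrative and not part of this series, and a real driver (such as the
Intel one above) folds these steps into its own disable-IOPF path:]

	/*
	 * Hypothetical caller-side teardown following the documented steps.
	 * my_stop_prq() and my_iopf_queue stand in for driver-specific
	 * hardware handling and the driver's queue; they are not real APIs.
	 */
	static void my_disable_iopf(struct device *dev)
	{
		/* 1. Stop PRI generation in the IOMMU and drain its PRQ. */
		my_stop_prq(dev);

		/*
		 * 2. Ack every outstanding PRQ with IOMMU_PAGE_RESP_INVALID
		 * and detach the device from the queue; this helper cannot
		 * fail.
		 */
		iopf_queue_remove_device(my_iopf_queue, dev);

		/* 3. With nothing outstanding, disable PRI on the device. */
		pci_disable_pri(to_pci_dev(dev));
	}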