
[v9,07/10] iommu/vt-d: Allow qi_submit_sync() to return the QI faults

Message ID 20231228150629.13149-8-yi.l.liu@intel.com (mailing list archive)
State New, archived
Series Add iommufd nesting (part 2/2)

Commit Message

Yi Liu Dec. 28, 2023, 3:06 p.m. UTC
From: Lu Baolu <baolu.lu@linux.intel.com>

This allows qi_submit_sync() to return faults to its callers.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Yi Liu <yi.l.liu@intel.com>
---
 drivers/iommu/intel/dmar.c          | 42 ++++++++++++++++++++---------
 drivers/iommu/intel/iommu.h         |  2 +-
 drivers/iommu/intel/irq_remapping.c |  2 +-
 drivers/iommu/intel/pasid.c         |  2 +-
 drivers/iommu/intel/svm.c           |  6 ++---
 5 files changed, 36 insertions(+), 18 deletions(-)
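
To make the new contract concrete, here is a minimal sketch of the two calling
styles (illustrative only, not code from this series): existing callers pass
NULL for the added parameter and keep their current behavior, while a
fault-aware caller passes a u32 pointer and receives the accumulated fault bits.

	static void example_callers(struct intel_iommu *iommu, struct qi_desc *desc)
	{
		u32 fault;

		/*
		 * Existing call sites pass NULL: faults are only logged
		 * via qi_dump_fault() and the retry behavior is unchanged.
		 */
		qi_submit_sync(iommu, desc, 1, 0, NULL);

		/*
		 * A fault-aware caller passes &fault; qi_submit_sync()
		 * zeroes it and accumulates any DMA_FSTS_IQE/ITE/ICE bits
		 * seen while the request was pending.
		 */
		if (qi_submit_sync(iommu, desc, 1, 0, &fault) == -ETIMEDOUT)
			pr_warn("QI fault status: %#x\n", fault);
	}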

Comments

Tian, Kevin Dec. 29, 2023, 2:52 a.m. UTC | #1
> From: Liu, Yi L <yi.l.liu@intel.com>
> Sent: Thursday, December 28, 2023 11:06 PM
> 
> From: Lu Baolu <baolu.lu@linux.intel.com>
> 
> This allows qi_submit_sync() to return faults to its callers.

it might be useful to add a note that the retry logic itself is being discussed
in a separate thread [1]. Here we keep it intact and just make sure there is no
retry for the newly added user domain cache invalidation.

[1] https://lore.kernel.org/all/20231228001646.587653-6-haifeng.zhao@linux.intel.com/

> 
> -		if (qi->desc_status[wait_index] == QI_ABORT)
> +		if (qi->desc_status[wait_index] == QI_ABORT) {
> +			/*
> +			 * If the caller is interested in the error, no need
> +			 * to retry, just return the timeout error to the
> +			 * caller.
> +			 */
> +			if (fsts)
> +				return -ETIMEDOUT;
> +		}
>  			return -EAGAIN;

the indentation should be adjusted, and this changes the original logic, which
returns -EAGAIN only if QI_ABORT is set for the wait_index (as written, the
-EAGAIN is now returned unconditionally).

the simpler form is:

		/* No need to retry if the caller is interested in the timeout error */
		if (qi->desc_status[wait_index] == QI_ABORT)
			return fsts ? -ETIMEDOUT : -EAGAIN;

otherwise,

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Yi Liu Dec. 29, 2023, 6:37 a.m. UTC | #2
On 2023/12/29 10:52, Tian, Kevin wrote:
>> From: Liu, Yi L <yi.l.liu@intel.com>
>> Sent: Thursday, December 28, 2023 11:06 PM
>>
>> From: Lu Baolu <baolu.lu@linux.intel.com>
>>
>> This allows qi_submit_sync() to return faults to its callers.
> 
> it might be useful to add a note that the retry logic itself is being discussed
> in a separate thread [1]. Here we keep it intact and just make sure there is no
> retry for the newly added user domain cache invalidation.
> 
> [1] https://lore.kernel.org/all/20231228001646.587653-6-haifeng.zhao@linux.intel.com/
> 
>>
>> -		if (qi->desc_status[wait_index] == QI_ABORT)
>> +		if (qi->desc_status[wait_index] == QI_ABORT) {
>> +			/*
>> +			 * If the caller is interested in the error, no need
>> +			 * to retry, just return the timeout error to the
>> +			 * caller.
>> +			 */
>> +			if (fsts)
>> +				return -ETIMEDOUT;
>> +		}
>>   			return -EAGAIN;
> 
> the indentation should be adjusted, and this changes the original logic, which
> returns -EAGAIN only if QI_ABORT is set for the wait_index (as written, the
> -EAGAIN is now returned unconditionally).

oops, this is a mistake. :) The '}' should be after 'return -EAGAIN'.

> the simpler form is:
> 
> 		/* No need to retry if the caller is interested in the timeout error */
> 		if (qi->desc_status[wait_index] == QI_ABORT)
> 			return fsts ? -ETIMEDOUT : -EAGAIN;
> 
> otherwise,
> 
> Reviewed-by: Kevin Tian <kevin.tian@intel.com>

sure.

Patch

diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index 23cb80d62a9a..cba5de54e94b 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1267,7 +1267,8 @@  static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
 	       (unsigned long long)desc->qw1);
 }
 
-static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
+static int qi_check_fault(struct intel_iommu *iommu, int index,
+			  int wait_index, u32 *fsts)
 {
 	u32 fault;
 	int head, tail;
@@ -1278,8 +1279,12 @@  static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
 		return -EAGAIN;
 
 	fault = readl(iommu->reg + DMAR_FSTS_REG);
-	if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
+	fault &= DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE;
+	if (fault) {
+		if (fsts)
+			*fsts |= fault;
 		qi_dump_fault(iommu, fault);
+	}
 
 	/*
 	 * If IQE happens, the head points to the descriptor associated
@@ -1324,7 +1329,15 @@  static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
 			head = (head - 2 + QI_LENGTH) % QI_LENGTH;
 		} while (head != tail);
 
-		if (qi->desc_status[wait_index] == QI_ABORT)
+		if (qi->desc_status[wait_index] == QI_ABORT) {
+			/*
+			 * If the caller is interested in the error, no need
+			 * to retry, just return the timeout error to the
+			 * caller.
+			 */
+			if (fsts)
+				return -ETIMEDOUT;
+		}
 			return -EAGAIN;
 	}
 
@@ -1342,9 +1355,11 @@  static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
  * time, a wait descriptor will be appended to each submission to ensure
  * hardware has completed the invalidation before return. Wait descriptors
  * can be part of the submission but it will not be polled for completion.
+ * If callers are interested in the QI faults that occur during the handling
+ * of requests, the faults are saved in @fault.
  */
 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
-		   unsigned int count, unsigned long options)
+		   unsigned int count, unsigned long options, u32 *fault)
 {
 	struct q_inval *qi = iommu->qi;
 	s64 devtlb_start_ktime = 0;
@@ -1422,6 +1437,9 @@  int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
 	 */
 	writel(qi->free_head << shift, iommu->reg + DMAR_IQT_REG);
 
+	if (fault)
+		*fault = 0;
+
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
 		 * We will leave the interrupts disabled, to prevent interrupt
@@ -1430,7 +1448,7 @@  int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
-		rc = qi_check_fault(iommu, index, wait_index);
+		rc = qi_check_fault(iommu, index, wait_index, fault);
 		if (rc)
 			break;
 
@@ -1476,7 +1494,7 @@  void qi_global_iec(struct intel_iommu *iommu)
 	desc.qw3 = 0;
 
 	/* should never fail */
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
@@ -1490,7 +1508,7 @@  void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 	desc.qw2 = 0;
 	desc.qw3 = 0;
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -1514,7 +1532,7 @@  void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.qw2 = 0;
 	desc.qw3 = 0;
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
@@ -1545,7 +1563,7 @@  void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 	desc.qw2 = 0;
 	desc.qw3 = 0;
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 /* PASID-based IOTLB invalidation */
@@ -1586,7 +1604,7 @@  void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
 				QI_EIOTLB_AM(mask);
 	}
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 /* PASID-based device IOTLB Invalidate */
@@ -1639,7 +1657,7 @@  void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
 		desc.qw1 |= QI_DEV_EIOTLB_SIZE;
 	}
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
@@ -1649,7 +1667,7 @@  void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
 
 	desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
 			QI_PC_GRAN(granu) | QI_PC_TYPE;
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 /*
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index ce030c5b5772..c6de958e4f54 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -881,7 +881,7 @@  void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
 			  u32 pasid);
 
 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
-		   unsigned int count, unsigned long options);
+		   unsigned int count, unsigned long options, u32 *fault);
 /*
  * Options used in qi_submit_sync:
  * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 29b9e55dcf26..f834afa3672d 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -153,7 +153,7 @@  static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 	desc.qw2 = 0;
 	desc.qw3 = 0;
 
-	return qi_submit_sync(iommu, &desc, 1, 0);
+	return qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 static int modify_irte(struct irq_2_iommu *irq_iommu,
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 74e8e4c17e81..67f924760ba8 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -467,7 +467,7 @@  pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
 	desc.qw2 = 0;
 	desc.qw3 = 0;
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 static void
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index ac12f76c1212..660d049ad5b6 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -543,7 +543,7 @@  void intel_drain_pasid_prq(struct device *dev, u32 pasid)
 			QI_DEV_IOTLB_PFSID(info->pfsid);
 qi_retry:
 	reinit_completion(&iommu->prq_complete);
-	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
+	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN, NULL);
 	if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
 		wait_for_completion(&iommu->prq_complete);
 		goto qi_retry;
@@ -646,7 +646,7 @@  static void handle_bad_prq_event(struct intel_iommu *iommu,
 		desc.qw3 = 0;
 	}
 
-	qi_submit_sync(iommu, &desc, 1, 0);
+	qi_submit_sync(iommu, &desc, 1, 0, NULL);
 }
 
 static irqreturn_t prq_event_thread(int irq, void *d)
@@ -811,7 +811,7 @@  int intel_svm_page_response(struct device *dev,
 				ktime_to_ns(ktime_get()) - prm->private_data[0]);
 		}
 
-		qi_submit_sync(iommu, &desc, 1, 0);
+		qi_submit_sync(iommu, &desc, 1, 0, NULL);
 	}
 out:
 	return ret;
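
As context for the no-retry behavior discussed in the comments above, here is a
hedged sketch of how a prospective caller such as the user domain cache
invalidation might consume the returned faults; the function name and the error
mapping are assumptions for illustration, not part of this series.

	static int example_flush_user_domain(struct intel_iommu *iommu,
					     struct qi_desc *desc)
	{
		u32 fault;
		int ret;

		/*
		 * A non-NULL @fault makes qi_submit_sync() return
		 * -ETIMEDOUT instead of retrying on QI_ABORT, so an
		 * invalidation timeout can be forwarded to user space.
		 */
		ret = qi_submit_sync(iommu, desc, 1, 0, &fault);
		if (ret)
			return ret;

		/* Treat queue/completion errors as I/O errors (assumed mapping). */
		if (fault & (DMA_FSTS_IQE | DMA_FSTS_ICE))
			return -EIO;

		return 0;
	}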