@@ -388,17 +388,18 @@ static int flush_context_reg(
return 0;
}
-static int iommu_flush_context_global(
- struct iommu *iommu, int flush_non_present_entry)
+static int __must_check iommu_flush_context_global(struct iommu *iommu,
+ int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
flush_non_present_entry);
}
-static int iommu_flush_context_device(
- struct iommu *iommu, u16 did, u16 source_id,
- u8 function_mask, int flush_non_present_entry)
+static int __must_check iommu_flush_context_device(struct iommu *iommu,
+ u16 did, u16 source_id,
+ u8 function_mask,
+ int flush_non_present_entry)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
return flush->context(iommu, did, source_id, function_mask,
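
Note: __must_check is Xen's wrapper around GCC's warn_unused_result
attribute (defined in xen/include/xen/compiler.h), so every caller that
silently drops one of these return values now gets a compile-time
warning. A minimal standalone sketch of the effect, using a hypothetical
flush_something() helper:

    #define __must_check __attribute__((__warn_unused_result__))

    static int __must_check flush_something(void)
    {
        return 0; /* 0: success, <0: error, >0: flush the write buffer */
    }

    void caller(void)
    {
        flush_something();        /* compiler warns: result ignored */

        if ( flush_something() )  /* fine: the result is consumed */
        {
            /* handle the error or write-buffer request */
        }
    }
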
@@ -473,8 +474,9 @@ static int flush_iotlb_reg(void *_iommu, u16 did,
return 0;
}
-static int iommu_flush_iotlb_global(struct iommu *iommu,
- int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_global(struct iommu *iommu,
+ int flush_non_present_entry,
+ int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
@@ -491,8 +493,9 @@ static int iommu_flush_iotlb_global(struct iommu *iommu,
return status;
}
-static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
- int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
+ int flush_non_present_entry,
+ int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
@@ -509,9 +512,10 @@ static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
return status;
}
-static int iommu_flush_iotlb_psi(
- struct iommu *iommu, u16 did, u64 addr, unsigned int order,
- int flush_non_present_entry, int flush_dev_iotlb)
+static int __must_check iommu_flush_iotlb_psi(struct iommu *iommu, u16 did,
+ u64 addr, unsigned int order,
+ int flush_non_present_entry,
+ int flush_dev_iotlb)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
@@ -546,17 +550,37 @@ static int __must_check iommu_flush_all(void)
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
int flush_dev_iotlb;
+ int rc = 0;
flush_all_cache();
for_each_drhd_unit ( drhd )
{
+ int context_rc, iotlb_rc;
+
iommu = drhd->iommu;
- iommu_flush_context_global(iommu, 0);
+ context_rc = iommu_flush_context_global(iommu, 0);
flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
- iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+ iotlb_rc = iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+
+ /*
+ * The current logic for return values:
+ * - positive: invoke iommu_flush_write_buffer to flush the cache.
+ * - zero: success.
+ * - negative: failure; continue to flush the IOMMU IOTLB on a
+ * best-effort basis.
+ */
+ if ( context_rc > 0 || iotlb_rc > 0 )
+ iommu_flush_write_buffer(iommu);
+ if ( context_rc >= 0 )
+ context_rc = iotlb_rc;
+ if ( rc >= 0 )
+ rc = context_rc;
}
- return 0;
+ if ( rc > 0 )
+ rc = 0;
+
+ return rc;
}
static int __must_check iommu_flush_iotlb(struct domain *d,
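
The bookkeeping in iommu_flush_all()'s loop above follows a single rule:
a positive code only means "perform a write-buffer flush", the first
negative code seen anywhere must survive to the caller, and a leftover
positive value is folded to zero before returning. A condensed,
illustrative sketch of that rule as a hypothetical fold() helper (not
part of the patch):

    /* Merge one IOMMU's two flush results into the running return code. */
    static int fold(int rc, int context_rc, int iotlb_rc)
    {
        if ( context_rc >= 0 )      /* context flush did not fail, so ... */
            context_rc = iotlb_rc;  /* ... the IOTLB result decides */
        if ( rc >= 0 )              /* keep the first error across units */
            rc = context_rc;
        return rc;
    }
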
@@ -569,6 +593,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
struct iommu *iommu;
int flush_dev_iotlb;
int iommu_domid;
+ int rc = 0;
/*
* No need pcideves_lock here because we have flush
@@ -587,21 +612,23 @@ static int __must_check iommu_flush_iotlb(struct domain *d,
continue;
if ( page_count != 1 || gfn == INVALID_GFN )
- {
- if ( iommu_flush_iotlb_dsi(iommu, iommu_domid,
- 0, flush_dev_iotlb) )
- iommu_flush_write_buffer(iommu);
- }
+ rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
+ 0, flush_dev_iotlb);
else
+ rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
+ (paddr_t)gfn << PAGE_SHIFT_4K,
+ PAGE_ORDER_4K,
+ !dma_old_pte_present,
+ flush_dev_iotlb);
+
+ if ( rc > 0 )
{
- if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
- (paddr_t)gfn << PAGE_SHIFT_4K, PAGE_ORDER_4K,
- !dma_old_pte_present, flush_dev_iotlb) )
- iommu_flush_write_buffer(iommu);
+ iommu_flush_write_buffer(iommu);
+ rc = 0;
}
}
- return 0;
+ return rc;
}
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
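
The branch in iommu_flush_iotlb() above picks the flush granularity: a
single known frame gets a page-selective invalidation (PSI), while
batched updates or an invalid gfn fall back to a domain-selective
invalidation (DSI). A hedged sketch of the address computation PSI is
handed (PAGE_SHIFT_4K is 12 in the VT-d code, so gfn 0x1234 becomes byte
address 0x1234000):

    #include <stdint.h>

    typedef uint64_t paddr_t;   /* stand-in for Xen's paddr_t */
    #define PAGE_SHIFT_4K 12

    /* Convert a 4KiB guest frame number into the byte address that the
     * page-selective IOTLB invalidation expects. */
    static paddr_t psi_addr(unsigned long gfn)
    {
        return (paddr_t)gfn << PAGE_SHIFT_4K;
    }
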
@@ -1290,7 +1317,8 @@ int domain_context_mapping_one(
struct context_entry *context, *context_entries;
u64 maddr, pgd_maddr;
u16 seg = iommu->intel->drhd->segment;
- int agaw;
+ int agaw, rc, ret;
+ int flush_dev_iotlb;
ASSERT(pcidevs_locked());
spin_lock(&iommu->lock);
@@ -1404,14 +1432,24 @@ int domain_context_mapping_one(
spin_unlock(&iommu->lock);
/* Context entry was previously non-present (with domid 0). */
- if ( iommu_flush_context_device(iommu, 0, (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT, 1) )
+ rc = iommu_flush_context_device(iommu, 0, PCI_BDF2(bus, devfn),
+ DMA_CCMD_MASK_NOBIT, 1);
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ ret = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+
+ /*
+ * The current logic for return values:
+ * - positive: invoke iommu_flush_write_buffer to flush the cache.
+ * - zero: success.
+ * - negative: failure; continue to flush the IOMMU IOTLB on a
+ * best-effort basis.
+ */
+ if ( rc > 0 || ret > 0 )
iommu_flush_write_buffer(iommu);
- else
- {
- int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
- iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
- }
+ if ( rc >= 0 )
+ rc = ret;
+ if ( rc > 0 )
+ rc = 0;
set_bit(iommu->index, &hd->arch.iommu_bitmap);
@@ -1420,7 +1458,7 @@ int domain_context_mapping_one(
if ( !seg )
me_wifi_quirk(domain, bus, devfn, MAP_ME_PHANTOM_FUNC);
- return 0;
+ return rc;
}
static int domain_context_mapping(
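
The PCI_BDF2() substitution above is purely cosmetic: it packs the same
16-bit source-id (bus in the high byte, devfn in the low byte) that the
open-coded shift produced. A sketch, assuming the definition from
xen/include/xen/pci.h:

    /* bus in bits 15:8; devfn (device 7:3, function 2:0) in bits 7:0 */
    #define PCI_BDF2(b, df) ((((b) & 0xff) << 8) | ((df) & 0xff))
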
@@ -1514,7 +1552,8 @@ int domain_context_unmap_one(
{
struct context_entry *context, *context_entries;
u64 maddr;
- int iommu_domid;
+ int iommu_domid, rc, ret;
+ int flush_dev_iotlb;
ASSERT(pcidevs_locked());
spin_lock(&iommu->lock);
@@ -1542,15 +1581,26 @@ int domain_context_unmap_one(
return -EINVAL;
}
- if ( iommu_flush_context_device(iommu, iommu_domid,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT, 0) )
+ rc = iommu_flush_context_device(iommu, iommu_domid,
+ PCI_BDF2(bus, devfn),
+ DMA_CCMD_MASK_NOBIT, 0);
+
+ flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
+ ret = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
+
+ /*
+ * The current logic for return values:
+ * - positive: invoke iommu_flush_write_buffer to flush the cache.
+ * - zero: success.
+ * - negative: failure; continue to flush the IOMMU IOTLB on a
+ * best-effort basis.
+ */
+ if ( rc > 0 || ret > 0 )
iommu_flush_write_buffer(iommu);
- else
- {
- int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
- iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
- }
+ if ( rc >= 0 )
+ rc = ret;
+ if ( rc > 0 )
+ rc = 0;
spin_unlock(&iommu->lock);
unmap_vtd_domain_page(context_entries);
@@ -1558,7 +1608,7 @@ int domain_context_unmap_one(
if ( !iommu->intel->drhd->segment )
me_wifi_quirk(domain, bus, devfn, UNMAP_ME_PHANTOM_FUNC);
- return 0;
+ return rc;
}
static int domain_context_unmap(
@@ -1772,6 +1822,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
struct domain_iommu *hd = dom_iommu(d);
int flush_dev_iotlb;
int iommu_domid;
+ int rc = 0;
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
@@ -1785,13 +1836,18 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
iommu_domid= domain_iommu_domid(d, iommu);
if ( iommu_domid == -1 )
continue;
- if ( iommu_flush_iotlb_psi(iommu, iommu_domid,
+
+ rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K,
- order, !present, flush_dev_iotlb) )
+ order, !present, flush_dev_iotlb);
+ if ( rc > 0 )
+ {
iommu_flush_write_buffer(iommu);
+ rc = 0;
+ }
}
- return 0;
+ return rc;
}
static int __init vtd_ept_page_compatible(struct iommu *iommu)
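
As background on where the positive codes originate: the register-based
flush paths can legitimately skip an invalidation (for instance when
hardware does not cache non-present entries) and return 1 so that the
caller performs a write-buffer flush instead. An illustrative
reconstruction of that convention, not the exact upstream code:

    /*
     * Returns 1 if the flush was skipped and the caller should invoke
     * iommu_flush_write_buffer(), 0 if the flush was performed, and a
     * negative value on error.
     */
    static int example_flush(int flush_non_present_entry, int caching_mode)
    {
        if ( flush_non_present_entry && !caching_mode )
            return 1;

        /* ... issue the real invalidation command here ... */
        return 0;
    }
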