--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -829,7 +829,8 @@ out:
need_modify_vtd_table )
{
if ( iommu_hap_pt_share )
- rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
+ rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order,
+ vtd_pte_present, NONE_LOCK);
else
{
if ( iommu_flags )
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -410,7 +410,8 @@ static int iommu_flush_context_device(
/* return value determine if we need a write buffer flush */
static int flush_iotlb_reg(void *_iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
- int flush_non_present_entry, int flush_dev_iotlb)
+ int flush_non_present_entry, int flush_dev_iotlb,
+ unsigned int lock)
{
struct iommu *iommu = (struct iommu *) _iommu;
int tlb_offset = ecap_iotlb_offset(iommu->ecap);
@@ -475,7 +476,8 @@ static int flush_iotlb_reg(void *_iommu, u16 did,
}
static int iommu_flush_iotlb_global(struct iommu *iommu,
- int flush_non_present_entry, int flush_dev_iotlb)
+ int flush_non_present_entry, int flush_dev_iotlb,
+ unsigned int lock)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
@@ -484,7 +486,8 @@ static int iommu_flush_iotlb_global(struct iommu *iommu,
vtd_ops_preamble_quirk(iommu);
status = flush->iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ flush_non_present_entry, flush_dev_iotlb,
+ lock);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
@@ -493,7 +496,8 @@ static int iommu_flush_iotlb_global(struct iommu *iommu,
}
static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
- int flush_non_present_entry, int flush_dev_iotlb)
+ int flush_non_present_entry, int flush_dev_iotlb,
+ unsigned int lock)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
@@ -502,7 +506,8 @@ static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
vtd_ops_preamble_quirk(iommu);
status = flush->iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ flush_non_present_entry, flush_dev_iotlb,
+ lock);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
@@ -512,7 +517,8 @@ static int iommu_flush_iotlb_dsi(struct iommu *iommu, u16 did,
static int iommu_flush_iotlb_psi(
struct iommu *iommu, u16 did, u64 addr, unsigned int order,
- int flush_non_present_entry, int flush_dev_iotlb)
+ int flush_non_present_entry, int flush_dev_iotlb,
+ unsigned int lock)
{
struct iommu_flush *flush = iommu_get_flush(iommu);
int status;
@@ -521,11 +527,13 @@ static int iommu_flush_iotlb_psi(
/* Fallback to domain selective flush if no PSI support */
if ( !cap_pgsel_inv(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+ flush_dev_iotlb, lock);
/* Fallback to domain selective flush if size is too big */
if ( order > cap_max_amask_val(iommu->cap) )
- return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry, flush_dev_iotlb);
+ return iommu_flush_iotlb_dsi(iommu, did, flush_non_present_entry,
+ flush_dev_iotlb, lock);
addr >>= PAGE_SHIFT_4K + order;
addr <<= PAGE_SHIFT_4K + order;
@@ -534,7 +542,8 @@ static int iommu_flush_iotlb_psi(
vtd_ops_preamble_quirk(iommu);
status = flush->iotlb(iommu, did, addr, order, DMA_TLB_PSI_FLUSH,
- flush_non_present_entry, flush_dev_iotlb);
+ flush_non_present_entry, flush_dev_iotlb,
+ lock);
/* undo platform specific errata workarounds */
vtd_ops_postamble_quirk(iommu);
@@ -542,7 +551,7 @@ static int iommu_flush_iotlb_psi(
return status;
}
-static int iommu_flush_all(void)
+static int iommu_flush_all(unsigned int lock)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -554,14 +563,17 @@ static int iommu_flush_all(void)
iommu = drhd->iommu;
iommu_flush_context_global(iommu, 0);
flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
- return iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb);
+ return iommu_flush_iotlb_global(iommu, 0, flush_dev_iotlb,
+ lock);
}
return 0;
}
static int __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
- int dma_old_pte_present, unsigned int page_count)
+ int dma_old_pte_present,
+ unsigned int page_count,
+ unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(d);
struct acpi_drhd_unit *drhd;
@@ -588,11 +600,11 @@ static int __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
if ( page_count > 1 || gfn == -1 )
rc = iommu_flush_iotlb_dsi(iommu, iommu_domid,
- 0, flush_dev_iotlb);
+ 0, flush_dev_iotlb, lock);
else
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K, 0,
- !dma_old_pte_present, flush_dev_iotlb);
+ !dma_old_pte_present, flush_dev_iotlb, lock);
if ( rc )
iommu_flush_write_buffer(iommu);
@@ -604,16 +616,17 @@ static int __intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
static int intel_iommu_iotlb_flush(struct domain *d, unsigned long gfn,
unsigned int page_count, unsigned int lock)
{
- return __intel_iommu_iotlb_flush(d, gfn, 1, page_count);
+ return __intel_iommu_iotlb_flush(d, gfn, 1, page_count, lock);
}
static int intel_iommu_iotlb_flush_all(struct domain *d, unsigned int lock)
{
- return __intel_iommu_iotlb_flush(d, 0, 0, 0);
+ return __intel_iommu_iotlb_flush(d, 0, 0, 0, lock);
}
/* clear one page's page table */
-static int dma_pte_clear_one(struct domain *domain, u64 addr)
+static int dma_pte_clear_one(struct domain *domain, u64 addr,
+ unsigned int lock)
{
struct hvm_iommu *hd = domain_hvm_iommu(domain);
struct dma_pte *page = NULL, *pte = NULL;
@@ -644,7 +657,8 @@ static int dma_pte_clear_one(struct domain *domain, u64 addr)
iommu_flush_cache_entry(pte, sizeof(struct dma_pte));
if ( !this_cpu(iommu_dont_flush_iotlb) )
- rc = __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K, 1, 1);
+ rc = __intel_iommu_iotlb_flush(domain, addr >> PAGE_SHIFT_4K,
+ 1, 1, lock);
unmap_vtd_domain_page(page);
@@ -1273,7 +1287,7 @@ static int __hwdom_init intel_iommu_hwdom_init(struct domain *d)
setup_hwdom_pci_devices(d, setup_hwdom_device);
setup_hwdom_rmrr(d);
- rc = iommu_flush_all();
+ rc = iommu_flush_all(NONE_LOCK);
if ( rc )
return rc;
@@ -1419,7 +1433,8 @@ int domain_context_mapping_one(
{
int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
- rc = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb);
+ rc = iommu_flush_iotlb_dsi(iommu, 0, 1, flush_dev_iotlb,
+ PCIDEVS_LOCK);
}
set_bit(iommu->index, &hd->arch.iommu_bitmap);
@@ -1559,7 +1574,8 @@ int domain_context_unmap_one(
else
{
int flush_dev_iotlb = find_ats_dev_drhd(iommu) ? 1 : 0;
- rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb);
+ rc = iommu_flush_iotlb_dsi(iommu, iommu_domid, 0, flush_dev_iotlb,
+ PCIDEVS_LOCK);
}
spin_unlock(&iommu->lock);
@@ -1759,7 +1775,8 @@ static int intel_iommu_map_page(
if ( !this_cpu(iommu_dont_flush_iotlb) )
{
- return __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old), 1);
+ return __intel_iommu_iotlb_flush(d, gfn, dma_pte_present(old),
+ 1, lock);
}
return 0;
@@ -1772,11 +1789,11 @@ static int intel_iommu_unmap_page(struct domain *d, unsigned long gfn,
if ( iommu_passthrough && is_hardware_domain(d) )
return 0;
- return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K);
+ return dma_pte_clear_one(d, (paddr_t)gfn << PAGE_SHIFT_4K, lock);
}
int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
- int order, int present)
+ int order, int present, unsigned int lock)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu = NULL;
@@ -1799,7 +1816,7 @@ int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte,
continue;
rc = iommu_flush_iotlb_psi(iommu, iommu_domid,
(paddr_t)gfn << PAGE_SHIFT_4K,
- order, !present, flush_dev_iotlb);
+ order, !present, flush_dev_iotlb, lock);
if ( rc )
iommu_flush_write_buffer(iommu);
}
@@ -2126,7 +2143,7 @@ static int init_vtd_hw(void)
}
}
- return iommu_flush_all();
+ return iommu_flush_all(NONE_LOCK);
}
static void __hwdom_init setup_hwdom_rmrr(struct domain *d)
@@ -2404,7 +2421,7 @@ static int vtd_suspend(void)
if ( !iommu_enabled )
return 0;
- rc = iommu_flush_all();
+ rc = iommu_flush_all(NONE_LOCK);
if ( rc )
return rc;
@@ -2448,7 +2465,7 @@ static int vtd_crash_shutdown(void)
if ( !iommu_enabled )
return 0;
- rc = iommu_flush_all();
+ rc = iommu_flush_all(NONE_LOCK);
if ( rc )
return rc;
--- a/xen/drivers/passthrough/vtd/iommu.h
+++ b/xen/drivers/passthrough/vtd/iommu.h
@@ -494,7 +494,8 @@ struct iommu_flush {
int (*context)(void *iommu, u16 did, u16 source_id,
u8 function_mask, u64 type, int non_present_entry_flush);
int (*iotlb)(void *iommu, u16 did, u64 addr, unsigned int size_order,
- u64 type, int flush_non_present_entry, int flush_dev_iotlb);
+ u64 type, int flush_non_present_entry, int flush_dev_iotlb,
+ unsigned int lock);
};
struct intel_iommu {
--- a/xen/drivers/passthrough/vtd/qinval.c
+++ b/xen/drivers/passthrough/vtd/qinval.c
@@ -315,7 +315,8 @@ static int flush_context_qi(
static int flush_iotlb_qi(
void *_iommu, u16 did,
u64 addr, unsigned int size_order, u64 type,
- int flush_non_present_entry, int flush_dev_iotlb)
+ int flush_non_present_entry, int flush_dev_iotlb,
+ unsigned int lock)
{
u8 dr = 0, dw = 0;
int ret = 0;
--- a/xen/include/asm-x86/iommu.h
+++ b/xen/include/asm-x86/iommu.h
@@ -26,7 +26,8 @@ int iommu_setup_hpet_msi(struct msi_desc *);
/* While VT-d specific, this must get declared in a generic header. */
int adjust_vtd_irq_affinities(void);
-int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order, int present);
+int iommu_pte_flush(struct domain *d, u64 gfn, u64 *pte, int order,
+ int present, unsigned int lock);
int iommu_supports_eim(void);
int iommu_enable_x2apic_IR(void);
void iommu_disable_x2apic_IR(void);
to pass down a flag indicating whether the lock is being held, and check
it all the way up the call trees.

Signed-off-by: Quan Xu <quan.xu@intel.com>
---
 xen/arch/x86/mm/p2m-ept.c            |  3 +-
 xen/drivers/passthrough/vtd/iommu.c  | 73 ++++++++++++++++++++++--------------
 xen/drivers/passthrough/vtd/iommu.h  |  3 +-
 xen/drivers/passthrough/vtd/qinval.c |  3 +-
 xen/include/asm-x86/iommu.h          |  3 +-
 5 files changed, 53 insertions(+), 32 deletions(-)
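For context on what the threaded "lock" argument is for: deep callees of
the flush paths (for example the Device-TLB flush-timeout handling on ATS
devices) may need pcidevs_lock, but some callers, such as
domain_context_mapping_one() / domain_context_unmap_one() above, already
hold it, while init/suspend/crash paths do not. Below is a minimal sketch
of that pattern, not code from this patch: the flag values and the helper
function are illustrative assumptions, and the real NONE_LOCK /
PCIDEVS_LOCK definitions come from elsewhere in the series.

/*
 * Illustrative sketch only -- not taken from this patch.  NONE_LOCK and
 * PCIDEVS_LOCK are assumed here to form a bitmap of locks the caller
 * already holds when it enters the flush call tree.
 */
#define NONE_LOCK     0           /* caller holds no relevant lock */
#define PCIDEVS_LOCK  (1u << 0)   /* caller already holds pcidevs_lock */

extern spinlock_t pcidevs_lock;

/* Hypothetical bottom-of-tree consumer of the flag. */
static void example_handle_flush_timeout(struct pci_dev *pdev,
                                         unsigned int lock)
{
    int need_lock = !(lock & PCIDEVS_LOCK);

    /* Only take the lock if the caller does not already hold it. */
    if ( need_lock )
        spin_lock(&pcidevs_lock);

    /* ... mark the unresponsive ATS device (pdev) as broken/hidden ... */

    if ( need_lock )
        spin_unlock(&pcidevs_lock);
}

Passing the flag down, rather than having callees take pcidevs_lock
unconditionally, avoids self-deadlock when a flush is reached from a
device-assignment path that already holds the lock; that is why the
context mapping/unmapping call sites pass PCIDEVS_LOCK while
init_vtd_hw(), vtd_suspend(), and vtd_crash_shutdown() pass NONE_LOCK.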