@@ -1531,8 +1531,8 @@ int p2m_init(struct domain *d)
* shared with the CPU, Xen has to make sure that the PT changes have
* reached the memory
*/
- p2m->clean_pte = iommu_enabled &&
- !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
+ p2m->clean_pte = is_iommu_enabled(d) &&
+ !iommu_has_feature(d, IOMMU_FEAT_COHERENT_WALK);
rc = p2m_alloc_table(d);
@@ -356,7 +356,7 @@ unsigned long __init dom0_compute_nr_pages(
avail -= d->max_vcpus - 1;
/* Reserve memory for iommu_dom0_init() (rough estimate). */
- if ( iommu_enabled )
+ if ( is_iommu_enabled(d) )
{
unsigned int s;
@@ -715,7 +715,7 @@ long arch_do_domctl(
break;
ret = -ESRCH;
- if ( iommu_enabled )
+ if ( is_iommu_enabled(d) )
{
pcidevs_lock();
ret = pt_irq_create_bind(d, bind);
@@ -744,7 +744,7 @@ long arch_do_domctl(
if ( ret )
break;
- if ( iommu_enabled )
+ if ( is_iommu_enabled(d) )
{
pcidevs_lock();
ret = pt_irq_destroy_bind(d, bind);
@@ -465,7 +465,7 @@ void hvm_migrate_timers(struct vcpu *v)
void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v)
{
- ASSERT(iommu_enabled &&
+ ASSERT(is_iommu_enabled(v->domain) &&
(is_hardware_domain(v->domain) || hvm_domain_irq(v->domain)->dpci));
if ( (pirq_dpci->flags & HVM_IRQ_DPCI_MACH_MSI) &&
@@ -496,7 +496,7 @@ void hvm_migrate_pirqs(struct vcpu *v)
{
struct domain *d = v->domain;
- if ( !iommu_enabled || !hvm_domain_irq(d)->dpci )
+ if ( !is_iommu_enabled(d) || !hvm_domain_irq(d)->dpci )
return;
spin_lock(&d->event_lock);
@@ -2264,7 +2264,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
}
if ( ((value ^ old_value) & X86_CR0_CD) &&
- iommu_enabled && hvm_funcs.handle_cd &&
+ is_iommu_enabled(d) && hvm_funcs.handle_cd &&
(!rangeset_is_empty(d->iomem_caps) ||
!rangeset_is_empty(d->arch.ioport_caps) ||
has_arch_pdevs(d)) )
@@ -536,7 +536,7 @@ void vioapic_update_EOI(struct domain *d, u8 vector)
ent->fields.remote_irr = 0;
- if ( iommu_enabled )
+ if ( is_iommu_enabled(d) )
{
spin_unlock(&d->arch.hvm.irq_lock);
hvm_dpci_eoi(d, vioapic->base_gsi + pin, ent);
@@ -1087,7 +1087,7 @@ static int construct_vmcs(struct vcpu *v)
vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_CS, VMX_MSR_RW);
vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_ESP, VMX_MSR_RW);
vmx_clear_msr_intercept(v, MSR_IA32_SYSENTER_EIP, VMX_MSR_RW);
- if ( paging_mode_hap(d) && (!iommu_enabled || iommu_snoop) )
+ if ( paging_mode_hap(d) && (!is_iommu_enabled(d) || iommu_snoop) )
vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
if ( (vmexit_ctl & VM_EXIT_CLEAR_BNDCFGS) &&
(vmentry_ctl & VM_ENTRY_LOAD_BNDCFGS) )
@@ -1210,7 +1210,7 @@ static void vmx_handle_cd(struct vcpu *v, unsigned long value)
{
v->arch.hvm.cache_mode = NORMAL_CACHE_MODE;
vmx_set_guest_pat(v, *pat);
- if ( !iommu_enabled || iommu_snoop )
+ if ( !is_iommu_enabled(v->domain) || iommu_snoop )
vmx_clear_msr_intercept(v, MSR_IA32_CR_PAT, VMX_MSR_RW);
hvm_asid_flush_vcpu(v); /* no need to flush cache */
}
@@ -260,7 +260,7 @@ static bool_t ept_split_super_page(struct p2m_domain *p2m,
*epte = *ept_entry;
epte->sp = (level > 1);
epte->mfn += i * trunk;
- epte->snp = (iommu_enabled && iommu_snoop);
+ epte->snp = is_iommu_enabled(p2m->domain) && iommu_snoop;
epte->suppress_ve = 1;
ept_p2m_type_to_flags(p2m, epte, epte->sa_p2mt, epte->access);
@@ -766,7 +766,7 @@ ept_set_entry(struct p2m_domain *p2m, gfn_t gfn_, mfn_t mfn,
new_entry.sp = !!i;
new_entry.sa_p2mt = p2mt;
new_entry.access = p2ma;
- new_entry.snp = (iommu_enabled && iommu_snoop);
+ new_entry.snp = is_iommu_enabled(p2m->domain) && iommu_snoop;
/* the caller should take care of the previous page */
new_entry.mfn = mfn_x(mfn);
@@ -857,7 +857,7 @@ int guest_iommu_init(struct domain* d)
struct guest_iommu *iommu;
struct domain_iommu *hd = dom_iommu(d);
- if ( !is_hvm_domain(d) || !iommu_enabled || !iommuv2_enabled ||
+ if ( !is_hvm_domain(d) || !is_iommu_enabled(d) || !iommuv2_enabled ||
!has_viommu(d) )
return 0;
@@ -29,7 +29,7 @@ int iommu_assign_dt_device(struct domain *d, struct dt_device_node *dev)
int rc = -EBUSY;
struct domain_iommu *hd = dom_iommu(d);
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(d) )
return -EINVAL;
if ( !dt_device_is_protected(dev) )
@@ -71,7 +71,7 @@ int iommu_deassign_dt_device(struct domain *d, struct dt_device_node *dev)
const struct domain_iommu *hd = dom_iommu(d);
int rc;
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(d) )
return -EINVAL;
if ( !dt_device_is_protected(dev) )
@@ -434,7 +434,7 @@ int pt_irq_create_bind(
if ( vcpu )
pirq_dpci->gmsi.posted = true;
}
- if ( vcpu && iommu_enabled )
+ if ( vcpu && is_iommu_enabled(d) )
hvm_migrate_pirq(pirq_dpci, vcpu);
/* Use interrupt posting if it is supported. */
@@ -817,7 +817,7 @@ int hvm_do_IRQ_dpci(struct domain *d, struct pirq *pirq)
ASSERT(is_hvm_domain(d));
- if ( !iommu_enabled || (!is_hardware_domain(d) && !dpci) ||
+ if ( !is_iommu_enabled(d) || (!is_hardware_domain(d) && !dpci) ||
!pirq_dpci || !(pirq_dpci->flags & HVM_IRQ_DPCI_MAPPED) )
return 0;
@@ -869,7 +869,7 @@ static int _hvm_dpci_msi_eoi(struct domain *d,
void hvm_dpci_msi_eoi(struct domain *d, int vector)
{
- if ( !iommu_enabled ||
+ if ( !is_iommu_enabled(d) ||
(!hvm_domain_irq(d)->dpci && !is_hardware_domain(d)) )
return;
@@ -1001,7 +1001,7 @@ void hvm_dpci_eoi(struct domain *d, unsigned int guest_gsi,
const struct hvm_irq_dpci *hvm_irq_dpci;
const struct hvm_girq_dpci_mapping *girq;
- if ( !iommu_enabled )
+ if ( !is_iommu_enabled(d) )
return;
if ( is_hardware_domain(d) )
@@ -179,7 +179,7 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
check_hwdom_reqs(d);
- if ( !iommu_enabled )
+ if ( !is_iommu_enabled(d) )
return;
register_keyhandler('o', &iommu_dump_p2m_table, "dump iommu p2m table", 0);
@@ -284,7 +284,7 @@ int iommu_construct(struct domain *d)
void iommu_domain_destroy(struct domain *d)
{
- if ( !iommu_enabled || !dom_iommu(d)->platform_ops )
+ if ( !is_iommu_enabled(d) )
return;
iommu_teardown(d);
@@ -300,7 +300,7 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
unsigned long i;
int rc = 0;
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(d) )
return 0;
ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
@@ -360,7 +360,7 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
unsigned long i;
int rc = 0;
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(d) )
return 0;
ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
@@ -413,7 +413,7 @@ int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
{
const struct domain_iommu *hd = dom_iommu(d);
- if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->lookup_page )
+ if ( !is_iommu_enabled(d) || !hd->platform_ops->lookup_page )
return -EOPNOTSUPP;
return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags);
@@ -442,8 +442,8 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count,
const struct domain_iommu *hd = dom_iommu(d);
int rc;
- if ( !iommu_enabled || !hd->platform_ops ||
- !hd->platform_ops->iotlb_flush || !page_count || !flush_flags )
+ if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush ||
+ !page_count || !flush_flags )
return 0;
if ( dfn_eq(dfn, INVALID_DFN) )
@@ -470,8 +470,8 @@ int iommu_iotlb_flush_all(struct domain *d, unsigned int flush_flags)
const struct domain_iommu *hd = dom_iommu(d);
int rc;
- if ( !iommu_enabled || !hd->platform_ops ||
- !hd->platform_ops->iotlb_flush_all || !flush_flags )
+ if ( !is_iommu_enabled(d) || !hd->platform_ops->iotlb_flush_all ||
+ !flush_flags )
return 0;
/*
@@ -556,7 +556,7 @@ int iommu_do_domctl(
{
int ret = -ENODEV;
- if ( !iommu_enabled )
+ if ( !is_iommu_enabled(d) )
return -ENOSYS;
#ifdef CONFIG_HAS_PCI
@@ -576,9 +576,9 @@ void iommu_share_p2m_table(struct domain* d)
ASSERT(hap_enabled(d));
/*
* iommu_use_hap_pt(d) cannot be used here because during domain
- * construction need_iommu(d) will always return false here.
+ * construction has_iommu_pt(d) will always return false here.
*/
- if ( iommu_enabled && iommu_hap_pt_share )
+ if ( is_iommu_enabled(d) && iommu_hap_pt_share )
iommu_get_ops()->share_p2m(d);
}
@@ -608,10 +608,7 @@ int iommu_get_reserved_device_memory(iommu_grdm_t *func, void *ctxt)
bool_t iommu_has_feature(struct domain *d, enum iommu_feature feature)
{
- if ( !iommu_enabled )
- return 0;
-
- return test_bit(feature, dom_iommu(d)->features);
+ return is_iommu_enabled(d) && test_bit(feature, dom_iommu(d)->features);
}
static void iommu_dump_p2m_table(unsigned char key)
@@ -864,7 +864,7 @@ static int pci_clean_dpci_irqs(struct domain *d)
{
struct hvm_irq_dpci *hvm_irq_dpci = NULL;
- if ( !iommu_enabled )
+ if ( !is_iommu_enabled(d) )
return 0;
if ( !is_hvm_domain(d) )
@@ -1333,7 +1333,7 @@ static int iommu_add_device(struct pci_dev *pdev)
ASSERT(pcidevs_locked());
hd = dom_iommu(pdev->domain);
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(pdev->domain) )
return 0;
rc = hd->platform_ops->add_device(pdev->devfn, pci_to_dev(pdev));
@@ -1362,7 +1362,7 @@ static int iommu_enable_device(struct pci_dev *pdev)
ASSERT(pcidevs_locked());
hd = dom_iommu(pdev->domain);
- if ( !iommu_enabled || !hd->platform_ops ||
+ if ( !is_iommu_enabled(pdev->domain) ||
!hd->platform_ops->enable_device )
return 0;
@@ -1378,7 +1378,7 @@ static int iommu_remove_device(struct pci_dev *pdev)
return -EINVAL;
hd = dom_iommu(pdev->domain);
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(pdev->domain) )
return 0;
for ( devfn = pdev->devfn ; pdev->phantom_stride; )
@@ -1421,7 +1421,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
struct pci_dev *pdev;
int rc = 0;
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(d) )
return 0;
/* Prevent device assign if mem paging or mem sharing have been
@@ -1483,7 +1483,7 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
struct pci_dev *pdev = NULL;
int ret = 0;
- if ( !iommu_enabled || !hd->platform_ops )
+ if ( !is_iommu_enabled(d) )
return -EINVAL;
ASSERT(pcidevs_locked());
@@ -1536,7 +1536,7 @@ static int iommu_get_device_group(
int i = 0;
const struct iommu_ops *ops = hd->platform_ops;
- if ( !iommu_enabled || !ops || !ops->get_device_group_id )
+ if ( !is_iommu_enabled(d) || !ops->get_device_group_id )
return 0;
group_id = ops->get_device_group_id(seg, bus, devfn);
@@ -1757,7 +1757,7 @@ static void iommu_domain_teardown(struct domain *d)
xfree(mrmrr);
}
- ASSERT(iommu_enabled);
+ ASSERT(is_iommu_enabled(d));
/*
* We can't use iommu_use_hap_pt here because either IOMMU state
@@ -51,7 +51,7 @@ void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq)
struct hvm_irq_dpci *dpci = NULL;
ASSERT(isairq < NR_ISAIRQS);
- if ( !iommu_enabled )
+ if ( !is_iommu_enabled(d) )
return;
spin_lock(&d->event_lock);
@@ -178,7 +178,7 @@ int arch_iommu_populate_page_table(struct domain *d)
void __hwdom_init arch_iommu_check_autotranslated_hwdom(struct domain *d)
{
- if ( !iommu_enabled )
+ if ( !is_iommu_enabled(d) )
panic("Presently, iommu must be enabled for PVH hardware domain\n");
}
...rather than testing the global iommu_enabled flag and ops pointer. Now that there is a per-domain flag indicating whether the domain is permitted to use the IOMMU (which determines whether the ops pointer will be set), many tests of the global iommu_enabled flag and ops pointer can be translated into tests of the per-domain flag. Some of the other tests of purely the global iommu_enabled flag can also be translated into tests of the per-domain flag. NOTE: The comment in iommu_share_p2m_table() is also fixed; need_iommu() disappeared some time ago. Signed-off-by: Paul Durrant <paul.durrant@citrix.com> --- Cc: Stefano Stabellini <sstabellini@kernel.org> Cc: Julien Grall <julien.grall@arm.com> Cc: Volodymyr Babchuk <Volodymyr_Babchuk@epam.com> Cc: Jan Beulich <jbeulich@suse.com> Cc: Andrew Cooper <andrew.cooper3@citrix.com> Cc: Wei Liu <wl@xen.org> Cc: "Roger Pau Monné" <roger.pau@citrix.com> Cc: Jun Nakajima <jun.nakajima@intel.com> Cc: Kevin Tian <kevin.tian@intel.com> Cc: George Dunlap <george.dunlap@eu.citrix.com> Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> Cc: Brian Woods <brian.woods@amd.com> --- xen/arch/arm/p2m.c | 3 +-- xen/arch/x86/dom0_build.c | 2 +- xen/arch/x86/domctl.c | 4 ++-- xen/arch/x86/hvm/hvm.c | 6 ++--- xen/arch/x86/hvm/vioapic.c | 2 +- xen/arch/x86/hvm/vmx/vmcs.c | 2 +- xen/arch/x86/hvm/vmx/vmx.c | 2 +- xen/arch/x86/mm/p2m-ept.c | 4 ++-- xen/drivers/passthrough/amd/iommu_guest.c | 2 +- xen/drivers/passthrough/device_tree.c | 4 ++-- xen/drivers/passthrough/io.c | 8 +++---- xen/drivers/passthrough/iommu.c | 29 ++++++++++------------- xen/drivers/passthrough/pci.c | 14 +++++------ xen/drivers/passthrough/vtd/iommu.c | 2 +- xen/drivers/passthrough/vtd/x86/hvm.c | 2 +- xen/drivers/passthrough/x86/iommu.c | 2 +- 16 files changed, 42 insertions(+), 46 deletions(-)