@@ -472,9 +472,9 @@ long arch_do_domctl(
ret = -ESRCH;
if ( iommu_enabled )
{
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pt_irq_create_bind(d, bind);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
if ( ret < 0 )
printk(XENLOG_G_ERR "pt_irq_create_bind failed (%ld) for dom%d\n",
@@ -497,9 +497,9 @@ long arch_do_domctl(
if ( iommu_enabled )
{
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pt_irq_destroy_bind(d, bind);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
if ( ret < 0 )
printk(XENLOG_G_ERR "pt_irq_destroy_bind failed (%ld) for dom%d\n",
@@ -388,7 +388,7 @@ int msixtbl_pt_register(struct domain *d, struct pirq *pirq, uint64_t gtable)
struct msixtbl_entry *entry, *new_entry;
int r = -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
ASSERT(spin_is_locked(&d->event_lock));
if ( !has_vlapic(d) )
@@ -446,7 +446,7 @@ void msixtbl_pt_unregister(struct domain *d, struct pirq *pirq)
struct pci_dev *pdev;
struct msixtbl_entry *entry;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
ASSERT(spin_is_locked(&d->event_lock));
if ( !has_vlapic(d) )
@@ -1960,7 +1960,7 @@ int map_domain_pirq(
struct pci_dev *pdev;
unsigned int nr = 0;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
ret = -ENODEV;
if ( !cpu_has_apic )
@@ -2105,7 +2105,7 @@ int unmap_domain_pirq(struct domain *d, int pirq)
if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
ASSERT(spin_is_locked(&d->event_lock));
info = pirq_info(d, pirq);
@@ -2231,7 +2231,7 @@ void free_domain_pirqs(struct domain *d)
{
int i;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
spin_lock(&d->event_lock);
for ( i = 0; i < d->nr_pirqs; i++ )
@@ -2239,7 +2239,7 @@ void free_domain_pirqs(struct domain *d)
unmap_domain_pirq(d, i);
spin_unlock(&d->event_lock);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
static void dump_irqs(unsigned char key)
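
The conversion preserves the existing lock nesting: wherever both locks are needed (here and in the domctl/physdev hunks below), the pcidevs lock is taken strictly outside the per-domain event_lock and released in reverse order. A minimal sketch of the ordering, with a hypothetical helper name and the real work elided:

    static void pirq_cleanup_sketch(struct domain *d)
    {
        pcidevs_lock();               /* outer: serializes PCI device list users */
        spin_lock(&d->event_lock);    /* inner: protects the domain's pirq state */

        /* ... unmap_domain_pirq() and friends run here ... */

        spin_unlock(&d->event_lock);
        pcidevs_unlock();             /* release in reverse acquisition order */
    }
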
@@ -694,7 +694,7 @@ static int msi_capability_init(struct pci_dev *dev,
u8 slot = PCI_SLOT(dev->devfn);
u8 func = PCI_FUNC(dev->devfn);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
if ( !pos )
return -ENODEV;
@@ -852,7 +852,7 @@ static int msix_capability_init(struct pci_dev *dev,
u8 func = PCI_FUNC(dev->devfn);
bool_t maskall = msix->host_maskall;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
control = pci_conf_read16(seg, bus, slot, func, msix_control_reg(pos));
/*
@@ -1042,7 +1042,7 @@ static int __pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
struct pci_dev *pdev;
struct msi_desc *old_desc;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
if ( !pdev )
return -ENODEV;
@@ -1103,7 +1103,7 @@ static int __pci_enable_msix(struct msi_info *msi, struct msi_desc **desc)
u8 func = PCI_FUNC(msi->devfn);
struct msi_desc *old_desc;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
pos = pci_find_cap_offset(msi->seg, msi->bus, slot, func, PCI_CAP_ID_MSIX);
if ( !pdev || !pos )
@@ -1205,7 +1205,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool_t off)
if ( !pos )
return -ENODEV;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(seg, bus, devfn);
if ( !pdev )
rc = -ENODEV;
@@ -1224,7 +1224,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool_t off)
rc = msix_capability_init(pdev, pos, NULL, NULL,
multi_msix_capable(control));
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
@@ -1235,7 +1235,7 @@ int pci_prepare_msix(u16 seg, u8 bus, u8 devfn, bool_t off)
*/
int pci_enable_msi(struct msi_info *msi, struct msi_desc **desc)
{
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
if ( !use_msi )
return -EPERM;
@@ -1351,7 +1351,7 @@ int pci_restore_msi_state(struct pci_dev *pdev)
unsigned int type = 0, pos = 0;
u16 control = 0;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
if ( !use_msi )
return -EOPNOTSUPP;
@@ -88,13 +88,13 @@ int pci_conf_write_intercept(unsigned int seg, unsigned int bdf,
if ( reg < 64 || reg >= 256 )
return 0;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(seg, PCI_BUS(bdf), PCI_DEVFN2(bdf));
if ( pdev )
rc = pci_msi_conf_write_intercept(pdev, reg, size, data);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
@@ -167,7 +167,7 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
goto free_domain;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
/* Verify or get pirq. */
spin_lock(&d->event_lock);
pirq = domain_irq_to_pirq(d, irq);
@@ -237,7 +237,7 @@ int physdev_map_pirq(domid_t domid, int type, int *index, int *pirq_p,
done:
spin_unlock(&d->event_lock);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( ret != 0 )
switch ( type )
{
@@ -275,11 +275,11 @@ int physdev_unmap_pirq(domid_t domid, int pirq)
goto free_domain;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
spin_lock(&d->event_lock);
ret = unmap_domain_pirq(d, pirq);
spin_unlock(&d->event_lock);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
free_domain:
rcu_unlock_domain(d);
@@ -689,10 +689,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&restore_msi, arg, 1) != 0 )
break;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(0, restore_msi.bus, restore_msi.devfn);
ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
break;
}
@@ -704,10 +704,10 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
if ( copy_from_guest(&dev, arg, 1) != 0 )
break;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
ret = pdev ? pci_restore_msi_state(pdev) : -ENODEV;
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
break;
}
@@ -426,7 +426,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
break;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(dev.seg, dev.bus, dev.devfn);
if ( !pdev )
node = XEN_INVALID_DEV;
@@ -434,7 +434,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
node = XEN_INVALID_NODE_ID;
else
node = pdev->node;
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( copy_to_guest_offset(ti->nodes, i, &node, 1) )
{
@@ -673,9 +673,9 @@ void parse_ppr_log_entry(struct amd_iommu *iommu, u32 entry[])
bus = PCI_BUS(device_id);
devfn = PCI_DEVFN2(device_id);
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_real_pdev(iommu->seg, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( pdev )
guest_iommu_add_ppr_log(pdev->domain, entry);
@@ -787,10 +787,10 @@ static bool_t __init set_iommu_interrupt_handler(struct amd_iommu *iommu)
return 0;
}
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
iommu->msi.dev = pci_get_pdev(iommu->seg, PCI_BUS(iommu->bdf),
PCI_DEVFN2(iommu->bdf));
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !iommu->msi.dev )
{
AMD_IOMMU_DEBUG("IOMMU: no pdev for %04x:%02x:%02x.%u\n",
@@ -593,7 +593,7 @@ static int update_paging_mode(struct domain *d, unsigned long gfn)
hd->arch.paging_mode = level;
hd->arch.root_table = new_root;
- if ( !spin_is_locked(&pcidevs_lock) )
+ if ( !pcidevs_is_locked() )
AMD_IOMMU_DEBUG("%s Try to access pdev_list "
"without aquiring pcidevs_lock.\n", __func__);
@@ -158,7 +158,7 @@ static void amd_iommu_setup_domain_device(
spin_unlock_irqrestore(&iommu->lock, flags);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
!pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
@@ -345,7 +345,7 @@ void amd_iommu_disable_domain_device(struct domain *domain,
}
spin_unlock_irqrestore(&iommu->lock, flags);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
if ( devfn == pdev->devfn &&
pci_ats_device(iommu->seg, bus, devfn) &&
@@ -48,7 +48,7 @@ struct pci_seg {
} bus2bridge[MAX_BUSES];
};
-spinlock_t pcidevs_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t _pcidevs_lock = SPIN_LOCK_UNLOCKED;
static struct radix_tree_root pci_segments;
static inline struct pci_seg *get_pseg(u16 seg)
@@ -103,6 +103,26 @@ static int pci_segments_iterate(
return rc;
}
+void pcidevs_lock(void)
+{
+ spin_lock_recursive(&_pcidevs_lock);
+}
+
+void pcidevs_unlock(void)
+{
+ spin_unlock_recursive(&_pcidevs_lock);
+}
+
+int pcidevs_is_locked(void)
+{
+ return spin_is_locked(&_pcidevs_lock);
+}
+
+int pcidevs_trylock(void)
+{
+ return spin_trylock_recursive(&_pcidevs_lock);
+}
+
void __init pt_pci_init(void)
{
radix_tree_init(&pci_segments);
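
These wrappers hide the lock behind a narrow interface and, because they use the recursive spin lock primitives, allow a code path that already holds the lock to take it again safely. A minimal sketch of the nesting this enables, with hypothetical helper names (a plain spin_lock() on the second acquisition would deadlock the CPU against itself):

    static void inner_helper(void)
    {
        pcidevs_lock();      /* nested acquisition: recursion count goes to 2 */
        /* ... walk the PCI device list ... */
        pcidevs_unlock();    /* back to 1; the lock is still held by this CPU */
    }

    static void outer_operation(void)
    {
        pcidevs_lock();      /* first acquisition */
        inner_helper();      /* may re-take the lock without deadlocking */
        pcidevs_unlock();    /* fully released */
    }

Note that pcidevs_is_locked() is built on spin_is_locked(), which reports merely that some CPU holds the lock, not that the current CPU does; the ASSERT()s converted throughout this patch are therefore a sanity check rather than a strict ownership check.
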
@@ -412,14 +432,14 @@ int __init pci_hide_device(int bus, int devfn)
struct pci_dev *pdev;
int rc = -ENOMEM;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = alloc_pdev(get_pseg(0), bus, devfn);
if ( pdev )
{
_pci_hide_device(pdev);
rc = 0;
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
@@ -456,7 +476,7 @@ struct pci_dev *pci_get_pdev(int seg, int bus, int devfn)
struct pci_seg *pseg = get_pseg(seg);
struct pci_dev *pdev = NULL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
ASSERT(seg != -1 || bus == -1);
ASSERT(bus != -1 || devfn == -1);
@@ -581,9 +601,9 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn,
pdev_type = "extended function";
else if (info->is_virtfn)
{
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(seg, info->physfn.bus, info->physfn.devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !pdev )
pci_add_device(seg, info->physfn.bus, info->physfn.devfn,
NULL, node);
@@ -601,7 +621,7 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn,
ret = -ENOMEM;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pseg = alloc_pseg(seg);
if ( !pseg )
goto out;
@@ -703,7 +723,7 @@ int pci_add_device(u16 seg, u8 bus, u8 devfn,
pci_enable_acs(pdev);
out:
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !ret )
{
printk(XENLOG_DEBUG "PCI add %s %04x:%02x:%02x.%u\n", pdev_type,
@@ -735,7 +755,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
if ( !pseg )
return -ENODEV;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
if ( pdev->bus == bus && pdev->devfn == devfn )
{
@@ -749,7 +769,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
break;
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return ret;
}
@@ -807,11 +827,11 @@ int pci_release_devices(struct domain *d)
u8 bus, devfn;
int ret;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pci_clean_dpci_irqs(d);
if ( ret )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return ret;
}
while ( (pdev = pci_get_pdev_by_domain(d, -1, -1, -1)) )
@@ -823,7 +843,7 @@ int pci_release_devices(struct domain *d)
d->domain_id, pdev->seg, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return 0;
}
@@ -920,7 +940,7 @@ void pci_check_disable_device(u16 seg, u8 bus, u8 devfn)
s_time_t now = NOW();
u16 cword;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_real_pdev(seg, bus, devfn);
if ( pdev )
{
@@ -931,7 +951,7 @@ void pci_check_disable_device(u16 seg, u8 bus, u8 devfn)
if ( ++pdev->fault.count < PT_FAULT_THRESHOLD )
pdev = NULL;
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !pdev )
return;
@@ -988,9 +1008,9 @@ int __init scan_pci_devices(void)
{
int ret;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = pci_segments_iterate(_scan_pci_devices, NULL);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return ret;
}
@@ -1054,17 +1074,17 @@ static int __hwdom_init _setup_hwdom_pci_devices(struct pci_seg *pseg, void *arg
if ( iommu_verbose )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
process_pending_softirqs();
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
}
}
if ( !iommu_verbose )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
process_pending_softirqs();
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
}
}
@@ -1076,9 +1096,9 @@ void __hwdom_init setup_hwdom_pci_devices(
{
struct setup_hwdom ctxt = { .d = d, .handler = handler };
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pci_segments_iterate(_setup_hwdom_pci_devices, &ctxt);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
#ifdef CONFIG_ACPI
@@ -1206,9 +1226,9 @@ static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
static void dump_pci_devices(unsigned char ch)
{
printk("==== PCI devices ====\n");
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pci_segments_iterate(_dump_pci_devices, NULL);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
static int __init setup_dump_pcidevs(void)
@@ -1242,7 +1262,7 @@ int iommu_add_device(struct pci_dev *pdev)
if ( !pdev->domain )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
hd = domain_hvm_iommu(pdev->domain);
if ( !iommu_enabled || !hd->platform_ops )
@@ -1271,7 +1291,7 @@ int iommu_enable_device(struct pci_dev *pdev)
if ( !pdev->domain )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
hd = domain_hvm_iommu(pdev->domain);
if ( !iommu_enabled || !hd->platform_ops ||
@@ -1320,9 +1340,9 @@ static int device_assigned(u16 seg, u8 bus, u8 devfn)
{
struct pci_dev *pdev;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev_by_domain(hardware_domain, seg, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return pdev ? 0 : -EBUSY;
}
@@ -1344,13 +1364,13 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
p2m_get_hostp2m(d)->global_logdirty)) )
return -EXDEV;
- if ( !spin_trylock(&pcidevs_lock) )
+ if ( !pcidevs_trylock() )
return -ERESTART;
rc = iommu_construct(d);
if ( rc )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
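
assign_device() is the one converted call site that does not simply block: on contention it backs off with -ERESTART so the hypercall can be retried rather than spinning. The pattern, reduced to its essentials in a sketch (the function name is illustrative and the assignment work is elided):

    static int try_assign(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
    {
        int rc;

        if ( !pcidevs_trylock() )
            return -ERESTART;    /* tell the caller to retry the hypercall */

        rc = 0;                  /* ... device assignment work under the lock ... */

        pcidevs_unlock();
        return rc;
    }
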
@@ -1381,7 +1401,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
done:
if ( !has_arch_pdevs(d) && need_iommu(d) )
iommu_teardown(d);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return rc;
}
@@ -1396,7 +1416,7 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
if ( !iommu_enabled || !hd->platform_ops )
return -EINVAL;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
pdev = pci_get_pdev_by_domain(d, seg, bus, devfn);
if ( !pdev )
return -ENODEV;
@@ -1451,7 +1471,7 @@ static int iommu_get_device_group(
group_id = ops->get_device_group_id(seg, bus, devfn);
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
for_each_pdev( d, pdev )
{
if ( (pdev->seg != seg) ||
@@ -1470,14 +1490,14 @@ static int iommu_get_device_group(
if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
{
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return -1;
}
i++;
}
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
return i;
}
@@ -1605,9 +1625,9 @@ int iommu_do_pci_domctl(
bus = PCI_BUS(machine_sbdf);
devfn = PCI_DEVFN2(machine_sbdf);
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
ret = deassign_device(d, seg, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( ret )
printk(XENLOG_G_ERR
"deassign %04x:%02x:%02x.%u from dom%d failed (%d)\n",
@@ -984,7 +984,7 @@ int pi_update_irte(const struct vcpu *v, const struct pirq *pirq,
spin_unlock_irq(&desc->lock);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
/*
* FIXME: For performance reasons we should store the 'iommu' pointer in
@@ -1282,7 +1282,7 @@ int domain_context_mapping_one(
u16 seg = iommu->intel->drhd->segment;
int agaw;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
context_entries = (struct context_entry *)map_vtd_domain_page(maddr);
@@ -1424,7 +1424,7 @@ static int domain_context_mapping(
if ( !drhd )
return -ENODEV;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
switch ( pdev->type )
{
@@ -1506,7 +1506,7 @@ int domain_context_unmap_one(
u64 maddr;
int iommu_domid;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
spin_lock(&iommu->lock);
maddr = bus_to_context_maddr(iommu, bus);
@@ -1816,7 +1816,7 @@ static int rmrr_identity_mapping(struct domain *d, bool_t map,
struct mapped_rmrr *mrmrr;
struct hvm_iommu *hd = domain_hvm_iommu(d);
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
ASSERT(rmrr->base_address < rmrr->end_address);
/*
@@ -1881,7 +1881,7 @@ static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
u16 bdf;
int ret, i;
- ASSERT(spin_is_locked(&pcidevs_lock));
+ ASSERT(pcidevs_is_locked());
if ( !pdev->domain )
return -EINVAL;
@@ -2109,7 +2109,7 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d)
u16 bdf;
int ret, i;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
for_each_rmrr_device ( rmrr, bdf, i )
{
/*
@@ -2123,7 +2123,7 @@ static void __hwdom_init setup_hwdom_rmrr(struct domain *d)
dprintk(XENLOG_ERR VTDPREFIX,
"IOMMU: mapping reserved region failed\n");
}
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
}
int __init intel_vtd_setup(void)
@@ -117,9 +117,9 @@ void __init video_endboot(void)
const struct pci_dev *pdev;
u8 b = bus, df = devfn, sb;
- spin_lock(&pcidevs_lock);
+ pcidevs_lock();
pdev = pci_get_pdev(0, bus, devfn);
- spin_unlock(&pcidevs_lock);
+ pcidevs_unlock();
if ( !pdev ||
pci_conf_read16(0, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
@@ -94,7 +94,10 @@ struct pci_dev {
* interrupt handling related (the mask bit register).
*/
-extern spinlock_t pcidevs_lock;
+void pcidevs_lock(void);
+void pcidevs_unlock(void);
+int pcidevs_is_locked(void);
+int pcidevs_trylock(void);
bool_t pci_known_segment(u16 seg);
bool_t pci_device_detect(u16 seg, u8 bus, u8 dev, u8 func);
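
Taken together, the four declarations above replace direct manipulation of the spinlock everywhere outside pci.c. A sketch of a typical caller against this interface (find_device() is a hypothetical name; pci_get_pdev() is the real accessor, which as shown earlier ASSERTs pcidevs_is_locked()):

    static struct pci_dev *find_device(int seg, int bus, int devfn)
    {
        struct pci_dev *pdev;

        pcidevs_lock();
        pdev = pci_get_pdev(seg, bus, devfn);   /* requires the lock held */
        pcidevs_unlock();

        return pdev;
    }
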