@@ -34,15 +34,18 @@ static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
/* data is needed to prevent a pointer cast on 32bit */
unsigned long data;
+ read_lock(&v->domain->pci_lock);
if ( vpci_ecam_read(sbdf, ECAM_REG_OFFSET(info->gpa),
1U << info->dabt.size, &data) )
{
*r = data;
+ read_unlock(&v->domain->pci_lock);
return 1;
}
*r = invalid;
+ read_unlock(&v->domain->pci_lock);
return 0;
}
@@ -51,9 +54,13 @@ static int vpci_mmio_write(struct vcpu *v, mmio_info_t *info,
{
struct pci_host_bridge *bridge = p;
pci_sbdf_t sbdf = vpci_sbdf_from_gpa(bridge, info->gpa);
+ int ret;
- return vpci_ecam_write(sbdf, ECAM_REG_OFFSET(info->gpa),
- 1U << info->dabt.size, r);
+ write_lock(&v->domain->pci_lock);
+ ret = vpci_ecam_write(sbdf, ECAM_REG_OFFSET(info->gpa),
+ 1U << info->dabt.size, r);
+ write_unlock(&v->domain->pci_lock);
+ return ret;
}
static const struct mmio_handler_ops vpci_mmio_handler = {
@@ -261,7 +261,7 @@ static int cf_check vpci_portio_read(
const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
uint64_t *data)
{
- const struct domain *d = current->domain;
+ struct domain *d = current->domain;
unsigned int reg;
pci_sbdf_t sbdf;
uint32_t cf8;
@@ -285,7 +285,9 @@ static int cf_check vpci_portio_read(
if ( !vpci_access_allowed(reg, size) )
return X86EMUL_OKAY;
+ read_lock(&d->pci_lock);
*data = vpci_read(sbdf, reg, size);
+ read_unlock(&d->pci_lock);
return X86EMUL_OKAY;
}
@@ -316,7 +318,9 @@ static int cf_check vpci_portio_write(
if ( !vpci_access_allowed(reg, size) )
return X86EMUL_OKAY;
+ write_lock(&d->pci_lock);
vpci_write(sbdf, reg, size, data);
+ write_unlock(&d->pci_lock);
return X86EMUL_OKAY;
}
@@ -424,7 +428,9 @@ static int cf_check vpci_mmcfg_read(
read_unlock(&d->arch.hvm.mmcfg_lock);
/* Failed reads are not propagated to the caller */
+ read_lock(&d->pci_lock);
vpci_ecam_read(sbdf, reg, len, data);
+ read_unlock(&d->pci_lock);
return X86EMUL_OKAY;
}
@@ -449,7 +455,9 @@ static int cf_check vpci_mmcfg_write(
read_unlock(&d->arch.hvm.mmcfg_lock);
/* Failed writes are not propagated to the caller */
+ write_lock(&d->pci_lock);
vpci_ecam_write(sbdf, reg, len, data);
+ write_unlock(&d->pci_lock);
return X86EMUL_OKAY;
}
@@ -440,6 +440,8 @@ uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size)
unsigned int data_offset = 0;
uint32_t data = ~(uint32_t)0;
+ ASSERT(rw_is_locked(&d->pci_lock));
+
if ( !size )
{
ASSERT_UNREACHABLE();
@@ -452,15 +454,11 @@ uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size)
* If this is hwdom and the device is assigned to DomXEN, acquiring hwdom's
* pci_lock is sufficient.
*/
- read_lock(&d->pci_lock);
pdev = pci_get_pdev(d, sbdf);
if ( !pdev && is_hardware_domain(d) )
pdev = pci_get_pdev(dom_xen, sbdf);
if ( !pdev || !pdev->vpci )
- {
- read_unlock(&d->pci_lock);
return vpci_read_hw(sbdf, reg, size);
- }
spin_lock(&pdev->vpci->lock);
@@ -507,7 +505,6 @@ uint32_t vpci_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int size)
ASSERT(data_offset < size);
}
spin_unlock(&pdev->vpci->lock);
- read_unlock(&d->pci_lock);
if ( data_offset < size )
{
@@ -555,6 +552,8 @@ void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
const struct vpci_register *r;
unsigned int data_offset = 0;
+ ASSERT(rw_is_write_locked(&d->pci_lock));
+
if ( !size )
{
ASSERT_UNREACHABLE();
@@ -570,7 +569,6 @@ void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
* TODO: We need to take pci_locks in exclusive mode only if we
* are modifying BARs, so there is a room for improvement.
*/
- write_lock(&d->pci_lock);
pdev = pci_get_pdev(d, sbdf);
if ( !pdev && is_hardware_domain(d) )
pdev = pci_get_pdev(dom_xen, sbdf);
@@ -579,8 +577,6 @@ void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
/* Ignore writes to read-only devices, which have no ->vpci. */
const unsigned long *ro_map = pci_get_ro_map(sbdf.seg);
- write_unlock(&d->pci_lock);
-
if ( !ro_map || !test_bit(sbdf.bdf, ro_map) )
vpci_write_hw(sbdf, reg, size, data);
return;
@@ -622,7 +618,6 @@ void vpci_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int size,
ASSERT(data_offset < size);
}
spin_unlock(&pdev->vpci->lock);
- write_unlock(&d->pci_lock);
if ( data_offset < size )
/* Tailing gap, write the remaining. */
@@ -651,8 +646,10 @@ bool vpci_access_allowed(unsigned int reg, unsigned int len)
}
bool vpci_ecam_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int len,
- unsigned long data)
+ unsigned long data)
{
+ ASSERT(rw_is_write_locked(&current->domain->pci_lock));
+
if ( !vpci_access_allowed(reg, len) ||
(reg + len) > PCI_CFG_SPACE_EXP_SIZE )
return false;
@@ -667,8 +664,10 @@ bool vpci_ecam_write(pci_sbdf_t sbdf, unsigned int reg, unsigned int len,
}
bool vpci_ecam_read(pci_sbdf_t sbdf, unsigned int reg, unsigned int len,
- unsigned long *data)
+ unsigned long *data)
{
+ ASSERT(rw_is_locked(&current->domain->pci_lock));
+
if ( !vpci_access_allowed(reg, len) ||
(reg + len) > PCI_CFG_SPACE_EXP_SIZE )
return false;
In preparation for translating virtual PCI bus topology (where a pdev
lookup will be needed in the vPCI I/O handlers), acquire d->pci_lock in
the vPCI I/O handlers. While here, adjust indentation in
vpci_ecam_{read,write}.

Signed-off-by: Stewart Hildebrand <stewart.hildebrand@amd.com>
---
v18->v19:
* new patch

While the current patch series focuses on Arm, this is also a
prerequisite for eventually enabling x86 PVH domU passthrough. Similar
to Arm, a call to vpci_translate_virtual_device() would be added in
xen/arch/x86/hvm/io.c:vpci_portio_{read,write} and
vpci_mmcfg_{read,write}.
---
 xen/arch/arm/vpci.c     | 11 +++++++++--
 xen/arch/x86/hvm/io.c   | 10 +++++++++-
 xen/drivers/vpci/vpci.c | 21 ++++++++++-----------
 3 files changed, 28 insertions(+), 14 deletions(-)
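A note on the lock discipline introduced above: the read handlers only
need the pdev lookup to stay stable, so they take d->pci_lock in read
mode, while the write handlers take it in write mode because
vpci_write() may end up modifying BARs (see the TODO comment quoted in
the vpci_write() hunk). For illustration only, here is the same read
path sketched with a single unlock site (identifiers as in
xen/arch/arm/vpci.c; this is not what the hunks above do, they unlock
on each exit path):

static int vpci_mmio_read(struct vcpu *v, mmio_info_t *info,
                          register_t *r, void *p)
{
    /* sbdf, data and invalid set up as in the existing handler. */
    int ret = 0;

    read_lock(&v->domain->pci_lock);
    if ( vpci_ecam_read(sbdf, ECAM_REG_OFFSET(info->gpa),
                        1U << info->dabt.size, &data) )
    {
        *r = data;
        ret = 1;
    }
    else
        *r = invalid;
    read_unlock(&v->domain->pci_lock);

    return ret;
}

The hunks above instead keep the existing early-return structure,
presumably to minimize churn.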
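As for the follow-up mentioned above, a rough sketch of how
vpci_translate_virtual_device() might be wired into vpci_portio_read()
once x86 PVH domU passthrough is enabled. The helper's shape (bool
return, in/out sbdf, caller holding d->pci_lock) is assumed from the
Arm side of this series; none of this is part of the present patch:

static int cf_check vpci_portio_read(
    const struct hvm_io_handler *handler, uint64_t addr, uint32_t size,
    uint64_t *data)
{
    struct domain *d = current->domain;
    unsigned int reg;
    pci_sbdf_t sbdf;

    /*
     * CF8 decoding and the vpci_access_allowed() check as in the
     * existing handler.
     */

    read_lock(&d->pci_lock);

    /*
     * Assumed helper: translate the guest-visible SBDF to the physical
     * one; needs d->pci_lock held for the pdev lookup, which is why
     * the lock is taken before, rather than after, translation.
     */
    if ( !is_hardware_domain(d) && !vpci_translate_virtual_device(d, &sbdf) )
    {
        read_unlock(&d->pci_lock);
        *data = ~0UL;
        return X86EMUL_OKAY;
    }

    *data = vpci_read(sbdf, reg, size);
    read_unlock(&d->pci_lock);

    return X86EMUL_OKAY;
}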