@@ -688,8 +688,8 @@ static int vpci_msi_update(const struct pci_dev *pdev, uint32_t data,
{
gdprintk(XENLOG_ERR,
"%04x:%02x:%02x.%u: failed to bind PIRQ %u: %d\n",
- pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pirq + i, rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, pirq + i, rc);
while ( bind.machine_irq-- > pirq )
pt_irq_destroy_bind(pdev->domain, &bind);
return rc;
@@ -727,9 +727,9 @@ static int vpci_msi_enable(const struct pci_dev *pdev, uint32_t data,
paddr_t table_base, uint32_t mask)
{
struct msi_info msi_info = {
- .seg = pdev->seg,
- .bus = pdev->bus,
- .devfn = pdev->devfn,
+ .seg = pdev->sbdf.seg,
+ .bus = pdev->sbdf.bus,
+ .devfn = pdev->sbdf.extfunc,
.table_base = table_base,
.entry_nr = nr,
};
@@ -744,8 +744,8 @@ static int vpci_msi_enable(const struct pci_dev *pdev, uint32_t data,
if ( rc )
{
gdprintk(XENLOG_ERR, "%04x:%02x:%02x.%u: failed to map PIRQ: %d\n",
- pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, rc);
return rc;
}
@@ -124,13 +124,13 @@ static void msix_put_fixmap(struct arch_msix *msix, int idx)
static bool memory_decoded(const struct pci_dev *dev)
{
- u8 bus, slot, func;
+ uint8_t bus, slot, func;
if ( !dev->info.is_virtfn )
{
- bus = dev->bus;
- slot = PCI_SLOT(dev->devfn);
- func = PCI_FUNC(dev->devfn);
+ bus = dev->sbdf.bus;
+ slot = dev->sbdf.dev;
+ func = dev->sbdf.func;
}
else
{
@@ -139,14 +139,14 @@ static bool memory_decoded(const struct pci_dev *dev)
func = PCI_FUNC(dev->info.physfn.devfn);
}
- return !!(pci_conf_read16(dev->seg, bus, slot, func, PCI_COMMAND) &
+ return !!(pci_conf_read16(dev->sbdf.seg, bus, slot, func, PCI_COMMAND) &
PCI_COMMAND_MEMORY);
}
static bool msix_memory_decoded(const struct pci_dev *dev, unsigned int pos)
{
- u16 control = pci_conf_read16(dev->seg, dev->bus, PCI_SLOT(dev->devfn),
- PCI_FUNC(dev->devfn), msix_control_reg(pos));
+ u16 control = pci_conf_read16(dev->sbdf.seg, dev->sbdf.bus, dev->sbdf.dev,
+ dev->sbdf.func, msix_control_reg(pos));
if ( !(control & PCI_MSIX_FLAGS_ENABLE) )
return false;
@@ -200,10 +200,10 @@ static bool read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
- u16 data, seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t data, seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
msg->address_lo = pci_conf_read32(seg, bus, slot, func,
msi_lower_address_reg(pos));
@@ -265,10 +265,10 @@ static int write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
struct pci_dev *dev = entry->dev;
int pos = entry->msi_attrib.pos;
- u16 seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
int nr = entry->msi_attrib.entry_nr;
ASSERT((msg->data & (entry[-nr].msi.nvec - 1)) == nr);
@@ -348,10 +348,10 @@ void __msi_set_enable(u16 seg, u8 bus, u8 slot, u8 func, int pos, int enable)
static void msi_set_enable(struct pci_dev *dev, int enable)
{
int pos;
- u16 seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
if ( pos )
@@ -361,10 +361,10 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
static void msix_set_enable(struct pci_dev *dev, int enable)
{
int pos;
- u16 control, seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t control, seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSIX);
if ( pos )
@@ -388,17 +388,17 @@ static bool msi_set_mask_bit(struct irq_desc *desc, bool host, bool guest)
{
struct msi_desc *entry = desc->msi_desc;
struct pci_dev *pdev;
- u16 seg, control;
- u8 bus, slot, func;
+ uint16_t seg, control;
+ uint8_t bus, slot, func;
bool flag = host || guest, maskall;
ASSERT(spin_is_locked(&desc->lock));
BUG_ON(!entry || !entry->dev);
pdev = entry->dev;
- seg = pdev->seg;
- bus = pdev->bus;
- slot = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
+ seg = pdev->sbdf.seg;
+ bus = pdev->sbdf.bus;
+ slot = pdev->sbdf.dev;
+ func = pdev->sbdf.func;
switch ( entry->msi_attrib.type )
{
case PCI_CAP_ID_MSI:
@@ -475,9 +475,8 @@ static int msi_get_mask_bit(const struct msi_desc *entry)
case PCI_CAP_ID_MSI:
if ( !entry->msi_attrib.maskbit )
break;
- return (pci_conf_read32(entry->dev->seg, entry->dev->bus,
- PCI_SLOT(entry->dev->devfn),
- PCI_FUNC(entry->dev->devfn),
+ return (pci_conf_read32(entry->dev->sbdf.seg, entry->dev->sbdf.bus,
+ entry->dev->sbdf.dev, entry->dev->sbdf.func,
entry->msi.mpos) >>
entry->msi_attrib.entry_nr) & 1;
case PCI_CAP_ID_MSIX:
@@ -594,11 +593,11 @@ int setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc)
if ( msidesc->msi_attrib.type == PCI_CAP_ID_MSIX )
{
- control = pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), cpos);
+ control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func, cpos);
if ( !(control & PCI_MSIX_FLAGS_ENABLE) )
- pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), cpos,
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, cpos,
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
}
@@ -608,8 +607,8 @@ int setup_msi_irq(struct irq_desc *desc, struct msi_desc *msidesc)
: &pci_msi_nonmaskable);
if ( !(control & PCI_MSIX_FLAGS_ENABLE) )
- pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), cpos, control);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, cpos, control);
return rc;
}
@@ -689,10 +688,10 @@ static int msi_capability_init(struct pci_dev *dev,
struct msi_desc *entry;
int pos;
unsigned int i, maxvec, mpos;
- u16 control, seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t control, seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
ASSERT(pcidevs_locked());
pos = pci_find_cap_offset(seg, bus, slot, func, PCI_CAP_ID_MSI);
@@ -856,10 +855,10 @@ static int msix_capability_init(struct pci_dev *dev,
u64 table_paddr;
u32 table_offset;
u8 bir, pbus, pslot, pfunc;
- u16 seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
bool maskall = msix->host_maskall;
ASSERT(pcidevs_locked());
@@ -913,7 +912,7 @@ static int msix_capability_init(struct pci_dev *dev,
pbus = dev->info.physfn.bus;
pslot = PCI_SLOT(dev->info.physfn.devfn);
pfunc = PCI_FUNC(dev->info.physfn.devfn);
- vf = PCI_BDF2(dev->bus, dev->devfn);
+ vf = dev->sbdf.bdf;
}
table_paddr = read_pci_mem_bar(seg, pbus, pslot, pfunc, bir, vf);
@@ -1172,10 +1171,10 @@ static void _pci_cleanup_msix(struct arch_msix *msix)
static void __pci_disable_msix(struct msi_desc *entry)
{
struct pci_dev *dev = entry->dev;
- u16 seg = dev->seg;
- u8 bus = dev->bus;
- u8 slot = PCI_SLOT(dev->devfn);
- u8 func = PCI_FUNC(dev->devfn);
+ uint16_t seg = dev->sbdf.seg;
+ uint8_t bus = dev->sbdf.bus;
+ uint8_t slot = dev->sbdf.dev;
+ uint8_t func = dev->sbdf.func;
unsigned int pos = pci_find_cap_offset(seg, bus, slot, func,
PCI_CAP_ID_MSIX);
u16 control = pci_conf_read16(seg, bus, slot, func,
@@ -1295,10 +1294,10 @@ void pci_cleanup_msi(struct pci_dev *pdev)
int pci_msi_conf_write_intercept(struct pci_dev *pdev, unsigned int reg,
unsigned int size, uint32_t *data)
{
- u16 seg = pdev->seg;
- u8 bus = pdev->bus;
- u8 slot = PCI_SLOT(pdev->devfn);
- u8 func = PCI_FUNC(pdev->devfn);
+ uint16_t seg = pdev->sbdf.seg;
+ uint8_t bus = pdev->sbdf.bus;
+ uint8_t slot = pdev->sbdf.dev;
+ uint8_t func = pdev->sbdf.func;
struct msi_desc *entry;
unsigned int pos;
@@ -1365,7 +1364,7 @@ int pci_restore_msi_state(struct pci_dev *pdev)
struct msi_desc *entry, *tmp;
struct irq_desc *desc;
struct msi_msg msg;
- u8 slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
+ uint8_t slot = pdev->sbdf.dev, func = pdev->sbdf.func;
unsigned int type = 0, pos = 0;
u16 control = 0;
@@ -1374,9 +1373,7 @@ int pci_restore_msi_state(struct pci_dev *pdev)
if ( !use_msi )
return -EOPNOTSUPP;
- ret = xsm_resource_setup_pci(XSM_PRIV,
- (pdev->seg << 16) | (pdev->bus << 8) |
- pdev->devfn);
+ ret = xsm_resource_setup_pci(XSM_PRIV, pdev->sbdf.sbdf);
if ( ret )
return ret;
@@ -1396,10 +1393,10 @@ int pci_restore_msi_state(struct pci_dev *pdev)
bogus:
dprintk(XENLOG_ERR,
"Restore MSI for %04x:%02x:%02x:%u entry %u not set?\n",
- pdev->seg, pdev->bus, slot, func, i);
+ pdev->sbdf.seg, pdev->sbdf.bus, slot, func, i);
spin_unlock_irqrestore(&desc->lock, flags);
if ( type == PCI_CAP_ID_MSIX )
- pci_conf_write16(pdev->seg, pdev->bus, slot, func,
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
return -EINVAL;
@@ -1414,16 +1411,16 @@ int pci_restore_msi_state(struct pci_dev *pdev)
}
else if ( !type && entry->msi_attrib.type == PCI_CAP_ID_MSIX )
{
- control = pci_conf_read16(pdev->seg, pdev->bus, slot, func,
- msix_control_reg(pos));
- pci_conf_write16(pdev->seg, pdev->bus, slot, func,
+ control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, slot,
+ func, msix_control_reg(pos));
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
msix_control_reg(pos),
control | (PCI_MSIX_FLAGS_ENABLE |
PCI_MSIX_FLAGS_MASKALL));
if ( unlikely(!memory_decoded(pdev)) )
{
spin_unlock_irqrestore(&desc->lock, flags);
- pci_conf_write16(pdev->seg, pdev->bus, slot, func,
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
msix_control_reg(pos),
control & ~PCI_MSIX_FLAGS_ENABLE);
return -ENXIO;
@@ -1457,17 +1454,18 @@ int pci_restore_msi_state(struct pci_dev *pdev)
{
unsigned int cpos = msi_control_reg(pos);
- control = pci_conf_read16(pdev->seg, pdev->bus, slot, func, cpos) &
- ~PCI_MSI_FLAGS_QSIZE;
+ control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, slot,
+ func, cpos) & ~PCI_MSI_FLAGS_QSIZE;
multi_msi_enable(control, entry->msi.nvec);
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, cpos, control);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func, cpos,
+ control);
msi_set_enable(pdev, 1);
}
}
if ( type == PCI_CAP_ID_MSIX )
- pci_conf_write16(pdev->seg, pdev->bus, slot, func,
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, slot, func,
msix_control_reg(pos),
control | PCI_MSIX_FLAGS_ENABLE);
@@ -289,23 +289,23 @@ void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
if ( !ats_enabled )
return;
- if ( !pci_ats_enabled(pdev->seg, pdev->bus, pdev->devfn) )
+ if ( !pci_ats_enabled(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.extfunc) )
return;
- iommu = find_iommu_for_device(pdev->seg, PCI_BDF2(pdev->bus, pdev->devfn));
+ iommu = find_iommu_for_device(pdev->sbdf.seg, pdev->sbdf.bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("%s: Can't find iommu for %04x:%02x:%02x.%u\n",
- __func__, pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ __func__, pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func);
return;
}
if ( !iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
return;
- req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->bus, devfn));
+ req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(pdev->sbdf.bus, devfn));
queueid = req_id;
maxpend = pdev->ats.queue_depth & 0xff;
@@ -326,13 +326,13 @@ static void amd_iommu_flush_all_iotlbs(struct domain *d, daddr_t daddr,
for_each_pdev( d, pdev )
{
- u8 devfn = pdev->devfn;
+ uint8_t devfn = pdev->sbdf.extfunc;
do {
amd_iommu_flush_iotlb(devfn, pdev, daddr, order);
devfn += pdev->phantom_stride;
- } while ( devfn != pdev->devfn &&
- PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );
+ } while ( devfn != pdev->sbdf.extfunc &&
+ PCI_SLOT(devfn) == pdev->sbdf.dev );
}
}
@@ -525,8 +525,8 @@ int amd_iommu_msi_msg_update_ire(
unsigned int i, nr = 1;
u32 data;
- bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
- seg = pdev ? pdev->seg : hpet_sbdf.seg;
+ bdf = pdev ? pdev->sbdf.bdf : hpet_sbdf.bdf;
+ seg = pdev ? pdev->sbdf.seg : hpet_sbdf.seg;
iommu = _find_iommu_for_device(seg, bdf);
if ( IS_ERR_OR_NULL(iommu) )
@@ -544,12 +544,12 @@ int amd_iommu_msi_msg_update_ire(
if ( !pdev || !pdev->phantom_stride )
break;
bdf += pdev->phantom_stride;
- } while ( PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
+ } while ( PCI_SLOT(bdf) == pdev->sbdf.dev );
for ( i = 0; i < nr; ++i )
msi_desc[i].remap_index = -1;
if ( pdev )
- bdf = PCI_BDF2(pdev->bus, pdev->devfn);
+ bdf = pdev->sbdf.bdf;
}
if ( !msg )
@@ -562,7 +562,7 @@ int amd_iommu_msi_msg_update_ire(
if ( rc || !pdev || !pdev->phantom_stride )
break;
bdf += pdev->phantom_stride;
- } while ( PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
+ } while ( PCI_SLOT(bdf) == pdev->sbdf.dev );
if ( !rc )
{
@@ -579,8 +579,8 @@ void amd_iommu_read_msi_from_ire(
{
unsigned int offset = msg->data & (INTREMAP_ENTRIES - 1);
const struct pci_dev *pdev = msi_desc->dev;
- u16 bdf = pdev ? PCI_BDF2(pdev->bus, pdev->devfn) : hpet_sbdf.bdf;
- u16 seg = pdev ? pdev->seg : hpet_sbdf.seg;
+ uint16_t bdf = pdev ? pdev->sbdf.bdf : hpet_sbdf.bdf;
+ uint16_t seg = pdev ? pdev->sbdf.seg : hpet_sbdf.seg;
const u32 *entry;
if ( IS_ERR_OR_NULL(_find_iommu_for_device(seg, bdf)) )
@@ -324,8 +324,8 @@ static int update_paging_mode(struct domain *d, unsigned long dfn)
if ( pdev->type == DEV_TYPE_PCI_HOST_BRIDGE )
continue;
- bdf = PCI_BDF2(pdev->bus, pdev->devfn);
- iommu = find_iommu_for_device(pdev->seg, bdf);
+ bdf = pdev->sbdf.bdf;
+ iommu = find_iommu_for_device(pdev->sbdf.seg, bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("%s Fail to find iommu.\n", __func__);
@@ -334,7 +334,7 @@ static int update_paging_mode(struct domain *d, unsigned long dfn)
spin_lock_irqsave(&iommu->lock, flags);
do {
- req_id = get_dma_requestor_id(pdev->seg, bdf);
+ req_id = get_dma_requestor_id(pdev->sbdf.seg, bdf);
table = iommu->dev_table.buffer;
dte = &table[req_id];
@@ -346,8 +346,8 @@ static int update_paging_mode(struct domain *d, unsigned long dfn)
amd_iommu_flush_device(iommu, req_id);
bdf += pdev->phantom_stride;
- } while ( PCI_DEVFN2(bdf) != pdev->devfn &&
- PCI_SLOT(bdf) == PCI_SLOT(pdev->devfn) );
+ } while ( PCI_DEVFN2(bdf) != pdev->sbdf.extfunc &&
+ PCI_SLOT(bdf) == pdev->sbdf.dev );
spin_unlock_irqrestore(&iommu->lock, flags);
}
@@ -94,7 +94,7 @@ static void amd_iommu_setup_domain_device(
unsigned long flags;
int req_id, valid = 1;
int dte_i = 0;
- u8 bus = pdev->bus;
+ uint8_t bus = pdev->sbdf.bus;
const struct domain_iommu *hd = dom_iommu(domain);
BUG_ON( !hd->arch.root_table || !hd->arch.paging_mode ||
@@ -120,7 +120,7 @@ static void amd_iommu_setup_domain_device(
dte, page_to_maddr(hd->arch.root_table), domain->domain_id,
hd->arch.paging_mode, valid);
- if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
+ if ( pci_ats_device(iommu->seg, bus, pdev->sbdf.extfunc) &&
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
dte->i = dte_i;
@@ -138,10 +138,10 @@ static void amd_iommu_setup_domain_device(
ASSERT(pcidevs_locked());
- if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
- !pci_ats_enabled(iommu->seg, bus, pdev->devfn) )
+ if ( pci_ats_device(iommu->seg, bus, pdev->sbdf.extfunc) &&
+ !pci_ats_enabled(iommu->seg, bus, pdev->sbdf.extfunc) )
{
- if ( devfn == pdev->devfn )
+ if ( devfn == pdev->sbdf.extfunc )
enable_ats_device(pdev, &iommu->ats_devices);
amd_iommu_flush_iotlb(devfn, pdev, INV_IOMMU_ALL_PAGES_ADDRESS, 0);
@@ -261,7 +261,7 @@ void amd_iommu_disable_domain_device(struct domain *domain,
struct amd_iommu_dte *table, *dte;
unsigned long flags;
int req_id;
- u8 bus = pdev->bus;
+ uint8_t bus = pdev->sbdf.bus;
BUG_ON ( iommu->dev_table.buffer == NULL );
req_id = get_dma_requestor_id(iommu->seg, PCI_BDF2(bus, devfn));
@@ -274,7 +274,7 @@ void amd_iommu_disable_domain_device(struct domain *domain,
dte->tv = 0;
dte->v = 0;
- if ( pci_ats_device(iommu->seg, bus, pdev->devfn) &&
+ if ( pci_ats_device(iommu->seg, bus, pdev->sbdf.extfunc) &&
iommu_has_cap(iommu, PCI_CAP_IOTLB_SHIFT) )
dte->i = 0;
@@ -289,7 +289,7 @@ void amd_iommu_disable_domain_device(struct domain *domain,
ASSERT(pcidevs_locked());
- if ( devfn == pdev->devfn &&
+ if ( devfn == pdev->sbdf.extfunc &&
pci_ats_device(iommu->seg, bus, devfn) &&
pci_ats_enabled(iommu->seg, bus, devfn) )
disable_ats_device(pdev);
@@ -299,23 +299,22 @@ static int reassign_device(struct domain *source, struct domain *target,
u8 devfn, struct pci_dev *pdev)
{
struct amd_iommu *iommu;
- int bdf, rc;
+ int rc;
struct domain_iommu *t = dom_iommu(target);
- bdf = PCI_BDF2(pdev->bus, pdev->devfn);
- iommu = find_iommu_for_device(pdev->seg, bdf);
+ iommu = find_iommu_for_device(pdev->sbdf.seg, pdev->sbdf.bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("Fail to find iommu."
" %04x:%02x:%x02.%x cannot be assigned to dom%d\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- target->domain_id);
+ pdev->sbdf.seg, pdev->sbdf.bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), target->domain_id);
return -ENODEV;
}
amd_iommu_disable_domain_device(source, iommu, devfn, pdev);
- if ( devfn == pdev->devfn )
+ if ( devfn == pdev->sbdf.extfunc )
{
list_move(&pdev->domain_list, &target->arch.pdev_list);
pdev->domain = target;
@@ -327,8 +326,8 @@ static int reassign_device(struct domain *source, struct domain *target,
amd_iommu_setup_domain_device(target, iommu, devfn, pdev);
AMD_IOMMU_DEBUG("Re-assign %04x:%02x:%02x.%u from dom%d to dom%d\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- source->domain_id, target->domain_id);
+ pdev->sbdf.seg, pdev->sbdf.bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), source->domain_id, target->domain_id);
return 0;
}
@@ -337,9 +336,9 @@ static int amd_iommu_assign_device(struct domain *d, u8 devfn,
struct pci_dev *pdev,
u32 flag)
{
- struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->seg);
- int bdf = PCI_BDF2(pdev->bus, devfn);
- int req_id = get_dma_requestor_id(pdev->seg, bdf);
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(pdev->sbdf.seg);
+ int bdf = PCI_BDF2(pdev->sbdf.bus, devfn);
+ int req_id = get_dma_requestor_id(pdev->sbdf.seg, bdf);
if ( ivrs_mappings[req_id].unity_map_enable )
{
@@ -420,13 +419,11 @@ static void amd_iommu_domain_destroy(struct domain *d)
static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
{
struct amd_iommu *iommu;
- u16 bdf;
if ( !pdev->domain )
return -EINVAL;
- bdf = PCI_BDF2(pdev->bus, pdev->devfn);
- iommu = find_iommu_for_device(pdev->seg, bdf);
+ iommu = find_iommu_for_device(pdev->sbdf.seg, pdev->sbdf.bdf);
if ( unlikely(!iommu) )
{
/* Filter bridge devices. */
@@ -434,14 +431,14 @@ static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
is_hardware_domain(pdev->domain) )
{
AMD_IOMMU_DEBUG("Skipping host bridge %04x:%02x:%02x.%u\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn),
- PCI_FUNC(devfn));
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func);
return 0;
}
AMD_IOMMU_DEBUG("No iommu for %04x:%02x:%02x.%u; cannot be handed to d%d\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pdev->domain->domain_id);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, pdev->domain->domain_id);
return -ENODEV;
}
@@ -452,18 +449,17 @@ static int amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
static int amd_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
{
struct amd_iommu *iommu;
- u16 bdf;
+
if ( !pdev->domain )
return -EINVAL;
- bdf = PCI_BDF2(pdev->bus, pdev->devfn);
- iommu = find_iommu_for_device(pdev->seg, bdf);
+ iommu = find_iommu_for_device(pdev->sbdf.seg, pdev->sbdf.bdf);
if ( !iommu )
{
AMD_IOMMU_DEBUG("Fail to find iommu."
" %04x:%02x:%02x.%u cannot be removed from dom%d\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- pdev->domain->domain_id);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, pdev->domain->domain_id);
return -ENODEV;
}
@@ -238,10 +238,10 @@ static void check_pdev(const struct pci_dev *pdev)
(PCI_STATUS_PARITY | PCI_STATUS_SIG_TARGET_ABORT | \
PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_REC_MASTER_ABORT | \
PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_DETECTED_PARITY)
- u16 seg = pdev->seg;
- u8 bus = pdev->bus;
- u8 dev = PCI_SLOT(pdev->devfn);
- u8 func = PCI_FUNC(pdev->devfn);
+ uint16_t seg = pdev->sbdf.seg;
+ uint8_t bus = pdev->sbdf.bus;
+ uint8_t dev = pdev->sbdf.dev;
+ uint8_t func = pdev->sbdf.func;
u16 val;
if ( command_mask )
@@ -289,12 +289,12 @@ static void check_pdev(const struct pci_dev *pdev)
static void apply_quirks(struct pci_dev *pdev)
{
- uint16_t vendor = pci_conf_read16(pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), PCI_VENDOR_ID);
- uint16_t device = pci_conf_read16(pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), PCI_DEVICE_ID);
+ uint16_t vendor = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
+ PCI_VENDOR_ID);
+ uint16_t device = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
+ PCI_DEVICE_ID);
static const struct {
uint16_t vendor, device;
} ignore_bars[] = {
@@ -332,16 +332,14 @@ static struct pci_dev *alloc_pdev(struct pci_seg *pseg, u8 bus, u8 devfn)
struct pci_dev *pdev;
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
- if ( pdev->bus == bus && pdev->devfn == devfn )
+ if ( pdev->sbdf.bus == bus && pdev->sbdf.extfunc == devfn )
return pdev;
pdev = xzalloc(struct pci_dev);
if ( !pdev )
return NULL;
- *(u16*) &pdev->seg = pseg->nr;
- *((u8*) &pdev->bus) = bus;
- *((u8*) &pdev->devfn) = devfn;
+ *(uint32_t *) &pdev->sbdf.sbdf = PCI_SBDF3(pseg->nr, bus, devfn);
pdev->domain = NULL;
INIT_LIST_HEAD(&pdev->msi_list);
@@ -436,20 +434,18 @@ static void free_pdev(struct pci_seg *pseg, struct pci_dev *pdev)
/* update bus2bridge */
switch ( pdev->type )
{
- u8 dev, func, sec_bus, sub_bus;
+ uint8_t sec_bus, sub_bus;
case DEV_TYPE_PCIe2PCI_BRIDGE:
case DEV_TYPE_LEGACY_PCI_BRIDGE:
- dev = PCI_SLOT(pdev->devfn);
- func = PCI_FUNC(pdev->devfn);
- sec_bus = pci_conf_read8(pseg->nr, pdev->bus, dev, func,
- PCI_SECONDARY_BUS);
- sub_bus = pci_conf_read8(pseg->nr, pdev->bus, dev, func,
- PCI_SUBORDINATE_BUS);
+ sec_bus = pci_conf_read8(pseg->nr, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_SECONDARY_BUS);
+ sub_bus = pci_conf_read8(pseg->nr, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_SUBORDINATE_BUS);
spin_lock(&pseg->bus2bridge_lock);
for ( ; sec_bus <= sub_bus; sec_bus++ )
- pseg->bus2bridge[sec_bus] = pseg->bus2bridge[pdev->bus];
+ pseg->bus2bridge[sec_bus] = pseg->bus2bridge[pdev->sbdf.bus];
spin_unlock(&pseg->bus2bridge_lock);
break;
@@ -539,8 +535,8 @@ struct pci_dev *pci_get_pdev(int seg, int bus, int devfn)
do {
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
- if ( (pdev->bus == bus || bus == -1) &&
- (pdev->devfn == devfn || devfn == -1) )
+ if ( (pdev->sbdf.bus == bus || bus == -1) &&
+ (pdev->sbdf.extfunc == devfn || devfn == -1) )
return pdev;
} while ( radix_tree_gang_lookup(&pci_segments, (void **)&pseg,
pseg->nr + 1, 1) );
@@ -588,8 +584,8 @@ struct pci_dev *pci_get_pdev_by_domain(const struct domain *d, int seg,
do {
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
- if ( (pdev->bus == bus || bus == -1) &&
- (pdev->devfn == devfn || devfn == -1) &&
+ if ( (pdev->sbdf.bus == bus || bus == -1) &&
+ (pdev->sbdf.extfunc == devfn || devfn == -1) &&
(pdev->domain == d) )
return pdev;
} while ( radix_tree_gang_lookup(&pci_segments, (void **)&pseg,
@@ -605,15 +601,15 @@ struct pci_dev *pci_get_pdev_by_domain(const struct domain *d, int seg,
static void pci_enable_acs(struct pci_dev *pdev)
{
int pos;
- u16 cap, ctrl, seg = pdev->seg;
- u8 bus = pdev->bus;
- u8 dev = PCI_SLOT(pdev->devfn);
- u8 func = PCI_FUNC(pdev->devfn);
+ uint16_t cap, ctrl, seg = pdev->sbdf.seg;
+ uint8_t bus = pdev->sbdf.bus;
+ uint8_t dev = pdev->sbdf.dev;
+ uint8_t func = pdev->sbdf.func;
if ( !iommu_enabled )
return;
- pos = pci_find_ext_capability(seg, bus, pdev->devfn, PCI_EXT_CAP_ID_ACS);
+ pos = pci_find_ext_capability(seg, bus, pdev->sbdf.extfunc, PCI_EXT_CAP_ID_ACS);
if (!pos)
return;
@@ -845,7 +841,7 @@ int pci_remove_device(u16 seg, u8 bus, u8 devfn)
pcidevs_lock();
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
- if ( pdev->bus == bus && pdev->devfn == devfn )
+ if ( pdev->sbdf.bus == bus && pdev->sbdf.extfunc == devfn )
{
ret = iommu_remove_device(pdev);
if ( pdev->domain )
@@ -924,11 +920,11 @@ int pci_release_devices(struct domain *d)
}
while ( (pdev = pci_get_pdev_by_domain(d, -1, -1, -1)) )
{
- bus = pdev->bus;
- devfn = pdev->devfn;
- if ( deassign_device(d, pdev->seg, bus, devfn) )
+ bus = pdev->sbdf.bus;
+ devfn = pdev->sbdf.extfunc;
+ if ( deassign_device(d, pdev->sbdf.seg, bus, devfn) )
printk("domain %d: deassign device (%04x:%02x:%02x.%u) failed!\n",
- d->domain_id, pdev->seg, bus,
+ d->domain_id, pdev->sbdf.seg, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
}
pcidevs_unlock();
@@ -1047,10 +1043,9 @@ void pci_check_disable_device(u16 seg, u8 bus, u8 devfn)
/* Tell the device to stop DMAing; we can't rely on the guest to
* control it for us. */
- devfn = pdev->devfn;
- cword = pci_conf_read16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ cword = pci_conf_read16(seg, bus, pdev->sbdf.dev, pdev->sbdf.func,
PCI_COMMAND);
- pci_conf_write16(seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ pci_conf_write16(seg, bus, pdev->sbdf.dev, pdev->sbdf.func,
PCI_COMMAND, cword & ~PCI_COMMAND_MASTER);
}
@@ -1113,7 +1108,7 @@ struct setup_hwdom {
static void __hwdom_init setup_one_hwdom_device(const struct setup_hwdom *ctxt,
struct pci_dev *pdev)
{
- u8 devfn = pdev->devfn;
+ uint8_t devfn = pdev->sbdf.extfunc;
int err;
do {
@@ -1121,14 +1116,14 @@ static void __hwdom_init setup_one_hwdom_device(const struct setup_hwdom *ctxt,
if ( err )
{
printk(XENLOG_ERR "setup %04x:%02x:%02x.%u for d%d failed (%d)\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
- ctxt->d->domain_id, err);
- if ( devfn == pdev->devfn )
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, ctxt->d->domain_id, err);
+ if ( devfn == pdev->sbdf.extfunc )
return;
}
devfn += pdev->phantom_stride;
- } while ( devfn != pdev->devfn &&
- PCI_SLOT(devfn) == PCI_SLOT(pdev->devfn) );
+ } while ( devfn != pdev->sbdf.extfunc &&
+ PCI_SLOT(devfn) == pdev->sbdf.dev );
err = vpci_add_handlers(pdev);
if ( err )
@@ -1203,24 +1198,22 @@ void __hwdom_init setup_hwdom_pci_devices(
static int hest_match_pci(const struct acpi_hest_aer_common *p,
const struct pci_dev *pdev)
{
- return ACPI_HEST_SEGMENT(p->bus) == pdev->seg &&
- ACPI_HEST_BUS(p->bus) == pdev->bus &&
- p->device == PCI_SLOT(pdev->devfn) &&
- p->function == PCI_FUNC(pdev->devfn);
+ return ACPI_HEST_SEGMENT(p->bus) == pdev->sbdf.seg &&
+ ACPI_HEST_BUS(p->bus) == pdev->sbdf.bus &&
+ p->device == pdev->sbdf.dev &&
+ p->function == pdev->sbdf.func;
}
static bool_t hest_match_type(const struct acpi_hest_header *hest_hdr,
const struct pci_dev *pdev)
{
- unsigned int pos = pci_find_cap_offset(pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn),
+ unsigned int pos = pci_find_cap_offset(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
PCI_CAP_ID_EXP);
- u8 pcie = MASK_EXTR(pci_conf_read16(pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn),
- pos + PCI_EXP_FLAGS),
- PCI_EXP_FLAGS_TYPE);
+ uint8_t pcie = MASK_EXTR(pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
+ pos + PCI_EXP_FLAGS),
+ PCI_EXP_FLAGS_TYPE);
switch ( hest_hdr->type )
{
@@ -1229,8 +1222,8 @@ static bool_t hest_match_type(const struct acpi_hest_header *hest_hdr,
case ACPI_HEST_TYPE_AER_ENDPOINT:
return pcie == PCI_EXP_TYPE_ENDPOINT;
case ACPI_HEST_TYPE_AER_BRIDGE:
- return pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), PCI_CLASS_DEVICE) ==
+ return pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_CLASS_DEVICE) ==
PCI_CLASS_BRIDGE_PCI;
}
@@ -1289,8 +1282,8 @@ bool_t pcie_aer_get_firmware_first(const struct pci_dev *pdev)
{
struct aer_hest_parse_info info = { .pdev = pdev };
- return pci_find_cap_offset(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), PCI_CAP_ID_EXP) &&
+ return pci_find_cap_offset(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_CAP_ID_EXP) &&
apei_hest_parse(aer_hest_parse, &info) >= 0 &&
info.firmware_first;
}
@@ -1306,8 +1299,7 @@ static int _dump_pci_devices(struct pci_seg *pseg, void *arg)
list_for_each_entry ( pdev, &pseg->alldevs_list, alldevs_list )
{
printk("%04x:%02x:%02x.%u - dom %-3d - node %-3d - MSIs < ",
- pseg->nr, pdev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
+ pseg->nr, pdev->sbdf.bus, pdev->sbdf.dev, pdev->sbdf.func,
pdev->domain ? pdev->domain->domain_id : -1,
(pdev->node != NUMA_NO_NODE) ? pdev->node : -1);
list_for_each_entry ( msi, &pdev->msi_list, list )
@@ -1362,19 +1354,20 @@ static int iommu_add_device(struct pci_dev *pdev)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- rc = hd->platform_ops->add_device(pdev->devfn, pci_to_dev(pdev));
+ rc = hd->platform_ops->add_device(pdev->sbdf.extfunc, pci_to_dev(pdev));
if ( rc || !pdev->phantom_stride )
return rc;
- for ( devfn = pdev->devfn ; ; )
+ for ( devfn = pdev->sbdf.extfunc ; ; )
{
devfn += pdev->phantom_stride;
- if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
+ if ( PCI_SLOT(devfn) != pdev->sbdf.dev )
return 0;
rc = hd->platform_ops->add_device(devfn, pci_to_dev(pdev));
if ( rc )
printk(XENLOG_WARNING "IOMMU: add %04x:%02x:%02x.%u failed (%d)\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, PCI_SLOT(devfn),
+ PCI_FUNC(devfn), rc);
}
}
@@ -1398,7 +1391,7 @@ static int iommu_enable_device(struct pci_dev *pdev)
static int iommu_remove_device(struct pci_dev *pdev)
{
const struct domain_iommu *hd;
- u8 devfn;
+ uint8_t devfn;
if ( !pdev->domain )
return -EINVAL;
@@ -1407,23 +1400,24 @@ static int iommu_remove_device(struct pci_dev *pdev)
if ( !iommu_enabled || !hd->platform_ops )
return 0;
- for ( devfn = pdev->devfn ; pdev->phantom_stride; )
+ for ( devfn = pdev->sbdf.extfunc ; pdev->phantom_stride; )
{
int rc;
devfn += pdev->phantom_stride;
- if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
+ if ( PCI_SLOT(devfn) != pdev->sbdf.dev )
break;
rc = hd->platform_ops->remove_device(devfn, pci_to_dev(pdev));
if ( !rc )
continue;
printk(XENLOG_ERR "IOMMU: remove %04x:%02x:%02x.%u failed (%d)\n",
- pdev->seg, pdev->bus, PCI_SLOT(devfn), PCI_FUNC(devfn), rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, PCI_SLOT(devfn), PCI_FUNC(devfn),
+ rc);
return rc;
}
- return hd->platform_ops->remove_device(pdev->devfn, pci_to_dev(pdev));
+ return hd->platform_ops->remove_device(pdev->sbdf.extfunc, pci_to_dev(pdev));
}
/*
@@ -1485,7 +1479,7 @@ static int assign_device(struct domain *d, u16 seg, u8 bus, u8 devfn, u32 flag)
for ( ; pdev->phantom_stride; rc = 0 )
{
devfn += pdev->phantom_stride;
- if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
+ if ( PCI_SLOT(devfn) != pdev->sbdf.dev )
break;
rc = hd->platform_ops->assign_device(d, devfn, pci_to_dev(pdev), flag);
if ( rc )
@@ -1520,7 +1514,7 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
while ( pdev->phantom_stride )
{
devfn += pdev->phantom_stride;
- if ( PCI_SLOT(devfn) != PCI_SLOT(pdev->devfn) )
+ if ( PCI_SLOT(devfn) != pdev->sbdf.dev )
break;
ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
pci_to_dev(pdev));
@@ -1532,7 +1526,7 @@ int deassign_device(struct domain *d, u16 seg, u8 bus, u8 devfn)
return ret;
}
- devfn = pdev->devfn;
+ devfn = pdev->sbdf.extfunc;
ret = hd->platform_ops->reassign_device(d, hardware_domain, devfn,
pci_to_dev(pdev));
if ( ret )
@@ -1558,7 +1552,6 @@ static int iommu_get_device_group(
const struct domain_iommu *hd = dom_iommu(d);
struct pci_dev *pdev;
int group_id, sdev_id;
- u32 bdf;
int i = 0;
const struct iommu_ops *ops = hd->platform_ops;
@@ -1570,19 +1563,18 @@ static int iommu_get_device_group(
pcidevs_lock();
for_each_pdev( d, pdev )
{
- if ( (pdev->seg != seg) ||
- ((pdev->bus == bus) && (pdev->devfn == devfn)) )
+ if ( (pdev->sbdf.seg != seg) ||
+ ((pdev->sbdf.bus == bus) && (pdev->sbdf.extfunc == devfn)) )
continue;
- if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | (pdev->bus << 8) | pdev->devfn) )
+ if ( xsm_get_device_group(XSM_HOOK, (seg << 16) | pdev->sbdf.bdf) )
continue;
- sdev_id = ops->get_device_group_id(seg, pdev->bus, pdev->devfn);
+ sdev_id = ops->get_device_group_id(seg, pdev->sbdf.bus,
+ pdev->sbdf.extfunc);
if ( (sdev_id == group_id) && (i < max_sdevs) )
{
- bdf = 0;
- bdf |= (pdev->bus & 0xff) << 16;
- bdf |= (pdev->devfn & 0xff) << 8;
+ uint32_t bdf = pdev->sbdf.bdf;
if ( unlikely(copy_to_guest_offset(buf, i, &bdf, 1)) )
{
@@ -1618,8 +1610,8 @@ void iommu_dev_iotlb_flush_timeout(struct domain *d, struct pci_dev *pdev)
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
"dom%d: ATS device %04x:%02x:%02x.%u flush failed\n",
- d->domain_id, pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ d->domain_id, pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func);
if ( !is_hardware_domain(d) )
domain_crash(d);
@@ -219,18 +219,18 @@ struct acpi_drhd_unit *acpi_find_matched_drhd_unit(const struct pci_dev *pdev)
}
else if ( pdev->info.is_extfn )
{
- bus = pdev->bus;
+ bus = pdev->sbdf.bus;
devfn = 0;
}
else
{
- bus = pdev->bus;
- devfn = pdev->devfn;
+ bus = pdev->sbdf.bus;
+ devfn = pdev->sbdf.extfunc;
}
list_for_each_entry ( drhd, &acpi_drhd_units, list )
{
- if ( drhd->segment != pdev->seg )
+ if ( drhd->segment != pdev->sbdf.seg )
continue;
for (i = 0; i < drhd->scope.devices_cnt; i++)
@@ -253,10 +253,10 @@ struct acpi_atsr_unit *acpi_find_matched_atsr_unit(const struct pci_dev *pdev)
list_for_each_entry ( atsr, &acpi_atsr_units, list )
{
- if ( atsr->segment != pdev->seg )
+ if ( atsr->segment != pdev->sbdf.seg )
continue;
- if ( test_bit(pdev->bus, atsr->scope.buses) )
+ if ( test_bit(pdev->sbdf.bus, atsr->scope.buses) )
return atsr;
if ( atsr->all_ports )
@@ -483,9 +483,9 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire)
if ( !pdev || !ire )
return;
- seg = pdev->seg;
- bus = pdev->bus;
- devfn = pdev->devfn;
+ seg = pdev->sbdf.seg;
+ bus = pdev->sbdf.bus;
+ devfn = pdev->sbdf.extfunc;
switch ( pdev->type )
{
unsigned int sq;
@@ -517,7 +517,7 @@ static void set_msi_source_id(struct pci_dev *pdev, struct iremap_entry *ire)
{
if ( pdev_type(seg, bus, devfn) == DEV_TYPE_PCIe2PCI_BRIDGE )
set_ire_sid(ire, SVT_VERIFY_BUS, SQ_ALL_16,
- (bus << 8) | pdev->bus);
+ (bus << 8) | pdev->sbdf.bus);
else
set_ire_sid(ire, SVT_VERIFY_SID_SQ, SQ_ALL_16,
PCI_BDF2(bus, devfn));
@@ -1484,7 +1484,7 @@ static int domain_context_mapping(struct domain *domain, u8 devfn,
{
struct acpi_drhd_unit *drhd;
int ret = 0;
- u8 seg = pdev->seg, bus = pdev->bus, secbus;
+ uint8_t seg = pdev->sbdf.seg, bus = pdev->sbdf.bus, secbus;
drhd = acpi_find_matched_drhd_unit(pdev);
if ( !drhd )
@@ -1515,7 +1515,7 @@ static int domain_context_mapping(struct domain *domain, u8 devfn,
PCI_SLOT(devfn), PCI_FUNC(devfn));
ret = domain_context_mapping_one(domain, drhd->iommu, bus, devfn,
pdev);
- if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 )
+ if ( !ret && devfn == pdev->sbdf.extfunc && ats_device(pdev, drhd) > 0 )
enable_ats_device(pdev, &drhd->iommu->ats_devices);
break;
@@ -1543,7 +1543,7 @@ static int domain_context_mapping(struct domain *domain, u8 devfn,
* behind the bridge. Map that id as well if we didn't already.
*/
if ( !ret && pdev_type(seg, bus, devfn) == DEV_TYPE_PCIe2PCI_BRIDGE &&
- (secbus != pdev->bus || pdev->devfn != 0) )
+ (secbus != pdev->sbdf.bus || pdev->sbdf.extfunc != 0) )
ret = domain_context_mapping_one(domain, drhd->iommu, secbus, 0,
pci_get_pdev(seg, secbus, 0));
@@ -1557,7 +1557,7 @@ static int domain_context_mapping(struct domain *domain, u8 devfn,
break;
}
- if ( !ret && devfn == pdev->devfn )
+ if ( !ret && devfn == pdev->sbdf.extfunc )
pci_vtd_quirk(pdev);
return ret;
@@ -1635,7 +1635,8 @@ static int domain_context_unmap(struct domain *domain, u8 devfn,
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
int ret = 0;
- u8 seg = pdev->seg, bus = pdev->bus, tmp_bus, tmp_devfn, secbus;
+ uint8_t seg = pdev->sbdf.seg, bus = pdev->sbdf.bus, tmp_bus, tmp_devfn,
+ secbus;
int found = 0;
drhd = acpi_find_matched_drhd_unit(pdev);
@@ -1665,7 +1666,7 @@ static int domain_context_unmap(struct domain *domain, u8 devfn,
domain->domain_id, seg, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
ret = domain_context_unmap_one(domain, iommu, bus, devfn);
- if ( !ret && devfn == pdev->devfn && ats_device(pdev, drhd) > 0 )
+ if ( !ret && devfn == pdev->sbdf.extfunc && ats_device(pdev, drhd) > 0 )
disable_ats_device(pdev);
break;
@@ -1711,7 +1712,8 @@ static int domain_context_unmap(struct domain *domain, u8 devfn,
*/
for_each_pdev ( domain, pdev )
{
- if ( pdev->seg == seg && pdev->bus == bus && pdev->devfn == devfn )
+ if ( pdev->sbdf.seg == seg && pdev->sbdf.bus == bus &&
+ pdev->sbdf.extfunc == devfn )
continue;
drhd = acpi_find_matched_drhd_unit(pdev);
@@ -2050,8 +2052,8 @@ static int intel_iommu_add_device(u8 devfn, struct pci_dev *pdev)
for_each_rmrr_device ( rmrr, bdf, i )
{
- if ( rmrr->segment == pdev->seg &&
- PCI_BUS(bdf) == pdev->bus &&
+ if ( rmrr->segment == pdev->sbdf.seg &&
+ PCI_BUS(bdf) == pdev->sbdf.bus &&
PCI_DEVFN2(bdf) == devfn )
{
/*
@@ -2096,8 +2098,8 @@ static int intel_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
for_each_rmrr_device ( rmrr, bdf, i )
{
- if ( rmrr->segment != pdev->seg ||
- PCI_BUS(bdf) != pdev->bus ||
+ if ( rmrr->segment != pdev->sbdf.seg ||
+ PCI_BUS(bdf) != pdev->sbdf.bus ||
PCI_DEVFN2(bdf) != devfn )
continue;
@@ -2421,8 +2423,8 @@ static int reassign_device_ownership(
unsigned int i;
for_each_rmrr_device( rmrr, bdf, i )
- if ( rmrr->segment == pdev->seg &&
- PCI_BUS(bdf) == pdev->bus &&
+ if ( rmrr->segment == pdev->sbdf.seg &&
+ PCI_BUS(bdf) == pdev->sbdf.bus &&
PCI_DEVFN2(bdf) == devfn )
{
/*
@@ -2451,7 +2453,7 @@ static int reassign_device_ownership(
return ret;
}
- if ( devfn == pdev->devfn )
+ if ( devfn == pdev->sbdf.extfunc )
{
list_move(&pdev->domain_list, &target->arch.pdev_list);
pdev->domain = target;
@@ -2474,8 +2476,8 @@ static int intel_iommu_assign_device(
if ( list_empty(&acpi_drhd_units) )
return -ENODEV;
- seg = pdev->seg;
- bus = pdev->bus;
+ seg = pdev->sbdf.seg;
+ bus = pdev->sbdf.bus;
/*
* In rare cases one given rmrr is shared by multiple devices but
* obviously this would put the security of a system at risk. So
@@ -257,7 +257,7 @@ int qinval_device_iotlb_sync(struct iommu *iommu, struct pci_dev *pdev,
qinval_entry->q.dev_iotlb_inv_dsc.lo.res_1 = 0;
qinval_entry->q.dev_iotlb_inv_dsc.lo.max_invs_pend = pdev->ats.queue_depth;
qinval_entry->q.dev_iotlb_inv_dsc.lo.res_2 = 0;
- qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = PCI_BDF2(pdev->bus, pdev->devfn);
+ qinval_entry->q.dev_iotlb_inv_dsc.lo.sid = pdev->sbdf.bdf;
qinval_entry->q.dev_iotlb_inv_dsc.lo.res_3 = 0;
qinval_entry->q.dev_iotlb_inv_dsc.hi.size = size;
@@ -413,10 +413,10 @@ int me_wifi_quirk(struct domain *domain, u8 bus, u8 devfn, int map)
void pci_vtd_quirk(const struct pci_dev *pdev)
{
- int seg = pdev->seg;
- int bus = pdev->bus;
- int dev = PCI_SLOT(pdev->devfn);
- int func = PCI_FUNC(pdev->devfn);
+ int seg = pdev->sbdf.seg;
+ int bus = pdev->sbdf.bus;
+ int dev = pdev->sbdf.dev;
+ int func = pdev->sbdf.func;
int pos;
bool_t ff;
u32 val, val2;
@@ -454,11 +454,11 @@ void pci_vtd_quirk(const struct pci_dev *pdev)
/* Sandybridge-EP (Romley) */
case 0x3c00: /* host bridge */
case 0x3c01 ... 0x3c0b: /* root ports */
- pos = pci_find_ext_capability(seg, bus, pdev->devfn,
+ pos = pci_find_ext_capability(seg, bus, pdev->sbdf.extfunc,
PCI_EXT_CAP_ID_ERR);
if ( !pos )
{
- pos = pci_find_ext_capability(seg, bus, pdev->devfn,
+ pos = pci_find_ext_capability(seg, bus, pdev->sbdf.extfunc,
PCI_EXT_CAP_ID_VNDR);
while ( pos )
{
@@ -468,8 +468,8 @@ void pci_vtd_quirk(const struct pci_dev *pdev)
pos += PCI_VNDR_HEADER;
break;
}
- pos = pci_find_next_ext_capability(seg, bus, pdev->devfn, pos,
- PCI_EXT_CAP_ID_VNDR);
+ pos = pci_find_next_ext_capability(seg, bus, pdev->sbdf.extfunc,
+ pos, PCI_EXT_CAP_ID_VNDR);
}
ff = 0;
}
@@ -57,8 +57,8 @@ int ats_device(const struct pci_dev *pdev, const struct acpi_drhd_unit *drhd)
return 0;
ats_drhd = find_ats_dev_drhd(drhd->iommu);
- pos = pci_find_ext_capability(pdev->seg, pdev->bus, pdev->devfn,
- PCI_EXT_CAP_ID_ATS);
+ pos = pci_find_ext_capability(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.extfunc, PCI_EXT_CAP_ID_ATS);
if ( pos && (ats_drhd == NULL) )
{
@@ -79,19 +79,19 @@ static int device_in_domain(const struct iommu *iommu,
int tt, found = 0;
root_entry = (struct root_entry *) map_vtd_domain_page(iommu->root_maddr);
- if ( !root_entry || !root_present(root_entry[pdev->bus]) )
+ if ( !root_entry || !root_present(root_entry[pdev->sbdf.bus]) )
goto out;
ctxt_entry = (struct context_entry *)
- map_vtd_domain_page(root_entry[pdev->bus].val);
+ map_vtd_domain_page(root_entry[pdev->sbdf.bus].val);
if ( ctxt_entry == NULL )
goto out;
- if ( context_domain_id(ctxt_entry[pdev->devfn]) != did )
+ if ( context_domain_id(ctxt_entry[pdev->sbdf.extfunc]) != did )
goto out;
- tt = context_translation_type(ctxt_entry[pdev->devfn]);
+ tt = context_translation_type(ctxt_entry[pdev->sbdf.extfunc]);
if ( tt != CONTEXT_TT_DEV_IOTLB )
goto out;
@@ -23,8 +23,8 @@ boolean_param("ats", ats_enabled);
int enable_ats_device(struct pci_dev *pdev, struct list_head *ats_list)
{
u32 value;
- u16 seg = pdev->seg;
- u8 bus = pdev->bus, devfn = pdev->devfn;
+ uint16_t seg = pdev->sbdf.seg;
+ uint8_t bus = pdev->sbdf.bus, devfn = pdev->sbdf.extfunc;
int pos;
pos = pci_find_ext_capability(seg, bus, devfn, PCI_EXT_CAP_ID_ATS);
@@ -76,8 +76,8 @@ int enable_ats_device(struct pci_dev *pdev, struct list_head *ats_list)
void disable_ats_device(struct pci_dev *pdev)
{
u32 value;
- u16 seg = pdev->seg;
- u8 bus = pdev->bus, devfn = pdev->devfn;
+ uint16_t seg = pdev->sbdf.seg;
+ uint8_t bus = pdev->sbdf.bus, devfn = pdev->sbdf.extfunc;
BUG_ON(!pdev->ats.cap_pos);
@@ -117,10 +117,10 @@ int pci_find_next_ext_capability(int seg, int bus, int devfn, int start, int cap
void pci_intx(const struct pci_dev *pdev, bool enable)
{
- uint16_t seg = pdev->seg;
- uint8_t bus = pdev->bus;
- uint8_t slot = PCI_SLOT(pdev->devfn);
- uint8_t func = PCI_FUNC(pdev->devfn);
+ uint16_t seg = pdev->sbdf.seg;
+ uint8_t bus = pdev->sbdf.bus;
+ uint8_t slot = pdev->sbdf.dev;
+ uint8_t func = pdev->sbdf.func;
uint16_t cmd = pci_conf_read16(seg, bus, slot, func, PCI_COMMAND);
if ( enable )
@@ -85,7 +85,6 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd,
bool rom_only)
{
struct vpci_header *header = &pdev->vpci->header;
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
bool map = cmd & PCI_COMMAND_MEMORY;
unsigned int i;
@@ -113,7 +112,8 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd,
(map ? PCI_ROM_ADDRESS_ENABLE : 0);
header->bars[i].enabled = header->rom_enabled = map;
- pci_conf_write32(pdev->seg, pdev->bus, slot, func, rom_pos, val);
+ pci_conf_write32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, rom_pos, val);
return;
}
@@ -123,8 +123,8 @@ static void modify_decoding(const struct pci_dev *pdev, uint16_t cmd,
}
if ( !rom_only )
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND,
- cmd);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND, cmd);
else
ASSERT_UNREACHABLE();
}
@@ -335,8 +335,8 @@ static int modify_bars(const struct pci_dev *pdev, uint16_t cmd, bool rom_only)
static void cmd_write(const struct pci_dev *pdev, unsigned int reg,
uint32_t cmd, void *data)
{
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
- uint16_t current_cmd = pci_conf_read16(pdev->seg, pdev->bus, slot, func,
+ uint16_t current_cmd = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
reg);
/*
@@ -352,14 +352,14 @@ static void cmd_write(const struct pci_dev *pdev, unsigned int reg,
*/
modify_bars(pdev, cmd, false);
else
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, reg, cmd);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg, cmd);
}
static void bar_write(const struct pci_dev *pdev, unsigned int reg,
uint32_t val, void *data)
{
struct vpci_bar *bar = data;
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
bool hi = false;
if ( bar->type == VPCI_BAR_MEM64_HI )
@@ -371,15 +371,15 @@ static void bar_write(const struct pci_dev *pdev, unsigned int reg,
else
val &= PCI_BASE_ADDRESS_MEM_MASK;
- if ( pci_conf_read16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND) &
- PCI_COMMAND_MEMORY )
+ if ( pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND) & PCI_COMMAND_MEMORY )
{
/* If the value written is the current one avoid printing a warning. */
if ( val != (uint32_t)(bar->addr >> (hi ? 32 : 0)) )
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: ignored BAR %lu write with memory decoding enabled\n",
- pdev->seg, pdev->bus, slot, func,
- bar - pdev->vpci->header.bars + hi);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, bar - pdev->vpci->header.bars + hi);
return;
}
@@ -399,8 +399,8 @@ static void bar_write(const struct pci_dev *pdev, unsigned int reg,
val |= bar->prefetchable ? PCI_BASE_ADDRESS_MEM_PREFETCH : 0;
}
- pci_conf_write32(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), reg, val);
+ pci_conf_write32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg, val);
}
static void rom_write(const struct pci_dev *pdev, unsigned int reg,
@@ -408,8 +408,8 @@ static void rom_write(const struct pci_dev *pdev, unsigned int reg,
{
struct vpci_header *header = &pdev->vpci->header;
struct vpci_bar *rom = data;
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
- uint16_t cmd = pci_conf_read16(pdev->seg, pdev->bus, slot, func,
+ uint16_t cmd = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
PCI_COMMAND);
bool new_enabled = val & PCI_ROM_ADDRESS_ENABLE;
@@ -417,7 +417,8 @@ static void rom_write(const struct pci_dev *pdev, unsigned int reg,
{
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: ignored ROM BAR write with memory decoding enabled\n",
- pdev->seg, pdev->bus, slot, func);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func);
return;
}
@@ -432,7 +433,8 @@ static void rom_write(const struct pci_dev *pdev, unsigned int reg,
{
/* Just update the ROM BAR field. */
header->rom_enabled = new_enabled;
- pci_conf_write32(pdev->seg, pdev->bus, slot, func, reg, val);
+ pci_conf_write32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg, val);
}
/*
* Pass PCI_COMMAND_MEMORY or 0 to signal a map/unmap request, note that
@@ -455,19 +457,15 @@ static void rom_write(const struct pci_dev *pdev, unsigned int reg,
static int init_bars(struct pci_dev *pdev)
{
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
uint16_t cmd;
uint64_t addr, size;
unsigned int i, num_bars, rom_reg;
struct vpci_header *header = &pdev->vpci->header;
struct vpci_bar *bars = header->bars;
- pci_sbdf_t sbdf = {
- .sbdf = PCI_SBDF3(pdev->seg, pdev->bus, pdev->devfn),
- };
int rc;
- switch ( pci_conf_read8(pdev->seg, pdev->bus, slot, func, PCI_HEADER_TYPE)
- & 0x7f )
+ switch ( pci_conf_read8(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_HEADER_TYPE) & 0x7f )
{
case PCI_HEADER_TYPE_NORMAL:
num_bars = PCI_HEADER_NORMAL_NR_BARS;
@@ -493,9 +491,11 @@ static int init_bars(struct pci_dev *pdev)
return 0;
/* Disable memory decoding before sizing. */
- cmd = pci_conf_read16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND);
+ cmd = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND);
if ( cmd & PCI_COMMAND_MEMORY )
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND,
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND,
cmd & ~PCI_COMMAND_MEMORY);
for ( i = 0; i < num_bars; i++ )
@@ -510,15 +510,16 @@ static int init_bars(struct pci_dev *pdev)
4, &bars[i]);
if ( rc )
{
- pci_conf_write16(pdev->seg, pdev->bus, slot, func,
- PCI_COMMAND, cmd);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND, cmd);
return rc;
}
continue;
}
- val = pci_conf_read32(pdev->seg, pdev->bus, slot, func, reg);
+ val = pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg);
if ( (val & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO )
{
bars[i].type = VPCI_BAR_IO;
@@ -530,12 +531,12 @@ static int init_bars(struct pci_dev *pdev)
else
bars[i].type = VPCI_BAR_MEM32;
- rc = pci_size_mem_bar(sbdf, reg, &addr, &size,
+ rc = pci_size_mem_bar(pdev->sbdf, reg, &addr, &size,
(i == num_bars - 1) ? PCI_BAR_LAST : 0);
if ( rc < 0 )
{
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND,
- cmd);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND, cmd);
return rc;
}
@@ -553,14 +554,14 @@ static int init_bars(struct pci_dev *pdev)
&bars[i]);
if ( rc )
{
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, PCI_COMMAND,
- cmd);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, PCI_COMMAND, cmd);
return rc;
}
}
/* Check expansion ROM. */
- rc = pci_size_mem_bar(sbdf, rom_reg, &addr, &size, PCI_BAR_ROM);
+ rc = pci_size_mem_bar(pdev->sbdf, rom_reg, &addr, &size, PCI_BAR_ROM);
if ( rc > 0 && size )
{
struct vpci_bar *rom = &header->bars[num_bars];
@@ -568,7 +569,8 @@ static int init_bars(struct pci_dev *pdev)
rom->type = VPCI_BAR_ROM;
rom->size = size;
rom->addr = addr;
- header->rom_enabled = pci_conf_read32(pdev->seg, pdev->bus, slot, func,
+ header->rom_enabled = pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
rom_reg) & PCI_ROM_ADDRESS_ENABLE;
rc = vpci_add_register(pdev->vpci, vpci_hw_read32, rom_write, rom_reg,
@@ -77,9 +77,8 @@ static void control_write(const struct pci_dev *pdev, unsigned int reg,
msi->vectors = vectors;
msi->enabled = new_enabled;
- pci_conf_write16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), reg,
- control_read(pdev, reg, data));
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg, control_read(pdev, reg, data));
}
static void update_msi(const struct pci_dev *pdev, struct vpci_msi *msi)
@@ -187,8 +186,8 @@ static void mask_write(const struct pci_dev *pdev, unsigned int reg,
static int init_msi(struct pci_dev *pdev)
{
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
- unsigned int pos = pci_find_cap_offset(pdev->seg, pdev->bus, slot, func,
+ unsigned int pos = pci_find_cap_offset(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
PCI_CAP_ID_MSI);
uint16_t control;
int ret;
@@ -211,8 +210,8 @@ static int init_msi(struct pci_dev *pdev)
return ret;
/* Get the maximum number of vectors the device supports. */
- control = pci_conf_read16(pdev->seg, pdev->bus, slot, func,
- msi_control_reg(pos));
+ control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, msi_control_reg(pos));
/*
* FIXME: I've only been able to test this code with devices using a single
@@ -293,8 +292,8 @@ void vpci_dump_msi(void)
msi = pdev->vpci->msi;
if ( msi && msi->enabled )
{
- printk("%04x:%02x:%02x.%u MSI\n", pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ printk("%04x:%02x:%02x.%u MSI\n", pdev->sbdf.seg,
+ pdev->sbdf.bus, pdev->sbdf.dev, pdev->sbdf.func);
printk(" enabled: %d 64-bit: %d",
msi->enabled, msi->address64);
@@ -311,8 +310,8 @@ void vpci_dump_msi(void)
{
int rc;
- printk("%04x:%02x:%02x.%u MSI-X\n", pdev->seg, pdev->bus,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ printk("%04x:%02x:%02x.%u MSI-X\n", pdev->sbdf.seg,
+ pdev->sbdf.bus, pdev->sbdf.dev, pdev->sbdf.func);
printk(" entries: %u maskall: %d enabled: %d\n",
msix->max_entries, msix->masked, msix->enabled);
@@ -42,7 +42,6 @@ static uint32_t control_read(const struct pci_dev *pdev, unsigned int reg,
static int update_entry(struct vpci_msix_entry *entry,
const struct pci_dev *pdev, unsigned int nr)
{
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
int rc = vpci_msix_arch_disable_entry(entry, pdev);
/* Ignore ENOENT, it means the entry wasn't setup. */
@@ -50,7 +49,8 @@ static int update_entry(struct vpci_msix_entry *entry,
{
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: unable to disable entry %u for update: %d\n",
- pdev->seg, pdev->bus, slot, func, nr, rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev, pdev->sbdf.func,
+ nr, rc);
return rc;
}
@@ -61,7 +61,8 @@ static int update_entry(struct vpci_msix_entry *entry,
{
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: unable to enable entry %u: %d\n",
- pdev->seg, pdev->bus, slot, func, nr, rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev, pdev->sbdf.func,
+ nr, rc);
/* Entry is likely not properly configured. */
return rc;
}
@@ -72,7 +73,6 @@ static int update_entry(struct vpci_msix_entry *entry,
static void control_write(const struct pci_dev *pdev, unsigned int reg,
uint32_t val, void *data)
{
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
struct vpci_msix *msix = data;
bool new_masked = val & PCI_MSIX_FLAGS_MASKALL;
bool new_enabled = val & PCI_MSIX_FLAGS_ENABLE;
@@ -135,7 +135,8 @@ static void control_write(const struct pci_dev *pdev, unsigned int reg,
default:
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: unable to disable entry %u: %d\n",
- pdev->seg, pdev->bus, slot, func, i, rc);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, i, rc);
return;
}
}
@@ -146,7 +147,8 @@ static void control_write(const struct pci_dev *pdev, unsigned int reg,
val = control_read(pdev, reg, data);
if ( pci_msi_conf_write_intercept(msix->pdev, reg, 2, &val) >= 0 )
- pci_conf_write16(pdev->seg, pdev->bus, slot, func, reg, val);
+ pci_conf_write16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg, val);
}
static struct vpci_msix *msix_find(const struct domain *d, unsigned long addr)
@@ -181,7 +183,7 @@ static bool access_allowed(const struct pci_dev *pdev, unsigned long addr,
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: unaligned or invalid size MSI-X table access\n",
- pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev, pdev->sbdf.func);
return false;
}
@@ -433,8 +435,8 @@ int vpci_make_msix_hole(const struct pci_dev *pdev)
gprintk(XENLOG_WARNING,
"%04x:%02x:%02x.%u: existing mapping (mfn: %" PRI_mfn
"type: %d) at %#lx clobbers MSIX MMIO area\n",
- pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), mfn_x(mfn), t, start);
+ pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, mfn_x(mfn), t, start);
return -EEXIST;
}
put_gfn(d, start);
@@ -447,18 +449,18 @@ int vpci_make_msix_hole(const struct pci_dev *pdev)
static int init_msix(struct pci_dev *pdev)
{
struct domain *d = pdev->domain;
- uint8_t slot = PCI_SLOT(pdev->devfn), func = PCI_FUNC(pdev->devfn);
unsigned int msix_offset, i, max_entries;
uint16_t control;
int rc;
- msix_offset = pci_find_cap_offset(pdev->seg, pdev->bus, slot, func,
+ msix_offset = pci_find_cap_offset(pdev->sbdf.seg, pdev->sbdf.bus,
+ pdev->sbdf.dev, pdev->sbdf.func,
PCI_CAP_ID_MSIX);
if ( !msix_offset )
return 0;
- control = pci_conf_read16(pdev->seg, pdev->bus, slot, func,
- msix_control_reg(msix_offset));
+ control = pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, msix_control_reg(msix_offset));
max_entries = msix_table_size(control);
@@ -470,11 +472,11 @@ static int init_msix(struct pci_dev *pdev)
pdev->vpci->msix->pdev = pdev;
pdev->vpci->msix->tables[VPCI_MSIX_TABLE] =
- pci_conf_read32(pdev->seg, pdev->bus, slot, func,
- msix_table_offset_reg(msix_offset));
+ pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, msix_table_offset_reg(msix_offset));
pdev->vpci->msix->tables[VPCI_MSIX_PBA] =
- pci_conf_read32(pdev->seg, pdev->bus, slot, func,
- msix_pba_offset_reg(msix_offset));
+ pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, msix_pba_offset_reg(msix_offset));
for ( i = 0; i < pdev->vpci->msix->max_entries; i++)
{
@@ -114,15 +114,15 @@ static void vpci_ignored_write(const struct pci_dev *pdev, unsigned int reg,
uint32_t vpci_hw_read16(const struct pci_dev *pdev, unsigned int reg,
void *data)
{
- return pci_conf_read16(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), reg);
+ return pci_conf_read16(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg);
}
uint32_t vpci_hw_read32(const struct pci_dev *pdev, unsigned int reg,
void *data)
{
- return pci_conf_read32(pdev->seg, pdev->bus, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), reg);
+ return pci_conf_read32(pdev->sbdf.seg, pdev->sbdf.bus, pdev->sbdf.dev,
+ pdev->sbdf.func, reg);
}
int vpci_add_register(struct vpci *vpci, vpci_read_t *read_handler,
@@ -80,9 +80,8 @@ struct pci_dev {
struct arch_msix *msix;
struct domain *domain;
- const u16 seg;
- const u8 bus;
- const u8 devfn;
+
+ const pci_sbdf_t sbdf;
u8 phantom_stride;

This patch replaces the seg, bus and devfn fields of the struct and fixes
the callers. While there, instances of u<size> have also been replaced
with uint<size>_t.

No functional change intended.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Cc: George Dunlap <George.Dunlap@eu.citrix.com>
Cc: Ian Jackson <ian.jackson@eu.citrix.com>
Cc: Julien Grall <julien.grall@arm.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Tim Deegan <tim@xen.org>
Cc: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Cc: Brian Woods <brian.woods@amd.com>
Cc: Kevin Tian <kevin.tian@intel.com>
---
 xen/arch/x86/hvm/vmsi.c                     |  14 +-
 xen/arch/x86/msi.c                          | 136 ++++++++--------
 xen/drivers/passthrough/amd/iommu_cmd.c     |  16 +-
 xen/drivers/passthrough/amd/iommu_intr.c    |  14 +-
 xen/drivers/passthrough/amd/iommu_map.c     |  10 +-
 xen/drivers/passthrough/amd/pci_amd_iommu.c |  58 ++++---
 xen/drivers/passthrough/pci.c               | 162 ++++++++++----------
 xen/drivers/passthrough/vtd/dmar.c          |  12 +-
 xen/drivers/passthrough/vtd/intremap.c      |   8 +-
 xen/drivers/passthrough/vtd/iommu.c         |  34 ++--
 xen/drivers/passthrough/vtd/qinval.c        |   2 +-
 xen/drivers/passthrough/vtd/quirks.c        |  16 +-
 xen/drivers/passthrough/vtd/x86/ats.c       |  12 +-
 xen/drivers/passthrough/x86/ats.c           |   8 +-
 xen/drivers/pci/pci.c                       |   8 +-
 xen/drivers/vpci/header.c                   |  74 ++++-----
 xen/drivers/vpci/msi.c                      |  21 ++-
 xen/drivers/vpci/msix.c                     |  36 +++--
 xen/drivers/vpci/vpci.c                     |   8 +-
 xen/include/xen/pci.h                       |   5 +-
 20 files changed, 322 insertions(+), 332 deletions(-)
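
For reference, below is a minimal sketch of the pci_sbdf_t layout this
series relies on. It is reconstructed purely from the accessors used in
the diff (sbdf, seg, bdf, bus, extfunc, dev, func) and the PCI_SBDF3()
call in alloc_pdev(); the authoritative definition lives in
xen/include/xen/pci.h, and the exact nesting and bitfield packing shown
here are assumptions rather than a copy of that header.

#include <stdint.h>

/*
 * Sketch only: member names match the accessors used above; the layout
 * (anonymous unions/structs, bitfield order) is assumed, not authoritative.
 */
typedef union {
    uint32_t sbdf;                         /* whole seg:bus:dev.func value  */
    struct {
        union {
            uint16_t bdf;                  /* bus:dev.func                  */
            struct {
                union {
                    struct {
                        uint8_t func : 3,  /* function number               */
                                dev  : 5;  /* device (slot) number          */
                    };
                    uint8_t extfunc;       /* devfn: dev and func combined  */
                };
                uint8_t bus;               /* bus number                    */
            };
        };
        uint16_t seg;                      /* PCI segment (domain)          */
    };
} pci_sbdf_t;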