@@ -246,7 +246,7 @@ void amd_iommu_flush_intremap(struct amd_iommu *iommu, uint16_t bdf);
void amd_iommu_flush_all_caches(struct amd_iommu *iommu);
/* find iommu for bdf */
-struct amd_iommu *find_iommu_for_device(int seg, int bdf);
+struct amd_iommu *find_iommu_for_device(pci_sbdf_t sbdf);
/* interrupt remapping */
bool cf_check iov_supports_xt(void);
@@ -239,17 +239,17 @@ static int __init register_range_for_device(
unsigned int bdf, paddr_t base, paddr_t limit,
bool iw, bool ir, bool exclusion)
{
- int seg = 0; /* XXX */
- struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
+ pci_sbdf_t sbdf = { .seg = 0 /* XXX */, .bdf = bdf };
+ struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(sbdf.seg);
struct amd_iommu *iommu;
u16 req;
int rc = 0;
- iommu = find_iommu_for_device(seg, bdf);
+ iommu = find_iommu_for_device(sbdf);
if ( !iommu )
{
AMD_IOMMU_WARN("IVMD: no IOMMU for device %pp - ignoring constrain\n",
- &PCI_SBDF(seg, bdf));
+ &sbdf);
return 0;
}
req = ivrs_mappings[bdf].dte_requestor_id;
@@ -263,9 +263,9 @@ static int __init register_range_for_device(
paddr_t length = limit + PAGE_SIZE - base;
/* reserve unity-mapped page entries for device */
- rc = reserve_unity_map_for_device(seg, bdf, base, length, iw, ir,
+ rc = reserve_unity_map_for_device(sbdf.seg, bdf, base, length, iw, ir,
false) ?:
- reserve_unity_map_for_device(seg, req, base, length, iw, ir,
+ reserve_unity_map_for_device(sbdf.seg, req, base, length, iw, ir,
false);
}
else
@@ -297,7 +297,7 @@ static int __init register_range_for_iommu_devices(
/* reserve unity-mapped page entries for devices */
for ( bdf = rc = 0; !rc && bdf < ivrs_bdf_entries; bdf++ )
{
- if ( iommu != find_iommu_for_device(iommu->seg, bdf) )
+ if ( iommu != find_iommu_for_device(PCI_SBDF(iommu->seg, bdf)) )
continue;
req = get_ivrs_mappings(iommu->seg)[bdf].dte_requestor_id;
@@ -288,7 +288,7 @@ void amd_iommu_flush_iotlb(u8 devfn, const struct pci_dev *pdev,
if ( !pci_ats_enabled(pdev->seg, pdev->bus, pdev->devfn) )
return;
- iommu = find_iommu_for_device(pdev->seg, pdev->sbdf.bdf);
+ iommu = find_iommu_for_device(pdev->sbdf);
if ( !iommu )
{
@@ -1540,13 +1540,13 @@ static void invalidate_all_domain_pages(void)
static int cf_check _invalidate_all_devices(
    u16 seg, struct ivrs_mappings *ivrs_mappings)
{
    unsigned int bdf;
    u16 req_id;
    struct amd_iommu *iommu;
for ( bdf = 0; bdf < ivrs_bdf_entries; bdf++ )
{
-        iommu = find_iommu_for_device(seg, bdf);
+        iommu = find_iommu_for_device(PCI_SBDF(seg, bdf));
        req_id = ivrs_mappings[bdf].dte_requestor_id;
        if ( iommu )
        {
@@ -337,7 +337,7 @@ void cf_check amd_iommu_ioapic_update_ire(
/* get device id of ioapic devices */
bdf = ioapic_sbdf[idx].bdf;
seg = ioapic_sbdf[idx].seg;
- iommu = find_iommu_for_device(seg, bdf);
+ iommu = find_iommu_for_device(PCI_SBDF(seg, bdf));
if ( !iommu )
{
AMD_IOMMU_WARN("failed to find IOMMU for IO-APIC @ %04x:%04x\n",
@@ -383,7 +383,7 @@ unsigned int cf_check amd_iommu_read_ioapic_from_ire(
seg = ioapic_sbdf[idx].seg;
bdf = ioapic_sbdf[idx].bdf;
- iommu = find_iommu_for_device(seg, bdf);
+ iommu = find_iommu_for_device(PCI_SBDF(seg, bdf));
if ( !iommu )
return val;
req_id = get_intremap_requestor_id(seg, bdf);
@@ -495,19 +495,19 @@ static int update_intremap_entry_from_msi_msg(
return fresh;
}
-static struct amd_iommu *_find_iommu_for_device(int seg, int bdf)
+static struct amd_iommu *_find_iommu_for_device(pci_sbdf_t sbdf)
{
struct amd_iommu *iommu;
for_each_amd_iommu ( iommu )
- if ( iommu->seg == seg && iommu->bdf == bdf )
+ if ( iommu->sbdf.sbdf == sbdf.sbdf )
return NULL;
- iommu = find_iommu_for_device(seg, bdf);
+ iommu = find_iommu_for_device(sbdf);
if ( iommu )
return iommu;
- AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %pp\n", &PCI_SBDF(seg, bdf));
+ AMD_IOMMU_DEBUG("No IOMMU for MSI dev = %pp\n", &sbdf);
return ERR_PTR(-EINVAL);
}
@@ -523,7 +523,7 @@ int cf_check amd_iommu_msi_msg_update_ire(
bdf = pdev ? pdev->sbdf.bdf : hpet_sbdf.bdf;
seg = pdev ? pdev->seg : hpet_sbdf.seg;
- iommu = _find_iommu_for_device(seg, bdf);
+ iommu = _find_iommu_for_device(PCI_SBDF(seg, bdf));
if ( IS_ERR_OR_NULL(iommu) )
return PTR_ERR(iommu);
@@ -660,8 +660,8 @@ bool __init cf_check iov_supports_xt(void)
if ( idx == MAX_IO_APICS )
return false;
- if ( !find_iommu_for_device(ioapic_sbdf[idx].seg,
- ioapic_sbdf[idx].bdf) )
+ if ( !find_iommu_for_device(PCI_SBDF(ioapic_sbdf[idx].seg,
+ ioapic_sbdf[idx].bdf)) )
{
AMD_IOMMU_WARN("no IOMMU for IO-APIC %#x (ID %x)\n",
apic, IO_APIC_ID(apic));
@@ -690,7 +690,7 @@ int __init cf_check amd_setup_hpet_msi(struct msi_desc *msi_desc)
return -ENODEV;
}
- iommu = find_iommu_for_device(hpet_sbdf.seg, hpet_sbdf.bdf);
+ iommu = find_iommu_for_device(PCI_SBDF(hpet_sbdf.seg, hpet_sbdf.bdf));
if ( !iommu )
return -ENXIO;
@@ -717,7 +717,7 @@ int cf_check amd_iommu_get_reserved_device_memory(
pcidevs_unlock();
if ( pdev )
- iommu = find_iommu_for_device(seg, bdf);
+ iommu = find_iommu_for_device(sbdf);
if ( !iommu )
continue;
}
@@ -32,35 +32,35 @@ static bool __read_mostly init_done;
static const struct iommu_init_ops _iommu_init_ops;
-struct amd_iommu *find_iommu_for_device(int seg, int bdf)
+struct amd_iommu *find_iommu_for_device(pci_sbdf_t sbdf)
{
-    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
+    struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(sbdf.seg);
-    if ( !ivrs_mappings || bdf >= ivrs_bdf_entries )
+    if ( !ivrs_mappings || sbdf.bdf >= ivrs_bdf_entries )
        return NULL;
-    if ( unlikely(!ivrs_mappings[bdf].iommu) && likely(init_done) )
+    if ( unlikely(!ivrs_mappings[sbdf.bdf].iommu) && likely(init_done) )
    {
-        unsigned int bd0 = bdf & ~PCI_FUNC(~0);
+        unsigned int bd0 = sbdf.bdf & ~PCI_FUNC(~0);
-        if ( ivrs_mappings[bd0].iommu && ivrs_mappings[bd0].iommu->bdf != bdf )
+        if ( ivrs_mappings[bd0].iommu && ivrs_mappings[bd0].iommu->sbdf.bdf != sbdf.bdf )
        {
            struct ivrs_mappings tmp = ivrs_mappings[bd0];
            tmp.iommu = NULL;
            if ( tmp.dte_requestor_id == bd0 )
-                tmp.dte_requestor_id = bdf;
-            ivrs_mappings[bdf] = tmp;
+                tmp.dte_requestor_id = sbdf.bdf;
+            ivrs_mappings[sbdf.bdf] = tmp;
            printk(XENLOG_WARNING "%pp not found in ACPI tables;"
-                   " using same IOMMU as function 0\n", &PCI_SBDF(seg, bdf));
+                   " using same IOMMU as function 0\n", &sbdf);
            /* write iommu field last */
-            ivrs_mappings[bdf].iommu = ivrs_mappings[bd0].iommu;
+            ivrs_mappings[sbdf.bdf].iommu = ivrs_mappings[bd0].iommu;
        }
    }
-    return ivrs_mappings[bdf].iommu;
+    return ivrs_mappings[sbdf.bdf].iommu;
}
/*
@@ -107,7 +107,7 @@ static bool any_pdev_behind_iommu(const struct domain *d,
if ( pdev == exclude )
continue;
- if ( find_iommu_for_device(pdev->seg, pdev->sbdf.bdf) == iommu )
+ if ( find_iommu_for_device(pdev->sbdf) == iommu )
return true;
}
@@ -468,7 +468,7 @@ static int cf_check reassign_device(
struct amd_iommu *iommu;
int rc;
- iommu = find_iommu_for_device(pdev->seg, pdev->sbdf.bdf);
+ iommu = find_iommu_for_device(pdev->sbdf);
if ( !iommu )
{
AMD_IOMMU_WARN("failed to find IOMMU: %pp cannot be assigned to %pd\n",
@@ -578,10 +578,10 @@ static int cf_check amd_iommu_add_device(u8 devfn, struct pci_dev *pdev)
return -EINVAL;
for_each_amd_iommu(iommu)
- if ( pdev->seg == iommu->seg && pdev->sbdf.bdf == iommu->bdf )
+ if ( pdev->sbdf.sbdf == iommu->sbdf.sbdf )
return is_hardware_domain(pdev->domain) ? 0 : -ENODEV;
- iommu = find_iommu_for_device(pdev->seg, pdev->sbdf.bdf);
+ iommu = find_iommu_for_device(pdev->sbdf);
if ( unlikely(!iommu) )
{
/* Filter bridge devices. */
@@ -666,7 +666,7 @@ static int cf_check amd_iommu_remove_device(u8 devfn, struct pci_dev *pdev)
if ( !pdev->domain )
return -EINVAL;
- iommu = find_iommu_for_device(pdev->seg, pdev->sbdf.bdf);
+ iommu = find_iommu_for_device(pdev->sbdf);
if ( !iommu )
{
AMD_IOMMU_WARN("failed to find IOMMU: %pp cannot be removed from %pd\n",