@@ -1061,7 +1061,7 @@ static int __p2m_set_entry(struct p2m_domain *p2m,
flush_flags |= IOMMU_FLUSHF_added;
rc = iommu_iotlb_flush(p2m->domain, _dfn(gfn_x(sgfn)),
- 1UL << page_order, flush_flags);
+ page_order, 1, flush_flags);
}
else
rc = 0;
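With the new signature, iommu_iotlb_flush() takes the page order and the page count as separate parameters instead of a single pre-shifted page count. A minimal sketch of the two call shapes this patch uses, assuming the declaration introduced later in the patch (d, dfn, n, rc and flush_flags are placeholders):

    /* Flush one superpage of the given order, as in __p2m_set_entry above. */
    rc = iommu_iotlb_flush(d, dfn, page_order, 1, flush_flags);

    /* Flush n individual 4k (order-0) pages, as in xenmem_add_to_physmap. */
    rc = iommu_iotlb_flush(d, dfn, 0, n, flush_flags);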
@@ -843,7 +843,7 @@ out:
need_modify_vtd_table )
{
if ( iommu_use_hap_pt(d) )
- rc = iommu_iotlb_flush(d, _dfn(gfn), (1u << order),
+ rc = iommu_iotlb_flush(d, _dfn(gfn), order, 1,
(iommu_flags ? IOMMU_FLUSHF_added : 0) |
(vtd_pte_present ? IOMMU_FLUSHF_modified
: 0));
@@ -851,12 +851,12 @@ int xenmem_add_to_physmap(struct domain *d, struct xen_add_to_physmap *xatp,
this_cpu(iommu_dont_flush_iotlb) = 0;
- ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), done,
+ ret = iommu_iotlb_flush(d, _dfn(xatp->idx - done), 0, done,
IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
- ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), done,
+ ret = iommu_iotlb_flush(d, _dfn(xatp->gpfn - done), 0, done,
IOMMU_FLUSHF_added | IOMMU_FLUSHF_modified);
if ( unlikely(ret) && rc >= 0 )
rc = ret;
@@ -231,7 +231,7 @@ int amd_iommu_reserve_domain_unity_map(struct domain *domain,
paddr_t phys_addr, unsigned long size,
int iw, int ir);
int __must_check amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags);
int __must_check amd_iommu_flush_iotlb_all(struct domain *d);
@@ -351,7 +351,7 @@ int amd_iommu_unmap_page(struct domain *d, dfn_t dfn,
return 0;
}
-static unsigned long flush_count(unsigned long dfn, unsigned int page_count,
+static unsigned long flush_count(unsigned long dfn, unsigned long page_count,
unsigned int order)
{
unsigned long start = dfn >> order;
@@ -362,7 +362,7 @@ static unsigned long flush_count(unsigned long dfn, unsigned int page_count,
}
int amd_iommu_flush_iotlb_pages(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
unsigned long dfn_l = dfn_x(dfn);
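flush_count() turns a (dfn, page_count) range into the number of order-aligned chunks needed to cover it. A standalone sketch of that arithmetic; only the first line of the body is visible in the hunk above, so the end/return computation here is an assumed reconstruction for illustration:

    /* Number of order-aligned chunks covering [dfn, dfn + page_count). */
    static unsigned long flush_count(unsigned long dfn,
                                     unsigned long page_count,
                                     unsigned int order)
    {
        unsigned long start = dfn >> order;
        unsigned long end = ((dfn + page_count - 1) >> order) + 1;

        return end - start;
    }

    /* e.g. flush_count(3, 5, 2) == 2: dfns 3..7 straddle two 4-page chunks. */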
@@ -945,7 +945,7 @@ static int __must_check ipmmu_iotlb_flush_all(struct domain *d)
}
static int __must_check ipmmu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
ASSERT(flush_flags);
@@ -2534,7 +2534,7 @@ static int __must_check arm_smmu_iotlb_flush_all(struct domain *d)
}
static int __must_check arm_smmu_iotlb_flush(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
ASSERT(flush_flags);
@@ -235,8 +235,8 @@ void iommu_domain_destroy(struct domain *d)
}
int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags,
- unsigned int *flush_flags)
+ unsigned int page_order, unsigned int page_count,
+ unsigned int flags, unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
@@ -248,7 +248,7 @@ int iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
ASSERT(IS_ALIGNED(mfn_x(mfn), (1ul << page_order)));
- for ( i = 0; i < (1ul << page_order); i++ )
+ for ( i = 0; i < ((unsigned long)page_count << page_order); i++ )
{
rc = iommu_call(hd->platform_ops, map_page, d, dfn_add(dfn, i),
mfn_add(mfn, i), flags, flush_flags);
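With the extra parameter, a single iommu_map() call now covers page_count contiguous superpages of the given order, i.e. page_count << page_order individual 4k mappings, while the alignment ASSERTs continue to apply to the order alone. A hedged usage sketch (variable names are placeholders, not from the patch):

    /* Map count order-sized superpages, then flush the same extent. */
    rc = iommu_map(d, dfn, mfn, order, count,
                   IOMMUF_readable | IOMMUF_writable, &flush_flags);
    if ( !rc )
        rc = iommu_iotlb_flush(d, dfn, order, count, flush_flags);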
@@ -285,16 +285,16 @@ int iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
unsigned int page_order, unsigned int flags)
{
unsigned int flush_flags = 0;
- int rc = iommu_map(d, dfn, mfn, page_order, flags, &flush_flags);
+ int rc = iommu_map(d, dfn, mfn, page_order, 1, flags, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
- rc = iommu_iotlb_flush(d, dfn, (1u << page_order), flush_flags);
+ rc = iommu_iotlb_flush(d, dfn, page_order, 1, flush_flags);
return rc;
}
int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
- unsigned int *flush_flags)
+ unsigned int page_count, unsigned int *flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
unsigned long i;
@@ -305,7 +305,7 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
ASSERT(IS_ALIGNED(dfn_x(dfn), (1ul << page_order)));
- for ( i = 0; i < (1ul << page_order); i++ )
+ for ( i = 0; i < ((unsigned long)page_count << page_order); i++ )
{
int err = iommu_call(hd->platform_ops, unmap_page, d, dfn_add(dfn, i),
flush_flags);
@@ -338,10 +338,10 @@ int iommu_unmap(struct domain *d, dfn_t dfn, unsigned int page_order,
int iommu_legacy_unmap(struct domain *d, dfn_t dfn, unsigned int page_order)
{
unsigned int flush_flags = 0;
- int rc = iommu_unmap(d, dfn, page_order, &flush_flags);
+ int rc = iommu_unmap(d, dfn, page_order, 1, &flush_flags);
if ( !this_cpu(iommu_dont_flush_iotlb) && !rc )
- rc = iommu_iotlb_flush(d, dfn, (1u << page_order), flush_flags);
+ rc = iommu_iotlb_flush(d, dfn, page_order, 1, flush_flags);
return rc;
}
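The legacy wrappers still operate on exactly one order-sized region, which is why both the underlying map/unmap call and the follow-up flush pass a hard-coded page count of 1. An illustrative caller, assuming x86's PAGE_ORDER_2M and placeholder dfn0/mfn0 values:

    /* Establish a single 2M mapping with an immediate IOTLB flush. */
    ret = iommu_legacy_map(d, _dfn(dfn0), _mfn(mfn0), PAGE_ORDER_2M,
                           IOMMUF_readable | IOMMUF_writable);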
@@ -357,8 +357,8 @@ int iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
return iommu_call(hd->platform_ops, lookup_page, d, dfn, mfn, flags);
}
-int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count,
- unsigned int flush_flags)
+int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_order,
+ unsigned int page_count, unsigned int flush_flags)
{
const struct domain_iommu *hd = dom_iommu(d);
int rc;
@@ -370,14 +370,15 @@ int iommu_iotlb_flush(struct domain *d, dfn_t dfn, unsigned int page_count,
if ( dfn_eq(dfn, INVALID_DFN) )
return -EINVAL;
- rc = iommu_call(hd->platform_ops, iotlb_flush, d, dfn, page_count,
- flush_flags);
+ rc = iommu_call(hd->platform_ops, iotlb_flush, d, dfn,
+ (unsigned long)page_count << page_order, flush_flags);
if ( unlikely(rc) )
{
if ( !d->is_shutting_down && printk_ratelimit() )
printk(XENLOG_ERR
- "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page count %u flags %x\n",
- d->domain_id, rc, dfn_x(dfn), page_count, flush_flags);
+ "d%d: IOMMU IOTLB flush failed: %d, dfn %"PRI_dfn", page order %u, page count %u flags %x\n",
+ d->domain_id, rc, dfn_x(dfn), page_order, page_count,
+ flush_flags);
if ( !is_hardware_domain(d) )
domain_crash(d);
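Note that the wrapper flattens the (page_order, page_count) pair back into a single flat page count before calling into the driver, which is why the per-driver iotlb_flush implementations elsewhere in this patch only have page_count widened to unsigned long rather than gaining a second parameter:

    /* e.g. page_count = 4 superpages at page_order = 9 (2M): */
    unsigned long pages = (unsigned long)4 << 9;   /* 2048 4k pages */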
@@ -584,7 +584,7 @@ static int __must_check iommu_flush_all(void)
static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
bool_t dma_old_pte_present,
- unsigned int page_count)
+ unsigned long page_count)
{
struct domain_iommu *hd = dom_iommu(d);
struct acpi_drhd_unit *drhd;
@@ -632,7 +632,7 @@ static int __must_check iommu_flush_iotlb(struct domain *d, dfn_t dfn,
static int __must_check iommu_flush_iotlb_pages(struct domain *d,
dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags)
{
ASSERT(page_count && !dfn_eq(dfn, INVALID_DFN));
@@ -244,7 +244,7 @@ void __hwdom_init arch_iommu_hwdom_init(struct domain *d)
else if ( paging_mode_translate(d) )
rc = set_identity_p2m_entry(d, pfn, p2m_access_rw, 0);
else
- rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K,
+ rc = iommu_map(d, _dfn(pfn), _mfn(pfn), PAGE_ORDER_4K, 1,
IOMMUF_readable | IOMMUF_writable, &flush_flags);
if ( rc )
@@ -146,10 +146,10 @@ enum
#define IOMMU_FLUSHF_modified (1u << _IOMMU_FLUSHF_modified)
int __must_check iommu_map(struct domain *d, dfn_t dfn, mfn_t mfn,
- unsigned int page_order, unsigned int flags,
- unsigned int *flush_flags);
+ unsigned int page_order, unsigned int page_count,
+ unsigned int flags, unsigned int *flush_flags);
int __must_check iommu_unmap(struct domain *d, dfn_t dfn,
- unsigned int page_order,
+ unsigned int page_order, unsigned int page_count,
unsigned int *flush_flags);
int __must_check iommu_legacy_map(struct domain *d, dfn_t dfn, mfn_t mfn,
@@ -162,6 +162,7 @@ int __must_check iommu_lookup_page(struct domain *d, dfn_t dfn, mfn_t *mfn,
unsigned int *flags);
int __must_check iommu_iotlb_flush(struct domain *d, dfn_t dfn,
+ unsigned int page_order,
unsigned int page_count,
unsigned int flush_flags);
int __must_check iommu_iotlb_flush_all(struct domain *d,
@@ -281,7 +282,7 @@ struct iommu_ops {
void (*share_p2m)(struct domain *d);
void (*crash_shutdown)(void);
int __must_check (*iotlb_flush)(struct domain *d, dfn_t dfn,
- unsigned int page_count,
+ unsigned long page_count,
unsigned int flush_flags);
int __must_check (*iotlb_flush_all)(struct domain *d);
int (*get_reserved_device_memory)(iommu_grdm_t *, void *);