@@ -1178,7 +1178,9 @@ out:
     if ( flush )
     {
         flush_tlb_domain(d);
-        iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+        ret = iommu_iotlb_flush(d, sgfn, egfn - sgfn);
+        if ( !rc )
+            rc = ret;
     }
 
     while ( (pg = page_list_remove_head(&free_pages)) )
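This caller and the xenmem_add_to_physmap() caller below latch the flush result the same way: the error from iommu_iotlb_flush() is recorded only if no earlier error is already held in rc, so the first failure is the one reported while the flush is still attempted. A minimal sketch of the pattern, using hypothetical do_work()/do_flush() helpers that merely stand in for the real operations:

    /* Illustrative sketch only; do_work()/do_flush() are made-up stand-ins. */
    static int do_work(void)  { return -1; } /* pretend the main operation failed */
    static int do_flush(void) { return 0; }  /* pretend the flush succeeded */

    static int example(void)
    {
        int rc = do_work();
        int ret = do_flush();   /* flush happens regardless of rc */

        if ( !rc )              /* keep the first error, do not mask it */
            rc = ret;

        return rc;              /* reports the do_work() failure, not the flush result */
    }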
@@ -683,9 +683,17 @@ static int xenmem_add_to_physmap(struct domain *d,
 #ifdef CONFIG_HAS_PASSTHROUGH
     if ( need_iommu(d) )
     {
+        int ret;
+
         this_cpu(iommu_dont_flush_iotlb) = 0;
-        iommu_iotlb_flush(d, xatp->idx - done, done);
-        iommu_iotlb_flush(d, xatp->gpfn - done, done);
+
+        ret = iommu_iotlb_flush(d, xatp->idx - done, done);
+        if ( unlikely(ret) && rc >= 0 )
+            rc = ret;
+
+        ret = iommu_iotlb_flush(d, xatp->gpfn - done, done);
+        if ( unlikely(ret) && rc >= 0 )
+            rc = ret;
     }
 #endif
 
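The guard used here, if ( unlikely(ret) && rc >= 0 ), expresses the same first-error-wins rule: unlikely() marks a failed flush as the cold path, and the rc >= 0 check ensures a negative error code already sitting in rc is never overwritten by the flush result.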
@@ -311,24 +311,29 @@ static void iommu_free_pagetables(unsigned long unused)
                             cpumask_cycle(smp_processor_id(), &cpu_online_map));
 }
 
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count)
+int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+                      unsigned int page_count)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
-        return;
+        return 0;
 
     hd->platform_ops->iotlb_flush(d, gfn, page_count);
+
+    return 0;
 }
 
-void iommu_iotlb_flush_all(struct domain *d)
+int iommu_iotlb_flush_all(struct domain *d)
 {
     const struct domain_iommu *hd = dom_iommu(d);
 
     if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush_all )
-        return;
+        return 0;
 
     hd->platform_ops->iotlb_flush_all(d);
+
+    return 0;
 }
 
 int __init iommu_setup(void)
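Note that both wrappers still return 0 unconditionally: the result of the platform_ops->iotlb_flush{,_all} hook call is not propagated yet, so this hunk only changes the external contract of the wrappers. A rough sketch of the shape the wrapper would take if the hook itself were converted to return an error code (an assumption about a possible follow-up change, not part of this diff):

    int iommu_iotlb_flush(struct domain *d, unsigned long gfn,
                          unsigned int page_count)
    {
        const struct domain_iommu *hd = dom_iommu(d);

        if ( !iommu_enabled || !hd->platform_ops || !hd->platform_ops->iotlb_flush )
            return 0;

        /* Assumes the iotlb_flush hook has been changed to return int. */
        return hd->platform_ops->iotlb_flush(d, gfn, page_count);
    }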
@@ -104,8 +104,9 @@ int arch_iommu_populate_page_table(struct domain *d)
     this_cpu(iommu_dont_flush_iotlb) = 0;
 
     if ( !rc )
-        iommu_iotlb_flush_all(d);
-    else if ( rc != -ERESTART )
+        rc = iommu_iotlb_flush_all(d);
+
+    if ( rc && rc != -ERESTART )
         iommu_teardown(d);
 
     return rc;
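Turning the else-if into a standalone if ( rc && rc != -ERESTART ) also adjusts the control flow: iommu_teardown() is now reached when the flush itself fails, not only when populating the page table failed, while -ERESTART (a continuation rather than a real error) still skips the teardown.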
@@ -200,8 +200,9 @@ int iommu_do_pci_domctl(struct xen_domctl *, struct domain *d,
 int iommu_do_domctl(struct xen_domctl *, struct domain *d,
                     XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
 
-void iommu_iotlb_flush(struct domain *d, unsigned long gfn, unsigned int page_count);
-void iommu_iotlb_flush_all(struct domain *d);
+int __must_check iommu_iotlb_flush(struct domain *d, unsigned long gfn,
+                                   unsigned int page_count);
+int __must_check iommu_iotlb_flush_all(struct domain *d);
 
 /*
  * The purpose of the iommu_dont_flush_iotlb optional cpu flag is to