@@ -45,6 +45,8 @@ void do_suspend_lowlevel(void);
static int device_power_down(void)
{
+ int rc;
+
console_suspend();
time_suspend();
@@ -53,7 +55,9 @@ static int device_power_down(void)
ioapic_suspend();
- iommu_suspend();
+ rc = iommu_suspend();
+ if ( rc )
+ return rc;
lapic_suspend();
@@ -170,7 +170,8 @@ static void nmi_shootdown_cpus(void)
/* Crash shutdown any IOMMU functionality as the crashdump kernel is not
* happy when booting if interrupt/dma remapping is still enabled */
- iommu_crash_shutdown();
+ if ( iommu_crash_shutdown() )
+ printk("Failed to shut down IOMMU.\n");
__stop_this_cpu();
@@ -1627,7 +1627,10 @@ int __init construct_dom0(
}
if ( d->domain_id == hardware_domid )
- iommu_hwdom_init(d);
+ {
+ rc = iommu_hwdom_init(d);
+ BUG_ON(rc != 0);
+ }
return 0;
@@ -2445,9 +2445,13 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( (x & PGT_type_mask) == PGT_writable_page )
iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
else if ( type == PGT_writable_page )
- iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ {
+ rc = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
+ if ( rc )
+ iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ }
}
}
@@ -829,12 +829,19 @@ out:
need_modify_vtd_table )
{
if ( iommu_hap_pt_share )
- iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
+ rc = iommu_pte_flush(d, gfn, &ept_entry->epte, order, vtd_pte_present);
else
{
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
- iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ {
+ rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ if ( rc )
+ {
+ while ( i-- > 0 )
+ iommu_unmap_page(d, gfn + i);
+ }
+ }
else
for ( i = 0; i < (1 << order); i++ )
iommu_unmap_page(d, gfn + i);
@@ -675,8 +675,15 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
}
else if ( iommu_pte_flags )
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ {
+ rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+ iommu_pte_flags);
+ if ( rc )
+ {
+ while ( i-- > 0 )
+ iommu_unmap_page(p2m->domain, gfn + i);
+ }
+ }
else
for ( i = 0; i < (1UL << page_order); i++ )
iommu_unmap_page(p2m->domain, gfn + i);
@@ -228,7 +228,7 @@ static int late_hwdom_init(struct domain *d)
rcu_unlock_domain(dom0);
- iommu_hwdom_init(d);
+ rv = iommu_hwdom_init(d);
return rv;
#else
@@ -919,8 +919,9 @@ __gnttab_map_grant_ref(
{
nr_gets++;
(void)get_page(pg, rd);
- if ( !(op->flags & GNTMAP_readonly) )
- get_page_type(pg, PGT_writable_page);
+ if ( !(op->flags & GNTMAP_readonly) &&
+ get_page_type(pg, PGT_writable_page) )
+ goto could_not_pin;
}
}
}
@@ -631,8 +631,9 @@ static int xenmem_add_to_physmap(struct domain *d,
if ( need_iommu(d) )
{
this_cpu(iommu_dont_flush_iotlb) = 0;
- iommu_iotlb_flush(d, xatp->idx - done, done);
- iommu_iotlb_flush(d, xatp->gpfn - done, done);
+ rc = iommu_iotlb_flush(d, xatp->idx - done, done);
+ if ( !rc )
+ rc = iommu_iotlb_flush(d, xatp->gpfn - done, done);
}
#endif
This patch checks all kinds of errors, and propagates them all the way up
the call trees of the VT-d Device-TLB flush (MMU part).

Signed-off-by: Quan Xu <quan.xu@intel.com>
---
 xen/arch/x86/acpi/power.c   | 6 +++++-
 xen/arch/x86/crash.c        | 3 ++-
 xen/arch/x86/domain_build.c | 5 ++++-
 xen/arch/x86/mm.c           | 10 +++++++---
 xen/arch/x86/mm/p2m-ept.c   | 11 +++++++++--
 xen/arch/x86/mm/p2m-pt.c    | 11 +++++++++--
 xen/common/domain.c         | 2 +-
 xen/common/grant_table.c    | 5 +++--
 xen/common/memory.c         | 5 +++--
 9 files changed, 43 insertions(+), 15 deletions(-)