@@ -2467,7 +2467,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
int preemptible)
{
unsigned long nx, x, y = page->u.inuse.type_info;
- int rc = 0;
+ int rc = 0, iommu_ret = 0;
ASSERT(!(type & ~(PGT_type_mask | PGT_pae_xen_l2)));
@@ -2578,11 +2578,11 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( d && is_pv_domain(d) && unlikely(need_iommu(d)) )
{
if ( (x & PGT_type_mask) == PGT_writable_page )
- iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
+ iommu_ret = iommu_unmap_page(d, mfn_to_gmfn(d, page_to_mfn(page)));
else if ( type == PGT_writable_page )
- iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
- page_to_mfn(page),
- IOMMUF_readable|IOMMUF_writable);
+ iommu_ret = iommu_map_page(d, mfn_to_gmfn(d, page_to_mfn(page)),
+ page_to_mfn(page),
+ IOMMUF_readable|IOMMUF_writable);
}
}
@@ -2599,6 +2599,9 @@ static int __get_page_type(struct page_info *page, unsigned long type,
if ( (x & PGT_partial) && !(nx & PGT_partial) )
put_page(page);
+ if ( !rc )
+ rc = iommu_ret;
+
return rc;
}
@@ -667,6 +667,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
unsigned long gfn_remainder = gfn;
unsigned int i, target = order / EPT_TABLE_ORDER;
int ret, rc = 0;
+ bool_t entry_written = 0;
bool_t direct_mmio = (p2mt == p2m_mmio_direct);
uint8_t ipat = 0;
bool_t need_modify_vtd_table = 1;
@@ -812,10 +813,15 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
rc = atomic_write_ept_entry(ept_entry, new_entry, target);
if ( unlikely(rc) )
old_entry.epte = 0;
- else if ( p2mt != p2m_invalid &&
- (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
- /* Track the highest gfn for which we have ever had a valid mapping */
- p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+ else
+ {
+ entry_written = 1;
+
+ if ( p2mt != p2m_invalid &&
+ (gfn + (1UL << order) - 1 > p2m->max_mapped_pfn) )
+ /* Track the highest gfn for which we have ever had a valid mapping */
+ p2m->max_mapped_pfn = gfn + (1UL << order) - 1;
+ }
out:
if ( needs_sync )
@@ -831,10 +837,24 @@ out:
{
if ( iommu_flags )
for ( i = 0; i < (1 << order); i++ )
- iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ {
+ rc = iommu_map_page(d, gfn + i, mfn_x(mfn) + i, iommu_flags);
+ if ( unlikely(rc) )
+ {
+ while ( i-- )
+ if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ continue;
+
+ break;
+ }
+ }
else
for ( i = 0; i < (1 << order); i++ )
- iommu_unmap_page(d, gfn + i);
+ {
+ ret = iommu_unmap_page(d, gfn + i);
+ if ( !rc )
+ rc = ret;
+ }
}
}
@@ -847,7 +867,7 @@ out:
if ( is_epte_present(&old_entry) )
ept_free_entry(p2m, &old_entry, target);
- if ( rc == 0 && p2m_is_hostp2m(p2m) )
+ if ( entry_written && p2m_is_hostp2m(p2m) )
p2m_altp2m_propagate_change(d, _gfn(gfn), mfn, order, p2mt, p2ma);
return rc;
@@ -673,6 +673,8 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
if ( iommu_enabled && need_iommu(p2m->domain) &&
(iommu_old_flags != iommu_pte_flags || old_mfn != mfn_x(mfn)) )
{
+ ASSERT(rc == 0);
+
if ( iommu_use_hap_pt(p2m->domain) )
{
if ( iommu_old_flags )
@@ -680,11 +682,26 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
}
else if ( iommu_pte_flags )
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
- iommu_pte_flags);
+ {
+ rc = iommu_map_page(p2m->domain, gfn + i, mfn_x(mfn) + i,
+ iommu_pte_flags);
+ if ( unlikely(rc) )
+ {
+ while ( i-- )
+ if ( iommu_unmap_page(p2m->domain, gfn + i) )
+ continue;
+
+ break;
+ }
+ }
else
for ( i = 0; i < (1UL << page_order); i++ )
- iommu_unmap_page(p2m->domain, gfn + i);
+ {
+ int ret = iommu_unmap_page(p2m->domain, gfn + i);
+
+ if ( !rc )
+ rc = ret;
+ }
}
/*
@@ -641,10 +641,20 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
if ( !paging_mode_translate(p2m->domain) )
{
+ int rc = 0;
+
if ( need_iommu(p2m->domain) )
+ {
for ( i = 0; i < (1 << page_order); i++ )
- iommu_unmap_page(p2m->domain, mfn + i);
- return 0;
+ {
+ int ret = iommu_unmap_page(p2m->domain, mfn + i);
+
+ if ( !rc )
+ rc = ret;
+ }
+ }
+
+ return rc;
}
ASSERT(gfn_locked_by_me(p2m, gfn));
@@ -700,7 +710,9 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
if ( rc != 0 )
{
while ( i-- > 0 )
- iommu_unmap_page(d, mfn + i);
+ if ( iommu_unmap_page(d, mfn + i) )
+ continue;
+
return rc;
}
}
@@ -1436,7 +1436,9 @@ int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
if ( i != epfn )
{
while (i-- > old_max)
- iommu_unmap_page(hardware_domain, i);
+ if ( iommu_unmap_page(hardware_domain, i) )
+ continue;
+
goto destroy_m2p;
}
}
@@ -282,6 +282,8 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
if ( !iommu_passthrough && !need_iommu(d) )
{
+ int rc = 0;
+
/* Set up 1:1 page table for dom0 */
for ( i = 0; i < max_pdx; i++ )
{
@@ -292,12 +294,21 @@ static void __hwdom_init amd_iommu_hwdom_init(struct domain *d)
* a pfn_valid() check would seem desirable here.
*/
if ( mfn_valid(pfn) )
- amd_iommu_map_page(d, pfn, pfn,
- IOMMUF_readable|IOMMUF_writable);
+ {
+ int ret = amd_iommu_map_page(d, pfn, pfn,
+ IOMMUF_readable|IOMMUF_writable);
+
+ if ( !rc )
+ rc = ret;
+ }
if ( !(i & 0xfffff) )
process_pending_softirqs();
}
+
+ if ( rc )
+ AMD_IOMMU_DEBUG("d%d: IOMMU mapping failed: %d\n",
+ d->domain_id, rc);
}
for_each_amd_iommu ( iommu )
@@ -171,20 +171,31 @@ void __hwdom_init iommu_hwdom_init(struct domain *d)
{
struct page_info *page;
unsigned int i = 0;
+ int rc = 0;
+
page_list_for_each ( page, &d->page_list )
{
unsigned long mfn = page_to_mfn(page);
unsigned long gfn = mfn_to_gmfn(d, mfn);
unsigned int mapping = IOMMUF_readable;
+ int ret;
if ( ((page->u.inuse.type_info & PGT_count_mask) == 0) ||
((page->u.inuse.type_info & PGT_type_mask)
== PGT_writable_page) )
mapping |= IOMMUF_writable;
- hd->platform_ops->map_page(d, gfn, mfn, mapping);
+
+ ret = hd->platform_ops->map_page(d, gfn, mfn, mapping);
+ if ( !rc )
+ rc = ret;
+
if ( !(i++ & 0xfffff) )
process_pending_softirqs();
}
+
+ if ( rc )
+ printk(XENLOG_WARNING "d%d: IOMMU mapping failed: %d\n",
+ d->domain_id, rc);
}
return hd->platform_ops->hwdom_init(d);
@@ -118,6 +118,8 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
for ( i = 0; i < top; i++ )
{
+ int rc = 0;
+
/*
* Set up 1:1 mapping for dom0. Default to use only conventional RAM
* areas and let RMRRs include needed reserved regions. When set, the
@@ -140,8 +142,17 @@ void __hwdom_init vtd_set_hwdom_mapping(struct domain *d)
tmp = 1 << (PAGE_SHIFT - PAGE_SHIFT_4K);
for ( j = 0; j < tmp; j++ )
- iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
- IOMMUF_readable|IOMMUF_writable);
+ {
+ int ret = iommu_map_page(d, pfn * tmp + j, pfn * tmp + j,
+ IOMMUF_readable|IOMMUF_writable);
+
+ if ( !rc )
+ rc = ret;
+ }
+
+ if ( rc )
+ printk(XENLOG_WARNING VTDPREFIX " d%d: IOMMU mapping failed: %d\n",
+ d->domain_id, rc);
if (!(i & (0xfffff >> (PAGE_SHIFT - PAGE_SHIFT_4K))))
process_pending_softirqs();
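
The three hwdom-init hunks above (AMD, generic, VT-d) all apply the same best-effort pattern: keep mapping, remember only the first error in rc, and emit a single warning after the loop instead of aborting hardware-domain construction. A minimal standalone sketch of that idea follows; the setup_identity_map() helper and the stubbed iommu_map_page() are illustrative stand-ins, not code from the patch.

/* Hypothetical, self-contained sketch of the best-effort hardware-domain
 * mapping pattern used in amd_iommu_hwdom_init(), iommu_hwdom_init() and
 * vtd_set_hwdom_mapping() above: map everything, remember the first
 * failure, warn once at the end.
 */
#include <stdio.h>

/* Stub standing in for the real IOMMU call; fails for two example pages. */
static int iommu_map_page(unsigned long pfn)
{
    return (pfn == 5 || pfn == 9) ? -12 /* -ENOMEM */ : 0;
}

static int setup_identity_map(unsigned long max_pfn)
{
    unsigned long pfn;
    int rc = 0;

    for ( pfn = 0; pfn < max_pfn; pfn++ )
    {
        int ret = iommu_map_page(pfn);

        /* Keep only the first error so the eventual warning reports it. */
        if ( !rc )
            rc = ret;
    }

    if ( rc )
        printf("d0: IOMMU mapping failed: %d\n", rc);

    return rc;
}

int main(void)
{
    return setup_identity_map(16) ? 1 : 0;
}
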
@@ -74,9 +74,9 @@ void iommu_teardown(struct domain *d);
#define IOMMUF_readable (1u<<_IOMMUF_readable)
#define _IOMMUF_writable 1
#define IOMMUF_writable (1u<<_IOMMUF_writable)
-int iommu_map_page(struct domain *d, unsigned long gfn, unsigned long mfn,
- unsigned int flags);
-int iommu_unmap_page(struct domain *d, unsigned long gfn);
+int __must_check iommu_map_page(struct domain *d, unsigned long gfn,
+ unsigned long mfn, unsigned int flags);
+int __must_check iommu_unmap_page(struct domain *d, unsigned long gfn);
enum iommu_feature
{
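
The iommu.h hunk marks iommu_map_page() and iommu_unmap_page() as __must_check, which is what forces the callers patched above to either propagate the first error or unwind a partially completed mapping. A minimal standalone sketch of that map-with-rollback pattern follows; the map_range() helper and the stubbed IOMMU calls are hypothetical illustrations, not Xen's implementations.

/* Hypothetical, self-contained sketch of the error-handling pattern the
 * ept_set_entry() and p2m_pt_set_entry() hunks introduce: map a contiguous
 * range, and on the first failure unwind what was already mapped and report
 * the original error to the caller.
 */
#include <stdio.h>

/* Stub: pretend the fourth mapping fails, e.g. a device-TLB flush error. */
static int iommu_map_page(unsigned long gfn, unsigned long mfn,
                          unsigned int flags)
{
    (void)mfn;
    (void)flags;
    return (gfn == 3) ? -14 /* -EFAULT */ : 0;
}

/* Stub: unmapping always succeeds here. */
static int iommu_unmap_page(unsigned long gfn)
{
    (void)gfn;
    return 0;
}

static int map_range(unsigned long gfn, unsigned long mfn,
                     unsigned long nr, unsigned int flags)
{
    unsigned long i;
    int rc = 0;

    for ( i = 0; i < nr; i++ )
    {
        rc = iommu_map_page(gfn + i, mfn + i, flags);
        if ( rc )
        {
            /* Unwind the pages already mapped; secondary unmap errors are
             * discarded so the original rc stays the reported error. */
            while ( i-- )
                if ( iommu_unmap_page(gfn + i) )
                    continue;

            break;
        }
    }

    return rc;
}

int main(void)
{
    printf("map_range() -> %d\n", map_range(0, 0x1000, 8, 0));
    return 0;
}

The same design choice shows up in the hunks above as the if ( iommu_unmap_page(...) ) continue; idiom: failures during the rollback are deliberately ignored so that the first mapping error is the one propagated to the caller.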