@@ -5508,7 +5508,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
{
bool locking = system_state > SYS_STATE_boot;
l3_pgentry_t *pl3e = NULL;
- l2_pgentry_t *pl2e;
+ l2_pgentry_t *pl2e = NULL;
l1_pgentry_t *pl1e;
unsigned int i;
unsigned long v = s;
@@ -5524,6 +5524,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
while ( v < e )
{
/* Clean up mappings mapped in the previous iteration. */
+ UNMAP_DOMAIN_PAGE(pl2e);
UNMAP_DOMAIN_PAGE(pl3e);
 
pl3e = virt_to_xen_l3e(v);
@@ -5541,6 +5542,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
if ( l3e_get_flags(*pl3e) & _PAGE_PSE )
{
l2_pgentry_t *l2t;
+ mfn_t l2mfn;
 
if ( l2_table_offset(v) == 0 &&
l1_table_offset(v) == 0 &&
@@ -5557,35 +5559,38 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
}
 
/* PAGE1GB: shatter the superpage and fall through. */
- l2t = alloc_xen_pagetable();
- if ( !l2t )
+ l2mfn = alloc_xen_pagetable_new();
+ if ( mfn_eq(l2mfn, INVALID_MFN) )
goto out;
 
+ l2t = map_domain_page(l2mfn);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
l2e_write(l2t + i,
l2e_from_pfn(l3e_get_pfn(*pl3e) +
(i << PAGETABLE_ORDER),
l3e_get_flags(*pl3e)));
+ UNMAP_DOMAIN_PAGE(l2t);
+
if ( locking )
spin_lock(&map_pgdir_lock);
if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
(l3e_get_flags(*pl3e) & _PAGE_PSE) )
{
- l3e_write_atomic(pl3e, l3e_from_mfn(virt_to_mfn(l2t),
- __PAGE_HYPERVISOR));
- l2t = NULL;
+ l3e_write_atomic(pl3e,
+ l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
+ l2mfn = INVALID_MFN;
}
if ( locking )
spin_unlock(&map_pgdir_lock);
- if ( l2t )
- free_xen_pagetable(l2t);
+
+ free_xen_pagetable_new(l2mfn);
}
 
/*
* The L3 entry has been verified to be present, and we've dealt with
* 1G pages as well, so the L2 table cannot require allocation.
*/
- pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(v);
+ pl2e = map_l2t_from_l3e(*pl3e) + l2_table_offset(v);
 
if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
{
@@ -5613,41 +5618,45 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
else
{
l1_pgentry_t *l1t;
-
/* PSE: shatter the superpage and try again. */
- l1t = alloc_xen_pagetable();
- if ( !l1t )
+ mfn_t l1mfn = alloc_xen_pagetable_new();
+
+ if ( mfn_eq(l1mfn, INVALID_MFN) )
goto out;
 
+ l1t = map_domain_page(l1mfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
l1e_write(&l1t[i],
l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
l2e_get_flags(*pl2e) & ~_PAGE_PSE));
+ UNMAP_DOMAIN_PAGE(l1t);
+
if ( locking )
spin_lock(&map_pgdir_lock);
if ( (l2e_get_flags(*pl2e) & _PAGE_PRESENT) &&
(l2e_get_flags(*pl2e) & _PAGE_PSE) )
{
- l2e_write_atomic(pl2e, l2e_from_mfn(virt_to_mfn(l1t),
+ l2e_write_atomic(pl2e, l2e_from_mfn(l1mfn,
__PAGE_HYPERVISOR));
- l1t = NULL;
+ l1mfn = INVALID_MFN;
}
if ( locking )
spin_unlock(&map_pgdir_lock);
- if ( l1t )
- free_xen_pagetable(l1t);
+
+ free_xen_pagetable_new(l1mfn);
}
}
else
{
l1_pgentry_t nl1e, *l1t;
+ mfn_t l1mfn;
 
/*
* Ordinary 4kB mapping: The L2 entry has been verified to be
* present, and we've dealt with 2M pages as well, so the L1 table
* cannot require allocation.
*/
- pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(v);
+ pl1e = map_l1t_from_l2e(*pl2e) + l1_table_offset(v);
 
/* Confirm the caller isn't trying to create new mappings. */
if ( !(l1e_get_flags(*pl1e) & _PAGE_PRESENT) )
@@ -5658,6 +5667,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
(l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf);
 
l1e_write_atomic(pl1e, nl1e);
+ UNMAP_DOMAIN_PAGE(pl1e);
v += PAGE_SIZE;
 
/*
@@ -5687,10 +5697,12 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
continue;
}
 
- l1t = l2e_to_l1e(*pl2e);
+ l1mfn = l2e_get_mfn(*pl2e);
+ l1t = map_domain_page(l1mfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
if ( l1e_get_intpte(l1t[i]) != 0 )
break;
+ UNMAP_DOMAIN_PAGE(l1t);
if ( i == L1_PAGETABLE_ENTRIES )
{
/* Empty: zap the L2E and free the L1 page. */
@@ -5698,7 +5710,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
if ( locking )
spin_unlock(&map_pgdir_lock);
flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
- free_xen_pagetable(l1t);
+ free_xen_pagetable_new(l1mfn);
}
else if ( locking )
spin_unlock(&map_pgdir_lock);
@@ -5729,11 +5741,13 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
{
l2_pgentry_t *l2t;
+ mfn_t l2mfn = l3e_get_mfn(*pl3e);
 
- l2t = l3e_to_l2e(*pl3e);
+ l2t = map_domain_page(l2mfn);
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
if ( l2e_get_intpte(l2t[i]) != 0 )
break;
+ UNMAP_DOMAIN_PAGE(l2t);
 
if ( i == L2_PAGETABLE_ENTRIES )
{
/* Empty: zap the L3E and free the L2 page. */
@@ -5741,7 +5755,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
if ( locking )
spin_unlock(&map_pgdir_lock);
flush_area(NULL, FLUSH_TLB_GLOBAL); /* flush before free */
- free_xen_pagetable(l2t);
+ free_xen_pagetable_new(l2mfn);
}
else if ( locking )
spin_unlock(&map_pgdir_lock);
@@ -5754,6 +5768,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
rc = 0;
 
out:
+ UNMAP_DOMAIN_PAGE(pl2e);
UNMAP_DOMAIN_PAGE(pl3e);
return rc;
}
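
For reference, the pattern the hunks above convert modify_xen_mappings() to is: allocate a page table by MFN, map it transiently, fill it, unmap it, then either publish it under map_pgdir_lock or free it again if another CPU won the race. The sketch below condenses the 1G-shatter path using only calls that appear in this diff (alloc_xen_pagetable_new(), map_domain_page(), UNMAP_DOMAIN_PAGE(), l3e_write_atomic(), free_xen_pagetable_new()); the helper name shatter_l3e_sketch() and its signature are hypothetical, and the code assumes it sits in xen/arch/x86/mm.c, where map_pgdir_lock and these helpers are visible.

/*
 * Illustrative sketch only (hypothetical helper, not part of the patch):
 * shatter a 1G superpage at *pl3e into a freshly allocated L2 table,
 * touching the new table only through a transient mapping.
 */
static int shatter_l3e_sketch(l3_pgentry_t *pl3e, bool locking)
{
    mfn_t l2mfn = alloc_xen_pagetable_new();   /* allocate by MFN, not VA */
    l2_pgentry_t *l2t;
    unsigned int i;

    if ( mfn_eq(l2mfn, INVALID_MFN) )
        return -ENOMEM;

    /* Fill the new L2 table through a temporary mapping. */
    l2t = map_domain_page(l2mfn);
    for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
        l2e_write(l2t + i,
                  l2e_from_pfn(l3e_get_pfn(*pl3e) + (i << PAGETABLE_ORDER),
                               l3e_get_flags(*pl3e)));
    UNMAP_DOMAIN_PAGE(l2t);

    if ( locking )
        spin_lock(&map_pgdir_lock);
    if ( (l3e_get_flags(*pl3e) & _PAGE_PRESENT) &&
         (l3e_get_flags(*pl3e) & _PAGE_PSE) )
    {
        /* Still a superpage: publish the new L2 table and keep it. */
        l3e_write_atomic(pl3e, l3e_from_mfn(l2mfn, __PAGE_HYPERVISOR));
        l2mfn = INVALID_MFN;
    }
    if ( locking )
        spin_unlock(&map_pgdir_lock);

    /* Lost the race: free the unused table (INVALID_MFN is left alone). */
    free_xen_pagetable_new(l2mfn);

    return 0;
}

The table is populated before map_pgdir_lock is taken and only installed under the lock, so a CPU that loses the race simply hands its still-unused MFN back to free_xen_pagetable_new(); the unconditional free in the hunks above implies that call tolerates INVALID_MFN.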