@@ -623,7 +623,10 @@ int __init dom0_construct_pv(struct domain *d,
if ( !is_pv_32bit_domain(d) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
- l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ l4start = l4tab = __va(mpt_alloc);
+ map_pages_to_xen((unsigned long)l4start, maddr_to_mfn(mpt_alloc), 1,
+ PAGE_HYPERVISOR);
+ mpt_alloc += PAGE_SIZE;
clear_page(l4tab);
init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
d, INVALID_MFN, true);
@@ -633,9 +636,12 @@ int __init dom0_construct_pv(struct domain *d,
{
/* Monitor table already created by switch_compat(). */
l4start = l4tab = __va(pagetable_get_paddr(v->arch.guest_table));
+ map_pages_to_xen((unsigned long)l4start,
+ pagetable_get_mfn(v->arch.guest_table), 1, PAGE_HYPERVISOR);
/* See public/xen.h on why the following is needed. */
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l3_page_table;
- l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ l3start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+ mpt_alloc += PAGE_SIZE;
}

l4tab += l4_table_offset(v_start);
@@ -645,14 +651,18 @@ int __init dom0_construct_pv(struct domain *d,
if ( !((unsigned long)l1tab & (PAGE_SIZE-1)) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;
- l1start = l1tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_XEN_PAGETABLE(l1start);
+ l1start = l1tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+ mpt_alloc += PAGE_SIZE;
clear_page(l1tab);
if ( count == 0 )
l1tab += l1_table_offset(v_start);
if ( !((unsigned long)l2tab & (PAGE_SIZE-1)) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
- l2start = l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_XEN_PAGETABLE(l2start);
+ l2start = l2tab = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+ mpt_alloc += PAGE_SIZE;
clear_page(l2tab);
if ( count == 0 )
l2tab += l2_table_offset(v_start);
@@ -662,19 +672,21 @@ int __init dom0_construct_pv(struct domain *d,
{
maddr_to_page(mpt_alloc)->u.inuse.type_info =
PGT_l3_page_table;
- l3start = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
+ UNMAP_XEN_PAGETABLE(l3start);
+ l3start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+ mpt_alloc += PAGE_SIZE;
}
l3tab = l3start;
clear_page(l3tab);
if ( count == 0 )
l3tab += l3_table_offset(v_start);
- *l4tab = l4e_from_paddr(__pa(l3start), L4_PROT);
+ *l4tab = l4e_from_paddr(virt_to_maddr_walk(l3start), L4_PROT);
l4tab++;
}
- *l3tab = l3e_from_paddr(__pa(l2start), L3_PROT);
+ *l3tab = l3e_from_paddr(virt_to_maddr_walk(l2start), L3_PROT);
l3tab++;
}
- *l2tab = l2e_from_paddr(__pa(l1start), L2_PROT);
+ *l2tab = l2e_from_paddr(virt_to_maddr_walk(l1start), L2_PROT);
l2tab++;
}
if ( count < initrd_pfn || count >= initrd_pfn + PFN_UP(initrd_len) )
@@ -701,9 +713,11 @@ int __init dom0_construct_pv(struct domain *d,
if ( !l3e_get_intpte(*l3tab) )
{
maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l2_page_table;
- l2tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
- clear_page(l2tab);
- *l3tab = l3e_from_paddr(__pa(l2tab), L3_PROT);
+ UNMAP_XEN_PAGETABLE(l2start);
+ l2start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));
+ mpt_alloc += PAGE_SIZE;
+ clear_page(l2start);
+ *l3tab = l3e_from_paddr(virt_to_maddr_walk(l2start), L3_PROT);
}
if ( i == 3 )
l3e_get_page(*l3tab)->u.inuse.type_info |= PGT_pae_xen_l2;
@@ -714,6 +728,10 @@ int __init dom0_construct_pv(struct domain *d,
UNMAP_XEN_PAGETABLE(l2t);
}
}
+ UNMAP_XEN_PAGETABLE(l1start);
+ UNMAP_XEN_PAGETABLE(l2start);
+ UNMAP_XEN_PAGETABLE(l3start);
+

/* Pages that are part of page tables must be read only. */
mark_pv_pt_pages_rdonly(d, l4start, vpt_start, nr_pt_pages);
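
For reference, every hunk above applies the same discipline: a page-table frame taken from mpt_alloc is no longer assumed reachable through __va() (i.e. through the direct map); it is mapped transiently with map_xen_pagetable(), the stale mapping held by the cursor variable being reused is dropped first via UNMAP_XEN_PAGETABLE(), and the three cursors are unmapped for good once the loop finishes. A minimal sketch of the per-level pattern, assuming the series' map_xen_pagetable()/UNMAP_XEN_PAGETABLE() helpers; pick_next_l1() is hypothetical and not part of this patch:

    /* Hypothetical helper: take the next frame from the mpt_alloc cursor,
     * type it as an L1 table, and return a transient mapping to it. */
    static l1_pgentry_t *pick_next_l1(unsigned long *mpt_alloc,
                                      l1_pgentry_t *stale)
    {
        l1_pgentry_t *l1t;

        /* The frame will be used as an L1 page table. */
        maddr_to_page(*mpt_alloc)->u.inuse.type_info = PGT_l1_page_table;

        /* Drop the previous transient mapping before reusing the cursor. */
        UNMAP_XEN_PAGETABLE(stale);

        /* Map the frame instead of relying on __va()/the direct map. */
        l1t = map_xen_pagetable(maddr_to_mfn(*mpt_alloc));
        *mpt_alloc += PAGE_SIZE;
        clear_page(l1t);

        return l1t; /* caller eventually does UNMAP_XEN_PAGETABLE() on it */
    }

The L2/L3 allocations in the hunks follow the same shape; only the PGT_* type and the cursor variable change.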
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -645,6 +645,7 @@ void free_xen_pagetable(mfn_t mfn);
l1_pgentry_t *virt_to_xen_l1e(unsigned long v);
unsigned long virt_to_mfn_walk(void *va);
struct page_info *virt_to_page_walk(void *va);
+#define virt_to_maddr_walk(va) mfn_to_maddr(_mfn(virt_to_mfn_walk(va)))

DECLARE_PER_CPU(mfn_t, root_pgt_mfn);
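
A note on the new helper: once l1start/l2start/l3start come from map_xen_pagetable() rather than the direct map, __pa() can no longer translate them, so the construction code switches to virt_to_maddr_walk(), which resolves the MFN by walking Xen's own page tables. As defined, the macro returns the machine address of the start of the page (the mfn round-trip discards the offset within the page), which is sufficient here since it is only ever applied to page-aligned page-table pages. A usage sketch, assuming the helpers above; example() is illustrative only:

    /* Illustrative only: hook a transiently mapped L3 table into an L4e. */
    static void example(l4_pgentry_t *l4tab, unsigned long mpt_alloc)
    {
        l3_pgentry_t *l3start = map_xen_pagetable(maddr_to_mfn(mpt_alloc));

        /* __pa(l3start) would be wrong here: l3start is not a direct-map
         * address, so translate it by walking the page tables instead. */
        *l4tab = l4e_from_paddr(virt_to_maddr_walk(l3start), L4_PROT);

        UNMAP_XEN_PAGETABLE(l3start);
    }

Translating the still-mapped VA this way avoids having to carry the corresponding machine address alongside every cursor (in the compat case, for instance, l3start is mapped well before the L4 entry is written).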