@@ -14,8 +14,10 @@
#include <xen/sched.h>
#include <xen/vmap.h>
#include <asm/current.h>
+#include <asm/fixmap.h>
#include <asm/flushtlb.h>
#include <asm/hardirq.h>
+#include <asm/pmap.h>
#include <asm/setup.h>
static DEFINE_PER_CPU(struct vcpu *, override);
@@ -35,10 +37,11 @@ static inline struct vcpu *mapcache_current_vcpu(void)
/*
* When using efi runtime page tables, we have the equivalent of the idle
* domain's page tables but current may point at another domain's VCPU.
- * Return NULL as though current is not properly set up yet.
+ * Return the idle domain's vcpu on that core because the efi per-domain
+ * region (where the mapcache is) is in sync with the idle domain.
*/
if ( efi_rs_using_pgtables() )
- return NULL;
+ return idle_vcpu[smp_processor_id()];
/*
* If guest_table is NULL, and we are running a paravirtualised guest,
@@ -77,18 +80,24 @@ void *map_domain_page(mfn_t mfn)
struct vcpu_maphash_entry *hashent;
#ifdef NDEBUG
- if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ if ( arch_mfns_in_directmap(mfn_x(mfn), 1) )
return mfn_to_virt(mfn_x(mfn));
#endif
v = mapcache_current_vcpu();
- if ( !v )
- return mfn_to_virt(mfn_x(mfn));
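+ /*
+ * No usable mapcache (no suitable vCPU, or the domain's mapcache is
+ * not initialised). Use the directmap when the MFN is covered by it;
+ * otherwise fall back to the boot-time PMAP, which provides transient
+ * mappings via fixmap slots and is only valid before secondary CPUs
+ * are brought up (hence the BUG_ON).
+ */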
+ if ( !v || !v->domain->arch.mapcache.inuse )
+ {
+ if ( arch_mfns_in_directmap(mfn_x(mfn), 1) )
+ return mfn_to_virt(mfn_x(mfn));
+ else
+ {
+ BUG_ON(system_state >= SYS_STATE_smp_boot);
+ return pmap_map(mfn);
+ }
+ }
dcache = &v->domain->arch.mapcache;
vcache = &v->arch.mapcache;
- if ( !dcache->inuse )
- return mfn_to_virt(mfn_x(mfn));
perfc_incr(map_domain_page_count);
@@ -184,6 +193,12 @@ void unmap_domain_page(const void *ptr)
if ( !va || va >= DIRECTMAP_VIRT_START )
return;
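+ /*
+ * Addresses in the fixmap region can only have come from pmap_map()
+ * (see map_domain_page() above), so release them via pmap_unmap().
+ */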
+ if ( va >= FIXADDR_START && va < FIXADDR_TOP )
+ {
+ pmap_unmap((void *)ptr);
+ return;
+ }
+
ASSERT(va >= MAPCACHE_VIRT_START && va < MAPCACHE_VIRT_END);
v = mapcache_current_vcpu();
@@ -237,7 +252,7 @@ int mapcache_domain_init(struct domain *d)
unsigned int bitmap_pages;
#ifdef NDEBUG
- if ( !mem_hotplug && max_page <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ if ( !mem_hotplug && arch_mfns_in_directmap(0, max_page) )
return 0;
#endif
@@ -308,7 +323,7 @@ void *map_domain_page_global(mfn_t mfn)
local_irq_is_enabled()));
#ifdef NDEBUG
- if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+ if ( arch_mfns_in_directmap(mfn_x(mfn), 1) )
return mfn_to_virt(mfn_x(mfn));
#endif
@@ -335,6 +350,23 @@ mfn_t domain_page_map_to_mfn(const void *ptr)
if ( va >= DIRECTMAP_VIRT_START )
return _mfn(virt_to_mfn(ptr));
+ /*
+ * The fixmap steals the top end of the VMAP, so the check for the
+ * PMAP range *must* happen before the VMAP check below.
+ *
+ * Also, the fixmap translates slots to addresses in reverse order (a
+ * higher slot index maps to a lower virtual address). The logic below
+ * relies on this to avoid extra complexity, so check at compile time
+ * that it always holds.
+ */
+ BUILD_BUG_ON(fix_to_virt(FIX_PMAP_BEGIN) < fix_to_virt(FIX_PMAP_END));
+
+ if ( ((unsigned long)fix_to_virt(FIX_PMAP_END) <= va) &&
+ ((va & PAGE_MASK) <= (unsigned long)fix_to_virt(FIX_PMAP_BEGIN)) )
+ {
+ BUG_ON(system_state >= SYS_STATE_smp_boot);
+ return l1e_get_mfn(l1_fixmap[l1_table_offset(va)]);
+ }
+
if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
return vmap_to_mfn(va);