@@ -1273,6 +1273,15 @@ static int construct_vmcs(struct vcpu *v)
ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
__vmwrite(EPT_POINTER, ept->eptp);
+
+ /* Publish the sub-page-permission table root when the CPU supports SPP. */
+ if ( cpu_has_vmx_ept_spp )
+ {
+ struct spp_data *spp = &p2m->spptp;
+
+ spp->mfn = pagetable_get_pfn(p2m_get_spp_pagetable(p2m));
+ __vmwrite(SPPT_POINT, spp->sppt_point);
+ }
}
if ( paging_mode_hap(d) )
@@ -609,7 +609,7 @@ void p2m_free_ptp(struct p2m_domain *p2m, struct page_info *pg)
*/
int p2m_alloc_table(struct p2m_domain *p2m)
{
- struct page_info *p2m_top;
+ struct page_info *p2m_top, *p2m_spp; /* p2m_spp: root page of the SPP table */
struct domain *d = p2m->domain;
int rc = 0;
@@ -639,8 +639,17 @@ int p2m_alloc_table(struct p2m_domain *p2m)
return -ENOMEM;
}
+ p2m_spp = p2m_alloc_ptp(p2m, PGT_l4_page_table); /* NOTE(review): allocated even when SPP is unsupported — gate on the SPP capability? */
+ if ( p2m_spp == NULL )
+ {
+ p2m_unlock(p2m);
+ return -ENOMEM; /* NOTE(review): p2m_top stays on p2m->pages here — presumably reclaimed at p2m_teardown(); confirm */
+ }
+
p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));
+ p2m->spp_phys_table = pagetable_from_mfn(page_to_mfn(p2m_spp)); /* record the SPP root */
+
if ( hap_enabled(d) )
iommu_share_p2m_table(d);
@@ -678,6 +687,7 @@ void p2m_teardown(struct p2m_domain *p2m)
p2m_lock(p2m);
ASSERT(atomic_read(&d->shr_pages) == 0);
p2m->phys_table = pagetable_null();
+ p2m->spp_phys_table = pagetable_null(); /* drop the SPP root; its page is freed from p2m->pages below */
while ( (pg = page_list_remove_head(&p2m->pages)) )
d->arch.paging.free_page(d, pg);
@@ -56,6 +56,17 @@ struct ept_data {
cpumask_var_t invalidate;
};
+/* SPPT pointer (SPPTP), laid out like struct ept_data above. */
+struct spp_data {
+ union {
+ struct {
+ u64 reserved:12; /* bits 11:0 must be zero */
+ u64 mfn:52;
+ };
+ u64 sppt_point; /* raw value written to the SPPT_POINT VMCS field */
+ };
+};
+
#define _VMX_DOMAIN_PML_ENABLED 0
#define VMX_DOMAIN_PML_ENABLED (1ul << _VMX_DOMAIN_PML_ENABLED)
struct vmx_domain {
@@ -391,6 +401,7 @@ enum vmcs_field {
VMWRITE_BITMAP = 0x00002028,
VIRT_EXCEPTION_INFO = 0x0000202a,
XSS_EXIT_BITMAP = 0x0000202c,
+ SPPT_POINT = 0x00002030, /* sub-page-permission-table pointer (SPPTP) */
TSC_MULTIPLIER = 0x00002032,
GUEST_PHYSICAL_ADDRESS = 0x00002400,
VMCS_LINK_POINTER = 0x00002800,
@@ -193,6 +193,8 @@ struct p2m_domain {
/* Shadow translated domain: p2m mapping */
pagetable_t phys_table;
+ pagetable_t spp_phys_table; /* root of the sub-page-permission (SPP) table */
+
/* Same as domain_dirty_cpumask but limited to
* this p2m and those physical cpus whose vcpu's are in
* guestmode.
@@ -339,6 +341,9 @@ struct p2m_domain {
struct ept_data ept;
/* NPT-equivalent structure could be added here. */
};
+ union { /* SPP counterpart of the ept/npt union above; single member for now */
+ struct spp_data spptp;
+ };
struct {
spinlock_t lock;
@@ -385,7 +390,8 @@ static inline bool_t p2m_is_altp2m(const struct p2m_domain *p2m)
return p2m->p2m_class == p2m_alternate;
}
-#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
+#define p2m_get_pagetable(p2m) ((p2m)->phys_table)
+#define p2m_get_spp_pagetable(p2m) ((p2m)->spp_phys_table) /* accessor for the SPP root, mirroring p2m_get_pagetable() */
/*
* Ensure any deferred p2m TLB flush has been completed on all VCPUs.