diff mbox series

[v2,8/8] x86/hyperv: setup VP assist page

Message ID 20200103160825.19377-9-liuwe@microsoft.com (mailing list archive)
State Superseded
Headers show
Series More Hyper-V infrastructure | expand

Commit Message

Wei Liu Jan. 3, 2020, 4:08 p.m. UTC
The VP assist page is rather important as we need to toggle some bits in
it for efficient nested virtualisation.

Preemptively split out set_vp_assist, which will be used in the resume
path.

Signed-off-by: Wei Liu <liuwe@microsoft.com>
---
v2:
1. Use HV_HYP_PAGE_SHIFT instead
---
 xen/arch/x86/guest/hyperv/hyperv.c | 34 ++++++++++++++++++++++++++++++
 xen/include/asm-x86/guest/hyperv.h |  1 +
 2 files changed, 35 insertions(+)

Comments

Wei Liu Jan. 5, 2020, 4:41 p.m. UTC | #1
On Fri, Jan 03, 2020 at 04:08:25PM +0000, Wei Liu wrote:
> 
> Preemptively split out set_vp_assist page which will be used in the resume
> path.

After going through TLFS's section on reenlightenment, I don't think
this is necessary.

Wei.
diff mbox series

Patch

diff --git a/xen/arch/x86/guest/hyperv/hyperv.c b/xen/arch/x86/guest/hyperv/hyperv.c
index 5c5aed46cb..cf6ad13e48 100644
--- a/xen/arch/x86/guest/hyperv/hyperv.c
+++ b/xen/arch/x86/guest/hyperv/hyperv.c
@@ -29,6 +29,7 @@  struct ms_hyperv_info __read_mostly ms_hyperv;
 extern char hv_hypercall_page[];
 DEFINE_PER_CPU_READ_MOSTLY(struct hyperv_pcpu_page, hv_pcpu_input_arg);
 DEFINE_PER_CPU_READ_MOSTLY(unsigned int, hv_vp_index);
+DEFINE_PER_CPU_READ_MOSTLY(struct hyperv_pcpu_page, hv_vp_assist);
 
 static const struct hypervisor_ops ops;
 const struct hypervisor_ops *__init hyperv_probe(void)
@@ -107,15 +108,48 @@  static void setup_hypercall_pcpu_arg(void)
     this_cpu(hv_vp_index) = vp_index_msr;
 }
 
+static void set_vp_assist(void)
+{
+    uint64_t val = paddr_to_pfn(this_cpu(hv_vp_assist).maddr);
+
+    val = (val << HV_HYP_PAGE_SHIFT) | HV_X64_MSR_VP_ASSIST_PAGE_ENABLE;
+
+    wrmsrl(HV_X64_MSR_VP_ASSIST_PAGE, val);
+}
+
+static void setup_vp_assist(void)
+{
+    struct page_info *pg;
+    void *mapping;
+    unsigned int cpu = smp_processor_id();
+
+    pg = alloc_domheap_page(NULL, 0);
+    if ( !pg )
+        panic("Failed to allocate vp_assist page for %u\n", cpu);
+
+    mapping = __map_domain_page_global(pg);
+    if ( !mapping )
+        panic("Failed to map vp_assist page for %u\n", cpu);
+
+    clear_page(mapping);
+
+    this_cpu(hv_vp_assist).maddr = page_to_maddr(pg);
+    this_cpu(hv_vp_assist).mapping = mapping;
+
+    set_vp_assist();
+}
+
 static void __init setup(void)
 {
     setup_hypercall_page();
     setup_hypercall_pcpu_arg();
+    setup_vp_assist();
 }
 
 static void ap_setup(void)
 {
     setup_hypercall_pcpu_arg();
+    setup_vp_assist();
 }
 
 static const struct hypervisor_ops ops = {
diff --git a/xen/include/asm-x86/guest/hyperv.h b/xen/include/asm-x86/guest/hyperv.h
index 4b635829f3..917f4e02c2 100644
--- a/xen/include/asm-x86/guest/hyperv.h
+++ b/xen/include/asm-x86/guest/hyperv.h
@@ -71,6 +71,7 @@  struct hyperv_pcpu_page {
 };
 DECLARE_PER_CPU(struct hyperv_pcpu_page, hv_pcpu_input_arg);
 DECLARE_PER_CPU(unsigned int, hv_vp_index);
+DECLARE_PER_CPU(struct hyperv_pcpu_page, hv_vp_assist);
 
 const struct hypervisor_ops *hyperv_probe(void);