[for-next,7/8] x86/domain: move PV specific code to pv/domain.c

Message ID 20170410132716.31610-8-wei.liu2@citrix.com (mailing list archive)
State New, archived

Commit Message

Wei Liu April 10, 2017, 1:27 p.m. UTC
Move all the PV specific code along with the supporting code to
pv/domain.c.

This in turn requires exporting a few functions in header files. Export
paravirt context switch functions in domain.h and create pv/pv.h for
the rest.

No functional change.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/domain.c        | 250 +--------------------------------------
 xen/arch/x86/pv/Makefile     |   1 +
 xen/arch/x86/pv/domain.c     | 270 +++++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h |   3 +
 xen/include/asm-x86/pv/pv.h  |  29 +++++
 5 files changed, 306 insertions(+), 247 deletions(-)
 create mode 100644 xen/arch/x86/pv/domain.c
 create mode 100644 xen/include/asm-x86/pv/pv.h
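
For orientation, here is a minimal sketch of how the common code in
xen/arch/x86/domain.c is expected to reach the moved functions through
the new asm/pv/pv.h header. The dispatch shape below is an assumption
for illustration only -- the actual call sites come from earlier
patches in this series and are not visible in this diff:

    #include <asm/pv/pv.h>

    int vcpu_initialise(struct vcpu *v)
    {
        int rc = 0;

        /* PV-specific setup now lives in pv/domain.c. */
        if ( is_pv_domain(v->domain) )
            rc = pv_vcpu_initialise(v);

        return rc;
    }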

Comments

Jan Beulich April 24, 2017, 12:39 p.m. UTC | #1
>>> On 10.04.17 at 15:27, <wei.liu2@citrix.com> wrote:
> Move all the PV specific code along with the supporting code to
> pv/domain.c.

Had you done this series in a different order, or had the earlier
patches moved their broken-out functions right away, this patch
would have been quite a bit smaller. Anyway, it looks to be pure
code motion, so it ought to be fine once re-based over the changes
expected to the earlier patches (this re-basing would also be
easier if the new functions were moved right away).

> --- /dev/null
> +++ b/xen/arch/x86/pv/domain.c
> @@ -0,0 +1,270 @@
> +/******************************************************************************
> + * arch/x86/pv/domain.c
> + *
> + * PV-specific domain handling
> + */
> +
> +/*
> + *  Copyright (C) 1995  Linus Torvalds
> + *
> + *  Pentium III FXSR, SSE support
> + *  Gareth Hughes <gareth@valinux.com>, May 2000
> + */

It's probably necessary/appropriate to keep (copy) this, but I highly
doubt there is any bit left here of Linux origin.

Jan
Wei Liu April 24, 2017, 2:24 p.m. UTC | #2
On Mon, Apr 24, 2017 at 06:39:38AM -0600, Jan Beulich wrote:
> >>> On 10.04.17 at 15:27, <wei.liu2@citrix.com> wrote:
> > Move all the PV specific code along with the supporting code to
> > pv/domain.c.
> 
> Had you done this series in a different order, or had the earlier
> patches moved their broken out functions right away, this patch
> would have been quite a bit smaller. Anyways, it looks to be pure

It is just trading one kind of code churn for another. For example,
the compat L4 helpers (setup_compat_l4 and release_compat_l4) are used
by several PV functions. Should I choose to move those PV functions
(pv_vcpu_initialise and pv_vcpu_destroy) as I go along, I would then
need to export these static helpers while doing so and unexport them
again when I am finished.
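
As a concrete (hypothetical) illustration of that intermediate churn,
moving only pv_vcpu_destroy first would need something like:

    --- a/xen/arch/x86/domain.c
    +++ b/xen/arch/x86/domain.c
    -static void release_compat_l4(struct vcpu *v)
    +/* Temporarily non-static: callers of this remain in this file. */
    +void release_compat_l4(struct vcpu *v)

    --- a/xen/include/asm-x86/pv/pv.h
    +++ b/xen/include/asm-x86/pv/pv.h
    +/* Transitional declaration; dropped once the last caller moves. */
    +void release_compat_l4(struct vcpu *v);

followed by a later patch deleting the declaration again once the
remaining users have moved.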

I am not too fussed either way in this particular series. I am inclined
to move everything in one or more patches when the refactoring is done.
But please let me know if you feel strongly about how it should be done.

Wei.
Jan Beulich April 24, 2017, 3:57 p.m. UTC | #3
>>> On 24.04.17 at 16:24, <wei.liu2@citrix.com> wrote:
> On Mon, Apr 24, 2017 at 06:39:38AM -0600, Jan Beulich wrote:
>> >>> On 10.04.17 at 15:27, <wei.liu2@citrix.com> wrote:
>> > Move all the PV specific code along with the supporting code to
>> > pv/domain.c.
>> 
>> Had you done this series in a different order, or had the earlier
>> patches moved their broken-out functions right away, this patch
>> would have been quite a bit smaller. Anyway, it looks to be pure
> 
> It is just trading one kind of code churn for another. For example,
> the compat L4 helpers (setup_compat_l4 and release_compat_l4) are used
> by several PV functions. Should I choose to move those PV functions
> (pv_vcpu_initialise and pv_vcpu_destroy) as I go along, I would then
> need to export these static helpers while doing so and unexport them
> again when I am finished.

Ah, true.

> I am not too fussed either way in this particular series. I am inclined
> to move everything in one or more patches when the refactoring is done.
> But please let me know if you feel strongly about how it should be done.

As long as there are reasons behind how things are done, I don't
think my personal taste matters all that much.

Jan

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index ed16adf77a..4a2363fc96 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -63,6 +63,7 @@ 
 #include <xen/iommu.h>
 #include <compat/vcpu.h>
 #include <asm/psr.h>
+#include <asm/pv/pv.h>
 
 DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
 
@@ -70,9 +71,6 @@  static void default_idle(void);
 void (*pm_idle) (void) __read_mostly = default_idle;
 void (*dead_idle) (void) __read_mostly = default_dead_idle;
 
-static void paravirt_ctxt_switch_from(struct vcpu *v);
-static void paravirt_ctxt_switch_to(struct vcpu *v);
-
 static void default_idle(void)
 {
     local_irq_disable();
@@ -145,13 +143,6 @@  static void noreturn continue_idle_domain(struct vcpu *v)
     reset_stack_and_jump(idle_loop);
 }
 
-static void noreturn continue_nonidle_domain(struct vcpu *v)
-{
-    check_wakeup_from_wait();
-    mark_regs_dirty(guest_cpu_user_regs());
-    reset_stack_and_jump(ret_from_intr);
-}
-
 void dump_pageframe_info(struct domain *d)
 {
     struct page_info *page;
@@ -313,129 +304,6 @@  void free_vcpu_struct(struct vcpu *v)
     free_xenheap_page(v);
 }
 
-static int setup_compat_l4(struct vcpu *v)
-{
-    struct page_info *pg;
-    l4_pgentry_t *l4tab;
-
-    pg = alloc_domheap_page(v->domain, MEMF_no_owner);
-    if ( pg == NULL )
-        return -ENOMEM;
-
-    /* This page needs to look like a pagetable so that it can be shadowed */
-    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
-
-    l4tab = __map_domain_page(pg);
-    clear_page(l4tab);
-    init_guest_l4_table(l4tab, v->domain, 1);
-    unmap_domain_page(l4tab);
-
-    v->arch.guest_table = pagetable_from_page(pg);
-    v->arch.guest_table_user = v->arch.guest_table;
-
-    return 0;
-}
-
-static void release_compat_l4(struct vcpu *v)
-{
-    free_domheap_page(pagetable_get_page(v->arch.guest_table));
-    v->arch.guest_table = pagetable_null();
-    v->arch.guest_table_user = pagetable_null();
-}
-
-int switch_compat(struct domain *d)
-{
-    struct vcpu *v;
-    int rc;
-
-    if ( is_hvm_domain(d) || d->tot_pages != 0 )
-        return -EACCES;
-    if ( is_pv_32bit_domain(d) )
-        return 0;
-
-    d->arch.has_32bit_shinfo = 1;
-    if ( is_pv_domain(d) )
-        d->arch.is_32bit_pv = 1;
-
-    for_each_vcpu( d, v )
-    {
-        rc = setup_compat_arg_xlat(v);
-        if ( !rc )
-            rc = setup_compat_l4(v);
-
-        if ( rc )
-            goto undo_and_fail;
-    }
-
-    domain_set_alloc_bitsize(d);
-    recalculate_cpuid_policy(d);
-
-    d->arch.x87_fip_width = 4;
-
-    return 0;
-
- undo_and_fail:
-    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-    for_each_vcpu( d, v )
-    {
-        free_compat_arg_xlat(v);
-
-        if ( !pagetable_is_null(v->arch.guest_table) )
-            release_compat_l4(v);
-    }
-
-    return rc;
-}
-
-static int pv_vcpu_initialise(struct vcpu *v)
-{
-    struct domain *d = v->domain;
-    int rc = 0;
-
-    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
-
-    if ( !is_idle_domain(d) )
-    {
-        rc = create_perdomain_mapping(d, GDT_VIRT_START(v),
-                                      1 << GDT_LDT_VCPU_SHIFT,
-                                      d->arch.pv_domain.gdt_ldt_l1tab, NULL);
-        if ( rc )
-            goto done;
-
-        BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
-                     PAGE_SIZE);
-        v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
-                                                  NR_VECTORS);
-        if ( !v->arch.pv_vcpu.trap_ctxt )
-        {
-            rc = -ENOMEM;
-            goto done;
-        }
-
-        /* PV guests by default have a 100Hz ticker. */
-        v->periodic_period = MILLISECS(10);
-    }
-    else
-        v->arch.cr3 = __pa(idle_pg_table);
-
-    v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
-
-    if ( is_pv_32bit_domain(d) )
-    {
-        if ( (rc = setup_compat_arg_xlat(v)) )
-            goto done;
-
-        if ( (rc = setup_compat_l4(v)) )
-        {
-            free_compat_arg_xlat(v);
-            goto done;
-        }
-    }
-
- done:
-    return rc;
-}
-
 int vcpu_initialise(struct vcpu *v)
 {
     struct domain *d = v->domain;
@@ -479,17 +347,6 @@  int vcpu_initialise(struct vcpu *v)
     return rc;
 }
 
-static void pv_vcpu_destroy(struct vcpu *v)
-{
-    if ( is_pv_32bit_vcpu(v) )
-    {
-        free_compat_arg_xlat(v);
-        release_compat_l4(v);
-    }
-
-    xfree(v->arch.pv_vcpu.trap_ctxt);
-}
-
 void vcpu_destroy(struct vcpu *v)
 {
     xfree(v->arch.vm_event);
@@ -527,64 +384,6 @@  static bool emulation_flags_ok(const struct domain *d, uint32_t emflags)
     return true;
 }
 
-static void pv_domain_destroy(struct domain *d)
-{
-    xfree(d->arch.pv_domain.cpuidmasks);
-    free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
-}
-
-static int pv_domain_initialise(struct domain *d, unsigned int domcr_flags)
-{
-    static const struct arch_csw pv_csw = {
-        .from = paravirt_ctxt_switch_from,
-        .to   = paravirt_ctxt_switch_to,
-        .tail = continue_nonidle_domain,
-    };
-    int rc = -ENOMEM;
-
-    d->arch.pv_domain.gdt_ldt_l1tab =
-        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
-    if ( !d->arch.pv_domain.gdt_ldt_l1tab )
-        goto fail;
-    clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
-
-    if ( levelling_caps & ~LCAP_faulting )
-    {
-        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
-        if ( !d->arch.pv_domain.cpuidmasks )
-            goto fail;
-        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
-    }
-
-    rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
-                                  GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
-                                  NULL, NULL);
-    if ( rc )
-        goto fail;
-
-    d->arch.ctxt_switch = &pv_csw;
-
-    /* 64-bit PV guest by default. */
-    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-
-    return 0;
-
-fail:
-    if ( d->arch.pv_domain.gdt_ldt_l1tab )
-    {
-        free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
-        d->arch.pv_domain.gdt_ldt_l1tab = NULL;
-    }
-
-    if ( d->arch.pv_domain.cpuidmasks )
-    {
-        xfree(d->arch.pv_domain.cpuidmasks);
-        d->arch.pv_domain.cpuidmasks = NULL;
-    }
-
-    return rc;
-}
-
 int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                        struct xen_arch_domainconfig *config)
 {
@@ -862,49 +661,6 @@  int arch_domain_soft_reset(struct domain *d)
     return ret;
 }
 
-/*
- * These are the masks of CR4 bits (subject to hardware availability) which a
- * PV guest may not legitimiately attempt to modify.
- */
-static unsigned long __read_mostly pv_cr4_mask, compat_pv_cr4_mask;
-
-static int __init init_pv_cr4_masks(void)
-{
-    unsigned long common_mask = ~X86_CR4_TSD;
-
-    /*
-     * All PV guests may attempt to modify TSD, DE and OSXSAVE.
-     */
-    if ( cpu_has_de )
-        common_mask &= ~X86_CR4_DE;
-    if ( cpu_has_xsave )
-        common_mask &= ~X86_CR4_OSXSAVE;
-
-    pv_cr4_mask = compat_pv_cr4_mask = common_mask;
-
-    /*
-     * 64bit PV guests may attempt to modify FSGSBASE.
-     */
-    if ( cpu_has_fsgsbase )
-        pv_cr4_mask &= ~X86_CR4_FSGSBASE;
-
-    return 0;
-}
-__initcall(init_pv_cr4_masks);
-
-unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
-{
-    unsigned long hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4());
-    unsigned long mask = is_pv_32bit_vcpu(v) ? compat_pv_cr4_mask : pv_cr4_mask;
-
-    if ( (guest_cr4 & mask) != (hv_cr4 & mask) )
-        printk(XENLOG_G_WARNING
-               "d%d attempted to change %pv's CR4 flags %08lx -> %08lx\n",
-               current->domain->domain_id, v, hv_cr4, guest_cr4);
-
-    return (hv_cr4 & mask) | (guest_cr4 & ~mask);
-}
-
 #define xen_vcpu_guest_context vcpu_guest_context
 #define fpu_ctxt fpu_ctxt.x
 CHECK_FIELD_(struct, vcpu_guest_context, fpu_ctxt);
@@ -1917,7 +1673,7 @@  static void save_segments(struct vcpu *v)
 
 #define switch_kernel_stack(v) ((void)0)
 
-static void paravirt_ctxt_switch_from(struct vcpu *v)
+void paravirt_ctxt_switch_from(struct vcpu *v)
 {
     save_segments(v);
 
@@ -1931,7 +1687,7 @@  static void paravirt_ctxt_switch_from(struct vcpu *v)
         write_debugreg(7, 0);
 }
 
-static void paravirt_ctxt_switch_to(struct vcpu *v)
+void paravirt_ctxt_switch_to(struct vcpu *v)
 {
     unsigned long cr4;
 
diff --git a/xen/arch/x86/pv/Makefile b/xen/arch/x86/pv/Makefile
index ea94599438..2737824e81 100644
--- a/xen/arch/x86/pv/Makefile
+++ b/xen/arch/x86/pv/Makefile
@@ -1,2 +1,3 @@ 
 obj-y += hypercall.o
 obj-bin-y += dom0_build.init.o
+obj-y += domain.o
diff --git a/xen/arch/x86/pv/domain.c b/xen/arch/x86/pv/domain.c
new file mode 100644
index 0000000000..0b82ee2bbc
--- /dev/null
+++ b/xen/arch/x86/pv/domain.c
@@ -0,0 +1,270 @@ 
+/******************************************************************************
+ * arch/x86/pv/domain.c
+ *
+ * PV-specific domain handling
+ */
+
+/*
+ *  Copyright (C) 1995  Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *  Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+
+#include <xen/domain_page.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+
+static void noreturn continue_nonidle_domain(struct vcpu *v)
+{
+    check_wakeup_from_wait();
+    mark_regs_dirty(guest_cpu_user_regs());
+    reset_stack_and_jump(ret_from_intr);
+}
+
+static int setup_compat_l4(struct vcpu *v)
+{
+    struct page_info *pg;
+    l4_pgentry_t *l4tab;
+
+    pg = alloc_domheap_page(v->domain, MEMF_no_owner);
+    if ( pg == NULL )
+        return -ENOMEM;
+
+    /* This page needs to look like a pagetable so that it can be shadowed */
+    pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
+
+    l4tab = __map_domain_page(pg);
+    clear_page(l4tab);
+    init_guest_l4_table(l4tab, v->domain, 1);
+    unmap_domain_page(l4tab);
+
+    v->arch.guest_table = pagetable_from_page(pg);
+    v->arch.guest_table_user = v->arch.guest_table;
+
+    return 0;
+}
+
+static void release_compat_l4(struct vcpu *v)
+{
+    free_domheap_page(pagetable_get_page(v->arch.guest_table));
+    v->arch.guest_table = pagetable_null();
+    v->arch.guest_table_user = pagetable_null();
+}
+
+int switch_compat(struct domain *d)
+{
+    struct vcpu *v;
+    int rc;
+
+    if ( is_hvm_domain(d) || d->tot_pages != 0 )
+        return -EACCES;
+    if ( is_pv_32bit_domain(d) )
+        return 0;
+
+    d->arch.has_32bit_shinfo = 1;
+    if ( is_pv_domain(d) )
+        d->arch.is_32bit_pv = 1;
+
+    for_each_vcpu( d, v )
+    {
+        rc = setup_compat_arg_xlat(v);
+        if ( !rc )
+            rc = setup_compat_l4(v);
+
+        if ( rc )
+            goto undo_and_fail;
+    }
+
+    domain_set_alloc_bitsize(d);
+    recalculate_cpuid_policy(d);
+
+    d->arch.x87_fip_width = 4;
+
+    return 0;
+
+ undo_and_fail:
+    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+    for_each_vcpu( d, v )
+    {
+        free_compat_arg_xlat(v);
+
+        if ( !pagetable_is_null(v->arch.guest_table) )
+            release_compat_l4(v);
+    }
+
+    return rc;
+}
+
+int pv_vcpu_initialise(struct vcpu *v)
+{
+    struct domain *d = v->domain;
+    int rc = 0;
+
+    spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+    if ( !is_idle_domain(d) )
+    {
+        rc = create_perdomain_mapping(d, GDT_VIRT_START(v),
+                                      1 << GDT_LDT_VCPU_SHIFT,
+                                      d->arch.pv_domain.gdt_ldt_l1tab, NULL);
+        if ( rc )
+            goto done;
+
+        BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
+                     PAGE_SIZE);
+        v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
+                                                  NR_VECTORS);
+        if ( !v->arch.pv_vcpu.trap_ctxt )
+        {
+            rc = -ENOMEM;
+            goto done;
+        }
+
+        /* PV guests by default have a 100Hz ticker. */
+        v->periodic_period = MILLISECS(10);
+    }
+    else
+        v->arch.cr3 = __pa(idle_pg_table);
+
+    v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
+
+    if ( is_pv_32bit_domain(d) )
+    {
+        if ( (rc = setup_compat_arg_xlat(v)) )
+            goto done;
+
+        if ( (rc = setup_compat_l4(v)) )
+        {
+            free_compat_arg_xlat(v);
+            goto done;
+        }
+    }
+
+ done:
+    return rc;
+}
+
+void pv_vcpu_destroy(struct vcpu *v)
+{
+    if ( is_pv_32bit_vcpu(v) )
+    {
+        free_compat_arg_xlat(v);
+        release_compat_l4(v);
+    }
+
+    xfree(v->arch.pv_vcpu.trap_ctxt);
+}
+
+void pv_domain_destroy(struct domain *d)
+{
+    xfree(d->arch.pv_domain.cpuidmasks);
+    free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+}
+
+int pv_domain_initialise(struct domain *d, unsigned int domcr_flags)
+{
+    static const struct arch_csw pv_csw = {
+        .from = paravirt_ctxt_switch_from,
+        .to   = paravirt_ctxt_switch_to,
+        .tail = continue_nonidle_domain,
+    };
+    int rc = -ENOMEM;
+
+    d->arch.pv_domain.gdt_ldt_l1tab =
+        alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
+    if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+        goto fail;
+    clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+
+    if ( levelling_caps & ~LCAP_faulting )
+    {
+        d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+        if ( !d->arch.pv_domain.cpuidmasks )
+            goto fail;
+        *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+    }
+
+    rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
+                                  GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
+                                  NULL, NULL);
+    if ( rc )
+        goto fail;
+
+    d->arch.ctxt_switch = &pv_csw;
+
+    /* 64-bit PV guest by default. */
+    d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+
+    return 0;
+
+fail:
+    if ( d->arch.pv_domain.gdt_ldt_l1tab )
+    {
+        free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+        d->arch.pv_domain.gdt_ldt_l1tab = NULL;
+    }
+
+    if ( d->arch.pv_domain.cpuidmasks )
+    {
+        xfree(d->arch.pv_domain.cpuidmasks);
+        d->arch.pv_domain.cpuidmasks = NULL;
+    }
+
+    return rc;
+}
+
+/*
+ * These are the masks of CR4 bits (subject to hardware availability) which a
+ * PV guest may not legitimiately attempt to modify.
+ */
+static unsigned long __read_mostly pv_cr4_mask, compat_pv_cr4_mask;
+
+static int __init init_pv_cr4_masks(void)
+{
+    unsigned long common_mask = ~X86_CR4_TSD;
+
+    /*
+     * All PV guests may attempt to modify TSD, DE and OSXSAVE.
+     */
+    if ( cpu_has_de )
+        common_mask &= ~X86_CR4_DE;
+    if ( cpu_has_xsave )
+        common_mask &= ~X86_CR4_OSXSAVE;
+
+    pv_cr4_mask = compat_pv_cr4_mask = common_mask;
+
+    /*
+     * 64bit PV guests may attempt to modify FSGSBASE.
+     */
+    if ( cpu_has_fsgsbase )
+        pv_cr4_mask &= ~X86_CR4_FSGSBASE;
+
+    return 0;
+}
+__initcall(init_pv_cr4_masks);
+
+unsigned long pv_guest_cr4_fixup(const struct vcpu *v, unsigned long guest_cr4)
+{
+    unsigned long hv_cr4 = real_cr4_to_pv_guest_cr4(read_cr4());
+    unsigned long mask = is_pv_32bit_vcpu(v) ? compat_pv_cr4_mask : pv_cr4_mask;
+
+    if ( (guest_cr4 & mask) != (hv_cr4 & mask) )
+        printk(XENLOG_G_WARNING
+               "d%d attempted to change %pv's CR4 flags %08lx -> %08lx\n",
+               current->domain->domain_id, v, hv_cr4, guest_cr4);
+
+    return (hv_cr4 & mask) | (guest_cr4 & ~mask);
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 6ab987f231..e6262178e8 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -74,6 +74,9 @@  int mapcache_domain_init(struct domain *);
 int mapcache_vcpu_init(struct vcpu *);
 void mapcache_override_current(struct vcpu *);
 
+void paravirt_ctxt_switch_from(struct vcpu *v);
+void paravirt_ctxt_switch_to(struct vcpu *v);
+
 /* x86/64: toggle guest between kernel and user modes. */
 void toggle_guest_mode(struct vcpu *);
 
diff --git a/xen/include/asm-x86/pv/pv.h b/xen/include/asm-x86/pv/pv.h
new file mode 100644
index 0000000000..ba2d054d08
--- /dev/null
+++ b/xen/include/asm-x86/pv/pv.h
@@ -0,0 +1,29 @@ 
+/*
+ * pv/pv.h
+ *
+ * PV guest interface definitions
+ *
+ * Copyright (C) 2017 Wei Liu <wei.liu2@citrix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms and conditions of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __X86_PV_H__
+#define __X86_PV_H__
+
+int pv_vcpu_initialise(struct vcpu *v);
+void pv_vcpu_destroy(struct vcpu *v);
+void pv_domain_destroy(struct domain *d);
+int pv_domain_initialise(struct domain *d, unsigned int domcr_flags);
+
+#endif	/* __X86_PV_H__ */