@@ -63,6 +63,7 @@
#include <xen/iommu.h>
#include <compat/vcpu.h>
#include <asm/psr.h>
+#include <asm/pv/domain.h>
DEFINE_PER_CPU(struct vcpu *, curr_vcpu);
@@ -70,9 +71,6 @@ static void default_idle(void);
void (*pm_idle) (void) __read_mostly = default_idle;
void (*dead_idle) (void) __read_mostly = default_dead_idle;
-static void paravirt_ctxt_switch_from(struct vcpu *v);
-static void paravirt_ctxt_switch_to(struct vcpu *v);
-
static void default_idle(void)
{
local_irq_disable();
@@ -145,13 +143,6 @@ static void noreturn continue_idle_domain(struct vcpu *v)
reset_stack_and_jump(idle_loop);
}
-static void noreturn continue_nonidle_domain(struct vcpu *v)
-{
- check_wakeup_from_wait();
- mark_regs_dirty(guest_cpu_user_regs());
- reset_stack_and_jump(ret_from_intr);
-}
-
void dump_pageframe_info(struct domain *d)
{
struct page_info *page;
@@ -313,137 +304,6 @@ void free_vcpu_struct(struct vcpu *v)
free_xenheap_page(v);
}
-static int setup_compat_l4(struct vcpu *v)
-{
- struct page_info *pg;
- l4_pgentry_t *l4tab;
-
- pg = alloc_domheap_page(v->domain, MEMF_no_owner);
- if ( pg == NULL )
- return -ENOMEM;
-
- /* This page needs to look like a pagetable so that it can be shadowed */
- pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
-
- l4tab = __map_domain_page(pg);
- clear_page(l4tab);
- init_guest_l4_table(l4tab, v->domain, 1);
- unmap_domain_page(l4tab);
-
- v->arch.guest_table = pagetable_from_page(pg);
- v->arch.guest_table_user = v->arch.guest_table;
-
- return 0;
-}
-
-static void release_compat_l4(struct vcpu *v)
-{
- if ( !pagetable_is_null(v->arch.guest_table) )
- free_domheap_page(pagetable_get_page(v->arch.guest_table));
- v->arch.guest_table = pagetable_null();
- v->arch.guest_table_user = pagetable_null();
-}
-
-int switch_compat(struct domain *d)
-{
- struct vcpu *v;
- int rc;
-
- if ( is_hvm_domain(d) || d->tot_pages != 0 )
- return -EACCES;
- if ( is_pv_32bit_domain(d) )
- return 0;
-
- d->arch.has_32bit_shinfo = 1;
- if ( is_pv_domain(d) )
- d->arch.is_32bit_pv = 1;
-
- for_each_vcpu( d, v )
- {
- rc = setup_compat_arg_xlat(v);
- if ( !rc )
- rc = setup_compat_l4(v);
-
- if ( rc )
- goto undo_and_fail;
- }
-
- domain_set_alloc_bitsize(d);
- recalculate_cpuid_policy(d);
-
- d->arch.x87_fip_width = 4;
-
- return 0;
-
- undo_and_fail:
- d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- for_each_vcpu( d, v )
- {
- free_compat_arg_xlat(v);
- release_compat_l4(v);
- }
-
- return rc;
-}
-
-static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
-{
- return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
- 1U << GDT_LDT_VCPU_SHIFT,
- v->domain->arch.pv_domain.gdt_ldt_l1tab,
- NULL);
-}
-
-static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v)
-{
- destroy_perdomain_mapping(v->domain, GDT_VIRT_START(v),
- 1U << GDT_LDT_VCPU_SHIFT);
-}
-
-static void pv_vcpu_destroy(struct vcpu *v);
-static int pv_vcpu_initialise(struct vcpu *v)
-{
- struct domain *d = v->domain;
- int rc;
-
- ASSERT(!is_idle_domain(d));
-
- spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
-
- rc = pv_create_gdt_ldt_l1tab(v);
- if ( rc )
- return rc;
-
- BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
- PAGE_SIZE);
- v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
- NR_VECTORS);
- if ( !v->arch.pv_vcpu.trap_ctxt )
- {
- rc = -ENOMEM;
- goto done;
- }
-
- /* PV guests by default have a 100Hz ticker. */
- v->periodic_period = MILLISECS(10);
-
- v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
-
- if ( is_pv_32bit_domain(d) )
- {
- if ( (rc = setup_compat_arg_xlat(v)) )
- goto done;
-
- if ( (rc = setup_compat_l4(v)) )
- goto done;
- }
-
- done:
- if ( rc )
- pv_vcpu_destroy(v);
- return rc;
-}
-
int vcpu_initialise(struct vcpu *v)
{
struct domain *d = v->domain;
@@ -488,19 +348,6 @@ int vcpu_initialise(struct vcpu *v)
return rc;
}
-static void pv_vcpu_destroy(struct vcpu *v)
-{
- if ( is_pv_32bit_vcpu(v) )
- {
- free_compat_arg_xlat(v);
- release_compat_l4(v);
- }
-
- pv_destroy_gdt_ldt_l1tab(v);
- xfree(v->arch.pv_vcpu.trap_ctxt);
- v->arch.pv_vcpu.trap_ctxt = NULL;
-}
-
void vcpu_destroy(struct vcpu *v)
{
xfree(v->arch.vm_event);
@@ -538,61 +385,6 @@ static bool emulation_flags_ok(const struct domain *d, uint32_t emflags)
return true;
}
-static void pv_domain_destroy(struct domain *d)
-{
- destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
- GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
-
- xfree(d->arch.pv_domain.cpuidmasks);
- d->arch.pv_domain.cpuidmasks = NULL;
-
- free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
- d->arch.pv_domain.gdt_ldt_l1tab = NULL;
-}
-
-static int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
- struct xen_arch_domainconfig *config)
-{
- static const struct arch_csw pv_csw = {
- .from = paravirt_ctxt_switch_from,
- .to = paravirt_ctxt_switch_to,
- .tail = continue_nonidle_domain,
- };
- int rc = -ENOMEM;
-
- d->arch.pv_domain.gdt_ldt_l1tab =
- alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
- if ( !d->arch.pv_domain.gdt_ldt_l1tab )
- goto fail;
- clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
-
- if ( levelling_caps & ~LCAP_faulting )
- {
- d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
- if ( !d->arch.pv_domain.cpuidmasks )
- goto fail;
- *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
- }
-
- rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
- GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
- NULL, NULL);
- if ( rc )
- goto fail;
-
- d->arch.ctxt_switch = &pv_csw;
-
- /* 64-bit PV guest by default. */
- d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
-
- return 0;
-
- fail:
- pv_domain_destroy(d);
-
- return rc;
-}
-
int arch_domain_create(struct domain *d, unsigned int domcr_flags,
struct xen_arch_domainconfig *config)
{
@@ -1924,7 +1716,7 @@ static void save_segments(struct vcpu *v)
#define switch_kernel_stack(v) ((void)0)
-static void paravirt_ctxt_switch_from(struct vcpu *v)
+void paravirt_ctxt_switch_from(struct vcpu *v)
{
save_segments(v);
@@ -1938,7 +1730,7 @@ static void paravirt_ctxt_switch_from(struct vcpu *v)
write_debugreg(7, 0);
}
-static void paravirt_ctxt_switch_to(struct vcpu *v)
+void paravirt_ctxt_switch_to(struct vcpu *v)
{
unsigned long cr4;
@@ -1,2 +1,3 @@
obj-y += hypercall.o
obj-bin-y += dom0_build.init.o
+obj-y += domain.o
new file mode 100644
@@ -0,0 +1,233 @@
+/******************************************************************************
+ * arch/x86/pv/domain.c
+ *
+ * PV domain handling
+ */
+
+/*
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+
+#include <xen/domain_page.h>
+#include <xen/errno.h>
+#include <xen/lib.h>
+#include <xen/sched.h>
+
+static void noreturn continue_nonidle_domain(struct vcpu *v)
+{
+ check_wakeup_from_wait();
+ mark_regs_dirty(guest_cpu_user_regs());
+ reset_stack_and_jump(ret_from_intr);
+}
+
+static int setup_compat_l4(struct vcpu *v)
+{
+ struct page_info *pg;
+ l4_pgentry_t *l4tab;
+
+ pg = alloc_domheap_page(v->domain, MEMF_no_owner);
+ if ( pg == NULL )
+ return -ENOMEM;
+
+ /* This page needs to look like a pagetable so that it can be shadowed */
+ pg->u.inuse.type_info = PGT_l4_page_table|PGT_validated|1;
+
+ l4tab = __map_domain_page(pg);
+ clear_page(l4tab);
+ init_guest_l4_table(l4tab, v->domain, 1);
+ unmap_domain_page(l4tab);
+
+ v->arch.guest_table = pagetable_from_page(pg);
+ v->arch.guest_table_user = v->arch.guest_table;
+
+ return 0;
+}
+
+static void release_compat_l4(struct vcpu *v)
+{
+ if ( !pagetable_is_null(v->arch.guest_table) )
+ free_domheap_page(pagetable_get_page(v->arch.guest_table));
+ v->arch.guest_table = pagetable_null();
+ v->arch.guest_table_user = pagetable_null();
+}
+
+int switch_compat(struct domain *d)
+{
+ struct vcpu *v;
+ int rc;
+
+ if ( is_hvm_domain(d) || d->tot_pages != 0 )
+ return -EACCES;
+ if ( is_pv_32bit_domain(d) )
+ return 0;
+
+ d->arch.has_32bit_shinfo = 1;
+ if ( is_pv_domain(d) )
+ d->arch.is_32bit_pv = 1;
+
+ for_each_vcpu( d, v )
+ {
+ rc = setup_compat_arg_xlat(v);
+ if ( !rc )
+ rc = setup_compat_l4(v);
+
+ if ( rc )
+ goto undo_and_fail;
+ }
+
+ domain_set_alloc_bitsize(d);
+ recalculate_cpuid_policy(d);
+
+ d->arch.x87_fip_width = 4;
+
+ return 0;
+
+ undo_and_fail:
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+ for_each_vcpu( d, v )
+ {
+ free_compat_arg_xlat(v);
+ release_compat_l4(v);
+ }
+
+ return rc;
+}
+
+static int pv_create_gdt_ldt_l1tab(struct vcpu *v)
+{
+ return create_perdomain_mapping(v->domain, GDT_VIRT_START(v),
+ 1U << GDT_LDT_VCPU_SHIFT,
+ v->domain->arch.pv_domain.gdt_ldt_l1tab,
+ NULL);
+}
+
+static void pv_destroy_gdt_ldt_l1tab(struct vcpu *v)
+{
+ destroy_perdomain_mapping(v->domain, GDT_VIRT_START(v),
+ 1U << GDT_LDT_VCPU_SHIFT);
+}
+
+void pv_vcpu_destroy(struct vcpu *v)
+{
+ if ( is_pv_32bit_vcpu(v) )
+ {
+ free_compat_arg_xlat(v);
+ release_compat_l4(v);
+ }
+
+ pv_destroy_gdt_ldt_l1tab(v);
+ xfree(v->arch.pv_vcpu.trap_ctxt);
+ v->arch.pv_vcpu.trap_ctxt = NULL;
+}
+
+int pv_vcpu_initialise(struct vcpu *v)
+{
+ struct domain *d = v->domain;
+ int rc;
+
+ ASSERT(!is_idle_domain(d));
+
+ spin_lock_init(&v->arch.pv_vcpu.shadow_ldt_lock);
+
+ rc = pv_create_gdt_ldt_l1tab(v);
+ if ( rc )
+ return rc;
+
+ BUILD_BUG_ON(NR_VECTORS * sizeof(*v->arch.pv_vcpu.trap_ctxt) >
+ PAGE_SIZE);
+ v->arch.pv_vcpu.trap_ctxt = xzalloc_array(struct trap_info,
+ NR_VECTORS);
+ if ( !v->arch.pv_vcpu.trap_ctxt )
+ {
+ rc = -ENOMEM;
+ goto done;
+ }
+
+ /* PV guests by default have a 100Hz ticker. */
+ v->periodic_period = MILLISECS(10);
+
+ v->arch.pv_vcpu.ctrlreg[4] = real_cr4_to_pv_guest_cr4(mmu_cr4_features);
+
+ if ( is_pv_32bit_domain(d) )
+ {
+ if ( (rc = setup_compat_arg_xlat(v)) )
+ goto done;
+
+ if ( (rc = setup_compat_l4(v)) )
+ goto done;
+ }
+
+ done:
+ if ( rc )
+ pv_vcpu_destroy(v);
+ return rc;
+}
+
+void pv_domain_destroy(struct domain *d)
+{
+ destroy_perdomain_mapping(d, GDT_LDT_VIRT_START,
+ GDT_LDT_MBYTES << (20 - PAGE_SHIFT));
+
+ xfree(d->arch.pv_domain.cpuidmasks);
+ d->arch.pv_domain.cpuidmasks = NULL;
+
+ free_xenheap_page(d->arch.pv_domain.gdt_ldt_l1tab);
+ d->arch.pv_domain.gdt_ldt_l1tab = NULL;
+}
+
+
+int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
+ struct xen_arch_domainconfig *config)
+{
+ static const struct arch_csw pv_csw = {
+ .from = paravirt_ctxt_switch_from,
+ .to = paravirt_ctxt_switch_to,
+ .tail = continue_nonidle_domain,
+ };
+ int rc = -ENOMEM;
+
+ d->arch.pv_domain.gdt_ldt_l1tab =
+ alloc_xenheap_pages(0, MEMF_node(domain_to_node(d)));
+ if ( !d->arch.pv_domain.gdt_ldt_l1tab )
+ goto fail;
+ clear_page(d->arch.pv_domain.gdt_ldt_l1tab);
+
+ if ( levelling_caps & ~LCAP_faulting )
+ {
+ d->arch.pv_domain.cpuidmasks = xmalloc(struct cpuidmasks);
+ if ( !d->arch.pv_domain.cpuidmasks )
+ goto fail;
+ *d->arch.pv_domain.cpuidmasks = cpuidmask_defaults;
+ }
+
+ rc = create_perdomain_mapping(d, GDT_LDT_VIRT_START,
+ GDT_LDT_MBYTES << (20 - PAGE_SHIFT),
+ NULL, NULL);
+ if ( rc )
+ goto fail;
+
+ d->arch.ctxt_switch = &pv_csw;
+
+ /* 64-bit PV guest by default. */
+ d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
+
+ return 0;
+
+ fail:
+ pv_domain_destroy(d);
+
+ return rc;
+}
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
@@ -655,6 +655,9 @@ static inline void pv_inject_page_fault(int errcode, unsigned long cr2)
pv_inject_event(&event);
}
+void paravirt_ctxt_switch_from(struct vcpu *v);
+void paravirt_ctxt_switch_to(struct vcpu *v);
+
#endif /* __ASM_DOMAIN_H__ */
/*
new file mode 100644
@@ -0,0 +1,57 @@
+/*
+ * pv/domain.h
+ *
+ * PV guest interface definitions
+ *
+ * Copyright (C) 2017 Wei Liu <wei.liu2@citrix.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms and conditions of the GNU General Public
+ * License, version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __X86_PV_DOMAIN_H__
+#define __X86_PV_DOMAIN_H__
+
+#ifdef CONFIG_PV
+
+void pv_vcpu_destroy(struct vcpu *v);
+int pv_vcpu_initialise(struct vcpu *v);
+void pv_domain_destroy(struct domain *d);
+int pv_domain_initialise(struct domain *d, unsigned int domcr_flags,
+ struct xen_arch_domainconfig *config);
+
+#else /* !CONFIG_PV */
+
+#include <xen/errno.h>
+
+static inline void pv_vcpu_destroy(struct vcpu *v) {}
+static inline int pv_vcpu_initialise(struct vcpu *v) { return -EOPNOTSUPP; }
+static inline void pv_domain_destroy(struct domain *d) {}
+static inline int pv_domain_initialise(struct domain *d,
+                                       unsigned int domcr_flags,
+                                       struct xen_arch_domainconfig *config)
+{
+    return -EOPNOTSUPP;
+}
+#endif /* CONFIG_PV */
+
+#endif /* __X86_PV_DOMAIN_H__ */
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
Move all the PV specific code along with the supporting code to
pv/domain.c.

This in turn requires exporting a few functions in header files. Create
pv/domain.h for that. Move paravirt_ctxt_switch_{from,to} declarations
to domain.h.

No functional change.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
v3:
1. move paravirt_ctxt_switch_* declarations
2. use CONFIG_PV

Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/domain.c           | 214 +-----------------------------------
 xen/arch/x86/pv/Makefile        |   1 +
 xen/arch/x86/pv/domain.c        | 233 ++++++++++++++++++++++++++++++++++++++++
 xen/include/asm-x86/domain.h    |   3 +
 xen/include/asm-x86/pv/domain.h |  57 ++++++++++
 5 files changed, 297 insertions(+), 211 deletions(-)
 create mode 100644 xen/arch/x86/pv/domain.c
 create mode 100644 xen/include/asm-x86/pv/domain.h
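
For context only, a rough caller-side sketch of how xen/arch/x86/domain.c is
expected to use the new header after the move. This is not part of the patch:
only the pv_domain_initialise() name and signature come from the hunks above;
the body of arch_domain_create() shown here is an assumption, and
hvm_domain_initialise() merely stands in for the existing HVM path. With
!CONFIG_PV, the inline stub in pv/domain.h makes the same call site return
-EOPNOTSUPP.

    #include <asm/pv/domain.h>

    /* Hypothetical caller-side shape -- not the actual diff to domain.c. */
    int arch_domain_create(struct domain *d, unsigned int domcr_flags,
                           struct xen_arch_domainconfig *config)
    {
        int rc;

        /* ... common, non-PV-specific setup elided ... */

        if ( is_hvm_domain(d) )
            rc = hvm_domain_initialise(d);      /* HVM path, unchanged */
        else if ( !is_idle_domain(d) )
            rc = pv_domain_initialise(d, domcr_flags, config);
        else
            rc = 0;                             /* idle domain needs neither */

        return rc;
    }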