@@ -82,7 +82,7 @@ int vmce_restore_vcpu(struct vcpu *v, const struct hvm_vmce_vcpu *ctxt)
{
dprintk(XENLOG_G_ERR, "%s restore: unsupported MCA capabilities"
" %#" PRIx64 " for %pv (supported: %#Lx)\n",
- has_hvm_container_vcpu(v) ? "HVM" : "PV", ctxt->caps,
+ is_hvm_vcpu(v) ? "HVM" : "PV", ctxt->caps,
v, guest_mcg_cap & ~MCG_CAP_COUNT);
return -EPERM;
}
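
Review note: every hunk in this patch is the same mechanical substitution, has_hvm_container_domain() -> is_hvm_domain() (and likewise for the vcpu variants), presumably safe because PVHv1 is gone and guest_type is now two-valued; note the comment in arch_domain_soft_reset() below dropping its PVH mention. A minimal sketch of why the two predicates coincide, using the definitions quoted near the end of this patch:

    /* Sketch only, not part of the patch: with exactly two guest types
     * the ==/!= forms select the same domains, so the container
     * predicate carries no extra information. */
    enum guest_type { guest_type_pv, guest_type_hvm };

    #define is_hvm(t)            ((t) == guest_type_hvm)
    #define has_hvm_container(t) ((t) != guest_type_pv)
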
@@ -364,7 +364,7 @@ int inject_vmce(struct domain *d, int vcpu)
if ( !v->is_initialised )
continue;
- if ( (has_hvm_container_domain(d) ||
+ if ( (is_hvm_domain(d) ||
guest_has_trap_callback(d, v->vcpu_id, TRAP_machine_check)) &&
!test_and_set_bool(v->mce_pending) )
{
@@ -444,7 +444,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
if ( !mfn_valid(mfn) )
return -EINVAL;
- if ( !has_hvm_container_domain(d) || !paging_mode_hap(d) )
+ if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
return -EOPNOTSUPP;
rc = -1;
@@ -237,7 +237,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
vpmu->arch_vpmu_ops->arch_vpmu_save(sampling, 1);
vpmu_reset(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED);
- if ( has_hvm_container_vcpu(sampled) )
+ if ( is_hvm_vcpu(sampled) )
*flags = 0;
else
*flags = PMU_SAMPLE_PV;
@@ -288,7 +288,7 @@ void vpmu_do_interrupt(struct cpu_user_regs *regs)
r->sp = cur_regs->rsp;
r->flags = cur_regs->rflags;
- if ( !has_hvm_container_vcpu(sampled) )
+ if ( !is_hvm_vcpu(sampled) )
{
r->ss = cur_regs->ss;
r->cs = cur_regs->cs;
@@ -305,8 +305,8 @@ static int amd_vpmu_save(struct vcpu *v, bool_t to_guest)
context_save(v);
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
- has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+ is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
if ( to_guest )
@@ -367,7 +367,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
return -EINVAL;
/* For all counters, enable guest only mode for HVM guest */
- if ( has_hvm_container_vcpu(v) && (type == MSR_TYPE_CTRL) &&
+ if ( is_hvm_vcpu(v) && (type == MSR_TYPE_CTRL) &&
!is_guest_mode(msr_content) )
{
set_guest_mode(msr_content);
@@ -381,7 +381,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
return 0;
vpmu_set(vpmu, VPMU_RUNNING);
- if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_set_msr_bitmap(v);
}
@@ -390,7 +390,7 @@ static int amd_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
(is_pmu_enabled(msr_content) == 0) && vpmu_is_set(vpmu, VPMU_RUNNING) )
{
vpmu_reset(vpmu, VPMU_RUNNING);
- if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
release_pmu_ownership(PMU_OWNER_HVM);
}
@@ -433,7 +433,7 @@ static void amd_vpmu_destroy(struct vcpu *v)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( has_hvm_container_vcpu(v) && is_msr_bitmap_on(vpmu) )
+ if ( is_hvm_vcpu(v) && is_msr_bitmap_on(vpmu) )
amd_vpmu_unset_msr_bitmap(v);
xfree(vpmu->context);
@@ -306,7 +306,7 @@ static inline void __core2_vpmu_save(struct vcpu *v)
for ( i = 0; i < arch_pmc_cnt; i++ )
rdmsrl(MSR_IA32_PERFCTR0 + i, xen_pmu_cntr_pair[i].counter);
- if ( !has_hvm_container_vcpu(v) )
+ if ( !is_hvm_vcpu(v) )
rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, core2_vpmu_cxt->global_status);
}
@@ -314,7 +314,7 @@ static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
{
struct vpmu_struct *vpmu = vcpu_vpmu(v);
- if ( !has_hvm_container_vcpu(v) )
+ if ( !is_hvm_vcpu(v) )
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if ( !vpmu_are_all_set(vpmu, VPMU_CONTEXT_SAVE | VPMU_CONTEXT_LOADED) )
@@ -323,8 +323,8 @@ static int core2_vpmu_save(struct vcpu *v, bool_t to_guest)
__core2_vpmu_save(v);
/* Unset PMU MSR bitmap to trap lazy load. */
- if ( !vpmu_is_set(vpmu, VPMU_RUNNING) &&
- has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
+ if ( !vpmu_is_set(vpmu, VPMU_RUNNING) && is_hvm_vcpu(v) &&
+ cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
if ( to_guest )
@@ -362,7 +362,7 @@ static inline void __core2_vpmu_load(struct vcpu *v)
if ( vpmu_is_set(vcpu_vpmu(v), VPMU_CPU_HAS_DS) )
wrmsrl(MSR_IA32_DS_AREA, core2_vpmu_cxt->ds_area);
- if ( !has_hvm_container_vcpu(v) )
+ if ( !is_hvm_vcpu(v) )
{
wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, core2_vpmu_cxt->global_ovf_ctrl);
core2_vpmu_cxt->global_ovf_ctrl = 0;
@@ -413,7 +413,7 @@ static int core2_vpmu_verify(struct vcpu *v)
}
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) &&
- !(has_hvm_container_vcpu(v)
+ !(is_hvm_vcpu(v)
? is_canonical_address(core2_vpmu_cxt->ds_area)
: __addr_ok(core2_vpmu_cxt->ds_area)) )
return -EINVAL;
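
Review note: the asymmetry of this check is easy to miss; a hedged sketch of an equivalent helper (hypothetical name ds_area_ok), assuming the usual semantics of is_canonical_address() and __addr_ok():

    /* An HVM guest's DS area lives in the guest's own address space,
     * so any canonical address is acceptable; a PV guest shares the
     * address space with Xen, so the address must additionally lie in
     * the guest-accessible range (__addr_ok()). */
    static bool ds_area_ok(const struct vcpu *v, uint64_t addr)
    {
        return is_hvm_vcpu(v) ? is_canonical_address(addr)
                              : __addr_ok(addr);
    }
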
@@ -474,7 +474,7 @@ static int core2_vpmu_alloc_resource(struct vcpu *v)
if ( !acquire_pmu_ownership(PMU_OWNER_HVM) )
return 0;
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
if ( vmx_add_host_load_msr(MSR_CORE_PERF_GLOBAL_CTRL) )
@@ -539,7 +539,7 @@ static int core2_vpmu_msr_common_check(u32 msr_index, int *type, int *index)
{
__core2_vpmu_load(current);
vpmu_set(vpmu, VPMU_CONTEXT_LOADED);
- if ( has_hvm_container_vcpu(current) &&
+ if ( is_hvm_vcpu(current) &&
cpu_has_vmx_msr_bitmap )
core2_vpmu_set_msr_bitmap(current->arch.hvm_vmx.msr_bitmap);
}
@@ -612,9 +612,8 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
return -EINVAL;
if ( vpmu_is_set(vpmu, VPMU_CPU_HAS_DS) )
{
- if ( !(has_hvm_container_vcpu(v)
- ? is_canonical_address(msr_content)
- : __addr_ok(msr_content)) )
+ if ( !(is_hvm_vcpu(v) ? is_canonical_address(msr_content)
+ : __addr_ok(msr_content)) )
{
gdprintk(XENLOG_WARNING,
"Illegal address for IA32_DS_AREA: %#" PRIx64 "x\n",
@@ -635,7 +634,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
if ( msr_content & fixed_ctrl_mask )
return -EINVAL;
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
&core2_vpmu_cxt->global_ctrl);
else
@@ -704,7 +703,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
if ( blocked )
return -EINVAL;
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL,
&core2_vpmu_cxt->global_ctrl);
else
@@ -723,7 +722,7 @@ static int core2_vpmu_do_wrmsr(unsigned int msr, uint64_t msr_content,
wrmsrl(msr, msr_content);
else
{
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
vmx_write_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
else
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
@@ -757,7 +756,7 @@ static int core2_vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
*msr_content = core2_vpmu_cxt->global_status;
break;
case MSR_CORE_PERF_GLOBAL_CTRL:
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
vmx_read_guest_msr(MSR_CORE_PERF_GLOBAL_CTRL, msr_content);
else
rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, *msr_content);
@@ -858,7 +857,7 @@ static void core2_vpmu_destroy(struct vcpu *v)
vpmu->context = NULL;
xfree(vpmu->priv_context);
vpmu->priv_context = NULL;
- if ( has_hvm_container_vcpu(v) && cpu_has_vmx_msr_bitmap )
+ if ( is_hvm_vcpu(v) && cpu_has_vmx_msr_bitmap )
core2_vpmu_unset_msr_bitmap(v->arch.hvm_vmx.msr_bitmap);
release_pmu_ownership(PMU_OWNER_HVM);
vpmu_clear(vpmu);
@@ -762,7 +762,7 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
res->c |= cpufeat_mask(X86_FEATURE_DSCPL);
}
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
/* OSXSAVE clear in policy. Fast-forward CR4 back in. */
if ( v->arch.hvm_vcpu.guest_cr[4] & X86_CR4_OSXSAVE )
@@ -918,11 +918,11 @@ void guest_cpuid(const struct vcpu *v, uint32_t leaf,
case 0x80000001:
/* SYSCALL is hidden outside of long mode on Intel. */
if ( p->x86_vendor == X86_VENDOR_INTEL &&
- has_hvm_container_domain(d) && !hvm_long_mode_enabled(v) )
+ is_hvm_domain(d) && !hvm_long_mode_enabled(v) )
res->d &= ~cpufeat_mask(X86_FEATURE_SYSCALL);
common_leaf1_adjustments:
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
/* Fast-forward MSR_APIC_BASE.EN. */
if ( vlapic_hw_disabled(vcpu_vlapic(v)) )
@@ -168,7 +168,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
pagecnt = min_t(long, PAGE_SIZE - (addr & ~PAGE_MASK), len);
- mfn = (has_hvm_container_domain(dp)
+ mfn = (is_hvm_domain(dp)
? dbg_hvm_va2mfn(addr, dp, toaddr, &gfn)
: dbg_pv_va2mfn(addr, dp, pgd3));
if ( mfn_eq(mfn, INVALID_MFN) )
@@ -187,7 +187,7 @@ void dump_pageframe_info(struct domain *d)
spin_unlock(&d->page_alloc_lock);
}
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
p2m_pod_dump_data(d);
spin_lock(&d->page_alloc_lock);
@@ -390,7 +390,7 @@ int vcpu_initialise(struct vcpu *v)
spin_lock_init(&v->arch.vpmu.vpmu_lock);
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
rc = hvm_vcpu_initialise(v);
goto done;
@@ -461,7 +461,7 @@ void vcpu_destroy(struct vcpu *v)
vcpu_destroy_fpu(v);
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
hvm_vcpu_destroy(v);
else
xfree(v->arch.pv_vcpu.trap_ctxt);
@@ -548,7 +548,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
d->arch.emulation_flags = emflags;
}
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
d->arch.hvm_domain.hap_enabled =
hvm_funcs.hap_supported && (domcr_flags & DOMCRF_hap);
@@ -622,7 +622,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
if ( (rc = psr_domain_init(d)) != 0 )
goto fail;
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
if ( (rc = hvm_domain_initialise(d)) != 0 )
goto fail;
@@ -681,7 +681,7 @@ int arch_domain_create(struct domain *d, unsigned int domcr_flags,
void arch_domain_destroy(struct domain *d)
{
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
hvm_domain_destroy(d);
xfree(d->arch.e820);
@@ -733,8 +733,8 @@ int arch_domain_soft_reset(struct domain *d)
p2m_type_t p2mt;
unsigned int i;
- /* Soft reset is supported for HVM/PVH domains only. */
- if ( !has_hvm_container_domain(d) )
+ /* Soft reset is supported for HVM domains only. */
+ if ( !is_hvm_domain(d) )
return -EINVAL;
hvm_domain_soft_reset(d);
@@ -924,7 +924,7 @@ int arch_set_info_guest(
v->fpu_initialised = !!(flags & VGCF_I387_VALID);
v->arch.flags &= ~TF_kernel_mode;
- if ( (flags & VGCF_in_kernel) || has_hvm_container_domain(d)/*???*/ )
+ if ( (flags & VGCF_in_kernel) || is_hvm_domain(d)/*???*/ )
v->arch.flags |= TF_kernel_mode;
v->arch.vgc_flags = flags;
@@ -969,7 +969,7 @@ int arch_set_info_guest(
}
}
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
v->arch.debugreg[i] = c(debugreg[i]);
@@ -1993,7 +1993,7 @@ static void __context_switch(void)
if ( xcr0 != get_xcr0() && !set_xcr0(xcr0) )
BUG();
- if ( cpu_has_xsaves && has_hvm_container_vcpu(n) )
+ if ( cpu_has_xsaves && is_hvm_vcpu(n) )
set_msr_xss(n->arch.hvm_vcpu.msr_xss);
}
vcpu_restore_fpu_eager(n);
@@ -2083,7 +2083,7 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
if ( is_pv_domain(nextd) &&
(is_idle_domain(prevd) ||
- has_hvm_container_domain(prevd) ||
+ is_hvm_domain(prevd) ||
is_pv_32bit_domain(prevd) != is_pv_32bit_domain(nextd)) )
{
uint64_t efer = read_efer();
@@ -2385,7 +2385,7 @@ int domain_relinquish_resources(struct domain *d)
pit_deinit(d);
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
hvm_domain_relinquish_resources(d);
return 0;
@@ -2428,7 +2428,7 @@ void vcpu_mark_events_pending(struct vcpu *v)
if ( already_pending )
return;
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
hvm_assert_evtchn_irq(v);
else
vcpu_kick(v);
@@ -360,9 +360,8 @@ static unsigned long __init compute_dom0_nr_pages(
avail -= max_pdx >> s;
}
- need_paging = has_hvm_container_domain(d)
- ? !iommu_hap_pt_share || !paging_mode_hap(d)
- : opt_dom0_shadow;
+ need_paging = is_hvm_domain(d) ? !iommu_hap_pt_share || !paging_mode_hap(d)
+ : opt_dom0_shadow;
for ( ; ; need_paging = 0 )
{
nr_pages = dom0_nrpages;
@@ -1524,7 +1524,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
for ( i = 0; i < ARRAY_SIZE(v->arch.debugreg); ++i )
c(debugreg[i] = v->arch.debugreg[i]);
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
struct segment_register sreg;
@@ -283,7 +283,7 @@ static int dm_op(domid_t domid,
if ( rc )
return rc;
- if ( !has_hvm_container_domain(d) )
+ if ( !is_hvm_domain(d) )
goto out;
rc = xsm_dm_op(XSM_DM_PRIV, d);
@@ -3052,7 +3052,7 @@ static enum hvm_copy_result __hvm_copy(
char *p;
int count, todo = size;
- ASSERT(has_hvm_container_vcpu(v));
+ ASSERT(is_hvm_vcpu(v));
/*
* XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
@@ -3993,7 +3993,7 @@ static int hvmop_set_param(
return -ESRCH;
rc = -EINVAL;
- if ( !has_hvm_container_domain(d) )
+ if ( !is_hvm_domain(d) )
goto out;
rc = hvm_allow_set_param(d, &a);
@@ -4248,7 +4248,7 @@ static int hvmop_get_param(
return -ESRCH;
rc = -EINVAL;
- if ( !has_hvm_container_domain(d) )
+ if ( !is_hvm_domain(d) )
goto out;
rc = hvm_allow_get_param(d, &a);
@@ -480,7 +480,7 @@ int hvm_local_events_need_delivery(struct vcpu *v)
void arch_evtchn_inject(struct vcpu *v)
{
- if ( has_hvm_container_vcpu(v) )
+ if ( is_hvm_vcpu(v) )
hvm_assert_evtchn_irq(v);
}
@@ -540,7 +540,7 @@ int hvm_get_mem_pinned_cacheattr(struct domain *d, gfn_t gfn,
uint64_t mask = ~(uint64_t)0 << order;
int rc = -ENXIO;
- ASSERT(has_hvm_container_domain(d));
+ ASSERT(is_hvm_domain(d));
rcu_read_lock(&pinned_cacheattr_rcu_lock);
list_for_each_entry_rcu ( range,
@@ -560,8 +560,7 @@ void msixtbl_init(struct domain *d)
{
struct hvm_io_handler *handler;
- if ( !has_hvm_container_domain(d) || !has_vlapic(d) ||
- msixtbl_initialised(d) )
+ if ( !is_hvm_domain(d) || !has_vlapic(d) || msixtbl_initialised(d) )
return;
INIT_LIST_HEAD(&d->arch.hvm_domain.msixtbl_list);
@@ -768,7 +768,7 @@ void vmx_vmcs_exit(struct vcpu *v)
{
/* Don't confuse vmx_do_resume (for @v or @current!) */
vmx_clear_vmcs(v);
- if ( has_hvm_container_vcpu(current) )
+ if ( is_hvm_vcpu(current) )
vmx_load_vmcs(current);
spin_unlock(&v->arch.hvm_vmx.vmcs_lock);
@@ -1930,7 +1930,7 @@ static void vmcs_dump(unsigned char ch)
for_each_domain ( d )
{
- if ( !has_hvm_container_domain(d) )
+ if ( !is_hvm_domain(d) )
continue;
printk("\n>>> Domain %d <<<\n", d->domain_id);
for_each_vcpu ( d, v )
@@ -240,7 +240,7 @@ static void vmx_pi_do_resume(struct vcpu *v)
/* This function is called when pcidevs_lock is held */
void vmx_pi_hooks_assign(struct domain *d)
{
- if ( !iommu_intpost || !has_hvm_container_domain(d) )
+ if ( !iommu_intpost || !is_hvm_domain(d) )
return;
ASSERT(!d->arch.hvm_domain.pi_ops.vcpu_block);
@@ -254,7 +254,7 @@ void vmx_pi_hooks_assign(struct domain *d)
/* This function is called when pcidevs_lock is held */
void vmx_pi_hooks_deassign(struct domain *d)
{
- if ( !iommu_intpost || !has_hvm_container_domain(d) )
+ if ( !iommu_intpost || !is_hvm_domain(d) )
return;
ASSERT(d->arch.hvm_domain.pi_ops.vcpu_block);
@@ -438,7 +438,7 @@ int page_is_ram_type(unsigned long mfn, unsigned long mem_type)
unsigned long domain_get_maximum_gpfn(struct domain *d)
{
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
return p2m_get_hostp2m(d)->max_mapped_pfn;
/* NB. PV guests specify nr_pfns rather than max_pfn so we adjust here. */
return (arch_get_max_pfn(d) ?: 1) - 1;
@@ -3184,7 +3184,7 @@ long do_mmuext_op(
break;
}
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
switch ( op.cmd )
{
@@ -420,7 +420,7 @@ static int paging_log_dirty_op(struct domain *d,
* Mark dirty all currently write-mapped pages on e.g. the
* final iteration of a save operation.
*/
- if ( has_hvm_container_domain(d) &&
+ if ( is_hvm_domain(d) &&
(sc->mode & XEN_DOMCTL_SHADOW_LOGDIRTY_FINAL) )
hvm_mapped_guest_frames_mark_dirty(d);
@@ -319,7 +319,7 @@ const struct x86_emulate_ops *shadow_init_emulation(
struct vcpu *v = current;
unsigned long addr;
- ASSERT(has_hvm_container_vcpu(v));
+ ASSERT(is_hvm_vcpu(v));
memset(sh_ctxt, 0, sizeof(*sh_ctxt));
@@ -363,7 +363,7 @@ void shadow_continue_emulation(struct sh_emulate_ctxt *sh_ctxt,
struct vcpu *v = current;
unsigned long addr, diff;
- ASSERT(has_hvm_container_vcpu(v));
+ ASSERT(is_hvm_vcpu(v));
/*
* We don't refetch the segment bases, because we don't emulate
@@ -1700,9 +1700,8 @@ void *sh_emulate_map_dest(struct vcpu *v, unsigned long vaddr,
#ifndef NDEBUG
/* We don't emulate user-mode writes to page tables. */
- if ( has_hvm_container_domain(d)
- ? hvm_get_cpl(v) == 3
- : !guest_kernel_mode(v, guest_cpu_user_regs()) )
+ if ( is_hvm_domain(d) ? hvm_get_cpl(v) == 3
+ : !guest_kernel_mode(v, guest_cpu_user_regs()) )
{
gdprintk(XENLOG_DEBUG, "User-mode write to pagetable reached "
"emulate_map_dest(). This should never happen!\n");
@@ -1723,7 +1723,7 @@ void __hwdom_init setup_io_bitmap(struct domain *d)
{
int rc;
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
bitmap_fill(d->arch.hvm_domain.io_bitmap, 0x10000);
rc = rangeset_report_ranges(d->arch.ioport_caps, 0, 0x10000,
@@ -940,7 +940,7 @@ static void __update_vcpu_system_time(struct vcpu *v, int force)
}
else
{
- if ( has_hvm_container_domain(d) && hvm_tsc_scaling_supported )
+ if ( is_hvm_domain(d) && hvm_tsc_scaling_supported )
{
tsc_stamp = hvm_scale_tsc(d, t->stamp.local_tsc);
_u.tsc_to_system_mul = d->arch.vtsc_to_ns.mul_frac;
@@ -1950,7 +1950,7 @@ void tsc_get_info(struct domain *d, uint32_t *tsc_mode,
uint64_t *elapsed_nsec, uint32_t *gtsc_khz,
uint32_t *incarnation)
{
- bool_t enable_tsc_scaling = has_hvm_container_domain(d) &&
+ bool_t enable_tsc_scaling = is_hvm_domain(d) &&
hvm_tsc_scaling_supported && !d->arch.vtsc;
*incarnation = d->arch.incarnation;
@@ -2030,7 +2030,7 @@ void tsc_set_info(struct domain *d,
* PV: guest has not migrated yet (and thus arch.tsc_khz == cpu_khz)
*/
if ( tsc_mode == TSC_MODE_DEFAULT && host_tsc_is_safe() &&
- (has_hvm_container_domain(d) ?
+ (is_hvm_domain(d) ?
(d->arch.tsc_khz == cpu_khz ||
hvm_get_tsc_scaling_ratio(d->arch.tsc_khz)) :
incarnation == 0) )
@@ -2045,8 +2045,7 @@ void tsc_set_info(struct domain *d,
case TSC_MODE_PVRDTSCP:
d->arch.vtsc = !boot_cpu_has(X86_FEATURE_RDTSCP) ||
!host_tsc_is_safe();
- enable_tsc_scaling = has_hvm_container_domain(d) &&
- !d->arch.vtsc &&
+ enable_tsc_scaling = is_hvm_domain(d) && !d->arch.vtsc &&
hvm_get_tsc_scaling_ratio(gtsc_khz ?: cpu_khz);
d->arch.tsc_khz = (enable_tsc_scaling && gtsc_khz) ? gtsc_khz : cpu_khz;
set_time_scale(&d->arch.vtsc_to_ns, d->arch.tsc_khz * 1000 );
@@ -2063,7 +2062,7 @@ void tsc_set_info(struct domain *d,
break;
}
d->arch.incarnation = incarnation + 1;
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
{
if ( hvm_tsc_scaling_supported && !d->arch.vtsc )
d->arch.hvm_domain.tsc_scaling_ratio =
@@ -799,7 +799,7 @@ void do_trap(struct cpu_user_regs *regs)
}
if ( ((trapnr == TRAP_copro_error) || (trapnr == TRAP_simd_error)) &&
- system_state >= SYS_STATE_active && has_hvm_container_vcpu(curr) &&
+ system_state >= SYS_STATE_active && is_hvm_vcpu(curr) &&
curr->arch.hvm_vcpu.fpu_exception_callback )
{
curr->arch.hvm_vcpu.fpu_exception_callback(
@@ -976,7 +976,7 @@ void cpuid_hypervisor_leaves(const struct vcpu *v, uint32_t leaf,
break;
case 4: /* HVM hypervisor leaf. */
- if ( !has_hvm_container_domain(d) || subleaf != 0 )
+ if ( !is_hvm_domain(d) || subleaf != 0 )
break;
if ( cpu_has_vmx_apic_reg_virt )
@@ -88,7 +88,7 @@ void show_registers(const struct cpu_user_regs *regs)
enum context context;
struct vcpu *v = system_state >= SYS_STATE_smp_boot ? current : NULL;
- if ( guest_mode(regs) && has_hvm_container_vcpu(v) )
+ if ( guest_mode(regs) && is_hvm_vcpu(v) )
{
struct segment_register sreg;
context = CTXT_hvm_guest;
@@ -623,7 +623,7 @@ static void hypercall_page_initialise_ring3_kernel(void *hypercall_page)
void hypercall_page_initialise(struct domain *d, void *hypercall_page)
{
memset(hypercall_page, 0xCC, PAGE_SIZE);
- if ( has_hvm_container_domain(d) )
+ if ( is_hvm_domain(d) )
hvm_hypercall_page_initialise(d, hypercall_page);
else if ( !is_pv_32bit_domain(d) )
hypercall_page_initialise_ring3_kernel(hypercall_page);
@@ -55,7 +55,7 @@ int arch_iommu_populate_page_table(struct domain *d)
while ( !rc && (page = page_list_remove_head(&d->page_list)) )
{
- if ( has_hvm_container_domain(d) ||
+ if ( is_hvm_domain(d) ||
(page->u.inuse.type_info & PGT_type_mask) == PGT_writable_page )
{
unsigned long mfn = page_to_mfn(page);
@@ -16,7 +16,7 @@
#define is_pv_32bit_domain(d) ((d)->arch.is_32bit_pv)
#define is_pv_32bit_vcpu(v) (is_pv_32bit_domain((v)->domain))
-#define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
+#define is_hvm_pv_evtchn_domain(d) (is_hvm_domain(d) && \
d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
#define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
#define is_domain_direct_mapped(d) ((void)(d), 0)
@@ -26,7 +26,7 @@ static inline int local_events_need_delivery(void)
ASSERT(!is_idle_vcpu(v));
- return (has_hvm_container_vcpu(v) ? hvm_local_events_need_delivery(v) :
+ return (is_hvm_vcpu(v) ? hvm_local_events_need_delivery(v) :
(vcpu_info(v, evtchn_upcall_pending) &&
!vcpu_info(v, evtchn_upcall_mask)));
}
@@ -14,27 +14,27 @@
/* Raw access functions: no type checking. */
#define raw_copy_to_guest(dst, src, len) \
- (has_hvm_container_vcpu(current) ? \
+ (is_hvm_vcpu(current) ? \
copy_to_user_hvm((dst), (src), (len)) : \
copy_to_user((dst), (src), (len)))
#define raw_copy_from_guest(dst, src, len) \
- (has_hvm_container_vcpu(current) ? \
+ (is_hvm_vcpu(current) ? \
copy_from_user_hvm((dst), (src), (len)) : \
copy_from_user((dst), (src), (len)))
#define raw_clear_guest(dst, len) \
- (has_hvm_container_vcpu(current) ? \
+ (is_hvm_vcpu(current) ? \
clear_user_hvm((dst), (len)) : \
clear_user((dst), (len)))
#define __raw_copy_to_guest(dst, src, len) \
- (has_hvm_container_vcpu(current) ? \
+ (is_hvm_vcpu(current) ? \
copy_to_user_hvm((dst), (src), (len)) : \
__copy_to_user((dst), (src), (len)))
#define __raw_copy_from_guest(dst, src, len) \
- (has_hvm_container_vcpu(current) ? \
+ (is_hvm_vcpu(current) ? \
copy_from_user_hvm((dst), (src), (len)) : \
__copy_from_user((dst), (src), (len)))
#define __raw_clear_guest(dst, len) \
- (has_hvm_container_vcpu(current) ? \
+ (is_hvm_vcpu(current) ? \
clear_user_hvm((dst), (len)) : \
clear_user((dst), (len)))
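
Review note: these raw accessors dispatch on the type of the current vCPU because an HVM guest's virtual addresses have to be resolved through the guest's own paging structures (copy_*_user_hvm), while a PV guest's mappings are directly usable from Xen (copy_*_user). A minimal, hypothetical caller for illustration:

    /* Hedged sketch, not part of the patch: raw_copy_to_guest() keeps
     * copy_to_user() semantics and returns the number of bytes that
     * could NOT be copied. */
    static int reply_to_guest(void *guest_dst, const uint32_t *val)
    {
        return raw_copy_to_guest(guest_dst, val, sizeof(*val)) ? -EFAULT
                                                               : 0;
    }
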
@@ -621,7 +621,7 @@ unsigned long hvm_cr4_guest_valid_bits(const struct vcpu *v, bool restore);
#define arch_vcpu_block(v) ({ \
struct vcpu *v_ = (v); \
struct domain *d_ = v_->domain; \
- if ( has_hvm_container_domain(d_) && \
+ if ( is_hvm_domain(d_) && \
(d_->arch.hvm_domain.pi_ops.vcpu_block) ) \
d_->arch.hvm_domain.pi_ops.vcpu_block(v_); \
})
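
Review note: testing is_hvm_domain() before the indirect call stays consistent with the assignment side, since the vmx_pi_hooks_assign() hunk above bails out for non-HVM domains and appears to be the only place pi_ops.vcpu_block is installed. A function-style sketch of the same guard, for illustration only:

    /* Hedged sketch equivalent to the macro above: the hook can only
     * be non-NULL for HVM domains, so both tests reject the same set
     * of vCPUs. */
    static void maybe_block(struct vcpu *v)
    {
        struct domain *d = v->domain;

        if ( is_hvm_domain(d) && d->arch.hvm_domain.pi_ops.vcpu_block )
            d->arch.hvm_domain.pi_ops.vcpu_block(v);
    }
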
@@ -874,8 +874,6 @@ void watchdog_domain_destroy(struct domain *d);
#define is_pv_vcpu(v) (is_pv_domain((v)->domain))
#define is_hvm_domain(d) ((d)->guest_type == guest_type_hvm)
#define is_hvm_vcpu(v) (is_hvm_domain(v->domain))
-#define has_hvm_container_domain(d) ((d)->guest_type != guest_type_pv)
-#define has_hvm_container_vcpu(v) (has_hvm_container_domain((v)->domain))
#define is_pinned_vcpu(v) ((v)->domain->is_pinned || \
cpumask_weight((v)->cpu_hard_affinity) == 1)
#ifdef CONFIG_HAS_PASSTHROUGH
@@ -185,9 +185,8 @@ typedef XEN_GUEST_HANDLE_PARAM(char) tmem_cli_va_param_t;
static inline int tmem_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
- if ( has_hvm_container_vcpu(current) ?
- hvm_guest_x86_mode(current) != 8 :
- is_pv_32bit_vcpu(current) )
+ if ( is_hvm_vcpu(current) ? hvm_guest_x86_mode(current) != 8
+ : is_pv_32bit_vcpu(current) )
{
int rc;
enum XLAT_tmem_op_u u;
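
Review note: the compat decision above is dynamic for HVM (hvm_guest_x86_mode() != 8, i.e. not in 64-bit long mode) and static for PV (is_pv_32bit_vcpu()). A hedged sketch of the predicate as a standalone helper (hypothetical name):

    /* Sketch only: true when the calling guest needs the 32-bit
     * (compat) layout of tmem_op_t. 8 is hvm_guest_x86_mode()'s
     * value for 64-bit long mode. */
    static bool tmem_client_is_compat(struct vcpu *v)
    {
        return is_hvm_vcpu(v) ? hvm_guest_x86_mode(v) != 8
                              : is_pv_32bit_vcpu(v);
    }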