@@ -272,6 +272,29 @@ static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
#include <asm/kvm_book3s_64.h>
#endif
+#if defined(CONFIG_KVM_BOOK3S_PR) && defined(CONFIG_KVM_BOOK3S_64_HV)
+static inline int kvmppc_vcpu_pr(struct kvm_vcpu *vcpu)
+{
+ return !vcpu->arch.use_hv;
+}
+
+static inline int kvmppc_vcpu_hv(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.use_hv;
+}
+
+#else
+static inline int kvmppc_vcpu_pr(struct kvm_vcpu *vcpu)
+{
+ return IS_ENABLED(CONFIG_KVM_BOOK3S_PR);
+}
+
+static inline int kvmppc_vcpu_hv(struct kvm_vcpu *vcpu)
+{
+ return IS_ENABLED(CONFIG_KVM_BOOK3S_64_HV);
+}
+#endif
+
static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
vcpu->arch.gpr[num] = val;
@@ -366,28 +389,38 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
return vcpu->arch.fault_dar;
}
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_HANDLER
static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
- return to_book3s(vcpu)->hior;
+ if (kvmppc_vcpu_pr(vcpu))
+ return to_book3s(vcpu)->hior;
+ return 0;
}
static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
unsigned long pending_now, unsigned long old_pending)
{
- if (pending_now)
- vcpu->arch.shared->int_pending = 1;
- else if (old_pending)
- vcpu->arch.shared->int_pending = 0;
+ if (kvmppc_vcpu_pr(vcpu)) {
+ if (pending_now)
+ vcpu->arch.shared->int_pending = 1;
+ else if (old_pending)
+ vcpu->arch.shared->int_pending = 0;
+ }
}
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
- ulong crit_raw = vcpu->arch.shared->critical;
- ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
+ ulong crit_raw;
+ ulong crit_r1;
bool crit;
+ if (!kvmppc_vcpu_pr(vcpu))
+ return false;
+
+ crit_raw = vcpu->arch.shared->critical;
+ crit_r1 = kvmppc_get_gpr(vcpu, 1);
+
/* Truncate crit indicators in 32 bit mode */
if (!(vcpu->arch.shared->msr & MSR_SF)) {
crit_raw &= 0xffffffff;
@@ -401,23 +434,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
return crit;
}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
- return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
- unsigned long pending_now, unsigned long old_pending)
-{
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
- return false;
-}
-#endif
+#endif /* CONFIG_KVM_BOOK3S_HANDLER */
/* Magic register values loaded into r3 and r4 before the 'sc' assembly
* instruction for the OSI hypercalls */
@@ -237,6 +237,7 @@ struct kvm_arch_memory_slot {
struct kvm_arch {
unsigned int lpid;
+ int kvm_mode;
#ifdef CONFIG_KVM_BOOK3S_64_HV
unsigned long hpt_virt;
struct revmap_entry *revmap;
@@ -278,6 +279,10 @@ struct kvm_arch {
#endif
};
+/* Values for kvm_mode */
+#define KVM_MODE_PR 1
+#define KVM_MODE_HV 2
+
/*
* Struct for a virtual core.
* Note: entry_exit_count combines an entry count in the bottom 8 bits
@@ -409,6 +414,7 @@ struct kvm_vcpu_arch {
ulong host_stack;
u32 host_pid;
#ifdef CONFIG_PPC_BOOK3S
+ bool use_hv;
struct kvmppc_slb slb[64];
int slb_max; /* 1 + index of last valid entry in slb[] */
int slb_nr; /* total number of entries in SLB */
@@ -283,7 +283,8 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
-static inline int kvm_book3s_hv_possible(void) { return 1; }
+extern int kvm_book3s_hv_possible(void);
+extern int kvm_is_book3s_hv(struct kvm *kvm);
#else
static inline void __init kvm_cma_reserve(void)
@@ -306,6 +307,8 @@ static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
}
static inline int kvm_book3s_hv_possible(void) { return 0; }
+static inline int kvm_is_book3s_hv(struct kvm *kvm) { return 0; }
+
#endif
#ifdef CONFIG_KVM_XICS
@@ -89,9 +89,20 @@ config KVM_BOOK3S_64_HV
If unsure, say N.
config KVM_BOOK3S_64_PR
- def_bool y
- depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
+ bool "KVM support without using hypervisor mode in host"
+ depends on KVM_BOOK3S_64
select KVM_BOOK3S_PR
+ ---help---
+ Support running guest kernels in virtual machines on processors
+ without using hypervisor mode in the host, by running the
+ guest in user mode (problem state) and emulating all
+ privileged instructions and registers.
+
+ This is not as fast as using hypervisor mode, but works on
+ machines where hypervisor mode is not available or not usable,
+ and can emulate processors that are different from the host
+ processor, including emulating 32-bit processors on a 64-bit
+ host.
config KVM_BOOKE_HV
bool
@@ -53,32 +53,33 @@ kvm-e500mc-objs := \
e500_emulate.o
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) := \
+ book3s_64_vio_hv.o
+
kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
$(KVM)/coalesced_mmio.o \
fpu.o \
book3s_paired_singles.o \
book3s_pr.o \
book3s_pr_papr.o \
- book3s_64_vio_hv.o \
book3s_emulate.o \
book3s_interrupts.o \
book3s_mmu_hpte.o \
book3s_64_mmu_host.o \
book3s_64_mmu.o \
book3s_32_mmu.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_PR) += \
book3s_rmhandlers.o
-kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_HV) += \
book3s_hv.o \
book3s_hv_interrupts.o \
book3s_64_mmu_hv.o
kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
book3s_hv_rm_xics.o
-kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) := \
+kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HV) += \
book3s_hv_rmhandlers.o \
book3s_hv_rm_mmu.o \
- book3s_64_vio_hv.o \
book3s_hv_ras.o \
book3s_hv_builtin.o \
book3s_hv_cma.o \
@@ -61,19 +61,54 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ NULL }
};
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+static int hv_ok;
+
+int kvm_book3s_hv_possible(void)
+{
+ return hv_ok;
+}
+
+int kvm_is_book3s_hv(struct kvm *kvm)
+{
+ return kvm->arch.kvm_mode == KVM_MODE_HV;
+}
+#endif
+
#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+/* Do x if the VM mode is PR */
+#define DO_IF_PR(kvm, x) if ((kvm)->arch.kvm_mode == KVM_MODE_PR) { x; }
+/* Do x if the VM mode is HV */
+#define DO_IF_HV(kvm, x) if ((kvm)->arch.kvm_mode == KVM_MODE_HV) { x; }
+
+/* Do x for PR vcpus */
+#define VCPU_DO_PR(vcpu, x) if (!(vcpu)->arch.use_hv) { x; }
+/* Do x for HV vcpus */
+#define VCPU_DO_HV(vcpu, x) if ((vcpu)->arch.use_hv) { x; }
+
+#else
#define DO_IF_PR(kvm, x) x
#define DO_IF_HV(kvm, x)
#define VCPU_DO_PR(vcpu, x) x
#define VCPU_DO_HV(vcpu, x)
#endif
+#else
#ifdef CONFIG_KVM_BOOK3S_64_HV
#define DO_IF_PR(kvm, x)
#define DO_IF_HV(kvm, x) x
#define VCPU_DO_PR(vcpu, x)
#define VCPU_DO_HV(vcpu, x) x
+
+#else
+#define DO_IF_PR(kvm, x)
+#define DO_IF_HV(kvm, x)
+#define VCPU_DO_PR(vcpu, x)
+#define VCPU_DO_HV(vcpu, x)
#endif
+#endif /* CONFIG_KVM_BOOK3S_PR */
+
void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
@@ -867,11 +902,16 @@ int kvmppc_core_init_vm(struct kvm *kvm)
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ if (hv_ok) {
+ err = kvmppc_core_init_vm_hv(kvm);
+ kvm->arch.kvm_mode = KVM_MODE_HV;
+ return err;
+ }
+#endif
#ifdef CONFIG_KVM_BOOK3S_PR
err = kvmppc_core_init_vm_pr(kvm);
-#endif
-#ifdef CONFIG_KVM_BOOK3S_64_HV
- err = kvmppc_core_init_vm_hv(kvm);
+ kvm->arch.kvm_mode = KVM_MODE_PR;
#endif
return err;
@@ -890,7 +930,7 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
int kvmppc_core_check_processor_compat(void)
{
-#if defined(CONFIG_KVM_BOOK3S_64_HV)
+#if defined(CONFIG_KVM_BOOK3S_64_HV) && !defined(CONFIG_KVM_BOOK3S_PR)
if (!cpu_has_feature(CPU_FTR_HVMODE))
return -EIO;
#endif
@@ -905,11 +945,13 @@ static int kvmppc_book3s_init(void)
if (r)
return r;
-#ifdef CONFIG_KVM_BOOK3S_PR
- r = kvmppc_mmu_hpte_sysinit();
-#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
r = kvmppc_mmu_hv_init();
+ if (!r)
+ hv_ok = 1;
+#endif
+#ifdef CONFIG_KVM_BOOK3S_PR
+ r = kvmppc_mmu_hpte_sysinit();
#endif
return r;
@@ -74,3 +74,4 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
/* Didn't find the liobn, punt it to userspace */
return H_TOO_HARD;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
@@ -95,6 +95,9 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int ra = get_ra(inst);
int rb = get_rb(inst);
+ if (kvmppc_vcpu_hv(vcpu))
+ return EMULATE_FAIL;
+
switch (get_op(inst)) {
case 19:
switch (get_xop(inst)) {
@@ -349,6 +352,9 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
+ if (kvmppc_vcpu_hv(vcpu))
+ return EMULATE_FAIL;
+
switch (sprn) {
case SPRN_SDR1:
if (!spr_allowed(vcpu, PRIV_HYPER))
@@ -472,6 +478,9 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
int emulated = EMULATE_DONE;
+ if (kvmppc_vcpu_hv(vcpu))
+ return EMULATE_FAIL;
+
switch (sprn) {
case SPRN_IBAT0U ... SPRN_IBAT3L:
case SPRN_IBAT4U ... SPRN_IBAT7L:
@@ -22,7 +22,8 @@
#ifdef CONFIG_KVM_BOOK3S_64_HV
EXPORT_SYMBOL_GPL(kvmppc_hv_entry_trampoline);
-#else
+#endif
+#ifdef CONFIG_KVM_BOOK3S_PR
EXPORT_SYMBOL_GPL(kvmppc_entry_trampoline);
EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
#ifdef CONFIG_ALTIVEC
@@ -959,6 +959,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, unsigned int id)
vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
if (!vcpu)
goto out;
+ vcpu->arch.use_hv = true;
err = kvm_vcpu_init(vcpu, kvm, id);
if (err)
@@ -1921,6 +1922,7 @@ void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
kvmppc_free_hpt(kvm);
}
+#ifndef CONFIG_KVM_BOOK3S_PR
/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
unsigned int inst, int *advance)
@@ -1937,3 +1939,4 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
return EMULATE_FAIL;
}
+#endif
@@ -569,6 +569,10 @@ kvmppc_interrupt:
lbz r9, HSTATE_IN_GUEST(r13)
cmpwi r9, KVM_GUEST_MODE_HOST_HV
beq kvmppc_bad_host_intr
+#ifdef CONFIG_KVM_BOOK3S_PR
+ cmpwi r9, KVM_GUEST_MODE_GUEST
+ beq kvmppc_interrupt_pr
+#endif
/* We're now back in the host but in guest MMU context */
li r9, KVM_GUEST_MODE_HOST_HV
stb r9, HSTATE_IN_GUEST(r13)
@@ -161,8 +161,15 @@ kvmppc_handler_trampoline_enter_end:
.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+.global kvmppc_interrupt_pr
+kvmppc_interrupt_pr:
+ ld r9, HSTATE_SCRATCH2(r13)
+
+#else
.global kvmppc_interrupt
kvmppc_interrupt:
+#endif
/* Register usage at this point:
*
@@ -818,7 +818,7 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
}
/* Check for real mode returning too hard */
- if (xics->real_mode)
+ if (xics->real_mode && kvmppc_vcpu_hv(vcpu))
return kvmppc_xics_rm_complete(vcpu, req);
switch (req) {
@@ -50,7 +50,6 @@ int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
return 1;
}
-#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
* Common checks before entering the guest world. Call with interrupts
* disabled.
@@ -125,7 +124,6 @@ int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
return r;
}
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
@@ -193,8 +191,8 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
goto out;
#ifdef CONFIG_KVM_BOOK3S_64_HV
- /* HV KVM can only do PAPR mode for now */
- if (!vcpu->arch.papr_enabled)
+ /* HV KVM can only do PAPR mode */
+ if (!vcpu->arch.papr_enabled && kvmppc_vcpu_hv(vcpu))
goto out;
#endif
@@ -298,6 +296,12 @@ void kvm_arch_sync_events(struct kvm *kvm)
{
}
+#if defined(CONFIG_KVM_BOOK3S_64_HV) && !defined(CONFIG_KVM_BOOK3S_PR)
+#define KVM_IS_BOOK3S_HV_ONLY 1
+#else
+#define KVM_IS_BOOK3S_HV_ONLY 0
+#endif
+
int kvm_dev_ioctl_check_extension(long ext)
{
int r;
@@ -320,22 +324,24 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_DEVICE_CTRL:
r = 1;
break;
-#ifndef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_PAIRED_SINGLES:
case KVM_CAP_PPC_OSI:
case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
case KVM_CAP_SW_TLB:
#endif
-#ifdef CONFIG_KVM_MPIC
- case KVM_CAP_IRQ_MPIC:
-#endif
- r = 1;
+ r = !KVM_IS_BOOK3S_HV_ONLY;
break;
+#ifdef CONFIG_KVM_MMIO
case KVM_CAP_COALESCED_MMIO:
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
#endif
+#ifdef CONFIG_KVM_MPIC
+ case KVM_CAP_IRQ_MPIC:
+ r = 1;
+ break;
+#endif
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_PPC_ALLOC_HTAB:
@@ -348,30 +354,32 @@ int kvm_dev_ioctl_check_extension(long ext)
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_SMT:
- r = threads_per_core;
+ r = 0;
+ if (kvm_book3s_hv_possible())
+ r = threads_per_core;
break;
case KVM_CAP_PPC_RMA:
- r = 1;
- /* PPC970 requires an RMA */
- if (cpu_has_feature(CPU_FTR_ARCH_201))
+ r = kvm_book3s_hv_possible();
+ /* PPC970 requires an RMA for HV KVM */
+ if (r && cpu_has_feature(CPU_FTR_ARCH_201))
r = 2;
break;
#endif
case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
- r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
+ r = !KVM_IS_BOOK3S_HV_ONLY ||
+ cpu_has_feature(CPU_FTR_ARCH_206);
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
r = 1;
#else
r = 0;
- break;
#endif
+ break;
#ifdef CONFIG_KVM_BOOK3S_64_HV
case KVM_CAP_PPC_HTAB_FD:
- r = 1;
+ r = kvm_book3s_hv_possible();
break;
#endif
- break;
case KVM_CAP_NR_VCPUS:
/*
* Recommending a number of CPUs is somewhat arbitrary; we
@@ -379,10 +387,10 @@ int kvm_dev_ioctl_check_extension(long ext)
* will have secondary threads "offline"), and for other KVM
* implementations just count online CPUs.
*/
-#ifdef CONFIG_KVM_BOOK3S_64_HV
- r = num_present_cpus();
-#else
r = num_online_cpus();
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+ if (kvm_book3s_hv_possible())
+ r = num_present_cpus();
#endif
break;
case KVM_CAP_MAX_VCPUS:
@@ -1027,6 +1035,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_allocate_rma rma;
struct kvm *kvm = filp->private_data;
+ r = -ENOTTY;
+ if (!kvm_is_book3s_hv(kvm))
+ break;
r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
r = -EFAULT;
@@ -1036,6 +1047,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
case KVM_PPC_ALLOCATE_HTAB: {
u32 htab_order;
+ r = -ENOTTY;
+ if (!kvm_is_book3s_hv(kvm))
+ break;
r = -EFAULT;
if (get_user(htab_order, (u32 __user *)argp))
break;
@@ -1052,6 +1066,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
case KVM_PPC_GET_HTAB_FD: {
struct kvm_get_htab_fd ghf;
+ r = -ENOTTY;
+ if (!kvm_is_book3s_hv(kvm))
+ break;
r = -EFAULT;
if (copy_from_user(&ghf, argp, sizeof(ghf)))
break;
This makes the config options for PR and HV KVM independently selectable,
making it possible to compile a KVM module with both PR and HV code in it.

This adds fields to struct kvm_arch and struct kvm_vcpu_arch to indicate
whether the guest is using PR or HV KVM, though at this stage all guests
in a given kernel instance are of the same type: HV KVM if HV is enabled
and the machine supports it (i.e. has suitable CPUs and has a working
hypervisor mode available), otherwise PR.

Since the code in book3s_64_vio_hv.c is called from real mode with HV
KVM, and therefore has to be built into the main kernel binary, this
makes it always built-in rather than part of the KVM module.  It gets
called from the KVM module by PR KVM, so this adds an
EXPORT_SYMBOL_GPL().

If both HV and PR KVM are included, interrupts come in to the HV version
of the kvmppc_interrupt code, which then jumps to the PR handler,
renamed to kvmppc_interrupt_pr, if the guest is a PR guest.

Allowing both PR and HV in the same kernel required some changes to
kvm_dev_ioctl_check_extension(), since the values returned now can't be
selected with #ifdefs as much as previously.  For capabilities that are
only provided by HV KVM (for example, KVM_PPC_ALLOCATE_HTAB), we return
the HV value only if HV KVM is possible on the current machine.  For
capabilities provided by PR KVM but not HV, we return the PR value
unless only HV KVM has been configured.
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/kvm_book3s.h   | 67 +++++++++++++++++++++------------
 arch/powerpc/include/asm/kvm_host.h     |  6 +++
 arch/powerpc/include/asm/kvm_ppc.h      |  5 ++-
 arch/powerpc/kvm/Kconfig                | 15 +++++++-
 arch/powerpc/kvm/Makefile               | 11 +++---
 arch/powerpc/kvm/book3s.c               | 56 +++++++++++++++++++++++----
 arch/powerpc/kvm/book3s_64_vio_hv.c     |  1 +
 arch/powerpc/kvm/book3s_emulate.c       |  9 +++++
 arch/powerpc/kvm/book3s_exports.c       |  3 +-
 arch/powerpc/kvm/book3s_hv.c            |  3 ++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S |  4 ++
 arch/powerpc/kvm/book3s_segment.S       |  7 ++++
 arch/powerpc/kvm/book3s_xics.c          |  2 +-
 arch/powerpc/kvm/powerpc.c              | 57 ++++++++++++++++++----------
 14 files changed, 184 insertions(+), 62 deletions(-)