[01/27] KVM: PPC: Name generic 64-bit code generic

Message ID: 1271369518-11247-2-git-send-email-agraf@suse.de
State: New, archived

Commit Message

Alexander Graf April 15, 2010, 10:11 p.m. UTC

Patch

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index ee79921..7670e2a 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -22,7 +22,7 @@ 
 
 #include <linux/types.h>
 #include <linux/kvm_host.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 
 struct kvmppc_slb {
 	u64 esid;
diff --git a/arch/powerpc/include/asm/kvm_book3s_64_asm.h b/arch/powerpc/include/asm/kvm_book3s_64_asm.h
deleted file mode 100644
index 183461b..0000000
--- a/arch/powerpc/include/asm/kvm_book3s_64_asm.h
+++ /dev/null
@@ -1,76 +0,0 @@ 
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- * Copyright SUSE Linux Products GmbH 2009
- *
- * Authors: Alexander Graf <agraf@suse.de>
- */
-
-#ifndef __ASM_KVM_BOOK3S_ASM_H__
-#define __ASM_KVM_BOOK3S_ASM_H__
-
-#ifdef __ASSEMBLY__
-
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-
-#include <asm/kvm_asm.h>
-
-.macro DO_KVM intno
-	.if (\intno == BOOK3S_INTERRUPT_SYSTEM_RESET) || \
-	    (\intno == BOOK3S_INTERRUPT_MACHINE_CHECK) || \
-	    (\intno == BOOK3S_INTERRUPT_DATA_STORAGE) || \
-	    (\intno == BOOK3S_INTERRUPT_INST_STORAGE) || \
-	    (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
-	    (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
-	    (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
-	    (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
-	    (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
-	    (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
-	    (\intno == BOOK3S_INTERRUPT_DECREMENTER) || \
-	    (\intno == BOOK3S_INTERRUPT_SYSCALL) || \
-	    (\intno == BOOK3S_INTERRUPT_TRACE) || \
-	    (\intno == BOOK3S_INTERRUPT_PERFMON) || \
-	    (\intno == BOOK3S_INTERRUPT_ALTIVEC) || \
-	    (\intno == BOOK3S_INTERRUPT_VSX)
-
-	b	kvmppc_trampoline_\intno
-kvmppc_resume_\intno:
-
-	.endif
-.endm
-
-#else
-
-.macro DO_KVM intno
-.endm
-
-#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
-
-#else  /*__ASSEMBLY__ */
-
-struct kvmppc_book3s_shadow_vcpu {
-	ulong gpr[14];
-	u32 cr;
-	u32 xer;
-	ulong host_r1;
-	ulong host_r2;
-	ulong handler;
-	ulong scratch0;
-	ulong scratch1;
-	ulong vmhandler;
-};
-
-#endif /*__ASSEMBLY__ */
-
-#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
new file mode 100644
index 0000000..183461b
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -0,0 +1,76 @@ 
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#ifndef __ASM_KVM_BOOK3S_ASM_H__
+#define __ASM_KVM_BOOK3S_ASM_H__
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+
+#include <asm/kvm_asm.h>
+
+.macro DO_KVM intno
+	.if (\intno == BOOK3S_INTERRUPT_SYSTEM_RESET) || \
+	    (\intno == BOOK3S_INTERRUPT_MACHINE_CHECK) || \
+	    (\intno == BOOK3S_INTERRUPT_DATA_STORAGE) || \
+	    (\intno == BOOK3S_INTERRUPT_INST_STORAGE) || \
+	    (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \
+	    (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \
+	    (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \
+	    (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \
+	    (\intno == BOOK3S_INTERRUPT_PROGRAM) || \
+	    (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \
+	    (\intno == BOOK3S_INTERRUPT_DECREMENTER) || \
+	    (\intno == BOOK3S_INTERRUPT_SYSCALL) || \
+	    (\intno == BOOK3S_INTERRUPT_TRACE) || \
+	    (\intno == BOOK3S_INTERRUPT_PERFMON) || \
+	    (\intno == BOOK3S_INTERRUPT_ALTIVEC) || \
+	    (\intno == BOOK3S_INTERRUPT_VSX)
+
+	b	kvmppc_trampoline_\intno
+kvmppc_resume_\intno:
+
+	.endif
+.endm
+
+#else
+
+.macro DO_KVM intno
+.endm
+
+#endif /* CONFIG_KVM_BOOK3S_64_HANDLER */
+
+#else  /*__ASSEMBLY__ */
+
+struct kvmppc_book3s_shadow_vcpu {
+	ulong gpr[14];
+	u32 cr;
+	u32 xer;
+	ulong host_r1;
+	ulong host_r2;
+	ulong handler;
+	ulong scratch0;
+	ulong scratch1;
+	ulong vmhandler;
+};
+
+#endif /*__ASSEMBLY__ */
+
+#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
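
A note for readers following the rename: the DO_KVM macro above is the hook that head_64.S plants in each exception vector. A rough C model of its build-time behaviour follows; the vector offsets are assumed to match the BOOK3S_INTERRUPT_* values in asm/kvm_asm.h, and the function itself is purely illustrative, not kernel code.

#include <stdbool.h>

/* Illustrative model of DO_KVM: intno is the exception vector offset.
 * For the hooked vectors the macro emits "b kvmppc_trampoline_<intno>"
 * followed by a kvmppc_resume_<intno> label; for every other vector it
 * emits nothing, so unhooked vectors cost nothing. */
static bool do_kvm_hooks_vector(unsigned int intno)
{
	switch (intno) {
	case 0x100:	/* system reset */
	case 0x200:	/* machine check */
	case 0x300:	/* data storage */
	case 0x380:	/* data segment */
	case 0x400:	/* instruction storage */
	case 0x480:	/* instruction segment */
	case 0x500:	/* external */
	case 0x600:	/* alignment */
	case 0x700:	/* program */
	case 0x800:	/* FP unavailable */
	case 0x900:	/* decrementer */
	case 0xc00:	/* syscall */
	case 0xd00:	/* trace */
	case 0xf00:	/* performance monitor */
	case 0xf20:	/* AltiVec unavailable */
	case 0xf40:	/* VSX unavailable */
		return true;	/* trampoline branch is emitted */
	default:
		return false;	/* vector is left untouched */
	}
}
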
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a011603..dc3ccdf 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -23,7 +23,7 @@ 
 #include <asm/page.h>
 #include <asm/exception-64e.h>
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 #endif
 
 register struct paca_struct *local_paca asm("r13");
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index bed9a29..844a44b 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -37,7 +37,7 @@ 
 #include <asm/firmware.h>
 #include <asm/page_64.h>
 #include <asm/irqflags.h>
-#include <asm/kvm_book3s_64_asm.h>
+#include <asm/kvm_book3s_asm.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -169,7 +169,7 @@  exception_marker:
 /* KVM trampoline code needs to be close to the interrupt handlers */
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
-#include "../kvm/book3s_64_rmhandlers.S"
+#include "../kvm/book3s_rmhandlers.S"
 #endif
 
 _GLOBAL(generic_secondary_thread_init)
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index eba721e..0a67310 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -14,7 +14,7 @@  CFLAGS_emulate.o  := -I.
 
 common-objs-y += powerpc.o emulate.o
 obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
-obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_64_exports.o
+obj-$(CONFIG_KVM_BOOK3S_64_HANDLER) += book3s_exports.o
 
 AFLAGS_booke_interrupts.o := -I$(obj)
 
@@ -43,8 +43,8 @@  kvm-book3s_64-objs := \
 	fpu.o \
 	book3s_paired_singles.o \
 	book3s.o \
-	book3s_64_emulate.o \
-	book3s_64_interrupts.o \
+	book3s_emulate.o \
+	book3s_interrupts.o \
 	book3s_64_mmu_host.o \
 	book3s_64_mmu.o \
 	book3s_32_mmu.o
diff --git a/arch/powerpc/kvm/book3s_64_emulate.c b/arch/powerpc/kvm/book3s_64_emulate.c
deleted file mode 100644
index 8f50776..0000000
--- a/arch/powerpc/kvm/book3s_64_emulate.c
+++ /dev/null
@@ -1,566 +0,0 @@ 
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- * Copyright SUSE Linux Products GmbH 2009
- *
- * Authors: Alexander Graf <agraf@suse.de>
- */
-
-#include <asm/kvm_ppc.h>
-#include <asm/disassemble.h>
-#include <asm/kvm_book3s.h>
-#include <asm/reg.h>
-
-#define OP_19_XOP_RFID		18
-#define OP_19_XOP_RFI		50
-
-#define OP_31_XOP_MFMSR		83
-#define OP_31_XOP_MTMSR		146
-#define OP_31_XOP_MTMSRD	178
-#define OP_31_XOP_MTSR		210
-#define OP_31_XOP_MTSRIN	242
-#define OP_31_XOP_TLBIEL	274
-#define OP_31_XOP_TLBIE		306
-#define OP_31_XOP_SLBMTE	402
-#define OP_31_XOP_SLBIE		434
-#define OP_31_XOP_SLBIA		498
-#define OP_31_XOP_MFSR		595
-#define OP_31_XOP_MFSRIN	659
-#define OP_31_XOP_DCBA		758
-#define OP_31_XOP_SLBMFEV	851
-#define OP_31_XOP_EIOIO		854
-#define OP_31_XOP_SLBMFEE	915
-
-/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
-#define OP_31_XOP_DCBZ		1010
-
-#define OP_LFS			48
-#define OP_LFD			50
-#define OP_STFS			52
-#define OP_STFD			54
-
-#define SPRN_GQR0		912
-#define SPRN_GQR1		913
-#define SPRN_GQR2		914
-#define SPRN_GQR3		915
-#define SPRN_GQR4		916
-#define SPRN_GQR5		917
-#define SPRN_GQR6		918
-#define SPRN_GQR7		919
-
-int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                           unsigned int inst, int *advance)
-{
-	int emulated = EMULATE_DONE;
-
-	switch (get_op(inst)) {
-	case 19:
-		switch (get_xop(inst)) {
-		case OP_19_XOP_RFID:
-		case OP_19_XOP_RFI:
-			vcpu->arch.pc = vcpu->arch.srr0;
-			kvmppc_set_msr(vcpu, vcpu->arch.srr1);
-			*advance = 0;
-			break;
-
-		default:
-			emulated = EMULATE_FAIL;
-			break;
-		}
-		break;
-	case 31:
-		switch (get_xop(inst)) {
-		case OP_31_XOP_MFMSR:
-			kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
-			break;
-		case OP_31_XOP_MTMSRD:
-		{
-			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
-			if (inst & 0x10000) {
-				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
-				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
-			} else
-				kvmppc_set_msr(vcpu, rs);
-			break;
-		}
-		case OP_31_XOP_MTMSR:
-			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
-			break;
-		case OP_31_XOP_MFSR:
-		{
-			int srnum;
-
-			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
-			if (vcpu->arch.mmu.mfsrin) {
-				u32 sr;
-				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
-			}
-			break;
-		}
-		case OP_31_XOP_MFSRIN:
-		{
-			int srnum;
-
-			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
-			if (vcpu->arch.mmu.mfsrin) {
-				u32 sr;
-				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
-				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
-			}
-			break;
-		}
-		case OP_31_XOP_MTSR:
-			vcpu->arch.mmu.mtsrin(vcpu,
-				(inst >> 16) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
-			break;
-		case OP_31_XOP_MTSRIN:
-			vcpu->arch.mmu.mtsrin(vcpu,
-				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
-				kvmppc_get_gpr(vcpu, get_rs(inst)));
-			break;
-		case OP_31_XOP_TLBIE:
-		case OP_31_XOP_TLBIEL:
-		{
-			bool large = (inst & 0x00200000) ? true : false;
-			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
-			vcpu->arch.mmu.tlbie(vcpu, addr, large);
-			break;
-		}
-		case OP_31_XOP_EIOIO:
-			break;
-		case OP_31_XOP_SLBMTE:
-			if (!vcpu->arch.mmu.slbmte)
-				return EMULATE_FAIL;
-
-			vcpu->arch.mmu.slbmte(vcpu,
-					kvmppc_get_gpr(vcpu, get_rs(inst)),
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
-			break;
-		case OP_31_XOP_SLBIE:
-			if (!vcpu->arch.mmu.slbie)
-				return EMULATE_FAIL;
-
-			vcpu->arch.mmu.slbie(vcpu,
-					kvmppc_get_gpr(vcpu, get_rb(inst)));
-			break;
-		case OP_31_XOP_SLBIA:
-			if (!vcpu->arch.mmu.slbia)
-				return EMULATE_FAIL;
-
-			vcpu->arch.mmu.slbia(vcpu);
-			break;
-		case OP_31_XOP_SLBMFEE:
-			if (!vcpu->arch.mmu.slbmfee) {
-				emulated = EMULATE_FAIL;
-			} else {
-				ulong t, rb;
-
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
-			}
-			break;
-		case OP_31_XOP_SLBMFEV:
-			if (!vcpu->arch.mmu.slbmfev) {
-				emulated = EMULATE_FAIL;
-			} else {
-				ulong t, rb;
-
-				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
-				kvmppc_set_gpr(vcpu, get_rt(inst), t);
-			}
-			break;
-		case OP_31_XOP_DCBA:
-			/* Gets treated as NOP */
-			break;
-		case OP_31_XOP_DCBZ:
-		{
-			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
-			ulong ra = 0;
-			ulong addr, vaddr;
-			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
-			u32 dsisr;
-			int r;
-
-			if (get_ra(inst))
-				ra = kvmppc_get_gpr(vcpu, get_ra(inst));
-
-			addr = (ra + rb) & ~31ULL;
-			if (!(vcpu->arch.msr & MSR_SF))
-				addr &= 0xffffffff;
-			vaddr = addr;
-
-			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
-			if ((r == -ENOENT) || (r == -EPERM)) {
-				*advance = 0;
-				vcpu->arch.dear = vaddr;
-				vcpu->arch.fault_dear = vaddr;
-
-				dsisr = DSISR_ISSTORE;
-				if (r == -ENOENT)
-					dsisr |= DSISR_NOHPTE;
-				else if (r == -EPERM)
-					dsisr |= DSISR_PROTFAULT;
-
-				to_book3s(vcpu)->dsisr = dsisr;
-				vcpu->arch.fault_dsisr = dsisr;
-
-				kvmppc_book3s_queue_irqprio(vcpu,
-					BOOK3S_INTERRUPT_DATA_STORAGE);
-			}
-
-			break;
-		}
-		default:
-			emulated = EMULATE_FAIL;
-		}
-		break;
-	default:
-		emulated = EMULATE_FAIL;
-	}
-
-	if (emulated == EMULATE_FAIL)
-		emulated = kvmppc_emulate_paired_single(run, vcpu);
-
-	return emulated;
-}
-
-void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
-                    u32 val)
-{
-	if (upper) {
-		/* Upper BAT */
-		u32 bl = (val >> 2) & 0x7ff;
-		bat->bepi_mask = (~bl << 17);
-		bat->bepi = val & 0xfffe0000;
-		bat->vs = (val & 2) ? 1 : 0;
-		bat->vp = (val & 1) ? 1 : 0;
-		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
-	} else {
-		/* Lower BAT */
-		bat->brpn = val & 0xfffe0000;
-		bat->wimg = (val >> 3) & 0xf;
-		bat->pp = val & 3;
-		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
-	}
-}
-
-static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
-{
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-	struct kvmppc_bat *bat;
-
-	switch (sprn) {
-	case SPRN_IBAT0U ... SPRN_IBAT3L:
-		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
-		break;
-	case SPRN_IBAT4U ... SPRN_IBAT7L:
-		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
-		break;
-	case SPRN_DBAT0U ... SPRN_DBAT3L:
-		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
-		break;
-	case SPRN_DBAT4U ... SPRN_DBAT7L:
-		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
-		break;
-	default:
-		BUG();
-	}
-
-	if (sprn % 2)
-		return bat->raw >> 32;
-	else
-		return bat->raw;
-}
-
-static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
-{
-	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
-	struct kvmppc_bat *bat;
-
-	switch (sprn) {
-	case SPRN_IBAT0U ... SPRN_IBAT3L:
-		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
-		break;
-	case SPRN_IBAT4U ... SPRN_IBAT7L:
-		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
-		break;
-	case SPRN_DBAT0U ... SPRN_DBAT3L:
-		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
-		break;
-	case SPRN_DBAT4U ... SPRN_DBAT7L:
-		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
-		break;
-	default:
-		BUG();
-	}
-
-	kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
-}
-
-int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
-{
-	int emulated = EMULATE_DONE;
-	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
-
-	switch (sprn) {
-	case SPRN_SDR1:
-		to_book3s(vcpu)->sdr1 = spr_val;
-		break;
-	case SPRN_DSISR:
-		to_book3s(vcpu)->dsisr = spr_val;
-		break;
-	case SPRN_DAR:
-		vcpu->arch.dear = spr_val;
-		break;
-	case SPRN_HIOR:
-		to_book3s(vcpu)->hior = spr_val;
-		break;
-	case SPRN_IBAT0U ... SPRN_IBAT3L:
-	case SPRN_IBAT4U ... SPRN_IBAT7L:
-	case SPRN_DBAT0U ... SPRN_DBAT3L:
-	case SPRN_DBAT4U ... SPRN_DBAT7L:
-		kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
-		/* BAT writes happen so rarely that we're ok to flush
-		 * everything here */
-		kvmppc_mmu_pte_flush(vcpu, 0, 0);
-		kvmppc_mmu_flush_segments(vcpu);
-		break;
-	case SPRN_HID0:
-		to_book3s(vcpu)->hid[0] = spr_val;
-		break;
-	case SPRN_HID1:
-		to_book3s(vcpu)->hid[1] = spr_val;
-		break;
-	case SPRN_HID2:
-		to_book3s(vcpu)->hid[2] = spr_val;
-		break;
-	case SPRN_HID2_GEKKO:
-		to_book3s(vcpu)->hid[2] = spr_val;
-		/* HID2.PSE controls paired single on gekko */
-		switch (vcpu->arch.pvr) {
-		case 0x00080200:	/* lonestar 2.0 */
-		case 0x00088202:	/* lonestar 2.2 */
-		case 0x70000100:	/* gekko 1.0 */
-		case 0x00080100:	/* gekko 2.0 */
-		case 0x00083203:	/* gekko 2.3a */
-		case 0x00083213:	/* gekko 2.3b */
-		case 0x00083204:	/* gekko 2.4 */
-		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
-			if (spr_val & (1 << 29)) { /* HID2.PSE */
-				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
-				kvmppc_giveup_ext(vcpu, MSR_FP);
-			} else {
-				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
-			}
-			break;
-		}
-		break;
-	case SPRN_HID4:
-	case SPRN_HID4_GEKKO:
-		to_book3s(vcpu)->hid[4] = spr_val;
-		break;
-	case SPRN_HID5:
-		to_book3s(vcpu)->hid[5] = spr_val;
-		/* guest HID5 set can change is_dcbz32 */
-		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
-		    (mfmsr() & MSR_HV))
-			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
-		break;
-	case SPRN_GQR0:
-	case SPRN_GQR1:
-	case SPRN_GQR2:
-	case SPRN_GQR3:
-	case SPRN_GQR4:
-	case SPRN_GQR5:
-	case SPRN_GQR6:
-	case SPRN_GQR7:
-		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
-		break;
-	case SPRN_ICTC:
-	case SPRN_THRM1:
-	case SPRN_THRM2:
-	case SPRN_THRM3:
-	case SPRN_CTRLF:
-	case SPRN_CTRLT:
-	case SPRN_L2CR:
-	case SPRN_MMCR0_GEKKO:
-	case SPRN_MMCR1_GEKKO:
-	case SPRN_PMC1_GEKKO:
-	case SPRN_PMC2_GEKKO:
-	case SPRN_PMC3_GEKKO:
-	case SPRN_PMC4_GEKKO:
-	case SPRN_WPAR_GEKKO:
-		break;
-	default:
-		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
-#ifndef DEBUG_SPR
-		emulated = EMULATE_FAIL;
-#endif
-		break;
-	}
-
-	return emulated;
-}
-
-int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
-{
-	int emulated = EMULATE_DONE;
-
-	switch (sprn) {
-	case SPRN_IBAT0U ... SPRN_IBAT3L:
-	case SPRN_IBAT4U ... SPRN_IBAT7L:
-	case SPRN_DBAT0U ... SPRN_DBAT3L:
-	case SPRN_DBAT4U ... SPRN_DBAT7L:
-		kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
-		break;
-	case SPRN_SDR1:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
-		break;
-	case SPRN_DSISR:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
-		break;
-	case SPRN_DAR:
-		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
-		break;
-	case SPRN_HIOR:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
-		break;
-	case SPRN_HID0:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
-		break;
-	case SPRN_HID1:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
-		break;
-	case SPRN_HID2:
-	case SPRN_HID2_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
-		break;
-	case SPRN_HID4:
-	case SPRN_HID4_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
-		break;
-	case SPRN_HID5:
-		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
-		break;
-	case SPRN_GQR0:
-	case SPRN_GQR1:
-	case SPRN_GQR2:
-	case SPRN_GQR3:
-	case SPRN_GQR4:
-	case SPRN_GQR5:
-	case SPRN_GQR6:
-	case SPRN_GQR7:
-		kvmppc_set_gpr(vcpu, rt,
-			       to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
-		break;
-	case SPRN_THRM1:
-	case SPRN_THRM2:
-	case SPRN_THRM3:
-	case SPRN_CTRLF:
-	case SPRN_CTRLT:
-	case SPRN_L2CR:
-	case SPRN_MMCR0_GEKKO:
-	case SPRN_MMCR1_GEKKO:
-	case SPRN_PMC1_GEKKO:
-	case SPRN_PMC2_GEKKO:
-	case SPRN_PMC3_GEKKO:
-	case SPRN_PMC4_GEKKO:
-	case SPRN_WPAR_GEKKO:
-		kvmppc_set_gpr(vcpu, rt, 0);
-		break;
-	default:
-		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
-#ifndef DEBUG_SPR
-		emulated = EMULATE_FAIL;
-#endif
-		break;
-	}
-
-	return emulated;
-}
-
-u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
-{
-	u32 dsisr = 0;
-
-	/*
-	 * This is what the spec says about DSISR bits (not mentioned = 0):
-	 *
-	 * 12:13		[DS]	Set to bits 30:31
-	 * 15:16		[X]	Set to bits 29:30
-	 * 17			[X]	Set to bit 25
-	 *			[D/DS]	Set to bit 5
-	 * 18:21		[X]	Set to bits 21:24
-	 *			[D/DS]	Set to bits 1:4
-	 * 22:26			Set to bits 6:10 (RT/RS/FRT/FRS)
-	 * 27:31			Set to bits 11:15 (RA)
-	 */
-
-	switch (get_op(inst)) {
-	/* D-form */
-	case OP_LFS:
-	case OP_LFD:
-	case OP_STFD:
-	case OP_STFS:
-		dsisr |= (inst >> 12) & 0x4000;	/* bit 17 */
-		dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
-		break;
-	/* X-form */
-	case 31:
-		dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
-		dsisr |= (inst << 8)  & 0x04000; /* bit 17 */
-		dsisr |= (inst << 3)  & 0x03c00; /* bits 18:21 */
-		break;
-	default:
-		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
-		break;
-	}
-
-	dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
-
-	return dsisr;
-}
-
-ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
-{
-	ulong dar = 0;
-	ulong ra;
-
-	switch (get_op(inst)) {
-	case OP_LFS:
-	case OP_LFD:
-	case OP_STFD:
-	case OP_STFS:
-		ra = get_ra(inst);
-		if (ra)
-			dar = kvmppc_get_gpr(vcpu, ra);
-		dar += (s32)((s16)inst);
-		break;
-	case 31:
-		ra = get_ra(inst);
-		if (ra)
-			dar = kvmppc_get_gpr(vcpu, ra);
-		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
-		break;
-	default:
-		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
-		break;
-	}
-
-	return dar;
-}
diff --git a/arch/powerpc/kvm/book3s_64_exports.c b/arch/powerpc/kvm/book3s_64_exports.c
deleted file mode 100644
index 1dd5a1d..0000000
--- a/arch/powerpc/kvm/book3s_64_exports.c
+++ /dev/null
@@ -1,32 +0,0 @@ 
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- * Copyright SUSE Linux Products GmbH 2009
- *
- * Authors: Alexander Graf <agraf@suse.de>
- */
-
-#include <linux/module.h>
-#include <asm/kvm_book3s.h>
-
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
-EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
-EXPORT_SYMBOL_GPL(kvmppc_rmcall);
-EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
-#endif
-#ifdef CONFIG_VSX
-EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
-#endif
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
deleted file mode 100644
index faca876..0000000
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ /dev/null
@@ -1,318 +0,0 @@ 
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- * Copyright SUSE Linux Products GmbH 2009
- *
- * Authors: Alexander Graf <agraf@suse.de>
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/kvm_asm.h>
-#include <asm/reg.h>
-#include <asm/page.h>
-#include <asm/asm-offsets.h>
-#include <asm/exception-64s.h>
-
-#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
-#define ULONG_SIZE 8
-#define VCPU_GPR(n)     (VCPU_GPRS + (n * ULONG_SIZE))
-
-.macro DISABLE_INTERRUPTS
-       mfmsr   r0
-       rldicl  r0,r0,48,1
-       rotldi  r0,r0,16
-       mtmsrd  r0,1
-.endm
-
-#define VCPU_LOAD_NVGPRS(vcpu) \
-	ld	r14, VCPU_GPR(r14)(vcpu); \
-	ld	r15, VCPU_GPR(r15)(vcpu); \
-	ld	r16, VCPU_GPR(r16)(vcpu); \
-	ld	r17, VCPU_GPR(r17)(vcpu); \
-	ld	r18, VCPU_GPR(r18)(vcpu); \
-	ld	r19, VCPU_GPR(r19)(vcpu); \
-	ld	r20, VCPU_GPR(r20)(vcpu); \
-	ld	r21, VCPU_GPR(r21)(vcpu); \
-	ld	r22, VCPU_GPR(r22)(vcpu); \
-	ld	r23, VCPU_GPR(r23)(vcpu); \
-	ld	r24, VCPU_GPR(r24)(vcpu); \
-	ld	r25, VCPU_GPR(r25)(vcpu); \
-	ld	r26, VCPU_GPR(r26)(vcpu); \
-	ld	r27, VCPU_GPR(r27)(vcpu); \
-	ld	r28, VCPU_GPR(r28)(vcpu); \
-	ld	r29, VCPU_GPR(r29)(vcpu); \
-	ld	r30, VCPU_GPR(r30)(vcpu); \
-	ld	r31, VCPU_GPR(r31)(vcpu); \
-
-/*****************************************************************************
- *                                                                           *
- *     Guest entry / exit code that is in kernel module memory (highmem)     *
- *                                                                           *
- ****************************************************************************/
-
-/* Registers:
- *  r3: kvm_run pointer
- *  r4: vcpu pointer
- */
-_GLOBAL(__kvmppc_vcpu_entry)
-
-kvm_start_entry:
-	/* Write correct stack frame */
-	mflr    r0
-	std     r0,16(r1)
-
-	/* Save host state to the stack */
-	stdu	r1, -SWITCH_FRAME_SIZE(r1)
-
-	/* Save r3 (kvm_run) and r4 (vcpu) */
-	SAVE_2GPRS(3, r1)
-
-	/* Save non-volatile registers (r14 - r31) */
-	SAVE_NVGPRS(r1)
-
-	/* Save LR */
-	std	r0, _LINK(r1)
-
-	/* Load non-volatile guest state from the vcpu */
-	VCPU_LOAD_NVGPRS(r4)
-
-	/* Save R1/R2 in the PACA */
-	std	r1, PACA_KVM_HOST_R1(r13)
-	std	r2, PACA_KVM_HOST_R2(r13)
-
-	/* XXX swap in/out on load? */
-	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
-	std	r3, PACA_KVM_VMHANDLER(r13)
-
-kvm_start_lightweight:
-
-	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
-	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
-	/* Load some guest state in the respective registers */
-	ld	r5, VCPU_CTR(r4)	/* r5 = vcpu->arch.ctr */
-					/* will be swapped in by rmcall */
-
-	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
-	mtlr	r3			/* LR = r3 */
-
-	DISABLE_INTERRUPTS
-
-	/* Some guests may need to have dcbz set to 32 byte length.
-	 *
-	 * Usually we ensure that by patching the guest's instructions
-	 * to trap on dcbz and emulate it in the hypervisor.
-	 *
-	 * If we can, we should tell the CPU to use 32 byte dcbz though,
-	 * because that's a lot faster.
-	 */
-
-	ld	r3, VCPU_HFLAGS(r4)
-	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
-	beq	no_dcbz32_on
-
-	mfspr   r3,SPRN_HID5
-	ori     r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
-	mtspr   SPRN_HID5,r3
-
-no_dcbz32_on:
-
-	ld	r6, VCPU_RMCALL(r4)
-	mtctr	r6
-
-	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-
-	/* Jump to SLB patching handler and into our guest */
-	bctr
-
-/*
- * This is the handler in module memory. It gets jumped at from the
- * lowmem trampoline code, so it's basically the guest exit code.
- *
- */
-
-.global kvmppc_handler_highmem
-kvmppc_handler_highmem:
-
-	/*
-	 * Register usage at this point:
-	 *
-	 * R0         = guest last inst
-	 * R1         = host R1
-	 * R2         = host R2
-	 * R3         = guest PC
-	 * R4         = guest MSR
-	 * R5         = guest DAR
-	 * R6         = guest DSISR
-	 * R13        = PACA
-	 * PACA.KVM.* = guest *
-	 *
-	 */
-
-	/* R7 = vcpu */
-	ld	r7, GPR4(r1)
-
-	/* Now save the guest state */
-
-	stw	r0, VCPU_LAST_INST(r7)
-
-	std	r3, VCPU_PC(r7)
-	std	r4, VCPU_SHADOW_SRR1(r7)
-	std	r5, VCPU_FAULT_DEAR(r7)
-	stw	r6, VCPU_FAULT_DSISR(r7)
-
-	ld	r5, VCPU_HFLAGS(r7)
-	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
-	beq	no_dcbz32_off
-
-	li	r4, 0
-	mfspr   r5,SPRN_HID5
-	rldimi  r5,r4,6,56
-	mtspr   SPRN_HID5,r5
-
-no_dcbz32_off:
-
-	std	r14, VCPU_GPR(r14)(r7)
-	std	r15, VCPU_GPR(r15)(r7)
-	std	r16, VCPU_GPR(r16)(r7)
-	std	r17, VCPU_GPR(r17)(r7)
-	std	r18, VCPU_GPR(r18)(r7)
-	std	r19, VCPU_GPR(r19)(r7)
-	std	r20, VCPU_GPR(r20)(r7)
-	std	r21, VCPU_GPR(r21)(r7)
-	std	r22, VCPU_GPR(r22)(r7)
-	std	r23, VCPU_GPR(r23)(r7)
-	std	r24, VCPU_GPR(r24)(r7)
-	std	r25, VCPU_GPR(r25)(r7)
-	std	r26, VCPU_GPR(r26)(r7)
-	std	r27, VCPU_GPR(r27)(r7)
-	std	r28, VCPU_GPR(r28)(r7)
-	std	r29, VCPU_GPR(r29)(r7)
-	std	r30, VCPU_GPR(r30)(r7)
-	std	r31, VCPU_GPR(r31)(r7)
-
-	/* Save guest CTR */
-	mfctr	r5
-	std	r5, VCPU_CTR(r7)
-
-	/* Save guest LR */
-	mflr	r5
-	std	r5, VCPU_LR(r7)
-
-	/* Restore host msr -> SRR1 */
-	ld	r6, VCPU_HOST_MSR(r7)
-
-	/*
-	 * For some interrupts, we need to call the real Linux
-	 * handler, so it can do work for us. This has to happen
-	 * as if the interrupt arrived from the kernel though,
-	 * so let's fake it here where most state is restored.
-	 *
-	 * Call Linux for hardware interrupts/decrementer
-	 * r3 = address of interrupt handler (exit reason)
-	 */
-
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	call_linux_handler
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	call_linux_handler
-
-	/* Back to EE=1 */
-	mtmsr	r6
-	b	kvm_return_point
-
-call_linux_handler:
-
-	/*
-	 * If we land here we need to jump back to the handler we
-	 * came from.
-	 *
-	 * We have a page that we can access from real mode, so let's
-	 * jump back to that and use it as a trampoline to get back into the
-	 * interrupt handler!
-	 *
-	 * R3 still contains the exit code,
-	 * R5 VCPU_HOST_RETIP and
-	 * R6 VCPU_HOST_MSR
-	 */
-
-	/* Restore host IP -> SRR0 */
-	ld	r5, VCPU_HOST_RETIP(r7)
-
-	/* XXX Better move to a safe function?
-	 *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
-
-	mtlr	r12
-
-	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
-	mtsrr0	r4
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
-
-	RFI
-
-.global kvm_return_point
-kvm_return_point:
-
-	/* Jump back to lightweight entry if we're supposed to */
-	/* go back into the guest */
-
-	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
-
-	/* Restore r3 (kvm_run) and r4 (vcpu) */
-	REST_2GPRS(3, r1)
-	bl	KVMPPC_HANDLE_EXIT
-
-	/* If RESUME_GUEST, get back in the loop */
-	cmpwi	r3, RESUME_GUEST
-	beq	kvm_loop_lightweight
-
-	cmpwi	r3, RESUME_GUEST_NV
-	beq	kvm_loop_heavyweight
-
-kvm_exit_loop:
-
-	ld	r4, _LINK(r1)
-	mtlr	r4
-
-	/* Restore non-volatile host registers (r14 - r31) */
-	REST_NVGPRS(r1)
-
-	addi    r1, r1, SWITCH_FRAME_SIZE
-	blr
-
-kvm_loop_heavyweight:
-
-	ld	r4, _LINK(r1)
-	std     r4, (16 + SWITCH_FRAME_SIZE)(r1)
-
-	/* Load vcpu and cpu_run */
-	REST_2GPRS(3, r1)
-
-	/* Load non-volatile guest state from the vcpu */
-	VCPU_LOAD_NVGPRS(r4)
-
-	/* Jump back into the beginning of this function */
-	b	kvm_start_lightweight
-
-kvm_loop_lightweight:
-
-	/* We'll need the vcpu pointer */
-	REST_GPR(4, r1)
-
-	/* Jump back into the beginning of this function */
-	b	kvm_start_lightweight
-
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
deleted file mode 100644
index bd08535..0000000
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ /dev/null
@@ -1,195 +0,0 @@ 
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
- *
- * Copyright SUSE Linux Products GmbH 2009
- *
- * Authors: Alexander Graf <agraf@suse.de>
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/kvm_asm.h>
-#include <asm/reg.h>
-#include <asm/page.h>
-#include <asm/asm-offsets.h>
-#include <asm/exception-64s.h>
-
-/*****************************************************************************
- *                                                                           *
- *        Real Mode handlers that need to be in low physical memory          *
- *                                                                           *
- ****************************************************************************/
-
-
-.macro INTERRUPT_TRAMPOLINE intno
-
-.global kvmppc_trampoline_\intno
-kvmppc_trampoline_\intno:
-
-	mtspr	SPRN_SPRG_SCRATCH0, r13		/* Save r13 */
-
-	/*
-	 * First thing to do is to find out if we're coming
-	 * from a KVM guest or a Linux process.
-	 *
-	 * To distinguish, we check a magic byte in the PACA
-	 */
-	mfspr	r13, SPRN_SPRG_PACA		/* r13 = PACA */
-	std	r12, PACA_KVM_SCRATCH0(r13)
-	mfcr	r12
-	stw	r12, PACA_KVM_SCRATCH1(r13)
-	lbz	r12, PACA_KVM_IN_GUEST(r13)
-	cmpwi	r12, KVM_GUEST_MODE_NONE
-	bne	..kvmppc_handler_hasmagic_\intno
-	/* No KVM guest? Then jump back to the Linux handler! */
-	lwz	r12, PACA_KVM_SCRATCH1(r13)
-	mtcr	r12
-	ld	r12, PACA_KVM_SCRATCH0(r13)
-	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
-	b	kvmppc_resume_\intno		/* Get back original handler */
-
-	/* Now we know we're handling a KVM guest */
-..kvmppc_handler_hasmagic_\intno:
-
-	/* Should we just skip the faulting instruction? */
-	cmpwi	r12, KVM_GUEST_MODE_SKIP
-	beq	kvmppc_handler_skip_ins
-
-	/* Let's store which interrupt we're handling */
-	li	r12, \intno
-
-	/* Jump into the SLB exit code that goes to the highmem handler */
-	b	kvmppc_handler_trampoline_exit
-
-.endm
-
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSTEM_RESET
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_SEGMENT
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_SEGMENT
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_FP_UNAVAIL
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DECREMENTER
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSCALL
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC
-INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
-
-/*
- * Bring us back to the faulting code, but skip the
- * faulting instruction.
- *
- * This is a generic exit path from the interrupt
- * trampolines above.
- *
- * Input Registers:
- *
- * R12               = free
- * R13               = PACA
- * PACA.KVM.SCRATCH0 = guest R12
- * PACA.KVM.SCRATCH1 = guest CR
- * SPRG_SCRATCH0     = guest R13
- *
- */
-kvmppc_handler_skip_ins:
-
-	/* Patch the IP to the next instruction */
-	mfsrr0	r12
-	addi	r12, r12, 4
-	mtsrr0	r12
-
-	/* Clean up all state */
-	lwz	r12, PACA_KVM_SCRATCH1(r13)
-	mtcr	r12
-	ld	r12, PACA_KVM_SCRATCH0(r13)
-	mfspr	r13, SPRN_SPRG_SCRATCH0
-
-	/* And get back into the code */
-	RFI
-
-/*
- * This trampoline brings us back to a real mode handler
- *
- * Input Registers:
- *
- * R5 = SRR0
- * R6 = SRR1
- * LR = real-mode IP
- *
- */
-.global kvmppc_handler_lowmem_trampoline
-kvmppc_handler_lowmem_trampoline:
-
-	mtsrr0	r5
-	mtsrr1	r6
-	blr
-kvmppc_handler_lowmem_trampoline_end:
-
-/*
- * Call a function in real mode
- *
- * Input Registers:
- *
- * R3 = function
- * R4 = MSR
- * R5 = CTR
- *
- */
-_GLOBAL(kvmppc_rmcall)
-	mtmsr	r4		/* Disable relocation, so mtsrr
-				   doesn't get interrupted */
-	mtctr	r5
-	mtsrr0	r3
-	mtsrr1	r4
-	RFI
-
-/*
- * Activate current's external feature (FPU/Altivec/VSX)
- */
-#define define_load_up(what) 				\
-							\
-_GLOBAL(kvmppc_load_up_ ## what);			\
-	stdu	r1, -INT_FRAME_SIZE(r1);		\
-	mflr	r3;					\
-	std	r3, _LINK(r1);				\
-							\
-	bl	.load_up_ ## what;			\
-							\
-	ld	r3, _LINK(r1);				\
-	mtlr	r3;					\
-	addi	r1, r1, INT_FRAME_SIZE;			\
-	blr
-
-define_load_up(fpu)
-#ifdef CONFIG_ALTIVEC
-define_load_up(altivec)
-#endif
-#ifdef CONFIG_VSX
-define_load_up(vsx)
-#endif
-
-.global kvmppc_trampoline_lowmem
-kvmppc_trampoline_lowmem:
-	.long kvmppc_handler_lowmem_trampoline - _stext
-
-.global kvmppc_trampoline_enter
-kvmppc_trampoline_enter:
-	.long kvmppc_handler_trampoline_enter - _stext
-
-#include "book3s_64_slb.S"
-
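
The logic that moves with this file is the interesting part of the rename: on every hooked exception the trampoline must decide, still in real mode, whether the CPU was running a KVM guest. A loose C rendering of that dispatch follows; the helper names are invented for illustration, and the real code is the INTERRUPT_TRAMPOLINE assembly above.

enum kvm_guest_mode { KVM_GUEST_MODE_NONE, KVM_GUEST_MODE_GUEST, KVM_GUEST_MODE_SKIP };

/* Stubs standing in for the assembly exit paths; purely illustrative. */
static void resume_linux_handler(int intno)  { /* b kvmppc_resume_<intno> */ }
static void skip_faulting_instruction(void)  { /* srr0 += 4, then rfi */ }
static void exit_guest_to_highmem(int intno) { /* b kvmppc_handler_trampoline_exit */ }

/* in_guest is the PACA_KVM_IN_GUEST magic byte the trampoline checks. */
static void trampoline_dispatch(int in_guest, int intno)
{
	if (in_guest == KVM_GUEST_MODE_NONE)
		resume_linux_handler(intno);	/* no guest: back to the Linux handler */
	else if (in_guest == KVM_GUEST_MODE_SKIP)
		skip_faulting_instruction();	/* emulator asked to skip one instruction */
	else
		exit_guest_to_highmem(intno);	/* full guest exit via the SLB exit code */
}
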
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
new file mode 100644
index 0000000..8f50776
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -0,0 +1,566 @@ 
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/kvm_ppc.h>
+#include <asm/disassemble.h>
+#include <asm/kvm_book3s.h>
+#include <asm/reg.h>
+
+#define OP_19_XOP_RFID		18
+#define OP_19_XOP_RFI		50
+
+#define OP_31_XOP_MFMSR		83
+#define OP_31_XOP_MTMSR		146
+#define OP_31_XOP_MTMSRD	178
+#define OP_31_XOP_MTSR		210
+#define OP_31_XOP_MTSRIN	242
+#define OP_31_XOP_TLBIEL	274
+#define OP_31_XOP_TLBIE		306
+#define OP_31_XOP_SLBMTE	402
+#define OP_31_XOP_SLBIE		434
+#define OP_31_XOP_SLBIA		498
+#define OP_31_XOP_MFSR		595
+#define OP_31_XOP_MFSRIN	659
+#define OP_31_XOP_DCBA		758
+#define OP_31_XOP_SLBMFEV	851
+#define OP_31_XOP_EIOIO		854
+#define OP_31_XOP_SLBMFEE	915
+
+/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
+#define OP_31_XOP_DCBZ		1010
+
+#define OP_LFS			48
+#define OP_LFD			50
+#define OP_STFS			52
+#define OP_STFD			54
+
+#define SPRN_GQR0		912
+#define SPRN_GQR1		913
+#define SPRN_GQR2		914
+#define SPRN_GQR3		915
+#define SPRN_GQR4		916
+#define SPRN_GQR5		917
+#define SPRN_GQR6		918
+#define SPRN_GQR7		919
+
+int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                           unsigned int inst, int *advance)
+{
+	int emulated = EMULATE_DONE;
+
+	switch (get_op(inst)) {
+	case 19:
+		switch (get_xop(inst)) {
+		case OP_19_XOP_RFID:
+		case OP_19_XOP_RFI:
+			vcpu->arch.pc = vcpu->arch.srr0;
+			kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+			*advance = 0;
+			break;
+
+		default:
+			emulated = EMULATE_FAIL;
+			break;
+		}
+		break;
+	case 31:
+		switch (get_xop(inst)) {
+		case OP_31_XOP_MFMSR:
+			kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
+			break;
+		case OP_31_XOP_MTMSRD:
+		{
+			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
+			if (inst & 0x10000) {
+				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
+				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
+			} else
+				kvmppc_set_msr(vcpu, rs);
+			break;
+		}
+		case OP_31_XOP_MTMSR:
+			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
+			break;
+		case OP_31_XOP_MFSR:
+		{
+			int srnum;
+
+			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
+			if (vcpu->arch.mmu.mfsrin) {
+				u32 sr;
+				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
+				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+			}
+			break;
+		}
+		case OP_31_XOP_MFSRIN:
+		{
+			int srnum;
+
+			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
+			if (vcpu->arch.mmu.mfsrin) {
+				u32 sr;
+				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
+				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
+			}
+			break;
+		}
+		case OP_31_XOP_MTSR:
+			vcpu->arch.mmu.mtsrin(vcpu,
+				(inst >> 16) & 0xf,
+				kvmppc_get_gpr(vcpu, get_rs(inst)));
+			break;
+		case OP_31_XOP_MTSRIN:
+			vcpu->arch.mmu.mtsrin(vcpu,
+				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
+				kvmppc_get_gpr(vcpu, get_rs(inst)));
+			break;
+		case OP_31_XOP_TLBIE:
+		case OP_31_XOP_TLBIEL:
+		{
+			bool large = (inst & 0x00200000) ? true : false;
+			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
+			vcpu->arch.mmu.tlbie(vcpu, addr, large);
+			break;
+		}
+		case OP_31_XOP_EIOIO:
+			break;
+		case OP_31_XOP_SLBMTE:
+			if (!vcpu->arch.mmu.slbmte)
+				return EMULATE_FAIL;
+
+			vcpu->arch.mmu.slbmte(vcpu,
+					kvmppc_get_gpr(vcpu, get_rs(inst)),
+					kvmppc_get_gpr(vcpu, get_rb(inst)));
+			break;
+		case OP_31_XOP_SLBIE:
+			if (!vcpu->arch.mmu.slbie)
+				return EMULATE_FAIL;
+
+			vcpu->arch.mmu.slbie(vcpu,
+					kvmppc_get_gpr(vcpu, get_rb(inst)));
+			break;
+		case OP_31_XOP_SLBIA:
+			if (!vcpu->arch.mmu.slbia)
+				return EMULATE_FAIL;
+
+			vcpu->arch.mmu.slbia(vcpu);
+			break;
+		case OP_31_XOP_SLBMFEE:
+			if (!vcpu->arch.mmu.slbmfee) {
+				emulated = EMULATE_FAIL;
+			} else {
+				ulong t, rb;
+
+				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
+				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
+				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+			}
+			break;
+		case OP_31_XOP_SLBMFEV:
+			if (!vcpu->arch.mmu.slbmfev) {
+				emulated = EMULATE_FAIL;
+			} else {
+				ulong t, rb;
+
+				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
+				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
+				kvmppc_set_gpr(vcpu, get_rt(inst), t);
+			}
+			break;
+		case OP_31_XOP_DCBA:
+			/* Gets treated as NOP */
+			break;
+		case OP_31_XOP_DCBZ:
+		{
+			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
+			ulong ra = 0;
+			ulong addr, vaddr;
+			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
+			u32 dsisr;
+			int r;
+
+			if (get_ra(inst))
+				ra = kvmppc_get_gpr(vcpu, get_ra(inst));
+
+			addr = (ra + rb) & ~31ULL;
+			if (!(vcpu->arch.msr & MSR_SF))
+				addr &= 0xffffffff;
+			vaddr = addr;
+
+			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
+			if ((r == -ENOENT) || (r == -EPERM)) {
+				*advance = 0;
+				vcpu->arch.dear = vaddr;
+				vcpu->arch.fault_dear = vaddr;
+
+				dsisr = DSISR_ISSTORE;
+				if (r == -ENOENT)
+					dsisr |= DSISR_NOHPTE;
+				else if (r == -EPERM)
+					dsisr |= DSISR_PROTFAULT;
+
+				to_book3s(vcpu)->dsisr = dsisr;
+				vcpu->arch.fault_dsisr = dsisr;
+
+				kvmppc_book3s_queue_irqprio(vcpu,
+					BOOK3S_INTERRUPT_DATA_STORAGE);
+			}
+
+			break;
+		}
+		default:
+			emulated = EMULATE_FAIL;
+		}
+		break;
+	default:
+		emulated = EMULATE_FAIL;
+	}
+
+	if (emulated == EMULATE_FAIL)
+		emulated = kvmppc_emulate_paired_single(run, vcpu);
+
+	return emulated;
+}
+
+void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
+                    u32 val)
+{
+	if (upper) {
+		/* Upper BAT */
+		u32 bl = (val >> 2) & 0x7ff;
+		bat->bepi_mask = (~bl << 17);
+		bat->bepi = val & 0xfffe0000;
+		bat->vs = (val & 2) ? 1 : 0;
+		bat->vp = (val & 1) ? 1 : 0;
+		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
+	} else {
+		/* Lower BAT */
+		bat->brpn = val & 0xfffe0000;
+		bat->wimg = (val >> 3) & 0xf;
+		bat->pp = val & 3;
+		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
+	}
+}
+
+static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
+{
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+	struct kvmppc_bat *bat;
+
+	switch (sprn) {
+	case SPRN_IBAT0U ... SPRN_IBAT3L:
+		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
+		break;
+	case SPRN_IBAT4U ... SPRN_IBAT7L:
+		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
+		break;
+	case SPRN_DBAT0U ... SPRN_DBAT3L:
+		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
+		break;
+	case SPRN_DBAT4U ... SPRN_DBAT7L:
+		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
+		break;
+	default:
+		BUG();
+	}
+
+	if (sprn % 2)
+		return bat->raw >> 32;
+	else
+		return bat->raw;
+}
+
+static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
+{
+	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
+	struct kvmppc_bat *bat;
+
+	switch (sprn) {
+	case SPRN_IBAT0U ... SPRN_IBAT3L:
+		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
+		break;
+	case SPRN_IBAT4U ... SPRN_IBAT7L:
+		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
+		break;
+	case SPRN_DBAT0U ... SPRN_DBAT3L:
+		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
+		break;
+	case SPRN_DBAT4U ... SPRN_DBAT7L:
+		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
+		break;
+	default:
+		BUG();
+	}
+
+	kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
+}
+
+int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
+{
+	int emulated = EMULATE_DONE;
+	ulong spr_val = kvmppc_get_gpr(vcpu, rs);
+
+	switch (sprn) {
+	case SPRN_SDR1:
+		to_book3s(vcpu)->sdr1 = spr_val;
+		break;
+	case SPRN_DSISR:
+		to_book3s(vcpu)->dsisr = spr_val;
+		break;
+	case SPRN_DAR:
+		vcpu->arch.dear = spr_val;
+		break;
+	case SPRN_HIOR:
+		to_book3s(vcpu)->hior = spr_val;
+		break;
+	case SPRN_IBAT0U ... SPRN_IBAT3L:
+	case SPRN_IBAT4U ... SPRN_IBAT7L:
+	case SPRN_DBAT0U ... SPRN_DBAT3L:
+	case SPRN_DBAT4U ... SPRN_DBAT7L:
+		kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
+		/* BAT writes happen so rarely that we're ok to flush
+		 * everything here */
+		kvmppc_mmu_pte_flush(vcpu, 0, 0);
+		kvmppc_mmu_flush_segments(vcpu);
+		break;
+	case SPRN_HID0:
+		to_book3s(vcpu)->hid[0] = spr_val;
+		break;
+	case SPRN_HID1:
+		to_book3s(vcpu)->hid[1] = spr_val;
+		break;
+	case SPRN_HID2:
+		to_book3s(vcpu)->hid[2] = spr_val;
+		break;
+	case SPRN_HID2_GEKKO:
+		to_book3s(vcpu)->hid[2] = spr_val;
+		/* HID2.PSE controls paired single on gekko */
+		switch (vcpu->arch.pvr) {
+		case 0x00080200:	/* lonestar 2.0 */
+		case 0x00088202:	/* lonestar 2.2 */
+		case 0x70000100:	/* gekko 1.0 */
+		case 0x00080100:	/* gekko 2.0 */
+		case 0x00083203:	/* gekko 2.3a */
+		case 0x00083213:	/* gekko 2.3b */
+		case 0x00083204:	/* gekko 2.4 */
+		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
+			if (spr_val & (1 << 29)) { /* HID2.PSE */
+				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
+				kvmppc_giveup_ext(vcpu, MSR_FP);
+			} else {
+				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
+			}
+			break;
+		}
+		break;
+	case SPRN_HID4:
+	case SPRN_HID4_GEKKO:
+		to_book3s(vcpu)->hid[4] = spr_val;
+		break;
+	case SPRN_HID5:
+		to_book3s(vcpu)->hid[5] = spr_val;
+		/* guest HID5 set can change is_dcbz32 */
+		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
+		    (mfmsr() & MSR_HV))
+			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
+		break;
+	case SPRN_GQR0:
+	case SPRN_GQR1:
+	case SPRN_GQR2:
+	case SPRN_GQR3:
+	case SPRN_GQR4:
+	case SPRN_GQR5:
+	case SPRN_GQR6:
+	case SPRN_GQR7:
+		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
+		break;
+	case SPRN_ICTC:
+	case SPRN_THRM1:
+	case SPRN_THRM2:
+	case SPRN_THRM3:
+	case SPRN_CTRLF:
+	case SPRN_CTRLT:
+	case SPRN_L2CR:
+	case SPRN_MMCR0_GEKKO:
+	case SPRN_MMCR1_GEKKO:
+	case SPRN_PMC1_GEKKO:
+	case SPRN_PMC2_GEKKO:
+	case SPRN_PMC3_GEKKO:
+	case SPRN_PMC4_GEKKO:
+	case SPRN_WPAR_GEKKO:
+		break;
+	default:
+		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
+#ifndef DEBUG_SPR
+		emulated = EMULATE_FAIL;
+#endif
+		break;
+	}
+
+	return emulated;
+}
+
+int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
+{
+	int emulated = EMULATE_DONE;
+
+	switch (sprn) {
+	case SPRN_IBAT0U ... SPRN_IBAT3L:
+	case SPRN_IBAT4U ... SPRN_IBAT7L:
+	case SPRN_DBAT0U ... SPRN_DBAT3L:
+	case SPRN_DBAT4U ... SPRN_DBAT7L:
+		kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
+		break;
+	case SPRN_SDR1:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
+		break;
+	case SPRN_DSISR:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
+		break;
+	case SPRN_DAR:
+		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
+		break;
+	case SPRN_HIOR:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
+		break;
+	case SPRN_HID0:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
+		break;
+	case SPRN_HID1:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
+		break;
+	case SPRN_HID2:
+	case SPRN_HID2_GEKKO:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
+		break;
+	case SPRN_HID4:
+	case SPRN_HID4_GEKKO:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
+		break;
+	case SPRN_HID5:
+		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
+		break;
+	case SPRN_GQR0:
+	case SPRN_GQR1:
+	case SPRN_GQR2:
+	case SPRN_GQR3:
+	case SPRN_GQR4:
+	case SPRN_GQR5:
+	case SPRN_GQR6:
+	case SPRN_GQR7:
+		kvmppc_set_gpr(vcpu, rt,
+			       to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
+		break;
+	case SPRN_THRM1:
+	case SPRN_THRM2:
+	case SPRN_THRM3:
+	case SPRN_CTRLF:
+	case SPRN_CTRLT:
+	case SPRN_L2CR:
+	case SPRN_MMCR0_GEKKO:
+	case SPRN_MMCR1_GEKKO:
+	case SPRN_PMC1_GEKKO:
+	case SPRN_PMC2_GEKKO:
+	case SPRN_PMC3_GEKKO:
+	case SPRN_PMC4_GEKKO:
+	case SPRN_WPAR_GEKKO:
+		kvmppc_set_gpr(vcpu, rt, 0);
+		break;
+	default:
+		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
+#ifndef DEBUG_SPR
+		emulated = EMULATE_FAIL;
+#endif
+		break;
+	}
+
+	return emulated;
+}
+
+u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
+{
+	u32 dsisr = 0;
+
+	/*
+	 * This is what the spec says about DSISR bits (not mentioned = 0):
+	 *
+	 * 12:13		[DS]	Set to bits 30:31
+	 * 15:16		[X]	Set to bits 29:30
+	 * 17			[X]	Set to bit 25
+	 *			[D/DS]	Set to bit 5
+	 * 18:21		[X]	Set to bits 21:24
+	 *			[D/DS]	Set to bits 1:4
+	 * 22:26			Set to bits 6:10 (RT/RS/FRT/FRS)
+	 * 27:31			Set to bits 11:15 (RA)
+	 */
+
+	switch (get_op(inst)) {
+	/* D-form */
+	case OP_LFS:
+	case OP_LFD:
+	case OP_STFD:
+	case OP_STFS:
+		dsisr |= (inst >> 12) & 0x4000;	/* bit 17 */
+		dsisr |= (inst >> 17) & 0x3c00; /* bits 18:21 */
+		break;
+	/* X-form */
+	case 31:
+		dsisr |= (inst << 14) & 0x18000; /* bits 15:16 */
+		dsisr |= (inst << 8)  & 0x04000; /* bit 17 */
+		dsisr |= (inst << 3)  & 0x03c00; /* bits 18:21 */
+		break;
+	default:
+		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
+		break;
+	}
+
+	dsisr |= (inst >> 16) & 0x03ff; /* bits 22:31 */
+
+	return dsisr;
+}
+
+ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
+{
+	ulong dar = 0;
+	ulong ra;
+
+	switch (get_op(inst)) {
+	case OP_LFS:
+	case OP_LFD:
+	case OP_STFD:
+	case OP_STFS:
+		ra = get_ra(inst);
+		if (ra)
+			dar = kvmppc_get_gpr(vcpu, ra);
+		dar += (s32)((s16)inst);
+		break;
+	case 31:
+		ra = get_ra(inst);
+		if (ra)
+			dar = kvmppc_get_gpr(vcpu, ra);
+		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
+		break;
+	default:
+		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
+		break;
+	}
+
+	return dar;
+}
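
One detail worth flagging in the file above: the OP_31_XOP_DCBZ comment notes that dcbz is really extended opcode 1014 but gets patched to 1010 so the guest traps into this emulator. A self-contained sketch of that patching idea follows; the helper name is invented here, and the kernel's actual patcher lives elsewhere in the KVM code.

#include <stddef.h>
#include <stdint.h>

#define OP_31		31
#define XOP_DCBZ	1014	/* architected dcbz extended opcode */
#define XOP_DCBZ_TRAP	1010	/* illegal encoding: raises a program check */

static inline uint32_t get_op(uint32_t inst)  { return inst >> 26; }
static inline uint32_t get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }

/* Rewrite every dcbz in a page of guest instructions so it traps; the
 * emulator then zeroes the 32-byte line by hand (see OP_31_XOP_DCBZ). */
static void patch_dcbz_insts(uint32_t *page, size_t n_insts)
{
	size_t i;

	for (i = 0; i < n_insts; i++) {
		uint32_t inst = page[i];

		if (get_op(inst) == OP_31 && get_xop(inst) == XOP_DCBZ)
			page[i] = (inst & ~(0x3ffu << 1)) | (XOP_DCBZ_TRAP << 1);
	}
}
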
diff --git a/arch/powerpc/kvm/book3s_exports.c b/arch/powerpc/kvm/book3s_exports.c
new file mode 100644
index 0000000..1dd5a1d
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_exports.c
@@ -0,0 +1,32 @@ 
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <linux/module.h>
+#include <asm/kvm_book3s.h>
+
+EXPORT_SYMBOL_GPL(kvmppc_trampoline_enter);
+EXPORT_SYMBOL_GPL(kvmppc_trampoline_lowmem);
+EXPORT_SYMBOL_GPL(kvmppc_rmcall);
+EXPORT_SYMBOL_GPL(kvmppc_load_up_fpu);
+#ifdef CONFIG_ALTIVEC
+EXPORT_SYMBOL_GPL(kvmppc_load_up_altivec);
+#endif
+#ifdef CONFIG_VSX
+EXPORT_SYMBOL_GPL(kvmppc_load_up_vsx);
+#endif
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
new file mode 100644
index 0000000..faca876
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -0,0 +1,318 @@ 
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
+#define ULONG_SIZE 8
+#define VCPU_GPR(n)     (VCPU_GPRS + (n * ULONG_SIZE))
+
+.macro DISABLE_INTERRUPTS
+       mfmsr   r0
+       rldicl  r0,r0,48,1
+       rotldi  r0,r0,16
+       mtmsrd  r0,1
+.endm
+
+#define VCPU_LOAD_NVGPRS(vcpu) \
+	ld	r14, VCPU_GPR(r14)(vcpu); \
+	ld	r15, VCPU_GPR(r15)(vcpu); \
+	ld	r16, VCPU_GPR(r16)(vcpu); \
+	ld	r17, VCPU_GPR(r17)(vcpu); \
+	ld	r18, VCPU_GPR(r18)(vcpu); \
+	ld	r19, VCPU_GPR(r19)(vcpu); \
+	ld	r20, VCPU_GPR(r20)(vcpu); \
+	ld	r21, VCPU_GPR(r21)(vcpu); \
+	ld	r22, VCPU_GPR(r22)(vcpu); \
+	ld	r23, VCPU_GPR(r23)(vcpu); \
+	ld	r24, VCPU_GPR(r24)(vcpu); \
+	ld	r25, VCPU_GPR(r25)(vcpu); \
+	ld	r26, VCPU_GPR(r26)(vcpu); \
+	ld	r27, VCPU_GPR(r27)(vcpu); \
+	ld	r28, VCPU_GPR(r28)(vcpu); \
+	ld	r29, VCPU_GPR(r29)(vcpu); \
+	ld	r30, VCPU_GPR(r30)(vcpu); \
+	ld	r31, VCPU_GPR(r31)(vcpu); \
+
+/*****************************************************************************
+ *                                                                           *
+ *     Guest entry / exit code that is in kernel module memory (highmem)     *
+ *                                                                           *
+ ****************************************************************************/
+
+/* Registers:
+ *  r3: kvm_run pointer
+ *  r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_entry)
+
+kvm_start_entry:
+	/* Write correct stack frame */
+	mflr    r0
+	std     r0,16(r1)
+
+	/* Save host state to the stack */
+	stdu	r1, -SWITCH_FRAME_SIZE(r1)
+
+	/* Save r3 (kvm_run) and r4 (vcpu) */
+	SAVE_2GPRS(3, r1)
+
+	/* Save non-volatile registers (r14 - r31) */
+	SAVE_NVGPRS(r1)
+
+	/* Save LR */
+	std	r0, _LINK(r1)
+
+	/* Load non-volatile guest state from the vcpu */
+	VCPU_LOAD_NVGPRS(r4)
+
+	/* Save R1/R2 in the PACA */
+	std	r1, PACA_KVM_HOST_R1(r13)
+	std	r2, PACA_KVM_HOST_R2(r13)
+
+	/* XXX swap in/out on load? */
+	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
+	std	r3, PACA_KVM_VMHANDLER(r13)
+
+kvm_start_lightweight:
+
+	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
+	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
+
+	/* Load some guest state in the respective registers */
+	ld	r5, VCPU_CTR(r4)	/* r5 = vcpu->arch.ctr */
+					/* will be swapped in by rmcall */
+
+	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
+	mtlr	r3			/* LR = r3 */
+
+	DISABLE_INTERRUPTS
+
+	/* Some guests may need to have dcbz set to 32 byte length.
+	 *
+	 * Usually we ensure that by patching the guest's instructions
+	 * to trap on dcbz and emulate it in the hypervisor.
+	 *
+	 * If we can, we should tell the CPU to use 32 byte dcbz though,
+	 * because that's a lot faster.
+	 */
+
+	ld	r3, VCPU_HFLAGS(r4)
+	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
+	beq	no_dcbz32_on
+
+	mfspr   r3,SPRN_HID5
+	ori     r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
+	mtspr   SPRN_HID5,r3
+
+no_dcbz32_on:
+
+	ld	r6, VCPU_RMCALL(r4)
+	mtctr	r6
+
+	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
+	LOAD_REG_IMMEDIATE(r4, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+
+	/* Jump to SLB patching handler and into our guest */
+	bctr
+
+/*
+ * This is the handler in module memory. It gets jumped at from the
+ * lowmem trampoline code, so it's basically the guest exit code.
+ *
+ */
+
+.global kvmppc_handler_highmem
+kvmppc_handler_highmem:
+
+	/*
+	 * Register usage at this point:
+	 *
+	 * R0         = guest last inst
+	 * R1         = host R1
+	 * R2         = host R2
+	 * R3         = guest PC
+	 * R4         = guest MSR
+	 * R5         = guest DAR
+	 * R6         = guest DSISR
+	 * R13        = PACA
+	 * PACA.KVM.* = guest *
+	 *
+	 */
+
+	/* R7 = vcpu */
+	ld	r7, GPR4(r1)
+
+	/* Now save the guest state */
+
+	stw	r0, VCPU_LAST_INST(r7)
+
+	std	r3, VCPU_PC(r7)
+	std	r4, VCPU_SHADOW_SRR1(r7)
+	std	r5, VCPU_FAULT_DEAR(r7)
+	stw	r6, VCPU_FAULT_DSISR(r7)
+
+	ld	r5, VCPU_HFLAGS(r7)
+	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
+	beq	no_dcbz32_off
+
+	li	r4, 0
+	mfspr   r5,SPRN_HID5
+	rldimi  r5,r4,6,56	/* clear HID5_dcbz32 */
+	mtspr   SPRN_HID5,r5
+
+no_dcbz32_off:
+
+	std	r14, VCPU_GPR(r14)(r7)
+	std	r15, VCPU_GPR(r15)(r7)
+	std	r16, VCPU_GPR(r16)(r7)
+	std	r17, VCPU_GPR(r17)(r7)
+	std	r18, VCPU_GPR(r18)(r7)
+	std	r19, VCPU_GPR(r19)(r7)
+	std	r20, VCPU_GPR(r20)(r7)
+	std	r21, VCPU_GPR(r21)(r7)
+	std	r22, VCPU_GPR(r22)(r7)
+	std	r23, VCPU_GPR(r23)(r7)
+	std	r24, VCPU_GPR(r24)(r7)
+	std	r25, VCPU_GPR(r25)(r7)
+	std	r26, VCPU_GPR(r26)(r7)
+	std	r27, VCPU_GPR(r27)(r7)
+	std	r28, VCPU_GPR(r28)(r7)
+	std	r29, VCPU_GPR(r29)(r7)
+	std	r30, VCPU_GPR(r30)(r7)
+	std	r31, VCPU_GPR(r31)(r7)
+
+	/* Save guest CTR */
+	mfctr	r5
+	std	r5, VCPU_CTR(r7)
+
+	/* Save guest LR */
+	mflr	r5
+	std	r5, VCPU_LR(r7)
+
+	/* Load the host MSR; it goes into SRR1 or mtmsr below */
+	ld	r6, VCPU_HOST_MSR(r7)
+
+	/*
+	 * For some interrupts we need to call the real Linux
+	 * handler, so it can do work for us. This has to look
+	 * as if the interrupt arrived from the kernel, so we
+	 * fake it here, where most state has been restored.
+	 *
+	 * We call Linux for hardware interrupts and the decrementer.
+	 * r12 holds the exit reason, which is also the vector offset
+	 * and thus the real-mode address of the Linux handler.
+	 */
+
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beq	call_linux_handler
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beq	call_linux_handler
+
+	/* Back to EE=1 */
+	mtmsr	r6
+	b	kvm_return_point
+
+call_linux_handler:
+
+	/*
+	 * If we land here, we need to jump back into the handler
+	 * we came from.
+	 *
+	 * We have a page that we can access from real mode, so we
+	 * use it as a trampoline to get back into the interrupt
+	 * handler!
+	 *
+	 * R12 still contains the exit reason (and thus the handler's
+	 * real-mode address), R6 the host MSR; VCPU_HOST_RETIP is
+	 * loaded into R5 below.
+	 */
+
+	/* Host return IP; the trampoline moves it into SRR0 */
+	ld	r5, VCPU_HOST_RETIP(r7)
+
+	/* XXX Better move to a safe function?
+	 *     What if we get an HTAB flush in between mtsrr0 and mtsrr1? */
+
+	mtlr	r12		/* blr in the trampoline lands in the Linux handler */
+
+	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+	mtsrr0	r4
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
+
+	RFI
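+	/* The trampoline makes it look as if the interrupt arrived at
+	 * VCPU_HOST_RETIP, so Linux returns to kvm_return_point below */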
+
+.global kvm_return_point
+kvm_return_point:
+
+	/*
+	 * Jump back to the lightweight entry if we are supposed to
+	 * go back into the guest.
+	 */
+
+	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
+	mr	r5, r12
+
+	/* Restore r3 (kvm_run) and r4 (vcpu) */
+	REST_2GPRS(3, r1)
+	bl	KVMPPC_HANDLE_EXIT
+
+	/* If RESUME_GUEST, get back in the loop */
+	cmpwi	r3, RESUME_GUEST
+	beq	kvm_loop_lightweight
+
+	cmpwi	r3, RESUME_GUEST_NV
+	beq	kvm_loop_heavyweight
+
+kvm_exit_loop:
+
+	ld	r4, _LINK(r1)
+	mtlr	r4
+
+	/* Restore non-volatile host registers (r14 - r31) */
+	REST_NVGPRS(r1)
+
+	addi    r1, r1, SWITCH_FRAME_SIZE
+	blr
+
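+/* RESUME_GUEST_NV: the guest's non-volatile registers must be reloaded */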
+kvm_loop_heavyweight:
+
+	/* Save LR in the caller's LR save slot (16 bytes above our frame) */
+	ld	r4, _LINK(r1)
+	std	r4, (16 + SWITCH_FRAME_SIZE)(r1)
+
+	/* Reload kvm_run (r3) and vcpu (r4) */
+	REST_2GPRS(3, r1)
+
+	/* Load non-volatile guest state from the vcpu */
+	VCPU_LOAD_NVGPRS(r4)
+
+	/* Jump back to the lightweight guest entry */
+	b	kvm_start_lightweight
+
+kvm_loop_lightweight:
+
+	/* We'll need the vcpu pointer */
+	REST_GPR(4, r1)
+
+	/* Jump back to the lightweight guest entry */
+	b	kvm_start_lightweight
+
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
new file mode 100644
index 0000000..bd08535
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -0,0 +1,195 @@ 
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright SUSE Linux Products GmbH 2009
+ *
+ * Authors: Alexander Graf <agraf@suse.de>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/exception-64s.h>
+
+/*****************************************************************************
+ *                                                                           *
+ *        Real Mode handlers that need to be in low physical memory          *
+ *                                                                           *
+ ****************************************************************************/
+
+
+.macro INTERRUPT_TRAMPOLINE intno
+
+.global kvmppc_trampoline_\intno
+kvmppc_trampoline_\intno:
+
+	mtspr	SPRN_SPRG_SCRATCH0, r13		/* Save r13 */
+
+	/*
+	 * First thing to do is to find out if we're coming
+	 * from a KVM guest or a Linux process.
+	 *
+	 * To distinguish, we check a magic byte in the PACA
+	 */
+	mfspr	r13, SPRN_SPRG_PACA		/* r13 = PACA */
+	std	r12, PACA_KVM_SCRATCH0(r13)
+	mfcr	r12
+	stw	r12, PACA_KVM_SCRATCH1(r13)
+	lbz	r12, PACA_KVM_IN_GUEST(r13)
+	cmpwi	r12, KVM_GUEST_MODE_NONE
+	bne	..kvmppc_handler_hasmagic_\intno
+	/* No KVM guest? Then jump back to the Linux handler! */
+	lwz	r12, PACA_KVM_SCRATCH1(r13)
+	mtcr	r12
+	ld	r12, PACA_KVM_SCRATCH0(r13)
+	mfspr	r13, SPRN_SPRG_SCRATCH0		/* r13 = original r13 */
+	b	kvmppc_resume_\intno		/* Get back original handler */
+
+	/* Now we know we're handling a KVM guest */
+..kvmppc_handler_hasmagic_\intno:
+
+	/* Should we just skip the faulting instruction? */
+	cmpwi	r12, KVM_GUEST_MODE_SKIP
+	beq	kvmppc_handler_skip_ins
+
+	/* Let's store which interrupt we're handling */
+	li	r12, \intno
+
+	/* Jump into the SLB exit code that goes to the highmem handler */
+	b	kvmppc_handler_trampoline_exit
+
+.endm
+
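+/* Instantiate one trampoline for every interrupt vector DO_KVM hooks */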
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSTEM_RESET
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_MACHINE_CHECK
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_STORAGE
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DATA_SEGMENT
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_STORAGE
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_INST_SEGMENT
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_EXTERNAL
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALIGNMENT
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PROGRAM
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_FP_UNAVAIL
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_DECREMENTER
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_SYSCALL
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_TRACE
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_PERFMON
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC
+INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
+
+/*
+ * Bring us back to the faulting code, but skip the
+ * faulting instruction.
+ *
+ * This is a generic exit path from the interrupt
+ * trampolines above.
+ *
+ * Input Registers:
+ *
+ * R12               = free
+ * R13               = PACA
+ * PACA.KVM.SCRATCH0 = guest R12
+ * PACA.KVM.SCRATCH1 = guest CR
+ * SPRG_SCRATCH0     = guest R13
+ *
+ */
+kvmppc_handler_skip_ins:
+
+	/* Patch the IP to the next instruction */
+	mfsrr0	r12
+	addi	r12, r12, 4
+	mtsrr0	r12
+
+	/* Clean up all state */
+	lwz	r12, PACA_KVM_SCRATCH1(r13)
+	mtcr	r12
+	ld	r12, PACA_KVM_SCRATCH0(r13)
+	mfspr	r13, SPRN_SPRG_SCRATCH0
+
+	/* And get back into the code */
+	RFI
+
+/*
+ * This trampoline brings us back to a real mode handler
+ *
+ * Input Registers:
+ *
+ * R5 = SRR0
+ * R6 = SRR1
+ * LR = real-mode IP
+ *
+ */
+.global kvmppc_handler_lowmem_trampoline
+kvmppc_handler_lowmem_trampoline:
+
+	mtsrr0	r5
+	mtsrr1	r6
+	blr
+kvmppc_handler_lowmem_trampoline_end:
+
+/*
+ * Call a function in real mode
+ *
+ * Input Registers:
+ *
+ * R3 = function
+ * R4 = MSR
+ * R5 = CTR
+ *
+ */
+_GLOBAL(kvmppc_rmcall)
+	mtmsr	r4		/* Disable relocation, so the mtsrr0/mtsrr1
+				   pair below doesn't get interrupted */
+	mtctr	r5		/* CTR to take effect at the target */
+	mtsrr0	r3
+	mtsrr1	r4
+	RFI
+
+/*
+ * Activate current's external feature (FPU/Altivec/VSX)
+ *
+ * Each wrapper creates a stack frame and preserves LR around the
+ * call to the kernel's load_up_* helper.
+ */
+#define define_load_up(what) 				\
+							\
+_GLOBAL(kvmppc_load_up_ ## what);			\
+	stdu	r1, -INT_FRAME_SIZE(r1);		\
+	mflr	r3;					\
+	std	r3, _LINK(r1);				\
+							\
+	bl	.load_up_ ## what;			\
+							\
+	ld	r3, _LINK(r1);				\
+	mtlr	r3;					\
+	addi	r1, r1, INT_FRAME_SIZE;			\
+	blr
+
+define_load_up(fpu)
+#ifdef CONFIG_ALTIVEC
+define_load_up(altivec)
+#endif
+#ifdef CONFIG_VSX
+define_load_up(vsx)
+#endif
+
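+/*
+ * Offsets of the real-mode handlers relative to _stext, so that code
+ * running elsewhere (e.g. in module space) can locate them
+ */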
+.global kvmppc_trampoline_lowmem
+kvmppc_trampoline_lowmem:
+	.long kvmppc_handler_lowmem_trampoline - _stext
+
+.global kvmppc_trampoline_enter
+kvmppc_trampoline_enter:
+	.long kvmppc_handler_trampoline_enter - _stext
+
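+/* The SLB switching code must also be real-mode accessible, so pull it in here */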
+#include "book3s_64_slb.S"
+