From patchwork Fri Aug 17 08:39:03 2012
X-Patchwork-Submitter: Benjamin Herrenschmidt
X-Patchwork-Id: 1337901
Message-ID: <1345192743.11751.74.camel@pasglop>
Subject: [PATCH 6/7] powerpc/kvm: Speed up wakeups of CPUs on HV KVM
From: Benjamin Herrenschmidt
To: kvm-ppc@vger.kernel.org
Cc: Alexander Graf, kvm@vger.kernel.org
Date: Fri, 17 Aug 2012 18:39:03 +1000

Currently, we wake up a CPU by sending a host IPI with
smp_send_reschedule() to thread 0 of that core, which takes all
threads out of the guest and causes them to re-evaluate their
interrupt status on the way back in.

This adds a mechanism to differentiate real host IPIs from IPIs sent
by KVM for guest threads to poke each other, in order to target the
guest threads precisely when possible and avoid that global switch of
the core to host state.
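
To make the classification concrete: the real-mode external interrupt
path below now distinguishes three outcomes. The following standalone C
model sketches that decision (the function, the names and the simplified
"mer" flag are illustrative only; the authoritative logic is the assembly
added to book3s_hv_rmhandlers.S in this patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define XICS_IPI	2	/* interrupt source number used for IPIs */

enum exit_action {
	GO_TO_HOST,	/* full exit, switch the whole core to host state */
	GUEST_MER,	/* set LPCR[MER] and go (back) into the guest */
	STASH_FOR_HOST,	/* save the XIRR in the PACA for the host ICP driver */
};

/* xirr is the value read from the real ICP, host_ipi is the new per-CPU
 * flag, and mer stands in for the mediated-interrupt condition (the real
 * code checks LPCR[MER] and the guest's MSR[EE] separately). */
static enum exit_action classify(uint32_t xirr, bool host_ipi, bool mer)
{
	if (host_ipi)
		return GO_TO_HOST;		/* the host wants us out */
	if ((xirr & 0xffffff) == 0)		/* nothing pending in the ICP */
		return mer ? GUEST_MER : GO_TO_HOST;
	if ((xirr & 0xffffff) != XICS_IPI)
		return STASH_FOR_HOST;		/* real external interrupt */
	/* An IPI with host_ipi clear is a KVM poke aimed at a guest
	 * thread: EOI it and hand it straight to the guest. */
	return GUEST_MER;
}

int main(void)
{
	printf("%d\n", classify(XICS_IPI, false, false));	/* 1: GUEST_MER */
	printf("%d\n", classify(XICS_IPI, true, false));	/* 0: GO_TO_HOST */
	printf("%d\n", classify(0x42, false, false));		/* 2: STASH_FOR_HOST */
	return 0;
}
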
Then use this new facility in the in-kernel XICS code.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/kvm_book3s_asm.h |    8 ++-
 arch/powerpc/include/asm/kvm_ppc.h        |   29 ++++++++
 arch/powerpc/kernel/asm-offsets.c         |    2 +
 arch/powerpc/kvm/book3s_hv.c              |   26 ++++++-
 arch/powerpc/kvm/book3s_hv_rmhandlers.S   |  112 +++++++++++++++++++++++------
 arch/powerpc/kvm/book3s_xics.c            |    2 +-
 arch/powerpc/sysdev/xics/icp-native.c     |    8 +++
 7 files changed, 163 insertions(+), 24 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index bfcd00c..c66cbee 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -20,6 +20,11 @@
 #ifndef __ASM_KVM_BOOK3S_ASM_H__
 #define __ASM_KVM_BOOK3S_ASM_H__
 
+/* XICS ICP register offsets */
+#define XICS_XIRR		4
+#define XICS_MFRR		0xc
+#define XICS_IPI		2	/* interrupt source # for IPIs */
+
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -82,10 +87,11 @@ struct kvmppc_host_state {
 #ifdef CONFIG_KVM_BOOK3S_64_HV
 	u8 hwthread_req;
 	u8 hwthread_state;
-
+	u8 host_ipi;
 	struct kvm_vcpu *kvm_vcpu;
 	struct kvmppc_vcore *kvm_vcore;
 	unsigned long xics_phys;
+	u32 saved_xirr;
 	u64 dabr;
 	u64 host_mmcr[3];
 	u32 host_pmc[8];
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index ce81d91..2a10748 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -212,6 +212,21 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 	paca[cpu].kvm_hstate.xics_phys = addr;
 }
 
+static inline u32 kvmppc_get_xics_latch(void)
+{
+	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+
+	get_paca()->kvm_hstate.saved_xirr = 0;
+
+	return xirr;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{
+	paca[cpu].kvm_hstate.host_ipi = host_ipi;
+}
+
+extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
 extern void kvm_linear_init(void);
 
 #else
@@ -221,6 +236,18 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 static inline void kvm_linear_init(void)
 {}
 
+static inline u32 kvmppc_get_xics_latch(void)
+{
+	return 0;
+}
+
+static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
+{}
+
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	kvm_vcpu_kick(vcpu);
+}
 #endif
 
 #ifdef CONFIG_PPC_BOOK3S_64
@@ -251,4 +278,6 @@ void kvmppc_claim_lpid(long lpid);
 void kvmppc_free_lpid(long lpid);
 void kvmppc_init_lpid(unsigned long nr_lpids);
 
+extern void xics_wake_cpu(int cpu);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 85b05c4..5d9d972 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -544,6 +544,8 @@ int main(void)
 	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
 	HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
 	HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+	HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
+	HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
 	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
 	HSTATE_FIELD(HSTATE_PMC, host_pmc);
 	HSTATE_FIELD(HSTATE_PURR, host_purr);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index b41e586..a35aaa9 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -58,6 +58,31 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
 
+void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	int me;
+	int cpu = vcpu->cpu;
+	wait_queue_head_t *wqp;
+
+	wqp = kvm_arch_vcpu_wq(vcpu);
+	if (waitqueue_active(wqp)) {
+		wake_up_interruptible(wqp);
+		++vcpu->stat.halt_wakeup;
+	}
+
+	me = get_cpu();
+
+	/* CPU points to the first thread of the core */
+	if (cpu != me && cpu >= 0 && cpu < nr_cpu_ids) {
+		int real_cpu = cpu + vcpu->arch.ptid;
+		if (paca[real_cpu].kvm_hstate.xics_phys)
+			xics_wake_cpu(real_cpu);
+		else if (cpu_online(cpu))
+			smp_send_reschedule(cpu);
+	}
+	put_cpu();
+}
+
 void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -775,7 +800,6 @@ static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
 }
 
 extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
-extern void xics_wake_cpu(int cpu);
 
 static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 				   struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5a84c8d..5544751 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -78,10 +78,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
  *                                                                           *
  *****************************************************************************/
 
-#define XICS_XIRR		4
-#define XICS_QIRR		0xc
-#define XICS_IPI		2	/* interrupt source # for IPIs */
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -121,7 +117,7 @@ kvm_start_guest:
 	beq	27f
 25:	ld	r5,HSTATE_XICS_PHYS(r13)
 	li	r0,0xff
-	li	r6,XICS_QIRR
+	li	r6,XICS_MFRR
 	li	r7,XICS_XIRR
 	lwzcix	r8,r5,r7		/* get and ack the interrupt */
 	sync
@@ -667,17 +663,91 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
 	beq	hcall_try_real_mode
 
-	/* Check for mediated interrupts (could be done earlier really ...) */
+	/* Only handle external interrupts here on arch 206 and later */
 BEGIN_FTR_SECTION
-	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
-	bne+	1f
-	andi.	r0,r11,MSR_EE
-	beq	1f
-	mfspr	r5,SPRN_LPCR
-	andi.	r0,r5,LPCR_MER
+	b	ext_interrupt_to_host
+END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
+
+	/* External interrupt ? */
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	bne+	ext_interrupt_to_host
+
+	/* External interrupt, first check for host_ipi. If this is
+	 * set, we know the host wants us out so let's do it now
+	 */
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bne	ext_interrupt_to_host
+
+	/* Now read the interrupt from the ICP */
+	ld	r5, HSTATE_XICS_PHYS(r13)
+	li	r7, XICS_XIRR
+	cmpdi	r5, 0
+	beq-	ext_interrupt_to_host
+	lwzcix	r3, r5, r7
+	rlwinm.	r0, r3, 0, 0xffffff
+	sync
+	bne	1f
+
+	/* Nothing pending in the ICP, check for mediated interrupts
+	 * and bounce them to the guest
+	 */
+	andi.	r0, r11, MSR_EE
+	beq	ext_interrupt_to_host	/* shouldn't happen ?? */
+	mfspr	r5, SPRN_LPCR
+	andi.	r0, r5, LPCR_MER
 	bne	bounce_ext_interrupt
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+	b	ext_interrupt_to_host	/* shouldn't happen ?? */
+
+1:	/* We found something in the ICP...
+	 *
+	 * If it's not an IPI, stash it in the PACA and return to
+	 * the host, we don't (yet) handle directing real external
+	 * interrupts directly to the guest
+	 */
+	cmpwi	r0, XICS_IPI
+	bne	ext_stash_for_host
+
+	/* It's an IPI, clear the MFRR and EOI it */
+	li	r0, 0xff
+	li	r6, XICS_MFRR
+	stbcix	r0, r5, r6		/* clear the IPI */
+	stwcix	r3, r5, r7		/* EOI it */
+	sync
+
+	/* We need to re-check host IPI now in case it got set in the
+	 * meantime. If it's clear, we bounce the interrupt to the
+	 * guest
+	 */
+	lbz	r0, HSTATE_HOST_IPI(r13)
+	cmpwi	r0, 0
+	bne-	1f
+
+	/* Alright, looks like an IPI for the guest, we need to set MER */
+	mfspr	r8,SPRN_LPCR
+	ori	r8,r8,LPCR_MER
+	mtspr	SPRN_LPCR,r8
+
+	/* And if the guest EE is set, we can deliver immediately, else
+	 * we return to the guest with MER set
+	 */
+	andi.	r0, r11, MSR_EE
+	bne	bounce_ext_interrupt
+	mr	r4, r9
+	b	fast_guest_return
+
+	/* We raced with the host, we need to resend that IPI, bummer */
+1:	li	r0, IPI_PRIORITY
+	stbcix	r0, r5, r6		/* set the IPI */
+	sync
+	b	ext_interrupt_to_host
+
+ext_stash_for_host:
+	/* It's not an IPI and it's for the host, stash it in the PACA
+	 * before exit, it will be picked up by the host ICP driver
+	 */
+	stw	r3, HSTATE_SAVED_XIRR(r13)
+ext_interrupt_to_host:
 
 nohpte_cont:
 hcall_real_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
@@ -816,7 +886,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	beq	44f
 	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
 	li	r0,IPI_PRIORITY
-	li	r7,XICS_QIRR
+	li	r7,XICS_MFRR
 	stbcix	r0,r7,r8		/* trigger the IPI */
 44:	srdi.	r3,r3,1
 	addi	r6,r6,PACA_SIZE
@@ -1338,11 +1408,11 @@ hcall_real_table:
 	.long	0		/* 0x58 */
 	.long	0		/* 0x5c */
 	.long	0		/* 0x60 */
-	.long	0		/* 0x64 */
-	.long	0		/* 0x68 */
-	.long	0		/* 0x6c */
-	.long	0		/* 0x70 */
-	.long	0		/* 0x74 */
+	.long	.kvmppc_rm_h_eoi - hcall_real_table
+	.long	.kvmppc_rm_h_cppr - hcall_real_table
+	.long	.kvmppc_rm_h_ipi - hcall_real_table
+	.long	0		/* 0x70 - H_IPOLL */
+	.long	.kvmppc_rm_h_xirr - hcall_real_table
 	.long	0		/* 0x78 */
 	.long	0		/* 0x7c */
 	.long	0		/* 0x80 */
@@ -1593,7 +1663,7 @@ secondary_nap:
 	beq	37f
 	sync
 	li	r0, 0xff
-	li	r6, XICS_QIRR
+	li	r6, XICS_MFRR
 	stbcix	r0, r5, r6		/* clear the IPI */
 	stwcix	r3, r5, r7		/* EOI it */
 37:	sync
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 254d0fe..5c9493f 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -318,7 +318,7 @@ static inline bool icp_try_update(struct kvmppc_icp *icp,
 		kvmppc_book3s_queue_irqprio(icp->vcpu,
 					    BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
 		if (!change_self)
-			kvm_vcpu_kick(icp->vcpu);
+			kvmppc_fast_vcpu_kick(icp->vcpu);
 	}
  bail:
 	return success;
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 48861d3..20b328b 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -51,6 +51,12 @@ static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];
 static inline unsigned int icp_native_get_xirr(void)
 {
 	int cpu = smp_processor_id();
+	unsigned int xirr;
+
+	/* Handle an interrupt latched by KVM */
+	xirr = kvmppc_get_xics_latch();
+	if (xirr)
+		return xirr;
 
 	return in_be32(&icp_native_regs[cpu]->xirr.word);
 }
@@ -138,6 +144,7 @@ static unsigned int icp_native_get_irq(void)
 
 static void icp_native_cause_ipi(int cpu, unsigned long data)
 {
+	kvmppc_set_host_ipi(cpu, 1);
 	icp_native_set_qirr(cpu, IPI_PRIORITY);
 }
 
@@ -151,6 +158,7 @@ static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
 {
 	int cpu = smp_processor_id();
 
+	kvmppc_set_host_ipi(cpu, 0);
 	icp_native_set_qirr(cpu, 0xff);
 
 	return smp_ipi_demux();
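
To make the race handling at the end of the IPI path concrete (the "we
raced with the host, we need to resend that IPI" case in
book3s_hv_rmhandlers.S above), here is a standalone C model of that
handshake; the struct, the function name and the IPI_PRIORITY value are
illustrative stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

#define IPI_PRIORITY	4	/* illustrative; the real value comes from xics.h */

struct icp_model {
	unsigned char mfrr;	/* 0xff means no IPI pending */
	bool host_ipi;		/* set by the host's icp_native_cause_ipi() path */
};

/* What the real-mode handler does once it has EOI'd an IPI: if the
 * host flagged a real IPI in the meantime, re-raise the MFRR so the
 * host ICP driver still sees an IPI, and force a full exit to the
 * host.  Returns true when the IPI may be bounced into the guest. */
static bool handle_eoied_ipi(struct icp_model *icp)
{
	icp->mfrr = 0xff;			/* IPI consumed (MFRR cleared) */
	if (icp->host_ipi) {
		icp->mfrr = IPI_PRIORITY;	/* resend it for the host */
		return false;
	}
	return true;				/* deliver to the guest via LPCR[MER] */
}

int main(void)
{
	struct icp_model icp = { .mfrr = IPI_PRIORITY, .host_ipi = false };
	bool deliver;

	deliver = handle_eoied_ipi(&icp);
	printf("guest poke: deliver=%d mfrr=0x%x\n", deliver, icp.mfrr);

	icp.mfrr = IPI_PRIORITY;
	icp.host_ipi = true;
	deliver = handle_eoied_ipi(&icp);
	printf("host race:  deliver=%d mfrr=0x%x\n", deliver, icp.mfrr);
	return 0;
}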