From patchwork Fri Nov 27 18:50:03 2015
From: Marc Zyngier <marc.zyngier@arm.com>
To: Christoffer Dall
Cc: Alex Bennée, Steve Capper, Ard Biesheuvel, Mark Rutland,
 Catalin Marinas, linux-arm-kernel@lists.infradead.org,
 kvm@vger.kernel.org, kvmarm@lists.cs.columbia.edu
Subject: [PATCH v2 09/21] arm64: KVM: Implement guest entry
Date: Fri, 27 Nov 2015 18:50:03 +0000
Message-Id: <1448650215-15218-10-git-send-email-marc.zyngier@arm.com>
In-Reply-To: <1448650215-15218-1-git-send-email-marc.zyngier@arm.com>
References: <1448650215-15218-1-git-send-email-marc.zyngier@arm.com>

Contrary to the previous patch, the guest entry is fairly different
from its assembly counterpart, mostly because it is only concerned
with saving/restoring the GP registers, and nothing else.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/hyp/Makefile |   1 +
 arch/arm64/kvm/hyp/entry.S  | 155 ++++++++++++++++++++++++++++++++++++++++++++
 arch/arm64/kvm/hyp/hyp.h    |   2 +
 3 files changed, 158 insertions(+)
 create mode 100644 arch/arm64/kvm/hyp/entry.S

diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index ec14cac..1e1ff06 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
 obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
new file mode 100644
index 0000000..2c4449a
--- /dev/null
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/fpsimdmacros.h>
+#include <asm/kvm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+
+	.text
+	.pushsection	.hyp.text, "ax"
+
+.macro save_common_regs ctxt
+	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_common_regs ctxt
+	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
+
+.macro save_host_regs reg
+	save_common_regs \reg
+.endm
+
+.macro restore_host_regs reg
+	restore_common_regs \reg
+.endm
+
+.macro save_guest_regs
+	// x0 is the vcpu address
+	// x1 is the return code, do not corrupt!
+	// x2 is the cpu context
+	// x3 is a tmp register
+	// Guest's x0-x3 are on the stack
+
+	add	x2, x0, #VCPU_CONTEXT
+
+	// Compute base to save registers
+	stp	x4, x5,   [x2, #CPU_XREG_OFFSET(4)]
+	stp	x6, x7,   [x2, #CPU_XREG_OFFSET(6)]
+	stp	x8, x9,   [x2, #CPU_XREG_OFFSET(8)]
+	stp	x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+	stp	x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+	stp	x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+	stp	x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+	str	x18,      [x2, #CPU_XREG_OFFSET(18)]
+
+	pop	x6, x7			// x2, x3
+	pop	x4, x5			// x0, x1
+
+	stp	x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+	stp	x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+
+	save_common_regs x2
+.endm
+
+.macro restore_guest_regs
+	// Assume vcpu in x0, clobbers everything else
+
+	add	x2, x0, #VCPU_CONTEXT
+
+	// Prepare x0-x3 for later restore
+	ldp	x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+	ldp	x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+	push	x4, x5		// Push x0-x3 on the stack
+	push	x6, x7
+
+	// x4-x18
+	ldp	x4, x5,   [x2, #CPU_XREG_OFFSET(4)]
+	ldp	x6, x7,   [x2, #CPU_XREG_OFFSET(6)]
+	ldp	x8, x9,   [x2, #CPU_XREG_OFFSET(8)]
+	ldp	x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+	ldp	x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+	ldp	x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+	ldp	x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+	ldr	x18,      [x2, #CPU_XREG_OFFSET(18)]
+
+	// x19-x29, lr
+	restore_common_regs x2
+
+	// Last bits of the 64bit state
+	pop	x2, x3
+	pop	x0, x1
+
+	// Do not touch any register after this!
+.endm
+
+/*
+ * u64 __guest_enter(struct kvm_vcpu *vcpu,
+ *		     struct kvm_cpu_context *host_ctxt);
+ */
+ENTRY(__guest_enter)
+	// x0: vcpu
+	// x1: host_ctxt
+	// x2, x3: parameter registers
+	// x4-x18: clobbered by macros
+
+	save_host_regs x1
+
+	// Preserve vcpu & host_ctxt for use at exit time
+	stp	x0, x1, [sp, #-16]!
+
+	restore_guest_regs
+	eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+	// x0: vcpu
+	// x1: return code
+	// x2-x3: free
+	// x4-x29,lr: vcpu regs
+	// vcpu x0-x3 on the stack
+	save_guest_regs
+
+	// Restore vcpu & host_ctxt from the stack
+	// (preserving return code in x1)
+	ldp	x0, x2, [sp], #16
+	restore_host_regs x2
+
+	mov	x0, x1
+	ret
+ENDPROC(__guest_exit)
+
+	/* Insert fault handling here */
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
index 2581232..7ac8e11 100644
--- a/arch/arm64/kvm/hyp/hyp.h
+++ b/arch/arm64/kvm/hyp/hyp.h
@@ -50,5 +50,7 @@ void __debug_restore_state(struct kvm_vcpu *vcpu,
 void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
 void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
 
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+
 #endif /* __ARM64_KVM_HYP_H__ */
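
A note for reviewers, not part of the patch: the stp/ldp offsets above all go
through CPU_XREG_OFFSET(n). The standalone C sketch below illustrates what
that macro is expected to resolve to, assuming the asm-offsets constants are
generated as CPU_GP_REGS = offsetof(struct kvm_cpu_context, gp_regs) and
CPU_USER_PT_REGS = offsetof(struct kvm_regs, regs). The structures here are
simplified stand-ins for illustration; the real layouts live in the kernel
headers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustration only). */
struct user_pt_regs {
	uint64_t regs[31];	/* x0..x30, 8 bytes each */
	uint64_t sp;
	uint64_t pc;
	uint64_t pstate;
};

struct kvm_regs {
	struct user_pt_regs regs;	/* CPU_USER_PT_REGS = offsetof(kvm_regs, regs) */
	/* FP/SIMD and banked EL1 state follow in the real structure */
};

struct kvm_cpu_context {
	struct kvm_regs gp_regs;	/* CPU_GP_REGS = offsetof(kvm_cpu_context, gp_regs) */
	/* system register array follows in the real structure */
};

/* Mirrors CPU_XREG_OFFSET(n): byte offset of register xN within the context. */
static size_t cpu_xreg_offset(unsigned int n)
{
	return offsetof(struct kvm_cpu_context, gp_regs)
	       + offsetof(struct kvm_regs, regs)
	       + 8 * n;
}

int main(void)
{
	/* With these stand-ins, x4 sits 32 bytes into the context. */
	printf("x4 offset: %zu\n", cpu_xreg_offset(4));
	printf("lr offset: %zu\n", cpu_xreg_offset(30));
	return 0;
}

Each stp/ldp pair in the macros covers two consecutive x registers, which is
why the offsets step by two register indices (16 bytes) at a time.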
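For completeness, here is a hypothetical sketch of how the prototype added to
hyp.h might be used by the C world-switch code introduced later in the series.
Only __guest_enter() and the two context types come from this patch; the
helper name run_guest_once() and the elided save/restore steps are assumptions
for illustration (u64 is written as uint64_t to keep the sketch standalone).

#include <stdint.h>

struct kvm_vcpu;		/* opaque here; defined by the kernel */
struct kvm_cpu_context;		/* opaque here; defined by the kernel */

/* Prototype as declared in arch/arm64/kvm/hyp/hyp.h by this patch. */
uint64_t __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);

/* Hypothetical caller; sysreg and debug state handling elided. */
uint64_t run_guest_once(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt)
{
	/*
	 * __guest_enter() saves the host's callee-saved registers into
	 * host_ctxt, restores the guest's GP registers from the vcpu
	 * context and erets into the guest. Control returns through
	 * __guest_exit, with the exit code (placed in x1 by the hyp
	 * exception handlers) as the return value.
	 */
	return __guest_enter(vcpu, host_ctxt);
}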