From patchwork Tue Aug 18 04:24:05 2020
X-Patchwork-Submitter: Sean Christopherson
X-Patchwork-Id: 11719889
From: Sean Christopherson
To: Jarkko Sakkinen
Cc: Nathaniel McCallum, Cedric Xing, Jethro Beekman, Andy Lutomirski,
    linux-sgx@vger.kernel.org
Subject: [RFC PATCH 4/4] x86/vdso: x86/sgx: Allow the user to exit the vDSO loop on interrupts
Date: Mon, 17 Aug 2020 21:24:05 -0700
Message-Id: <20200818042405.12871-5-sean.j.christopherson@intel.com>
In-Reply-To: <20200818042405.12871-1-sean.j.christopherson@intel.com>
References: <20200818042405.12871-1-sean.j.christopherson@intel.com>
List-ID: <linux-sgx.vger.kernel.org>

Allow userspace to exit the vDSO on interrupts that are acknowledged
while the enclave is active.  This allows the user's runtime to switch
contexts at opportune times without additional overhead, e.g. when
using an M:N threading model (where M user threads run N TCSs, with
N > M).

Suggested-by: Jethro Beekman
Signed-off-by: Sean Christopherson
Tested-by: Jethro Beekman
---
 arch/x86/entry/vdso/vsgx_enter_enclave.S | 27 ++++++++++++++++++++----
 arch/x86/include/uapi/asm/sgx.h          |  3 +++
 2 files changed, 26 insertions(+), 4 deletions(-)

diff --git a/arch/x86/entry/vdso/vsgx_enter_enclave.S b/arch/x86/entry/vdso/vsgx_enter_enclave.S
index b09e87dbe9334..33428c0f94b0d 100644
--- a/arch/x86/entry/vdso/vsgx_enter_enclave.S
+++ b/arch/x86/entry/vdso/vsgx_enter_enclave.S
@@ -21,6 +21,9 @@
 
 #define SGX_SYNCHRONOUS_EXIT		0
 #define SGX_EXCEPTION_EXIT		1
+#define SGX_INTERRUPT_EXIT		2
+
+#define SGX_EXIT_ON_INTERRUPTS		1
 
 /* Offsets into sgx_enter_enclave.exception. */
 #define EX_TRAPNR			0*8
@@ -51,12 +54,17 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 
 	mov	RUN_OFFSET(%rbp), %rcx
 
-	/* No flags are currently defined/supported. */
-	cmpq	$0, FLAGS_OFFSET(%rcx)
-	jne	.Linvalid_input
-
 	/* Load TCS and AEP */
 	mov	TCS_OFFEST(%rcx), %rbx
+
+	/* Use the alternate AEP if the user wants to exit on interrupts. */
+	mov	FLAGS_OFFSET(%rcx), %rcx
+	cmpq	$SGX_EXIT_ON_INTERRUPTS, %rcx
+	je	.Lload_interrupts_aep
+
+	/* All other flags are reserved. */
+	test	%rcx, %rcx
+	jne	.Linvalid_input
 	lea	.Lasync_exit_pointer(%rip), %rcx
 
 	/* Single ENCLU serving as both EENTER and AEP (ERESUME) */
@@ -93,6 +101,17 @@ SYM_FUNC_START(__vdso_sgx_enter_enclave)
 	mov	$(-EINVAL), %eax
 	jmp	.Lout
 
+.Lload_interrupts_aep:
+	lea	.Lhandle_interrupt(%rip), %rcx
+	jmp	.Lenclu_eenter_eresume
+
+.Lhandle_interrupt:
+	mov	RUN_OFFSET(%rbp), %rbx
+
+	/* Set the exit_reason and exception info. */
+	movl	$SGX_INTERRUPT_EXIT, EXIT_REASON_OFFSET(%rbx)
+	jmp	.Lhandle_exit
+
 .Lhandle_exception:
 	mov	RUN_OFFSET(%rbp), %rbx
 
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index 80a8b7a949a23..beeabfad6eb81 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -76,6 +76,7 @@ struct sgx_enclave_set_attribute {
 
 #define SGX_SYNCHRONOUS_EXIT	0
 #define SGX_EXCEPTION_EXIT	1
+#define SGX_INTERRUPT_EXIT	2
 
 struct sgx_enclave_run;
 
@@ -116,6 +117,8 @@ struct sgx_enclave_exception {
 	__u64 address;
 };
 
+#define SGX_EXIT_ON_INTERRUPTS	(1ULL << 0)
+
 /**
  * struct sgx_enclave_run - Control structure for __vdso_sgx_enter_enclave()
  *
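
Not part of the patch, but for illustration, a rough userspace sketch of
how an M:N runtime might consume the new flag: set SGX_EXIT_ON_INTERRUPTS
in sgx_enclave_run.flags, treat an SGX_INTERRUPT_EXIT exit_reason as a
yield point, and re-enter with ERESUME.  The struct field names (tcs,
flags, exit_reason), the vdso_sgx_enter_enclave_t prototype, and the
yield_to_scheduler() helper are assumptions made for the example, not
something this patch defines.

#include <stdint.h>
#include <asm/sgx.h>	/* uapi header touched by this series (assumed install path) */

#define EENTER	2	/* ENCLU[EENTER] leaf */
#define ERESUME	3	/* ENCLU[ERESUME] leaf */

/* Assumed prototype of the vDSO entry point from earlier patches. */
typedef int (*vdso_sgx_enter_enclave_t)(unsigned long rdi, unsigned long rsi,
					unsigned long rdx, unsigned int leaf,
					unsigned long r8, unsigned long r9,
					struct sgx_enclave_run *run);

/* Hypothetical scheduler hook: run another user thread, then return. */
extern void yield_to_scheduler(void);

/* Drive one TCS to completion, yielding whenever an interrupt kicks us out. */
static void run_enclave_thread(vdso_sgx_enter_enclave_t enter, uint64_t tcs)
{
	struct sgx_enclave_run run = {
		.tcs   = tcs,
		.flags = SGX_EXIT_ON_INTERRUPTS,
	};
	unsigned int leaf = EENTER;

	for (;;) {
		enter(0, 0, 0, leaf, 0, 0, &run);

		if (run.exit_reason != SGX_INTERRUPT_EXIT)
			break;	/* synchronous exit, exception, or bad input */

		/* Interrupted while in the enclave: switch user contexts. */
		yield_to_scheduler();
		leaf = ERESUME;	/* resume the TCS where it left off */
	}
}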