diff mbox series

[for_v23,v3,03/12] x86/sgx: Fix EEXTEND error handling

Message ID 20191016183745.8226-4-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Headers show
Series x86/sgx: Bug fixes for v23 | expand

Commit Message

Sean Christopherson Oct. 16, 2019, 6:37 p.m. UTC
Rework EEXTEND error handling to fix issues related to destroying the
enclave in response to EEXTEND failure.  At the time of EEXTEND, the
page is already visible in the sense that it has been added to the
radix tree, and therefore will be processed by sgx_encl_destroy().  This
means the "add" part needs to be fully completed prior to invoking
sgx_encl_destroy() in order to avoid consuming half-baked state.

Move sgx_encl_destroy() to the call site of __sgx_encl_extend() so that
it is somewhat more obvious why the add needs to complete before doing
EEXTEND.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/ioctl.c | 31 ++++++++++++++++---------------
 1 file changed, 16 insertions(+), 15 deletions(-)

Comments

Jarkko Sakkinen Oct. 18, 2019, 10:42 a.m. UTC | #1
On Wed, Oct 16, 2019 at 11:37:36AM -0700, Sean Christopherson wrote:
> Rework EEXTEND error handling to fix issues related to destroying the
> enclave in response to EEXTEND failure.  At the time of EEXTEND, the
> page is already visible in the sense that it has been added to the
> radix tree, and therefore will be processed by sgx_encl_destroy().  This
> means the "add" part needs to be fully completed prior to invoking
> sgx_encl_destroy() in order to avoid consuming half-baked state.
> 
> Move sgx_encl_destroy() to the call site of __sgx_encl_extend() so that
> it is somewhat more obvious why the add needs to complete before doing
> EEXTEND.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

First three (1, 2, 3) have been applied.

/Jarkko
diff mbox series

Patch

diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 7d1b449bf771..4169ff3c81d8 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -351,18 +351,14 @@  static int __sgx_encl_extend(struct sgx_encl *encl,
 	for_each_set_bit(i, &mrmask, 16) {
 		ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
 				sgx_epc_addr(epc_page) + (i * 0x100));
-		if (ret)
-			goto err_out;
+		if (ret) {
+			if (encls_failed(ret))
+				ENCLS_WARN(ret, "EEXTEND");
+			return -EFAULT;
+		}
 	}
 
 	return 0;
-
-err_out:
-	if (encls_failed(ret))
-		ENCLS_WARN(ret, "EEXTEND");
-
-	sgx_encl_destroy(encl);
-	return -EFAULT;
 }
 
 static int sgx_encl_add_page(struct sgx_encl *encl,
@@ -421,19 +417,24 @@  static int sgx_encl_add_page(struct sgx_encl *encl,
 	if (ret)
 		goto err_out;
 
-	ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
-	if (ret)
-		goto err_out;
-
+	/*
+	 * Complete the "add" before doing the "extend" so that the "add"
+	 * isn't in a half-baked state in the extremely unlikely scenario that
+	 * the enclave will be destroyed in response to EEXTEND failure.
+	 */
 	encl_page->encl = encl;
 	encl_page->epc_page = epc_page;
 	encl->secs_child_cnt++;
 
-	sgx_mark_page_reclaimable(encl_page->epc_page);
+	ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
+	if (ret)
+		sgx_encl_destroy(encl);
+	else
+		sgx_mark_page_reclaimable(encl_page->epc_page);
 
 	mutex_unlock(&encl->lock);
 	up_read(&current->mm->mmap_sem);
-	return 0;
+	return ret;
 
 err_out:
 	radix_tree_delete(&encl_page->encl->page_tree,