
[for_v23,4/4] x86/sgx: Drop mmap_sem before EEXTENDing an enclave page

Message ID 20191010225530.26400-5-sean.j.christopherson@intel.com
State New, archived
Series x86/sgx: Fix add page bugs

Commit Message

Sean Christopherson Oct. 10, 2019, 10:55 p.m. UTC
Drop mmap_sem, which needs to be held for read across EADD, prior to
doing EEXTEND on the newly added page, to avoid holding mmap_sem for an
extended duration.  EEXTEND doesn't access user pages, so holding
encl->lock without mmap_sem is perfectly ok, and EEXTEND is a _slow_
operation, to the point where it operates on 256-byte chunks instead
of 4k pages in order to keep the latency of a single instruction
reasonable.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/ioctl.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
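
To put the "slow" claim in perspective, here is a minimal sketch of the
256-byte chunking described in the commit message. The helper name
sgx_measure_page() and the __eextend() ENCLS wrapper are assumptions
for illustration, not code taken from this patch:

/*
 * Illustrative sketch: extending the enclave measurement for a single
 * 4k page means issuing EEXTEND once per 256-byte chunk, i.e. 16
 * ENCLS invocations per page, each taken under encl->lock.
 */
static int sgx_measure_page(void *secs_addr, void *epc_addr)
{
	unsigned long offset;
	int ret;

	for (offset = 0; offset < PAGE_SIZE; offset += 256) {
		ret = __eextend(secs_addr, epc_addr + offset);
		if (ret)
			return ret;
	}

	return 0;
}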

Patch

diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index fd4117f18564..46f2769d16fe 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -403,11 +403,15 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
 	 */
 	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
 				encl_page);
-	if (ret)
+	if (ret) {
+		up_read(&current->mm->mmap_sem);
 		goto err_out_unlock;
+	}
 
 	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
 				  addp->src);
+	up_read(&current->mm->mmap_sem);
+
 	if (ret)
 		goto err_out;
 
@@ -427,7 +431,6 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
 		sgx_mark_page_reclaimable(encl_page->epc_page);
 
 	mutex_unlock(&encl->lock);
-	up_read(&current->mm->mmap_sem);
 	return ret;
 
 err_out:
@@ -437,7 +440,6 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
 err_out_unlock:
 	sgx_encl_shrink(encl, va_page);
 	mutex_unlock(&encl->lock);
-	up_read(&current->mm->mmap_sem);
 
 err_out_free:
 	sgx_free_page(epc_page);
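
Taken together, the hunks narrow the mmap_sem critical section to the
EADD step alone. A simplified view of the resulting flow in
sgx_encl_add_page(), with error paths and unrelated logic omitted (the
ordering below is inferred from the hunks, not quoted verbatim):

	down_read(&current->mm->mmap_sem);
	mutex_lock(&encl->lock);

	/* EADD pulls the page contents from user memory, so mmap_sem
	 * must be held for read across this call.
	 */
	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
				  addp->src);

	/* EEXTEND doesn't access user pages, so mmap_sem can be
	 * dropped before the slow EEXTEND measurement begins.
	 */
	up_read(&current->mm->mmap_sem);

	/* ... EEXTEND the page under encl->lock only ... */

	mutex_unlock(&encl->lock);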