
[4/4] x86/sgx: Take encl->lock inside of mm->mmap_sem for EADD

Message ID 20190827001128.25066-5-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series x86/sgx: Fix lock ordering bug w/ EADD

Commit Message

Sean Christopherson Aug. 27, 2019, 12:11 a.m. UTC
Reverse the order in which encl->lock and mm->mmap_sem are taken during
ENCLAVE_ADD_PAGE so as to adhere to SGX's lock ordering requirements.
Attempting to acquire mm->mmap_sem while holding encl->lock can result
in deadlock.
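
For illustration, the ordering rule being enforced is roughly the
following (a sketch only, not the driver code itself):

	/*
	 * mm->mmap_sem is the outer lock, encl->lock the inner lock.
	 * Acquiring them in the reverse order on another path can
	 * deadlock against a path that uses this order.
	 */
	down_read(&current->mm->mmap_sem);	/* outer */
	mutex_lock(&encl->lock);		/* inner */

	/* ... EADD and bookkeeping under both locks ... */

	mutex_unlock(&encl->lock);
	up_read(&current->mm->mmap_sem);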

Refactor EEXTEND and the final bookkeeping out of __sgx_encl_add_page()
so that mm->mmap_sem can be dropped after EADD without spreading the
lock/unlock across multiple functions.
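
Abbreviated, the reworked flow in sgx_encl_add_page() becomes (error
paths omitted for brevity):

	down_read(&current->mm->mmap_sem);
	mutex_lock(&encl->lock);

	/* insert into page_tree first; a failed EADD can't be unwound */
	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
				encl_page);

	/* EADD needs mmap_sem held for the VM_MAYEXEC check */
	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
				  addp->src);
	up_read(&current->mm->mmap_sem);

	/* EEXTEND and final bookkeeping need only encl->lock */
	ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);

	mutex_unlock(&encl->lock);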

Reported-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/ioctl.c | 55 ++++++++++++++++++++-------------
 1 file changed, 33 insertions(+), 22 deletions(-)

Comments

Jarkko Sakkinen Aug. 27, 2019, 12:21 p.m. UTC | #1
On Mon, Aug 26, 2019 at 05:11:28PM -0700, Sean Christopherson wrote:
> Reverse the order in which encl->lock and mm->mmap_sem are taken during
> ENCLAVE_ADD_PAGE so as to adhere to SGX's lock ordering requirements.
> Attempting to acquire mm->mmap_sem while holding encl->lock can result
> in deadlock.
> 
> Refactor EEXTEND and the final bookkeeping out of __sgx_encl_add_page()
> so that mm->mmap_sem can be dropped after EADD without spreading the
> lock/unlock across multiple functions.
> 
> Reported-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Thanks, looks good.

Acked-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>

/Jarkko

Patch

diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 170ed538b02b..4a9ae1090433 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -317,43 +317,40 @@ static int sgx_validate_secinfo(struct sgx_secinfo *secinfo)
 static int __sgx_encl_add_page(struct sgx_encl *encl,
 			       struct sgx_encl_page *encl_page,
 			       struct sgx_epc_page *epc_page,
-			       struct sgx_secinfo *secinfo, unsigned long src,
-			       unsigned long mrmask)
+			       struct sgx_secinfo *secinfo, unsigned long src)
 {
 	struct sgx_pageinfo pginfo;
 	struct vm_area_struct *vma;
 	int ret;
-	int i;
 
 	pginfo.secs = (unsigned long)sgx_epc_addr(encl->secs.epc_page);
 	pginfo.addr = SGX_ENCL_PAGE_ADDR(encl_page);
 	pginfo.metadata = (unsigned long)secinfo;
 	pginfo.contents = src;
 
-	down_read(&current->mm->mmap_sem);
-
 	/* Query vma's VM_MAYEXEC as an indirect path_noexec() check. */
 	if (encl_page->vm_max_prot_bits & VM_EXEC) {
 		vma = find_vma(current->mm, src);
-		if (!vma) {
-			up_read(&current->mm->mmap_sem);
+		if (!vma)
 			return -EFAULT;
-		}
 
-		if (!(vma->vm_flags & VM_MAYEXEC)) {
-			up_read(&current->mm->mmap_sem);
+		if (!(vma->vm_flags & VM_MAYEXEC))
 			return -EACCES;
-		}
 	}
 
 	__uaccess_begin();
 	ret = __eadd(&pginfo, sgx_epc_addr(epc_page));
 	__uaccess_end();
 
-	up_read(&current->mm->mmap_sem);
+	return ret ? -EFAULT : 0;
+}
 
-	if (ret)
-		return -EFAULT;
+static int __sgx_encl_extend(struct sgx_encl *encl,
+			     struct sgx_epc_page *epc_page,
+			     unsigned long mrmask)
+{
+	int ret;
+	int i;
 
 	for_each_set_bit(i, &mrmask, 16) {
 		ret = __eextend(sgx_epc_addr(encl->secs.epc_page),
@@ -364,12 +361,6 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
 			return -EFAULT;
 		}
 	}
-
-	encl_page->encl = encl;
-	encl_page->epc_page = epc_page;
-	encl->secs_child_cnt++;
-	sgx_mark_page_reclaimable(encl_page->epc_page);
-
 	return 0;
 }
 
@@ -398,19 +389,39 @@ static int sgx_encl_add_page(struct sgx_encl *encl,
 		goto err_out_free;
 	}
 
+	down_read(&current->mm->mmap_sem);
+
 	mutex_lock(&encl->lock);
 
+	/*
+	 * Insert prior to EADD in case of OOM.  EADD modifies MRENCLAVE, i.e.
+	 * can't be gracefully unwound, while failure on EADD/EXTEND is limited
+	 * to userspace errors (or kernel/hardware bugs).
+	 */
 	ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
 				encl_page);
-	if (ret)
+	if (ret) {
+		up_read(&current->mm->mmap_sem);
 		goto err_out_shrink;
+	}
 
 	ret = __sgx_encl_add_page(encl, encl_page, epc_page, secinfo,
-				  addp->src, addp->mrmask);
+				  addp->src);
+	up_read(&current->mm->mmap_sem);
+
+	if (ret)
+		goto err_out;
+
+	ret = __sgx_encl_extend(encl, epc_page, addp->mrmask);
 	if (ret)
 		goto err_out;
 
+	encl_page->encl = encl;
+	encl_page->epc_page = epc_page;
+	encl->secs_child_cnt++;
+	sgx_mark_page_reclaimable(encl_page->epc_page);
 	mutex_unlock(&encl->lock);
+
 	return 0;
 
 err_out: