
[for_v22,11/11] x86/sgx: Shrink the enclave if ECREATE/EADD fails

Message ID 20190808001254.11926-12-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series x86/sgx: Bug fixes for v22

Commit Message

Sean Christopherson Aug. 8, 2019, 12:12 a.m. UTC
Add sgx_encl_shrink() to pair with sgx_encl_grow() and use it to adjust
the VA page count when ECREATE or EADD fails.  Return the allocated VA
page from sgx_encl_grow() so that it can be freed during shrink.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/driver/ioctl.c | 55 +++++++++++++++++++-------
 1 file changed, 40 insertions(+), 15 deletions(-)
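
The resulting caller pattern, condensed from the diff below (labels and flag names as used in ioctl.c), is roughly:

	mutex_lock(&encl->lock);

	va_page = sgx_encl_grow(encl, SGX_ENCL_CREATED | SGX_ENCL_DEAD);
	if (IS_ERR(va_page)) {
		ret = PTR_ERR(va_page);
		goto err_out_unlock;
	}

	/* ... ECREATE/EADD work; failures jump to err_out_shrink ... */

	mutex_unlock(&encl->lock);
	return 0;

err_out_shrink:
	/* undo the page_cnt bump and free the VA page, if one was allocated */
	sgx_encl_shrink(encl, va_page);

err_out_unlock:
	mutex_unlock(&encl->lock);
	return ret;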

Comments

Jarkko Sakkinen Aug. 8, 2019, 3:50 p.m. UTC | #1
On Wed, Aug 07, 2019 at 05:12:54PM -0700, Sean Christopherson wrote:
> Add sgx_encl_shrink() to pair with sgx_encl_grow() and use it to adjust
> the VA page count when ECREATE or EADD fails.  Return the allocated VA
> page from sgx_encl_grow() so that it can be freed during shrink.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Acked-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>

/Jarkko
Sean Christopherson Aug. 8, 2019, 6:03 p.m. UTC | #2
On Thu, Aug 08, 2019 at 06:50:08PM +0300, Jarkko Sakkinen wrote:
> On Wed, Aug 07, 2019 at 05:12:54PM -0700, Sean Christopherson wrote:
> > Add sgx_encl_shrink() to pair with sgx_encl_grow() and use it to adjust
> > the VA page count when ECREATE or EADD fails.  Return the allocated VA
> > page from sgx_encl_grow() so that it can be freed during shrink.
> > 
> > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> 
> Acked-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>

This missed wrapping -ENOMEM with ERR_PTR() when the va_page allocation fails.
Let me know if you want me to send a v2 or if you'll fix it up when
applying.
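
The missing piece is in the va_page allocation path inside sgx_encl_grow(), which falls outside the diff context below; since the function now returns a pointer, the bare -ENOMEM there also needs wrapping. A minimal sketch, assuming the existing kzalloc()-based allocation:

		va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
		if (!va_page)
			return ERR_PTR(-ENOMEM);	/* was "return -ENOMEM;" */
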
Jarkko Sakkinen Aug. 9, 2019, 4:13 p.m. UTC | #3
On Thu, 2019-08-08 at 11:03 -0700, Sean Christopherson wrote:
> On Thu, Aug 08, 2019 at 06:50:08PM +0300, Jarkko Sakkinen wrote:
> > On Wed, Aug 07, 2019 at 05:12:54PM -0700, Sean Christopherson wrote:
> > > Add sgx_encl_shrink() to pair with sgx_encl_grow() and use it to adjust
> > > the VA page count when ECREATE or EADD fails.  Return the allocated VA
> > > page from sgx_encl_grow() so that it can be freed during shrink.
> > > 
> > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > 
> > Acked-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> 
> This missed wrapping -ENOMEM with ERR_PTR() when the va_page allocation fails.
> Let me know if you want me to send a v2 or if you'll fix it up when
> applying.

I can fix it, no problem.

/Jarkko
Jarkko Sakkinen Aug. 10, 2019, 11:37 a.m. UTC | #4
On Thu, Aug 08, 2019 at 11:03:55AM -0700, Sean Christopherson wrote:
> On Thu, Aug 08, 2019 at 06:50:08PM +0300, Jarkko Sakkinen wrote:
> > On Wed, Aug 07, 2019 at 05:12:54PM -0700, Sean Christopherson wrote:
> > > Add sgx_encl_shrink() to pair with sgx_encl_grow() and use it to adjust
> > > the VA page count when ECREATE or EADD fails.  Return the allocated VA
> > > page from sgx_encl_grow() so that it can be freed during shrink.
> > > 
> > > Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > 
> > Acked-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> 
> This missed wrapping -ENOMEM with ERR_PTR() when the va_page allocation fails.
> Let me know if you want me to send a v2 or if you'll fix it up when
> applying.

Merged.

/Jarkko

Patch

diff --git a/arch/x86/kernel/cpu/sgx/driver/ioctl.c b/arch/x86/kernel/cpu/sgx/driver/ioctl.c
index a531cf615f3c..173a405d59a5 100644
--- a/arch/x86/kernel/cpu/sgx/driver/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/driver/ioctl.c
@@ -22,16 +22,17 @@  struct sgx_add_page_req {
 	struct list_head list;
 };
 
-static int sgx_encl_grow(struct sgx_encl *encl, unsigned int disallowed_flags)
+static struct sgx_va_page *sgx_encl_grow(struct sgx_encl *encl,
+					 unsigned int disallowed_flags)
 {
-	struct sgx_va_page *va_page;
-	int ret;
+	struct sgx_va_page *va_page = NULL;
+	void *err;
 
 	BUILD_BUG_ON(SGX_VA_SLOT_COUNT !=
 		(SGX_ENCL_PAGE_VA_OFFSET_MASK >> 3) + 1);
 
 	if (encl->flags & disallowed_flags)
-		return -EFAULT;
+		return ERR_PTR(-EFAULT);
 
 	if (!(encl->page_cnt % SGX_VA_SLOT_COUNT)) {
 		mutex_unlock(&encl->lock);
@@ -46,22 +47,34 @@  static int sgx_encl_grow(struct sgx_encl *encl, unsigned int disallowed_flags)
 		mutex_lock(&encl->lock);
 
 		if (IS_ERR(va_page->epc_page)) {
-			ret = PTR_ERR(va_page->epc_page);
+			err = ERR_CAST(va_page->epc_page);
 			kfree(va_page);
-			return ret;
+			return err;
 		} else if (encl->flags & disallowed_flags) {
 			sgx_free_page(va_page->epc_page);
 			kfree(va_page);
-			return -EFAULT;
+			return ERR_PTR(-EFAULT);
 		} else if (encl->page_cnt % SGX_VA_SLOT_COUNT) {
 			sgx_free_page(va_page->epc_page);
 			kfree(va_page);
+			va_page = NULL;
 		} else {
 			list_add(&va_page->list, &encl->va_pages);
 		}
 	}
 	encl->page_cnt++;
-	return 0;
+	return va_page;
+}
+
+static void sgx_encl_shrink(struct sgx_encl *encl, struct sgx_va_page *va_page)
+{
+	encl->page_cnt--;
+
+	if (va_page) {
+		sgx_free_page(va_page->epc_page);
+		list_del(&va_page->list);
+		kfree(va_page);
+	}
 }
 
 static bool sgx_process_add_page_req(struct sgx_add_page_req *req,
@@ -260,6 +273,7 @@  static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
 {
 	unsigned long encl_size = secs->size + PAGE_SIZE;
 	struct sgx_epc_page *secs_epc;
+	struct sgx_va_page *va_page;
 	unsigned long ssaframesize;
 	struct sgx_pageinfo pginfo;
 	struct sgx_secinfo secinfo;
@@ -268,21 +282,23 @@  static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
 
 	mutex_lock(&encl->lock);
 
-	ret = sgx_encl_grow(encl, SGX_ENCL_CREATED | SGX_ENCL_DEAD);
-	if (ret)
+	va_page = sgx_encl_grow(encl, SGX_ENCL_CREATED | SGX_ENCL_DEAD);
+	if (IS_ERR(va_page)) {
+		ret = PTR_ERR(va_page);
 		goto err_out_unlock;
+	}
 
 	ssaframesize = sgx_calc_ssaframesize(secs->miscselect, secs->xfrm);
 	if (sgx_validate_secs(secs, ssaframesize)) {
 		ret = -EINVAL;
-		goto err_out_unlock;
+		goto err_out_shrink;
 	}
 
 	backing = shmem_file_setup("SGX backing", encl_size + (encl_size >> 5),
 				   VM_NORESERVE);
 	if (IS_ERR(backing)) {
 		ret = PTR_ERR(backing);
-		goto err_out_unlock;
+		goto err_out_shrink;
 	}
 
 	encl->backing = backing;
@@ -337,6 +353,9 @@  static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
 	fput(encl->backing);
 	encl->backing = NULL;
 
+err_out_shrink:
+	sgx_encl_shrink(encl, va_page);
+
 err_out_unlock:
 	mutex_unlock(&encl->lock);
 	return ret;
@@ -496,6 +515,7 @@  static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 {
 	u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
 	struct sgx_encl_page *encl_page;
+	struct sgx_va_page *va_page;
 	int ret;
 
 	if (sgx_validate_secinfo(secinfo))
@@ -508,14 +528,16 @@  static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 
 	mutex_lock(&encl->lock);
 
-	ret = sgx_encl_grow(encl, SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD);
-	if (ret)
+	va_page = sgx_encl_grow(encl, SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD);
+	if (IS_ERR(va_page)) {
+		ret = PTR_ERR(va_page);
 		goto err_out_unlock;
+	}
 
 	encl_page = sgx_encl_page_alloc(encl, addr, prot);
 	if (IS_ERR(encl_page)) {
 		ret = PTR_ERR(encl_page);
-		goto err_out_unlock;
+		goto err_out_shrink;
 	}
 
 	ret = __sgx_encl_add_page(encl, encl_page, data, secinfo, mrmask);
@@ -530,6 +552,9 @@  static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
 			  PFN_DOWN(encl_page->desc));
 	kfree(encl_page);
 
+err_out_shrink:
+	sgx_encl_shrink(encl, va_page);
+
 err_out_unlock:
 	mutex_unlock(&encl->lock);
 	return ret;