
[intel-sgx-kernel-dev,5/6] intel_sgx: Delay VA slot allocation until EWB

Message ID: 1482437735-4722-6-git-send-email-sean.j.christopherson@intel.com (mailing list archive)
State: New, archived

Commit Message

Sean Christopherson Dec. 22, 2016, 8:15 p.m. UTC
Reserve enough VA slots/pages at enclave creation, but delay actual
VA slot allocation until EWB.  Track the number of EPC and VA pages
that have been allocated and use those numbers to determine whether
or not enough VA slots have been reserved.
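
For reference, "enough VA slots" boils down to a counter comparison.  A
minimal sketch of that invariant follows; the helper name is made up for
illustration, and SGX_VA_SLOT_COUNT is assumed to be PAGE_SIZE / 8, i.e.
512 eight-byte slots per 4 KiB Version Array page:

/*
 * Illustrative sketch only, not driver code.  Each VA page holds
 * SGX_VA_SLOT_COUNT version slots, so one VA page covers that many
 * enclave pages at EWB time.
 */
#define SGX_VA_SLOT_COUNT 512

static int sgx_need_new_va_page(unsigned int encl_page_cnt,
				unsigned int va_page_cnt)
{
	/* Regular enclave pages plus the SECS page must never outnumber
	 * the reserved slots.
	 */
	return encl_page_cnt > va_page_cnt * SGX_VA_SLOT_COUNT;
}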

Rename sgx_init_page to sgx_alloc_va_page and check if we need a new
VA page outside of the function.  This allows __encl_add_page to do
a single mutex_lock for its fast path; the lock cannot be held while
allocating the VA page as allocating an EPC page may cause the thread
to yield.  Alternatively, atomics could be used for the counters, but
that is kludgier since we need to acquire encl->lock anyway.
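
Condensed, the resulting add-page flow looks roughly like the sketch
below.  The wrapper name is hypothetical and error handling is trimmed;
in the real code the fast path simply continues under encl->lock instead
of returning:

/* Sketch of the fast/slow path split, not the actual __encl_add_page(). */
static int sgx_encl_grow_page_cnt(struct sgx_encl *encl)
{
	int ret = 0;

	mutex_lock(&encl->lock);
	encl->encl_page_cnt++;

	if (encl->encl_page_cnt > encl->va_page_cnt * SGX_VA_SLOT_COUNT) {
		/*
		 * Slow path: a new VA page is needed.  sgx_alloc_va_page()
		 * allocates an EPC page and may yield, so encl->lock cannot
		 * be held across the call.
		 */
		mutex_unlock(&encl->lock);
		ret = sgx_alloc_va_page(encl);
		mutex_lock(&encl->lock);
	}

	mutex_unlock(&encl->lock);
	return ret;
}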

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 drivers/platform/x86/intel_sgx.h            |  12 +--
 drivers/platform/x86/intel_sgx_ioctl.c      | 109 +++++++++++++---------------
 drivers/platform/x86/intel_sgx_page_cache.c |  44 ++++++++++-
 drivers/platform/x86/intel_sgx_vma.c        |   8 +-
 4 files changed, 99 insertions(+), 74 deletions(-)

Comments

Jarkko Sakkinen Dec. 29, 2016, 1:04 p.m. UTC | #1
On Thu, Dec 22, 2016 at 12:15:34PM -0800, Sean Christopherson wrote:
> Reserve enough VA slots/pages at enclave creation, but delay actual
> VA slot allocation until EWB.  Track the number of EPC and VA pages
> that have been allocated and use those numbers to determine whether
> or not enough VA slots have been reserved.
> 
> Rename sgx_init_page to sgx_alloc_va_page and check if we need a new
> VA page outside of the function.  This allows __encl_add_page to do
> a single mutex_lock for its fast path; the lock cannot be held while
> allocating the VA page as allocating an EPC page may cause the thread
> to yield.  Alternatively, atomics could be used for the counters, but
> that is kludgier since we need to acquire encl->lock anyways.
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

The problem with this patch is that it is oversized relative to its goal.
At minimum you could skip moving the inline function. Put that move into
its own patch if you really want to do it. There's no strict rule on when
such extra steps should be split into their own patches, but this is a
fairly large code change even without this extra step.

/Jarkko


Patch

diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
index fb5a75a..23cfd63 100644
--- a/drivers/platform/x86/intel_sgx.h
+++ b/drivers/platform/x86/intel_sgx.h
@@ -87,16 +87,6 @@  struct sgx_va_page {
 	struct list_head list;
 };
 
-static inline unsigned int sgx_alloc_va_slot(struct sgx_va_page *page)
-{
-	int slot = find_first_zero_bit(page->slots, SGX_VA_SLOT_COUNT);
-
-	if (slot < SGX_VA_SLOT_COUNT)
-		set_bit(slot, page->slots);
-
-	return slot << 3;
-}
-
 static inline void sgx_free_va_slot(struct sgx_va_page *page,
 				    unsigned int offset)
 {
@@ -136,6 +126,8 @@  enum sgx_encl_flags {
 struct sgx_encl {
 	unsigned int flags;
 	unsigned int secs_child_cnt;
+	unsigned int encl_page_cnt;
+	unsigned int va_page_cnt;
 	struct mutex lock;
 	struct task_struct *owner;
 	struct mm_struct *mm;
diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
index b78c552..a5849c6 100644
--- a/drivers/platform/x86/intel_sgx_ioctl.c
+++ b/drivers/platform/x86/intel_sgx_ioctl.c
@@ -385,65 +385,50 @@  static int sgx_validate_secs(const struct sgx_secs *secs)
 	return 0;
 }
 
-static int sgx_init_page(struct sgx_encl *encl,
-			 struct sgx_encl_page *entry,
-			 unsigned long addr)
+static int sgx_alloc_va_page(struct sgx_encl *encl)
 {
 	struct sgx_va_page *va_page;
 	struct sgx_epc_page *epc_page = NULL;
-	unsigned int va_offset = PAGE_SIZE;
 	void *vaddr;
 	int ret = 0;
 
-	list_for_each_entry(va_page, &encl->va_pages, list) {
-		va_offset = sgx_alloc_va_slot(va_page);
-		if (va_offset < PAGE_SIZE)
-			break;
-	}
-
-	if (va_offset == PAGE_SIZE) {
-		va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
-		if (!va_page)
-			return -ENOMEM;
-
-		epc_page = sgx_alloc_page(encl->tgid_ctx, 0);
-		if (IS_ERR(epc_page)) {
-			kfree(va_page);
-			return PTR_ERR(epc_page);
-		}
-
-		vaddr = sgx_get_epc_page(epc_page);
-		if (!vaddr) {
-			sgx_warn(encl, "kmap of a new VA page failed %d\n",
-				 ret);
-			sgx_free_page(epc_page, encl,
-					  SGX_FREE_SKIP_EREMOVE);
-			kfree(va_page);
-			return -EFAULT;
-		}
-
-		ret = __epa(vaddr);
-		sgx_put_epc_page(vaddr);
+	va_page = kzalloc(sizeof(*va_page), GFP_KERNEL);
+	if (!va_page)
+		return -ENOMEM;
 
-		if (ret) {
-			sgx_warn(encl, "EPA returned %d\n", ret);
-			sgx_free_page(epc_page, encl,
-					  SGX_FREE_SKIP_EREMOVE);
-			kfree(va_page);
-			return -EFAULT;
-		}
+	epc_page = sgx_alloc_page(encl->tgid_ctx, 0);
+	if (IS_ERR(epc_page)) {
+		kfree(va_page);
+		return PTR_ERR(epc_page);
+	}
+
+	vaddr = sgx_get_epc_page(epc_page);
+	if (!vaddr) {
+		sgx_warn(encl, "kmap of a new VA page failed %d\n",
+				ret);
+		sgx_free_page(epc_page, encl,
+					SGX_FREE_SKIP_EREMOVE);
+		kfree(va_page);
+		return -EFAULT;
+	}
 
-		va_page->epc_page = epc_page;
-		va_offset = sgx_alloc_va_slot(va_page);
+	ret = __epa(vaddr);
+	sgx_put_epc_page(vaddr);
 
-		mutex_lock(&encl->lock);
-		list_add(&va_page->list, &encl->va_pages);
-		mutex_unlock(&encl->lock);
+	if (ret) {
+		sgx_warn(encl, "EPA returned %d\n", ret);
+		sgx_free_page(epc_page, encl,
+					SGX_FREE_SKIP_EREMOVE);
+		kfree(va_page);
+		return -EFAULT;
 	}
 
-	entry->va_page = va_page;
-	entry->va_offset = va_offset;
-	entry->addr = addr;
+	va_page->epc_page = epc_page;
+
+	mutex_lock(&encl->lock);
+	encl->va_page_cnt++;
+	list_add(&va_page->list, &encl->va_pages);
+	mutex_unlock(&encl->lock);
 
 	return 0;
 }
@@ -537,8 +522,10 @@  static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
 	if (ret)
 		goto out;
 
-	ret = sgx_init_page(encl, &encl->secs_page,
-			    encl->base + encl->size);
+	encl->encl_page_cnt++;
+	encl->secs_page.addr = encl->base + encl->size;
+
+	ret = sgx_alloc_va_page(encl);
 	if (ret)
 		goto out;
 
@@ -677,14 +664,21 @@  static int __encl_add_page(struct sgx_encl *encl,
 		}
 	}
 
-	ret = sgx_init_page(encl, encl_page, addp->addr);
-	if (ret) {
-		__free_page(tmp_page);
-		return -EINVAL;
-	}
-
 	mutex_lock(&encl->lock);
 
+	encl_page->addr = addp->addr;
+	encl->encl_page_cnt++;
+
+	if (encl->encl_page_cnt > (encl->va_page_cnt * SGX_VA_SLOT_COUNT)) {
+		/* slow path, new VA page needed */
+		mutex_unlock(&encl->lock);
+		ret = sgx_alloc_va_page(encl);
+		mutex_lock(&encl->lock);
+		if (ret) {
+			goto out;
+		}
+	}
+
 	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_DEAD)) {
 		ret = -EINVAL;
 		goto out;
@@ -739,8 +733,7 @@  static int __encl_add_page(struct sgx_encl *encl,
 
 	if (ret) {
 		kfree(req);
-		sgx_free_va_slot(encl_page->va_page,
-				 encl_page->va_offset);
+		encl->encl_page_cnt--;
 	}
 
 	mutex_unlock(&encl->lock);
diff --git a/drivers/platform/x86/intel_sgx_page_cache.c b/drivers/platform/x86/intel_sgx_page_cache.c
index 45964b4..e5965b3 100644
--- a/drivers/platform/x86/intel_sgx_page_cache.c
+++ b/drivers/platform/x86/intel_sgx_page_cache.c
@@ -232,6 +232,30 @@  static void sgx_etrack(struct sgx_epc_page *epc_page)
 	sgx_put_epc_page(epc);
 }
 
+/**
+ * sgx_alloc_va_slot() - allocate VA slot from a VA page
+ *
+ * @encl:        Enclave to allocate from
+ * @va_page:     Pointer to a VA page pointer
+ *
+ * Returns a free VA offset and sets va_page to the corresponding VA page.
+ * If there are no free slots, PAGE_SIZE is returned.
+ */
+static inline unsigned int sgx_alloc_va_slot(struct sgx_encl *encl,
+					     struct sgx_va_page **va_page)
+{
+	unsigned int slot = SGX_VA_SLOT_COUNT;
+	list_for_each_entry((*va_page), &encl->va_pages, list) {
+		slot = find_first_zero_bit((*va_page)->slots, SGX_VA_SLOT_COUNT);
+		if (slot < SGX_VA_SLOT_COUNT) {
+			set_bit(slot, (*va_page)->slots);
+			break;
+		}
+	}
+
+	return (slot << 3);
+}
+
 static void sgx_free_encl_page(struct sgx_encl_page *encl_page,
 			       struct sgx_encl *encl,
 			       unsigned int flags)
@@ -245,9 +269,11 @@  static int __sgx_ewb(struct sgx_encl *encl,
 		     struct sgx_encl_page *encl_page)
 {
 	struct sgx_page_info pginfo;
+	struct sgx_va_page *va_page;
 	struct page *backing;
 	void *epc;
 	void *va;
+	unsigned int va_offset;
 	int ret;
 
 	backing = sgx_get_backing(encl, encl_page);
@@ -258,19 +284,33 @@  static int __sgx_ewb(struct sgx_encl *encl,
 		return ret;
 	}
 
+	va_offset = sgx_alloc_va_slot(encl, &va_page);
+	if (va_offset == PAGE_SIZE) {
+		sgx_put_backing(backing, true);
+		return -ENOMEM;
+	}
+
 	epc = sgx_get_epc_page(encl_page->epc_page);
-	va = sgx_get_epc_page(encl_page->va_page->epc_page);
+	va = sgx_get_epc_page(va_page->epc_page);
 
 	pginfo.srcpge = (unsigned long)kmap_atomic(backing);
 	pginfo.pcmd = (unsigned long)&encl_page->pcmd;
 	pginfo.linaddr = 0;
 	pginfo.secs = 0;
 	ret = __ewb(&pginfo, epc,
-		    (void *)((unsigned long)va + encl_page->va_offset));
+		    (void *)((unsigned long)va + va_offset));
 	kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
 
 	sgx_put_epc_page(va);
 	sgx_put_epc_page(epc);
+
+	if (ret == SGX_SUCCESS) {
+		encl_page->va_page = va_page;
+		encl_page->va_offset = va_offset;
+	} else {
+		sgx_free_va_slot(va_page, va_offset);
+	}
+
 	sgx_put_backing(backing, true);
 
 	return ret;
diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
index db4cdcb..fd5536c 100644
--- a/drivers/platform/x86/intel_sgx_vma.c
+++ b/drivers/platform/x86/intel_sgx_vma.c
@@ -149,6 +149,10 @@  static int do_eldu(struct sgx_encl *encl,
 	if (ret)
 		return -EFAULT;
 
+	sgx_free_va_slot(encl_page->va_page, encl_page->va_offset);
+
+	encl_page->epc_page = epc_page;
+
 	return 0;
 }
 
@@ -229,8 +233,6 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 		if (rc)
 			goto out;
 
-		encl->secs_page.epc_page = secs_epc_page;
-
 		/* Do not free */
 		secs_epc_page = NULL;
 	}
@@ -256,8 +258,6 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 
 	encl->secs_child_cnt++;
 
-	entry->epc_page = epc_page;
-
 	if (reserve)
 		entry->flags |= SGX_ENCL_PAGE_RESERVED;