diff mbox

[intel-sgx-kernel-dev,v7,6/9] intel_sgx: disallow VMA reconfiguration after EPC pages have been added

Message ID 20161207130045.22615-7-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Jarkko Sakkinen Dec. 7, 2016, 1 p.m. UTC
Do not allow VMA reconfiguration after EPC pages are added because SGX1
permissions are static. The policy might be relaxed with SGX2 (EMODP)
but it is better to start with this because in SGX1 the PTE permissions
and EPCM permissions must be in-sync.

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 drivers/platform/x86/intel_sgx.h       |  3 ++-
 drivers/platform/x86/intel_sgx_ioctl.c | 11 +++++---
 drivers/platform/x86/intel_sgx_util.c  | 13 ++++-----
 drivers/platform/x86/intel_sgx_vma.c   | 49 +++++++++++++++-------------------
 4 files changed, 35 insertions(+), 41 deletions(-)

Comments

Jarkko Sakkinen Dec. 8, 2016, 9:27 a.m. UTC | #1
On Wed, Dec 07, 2016 at 03:00:42PM +0200, Jarkko Sakkinen wrote:
> Do not allow VMA reconfiguration after EPC pages are added because SGX1
> permissions are static. The policy might be easened with SGX2 (EMODP)
> but it is better to start with this because in SGX1 the PTE permissions
> and EPCM permissions must be in-sync.
> 
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>

I discussed this with platform software people.

No matter how you implement your user space you will probably want to
implement a relocator at some point of some sort that runs after EINIT.
And only after relocation you do mprotect calls. That makes sense.

The worst thing that can happen is a #GP due to an EPCM/PTE mismatch, which
isn't really that big of a deal in this scenario.

For vma_close, killing the enclave immediately even when the first VMA
is closed definitely makes sense so that you cannot, for example, try to
subvert regular memory VMAs in-between the enclave VMAs. Thus I'll
update this patch to only harden the vma_close callback.

/Jarkko

> ---
>  drivers/platform/x86/intel_sgx.h       |  3 ++-
>  drivers/platform/x86/intel_sgx_ioctl.c | 11 +++++---
>  drivers/platform/x86/intel_sgx_util.c  | 13 ++++-----
>  drivers/platform/x86/intel_sgx_vma.c   | 49 +++++++++++++++-------------------
>  4 files changed, 35 insertions(+), 41 deletions(-)
> 
> diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
> index 464763d..35c03fc 100644
> --- a/drivers/platform/x86/intel_sgx.h
> +++ b/drivers/platform/x86/intel_sgx.h
> @@ -130,12 +130,13 @@ enum sgx_encl_flags {
>  	SGX_ENCL_DEBUG		= BIT(1),
>  	SGX_ENCL_SECS_EVICTED	= BIT(2),
>  	SGX_ENCL_SUSPEND	= BIT(3),
> +	SGX_ENCL_PAGES_ADDED	= BIT(4),
> +	SGX_ENCL_INVALIDATED	= BIT(5),
>  };
>  
>  struct sgx_encl {
>  	unsigned int flags;
>  	unsigned int secs_child_cnt;
> -	unsigned int vma_cnt;
>  	struct mutex lock;
>  	struct task_struct *owner;
>  	struct mm_struct *mm;
> diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
> index b377200..0c3fd29 100644
> --- a/drivers/platform/x86/intel_sgx_ioctl.c
> +++ b/drivers/platform/x86/intel_sgx_ioctl.c
> @@ -255,7 +255,10 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
>  
>  	mutex_lock(&encl->lock);
>  
> -	if (!encl->vma_cnt || sgx_find_encl(encl->mm, encl_page->addr, &vma))
> +	if (encl->flags & SGX_ENCL_INVALIDATED)
> +		goto out;
> +
> +	if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
>  		goto out;
>  
>  	backing = sgx_get_backing(encl, encl_page);
> @@ -317,7 +320,7 @@ static void sgx_add_page_worker(struct work_struct *work)
>  	do {
>  		schedule();
>  
> -		if (encl->flags & SGX_ENCL_SUSPEND)
> +		if (encl->flags & SGX_ENCL_INVALIDATED)
>  			skip_rest = true;
>  
>  		mutex_lock(&encl->lock);
> @@ -578,7 +581,6 @@ static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
>  		up_read(&current->mm->mmap_sem);
>  		goto out;
>  	}
> -	encl->vma_cnt++;
>  	vma->vm_private_data = encl;
>  	up_read(&current->mm->mmap_sem);
>  
> @@ -682,7 +684,7 @@ static int __encl_add_page(struct sgx_encl *encl,
>  
>  	mutex_lock(&encl->lock);
>  
> -	if (encl->flags & SGX_ENCL_INITIALIZED) {
> +	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_INVALIDATED)) {
>  		ret = -EINVAL;
>  		goto out;
>  	}
> @@ -734,6 +736,7 @@ out:
>  	} else {
>  		ret = encl_rb_insert(&encl->encl_rb, encl_page);
>  		WARN_ON(ret);
> +		encl->flags |= SGX_ENCL_PAGES_ADDED;
>  	}
>  
>  	mutex_unlock(&encl->lock);
> diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
> index 3878d9a..41ccc18 100644
> --- a/drivers/platform/x86/intel_sgx_util.c
> +++ b/drivers/platform/x86/intel_sgx_util.c
> @@ -135,21 +135,18 @@ void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
>  
>  bool sgx_pin_mm(struct sgx_encl *encl)
>  {
> -	if (encl->flags & SGX_ENCL_SUSPEND)
> -		return false;
> -
>  	mutex_lock(&encl->lock);
> -	if (encl->vma_cnt) {
> -		atomic_inc(&encl->mm->mm_count);
> -	} else {
> +	if (encl->flags & SGX_ENCL_INVALIDATED) {
>  		mutex_unlock(&encl->lock);
>  		return false;
>  	}
> +
> +	atomic_inc(&encl->mm->mm_count);
>  	mutex_unlock(&encl->lock);
>  
>  	down_read(&encl->mm->mmap_sem);
>  
> -	if (!encl->vma_cnt) {
> +	if (encl->flags & SGX_ENCL_INVALIDATED) {
>  		sgx_unpin_mm(encl);
>  		return false;
>  	}
> @@ -177,7 +174,7 @@ void sgx_invalidate(struct sgx_encl *encl)
>  			break;
>  	}
>  
> -	encl->vma_cnt = 0;
> +	encl->flags |= SGX_ENCL_INVALIDATED;
>  }
>  
>  int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
> diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
> index 0649978..54690de 100644
> --- a/drivers/platform/x86/intel_sgx_vma.c
> +++ b/drivers/platform/x86/intel_sgx_vma.c
> @@ -70,48 +70,43 @@
>  
>  static void sgx_vma_open(struct vm_area_struct *vma)
>  {
> -	struct sgx_encl *encl;
> +	struct sgx_encl *encl = vma->vm_private_data;
>  
> -	/* Was vm_private_data nullified as a result of the previous fork? */
> -	encl = vma->vm_private_data;
> -	if (!encl)
> -		goto out_fork;
> +	/* When forking for the second time vm_private_data is already set to
> +	 * NULL.
> +	 */
> +	if (!encl) {
> +		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> +		return;
> +	}
>  
> -	/* Was the process forked? mm_struct changes when the process is
> -	 * forked.
> +	/* Invalidate enclave when the process has been forked for the first
> +	 * time or pages have been added because PTEs must bee in sync with
> +	 * the EPCM entries.
>  	 */
>  	mutex_lock(&encl->lock);
> -	if (encl->mm != vma->vm_mm) {
> -		mutex_unlock(&encl->lock);
> -		goto out_fork;
> +	if (encl->mm != vma->vm_mm || (encl->flags & SGX_ENCL_PAGES_ADDED)) {
> +		sgx_invalidate(encl);
> +		vma->vm_private_data = NULL;
>  	}
> -	encl->vma_cnt++;
>  	mutex_unlock(&encl->lock);
>  
> -	kref_get(&encl->refcount);
> -	return;
> -out_fork:
> -	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> -	vma->vm_private_data = NULL;
> +	if (vma->vm_private_data)
> +		kref_get(&encl->refcount);
>  }
>  
>  static void sgx_vma_close(struct vm_area_struct *vma)
>  {
>  	struct sgx_encl *encl = vma->vm_private_data;
>  
> -	/* If process was forked, VMA is still there but
> -	 * vm_private_data is set to NULL.
> +	/* When forking for the second time vm_private_data is already set to
> +	 * NULL.
>  	 */
>  	if (!encl)
>  		return;
>  
>  	mutex_lock(&encl->lock);
> -	encl->vma_cnt--;
> -	vma->vm_private_data = NULL;
> -
> -	sgx_zap_tcs_ptes(encl, vma);
> -	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> -
> +	sgx_invalidate(encl);
>  	mutex_unlock(&encl->lock);
>  
>  	kref_put(&encl->refcount, sgx_encl_release);
> @@ -187,7 +182,7 @@ static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
>  
>  	mutex_lock(&encl->lock);
>  
> -	if (!encl->vma_cnt) {
> +	if (encl->flags & SGX_ENCL_INVALIDATED) {
>  		entry = ERR_PTR(-EFAULT);
>  		goto out;
>  	}
> @@ -268,8 +263,6 @@ static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
>  	list_add_tail(&entry->load_list, &encl->load_list);
>  out:
>  	mutex_unlock(&encl->lock);
> -	if (encl->flags & SGX_ENCL_SUSPEND)
> -		free_flags |= SGX_FREE_SKIP_EREMOVE;
>  	if (epc_page)
>  		sgx_free_page(epc_page, encl, free_flags);
>  	if (secs_epc_page)
> @@ -370,7 +363,7 @@ static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
>  
>  	if (!(encl->flags & SGX_ENCL_DEBUG) ||
>  	    !(encl->flags & SGX_ENCL_INITIALIZED) ||
> -	    (encl->flags & SGX_ENCL_SUSPEND))
> +	    (encl->flags & SGX_ENCL_INVALIDATED))
>  		return -EFAULT;
>  
>  	sgx_dbg(encl, "%s addr=0x%lx, len=%d\n", op_str, addr, len);
> -- 
> 2.9.3
>
Jarkko Sakkinen Dec. 8, 2016, 9:29 a.m. UTC | #2
On Thu, Dec 08, 2016 at 11:27:32AM +0200, Jarkko Sakkinen wrote:
> On Wed, Dec 07, 2016 at 03:00:42PM +0200, Jarkko Sakkinen wrote:
> > Do not allow VMA reconfiguration after EPC pages are added because SGX1
> > permissions are static. The policy might be easened with SGX2 (EMODP)
> > but it is better to start with this because in SGX1 the PTE permissions
> > and EPCM permissions must be in-sync.
> > 
> > Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> 
> I discussed about this with platform software people.
> 
> No matter how you implement your user space you will probably want to
> implement a relocator at some point of some sort that runs after EINIT.
> And only after relocation you do mprotect calls. That makes sense.
> 
> The worst thing that can happen is a #GP due to EPCM/PTE mismatch, which
> isn't really such a big of a deal in this scenario.
> 
> For vma_close killing the enclave immediately even when the first VMA
> is close definitely makes sense so that you cannot for example try to
> subvert regular memory VMAs in-between the enclave VMAs. Thus I'll
> update this patch to only harder the vma_close callback.
                            ~~~~~~
                            harden

/Jarkko
diff mbox

Patch

diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
index 464763d..35c03fc 100644
--- a/drivers/platform/x86/intel_sgx.h
+++ b/drivers/platform/x86/intel_sgx.h
@@ -130,12 +130,13 @@  enum sgx_encl_flags {
 	SGX_ENCL_DEBUG		= BIT(1),
 	SGX_ENCL_SECS_EVICTED	= BIT(2),
 	SGX_ENCL_SUSPEND	= BIT(3),
+	SGX_ENCL_PAGES_ADDED	= BIT(4),
+	SGX_ENCL_INVALIDATED	= BIT(5),
 };
 
 struct sgx_encl {
 	unsigned int flags;
 	unsigned int secs_child_cnt;
-	unsigned int vma_cnt;
 	struct mutex lock;
 	struct task_struct *owner;
 	struct mm_struct *mm;
diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
index b377200..0c3fd29 100644
--- a/drivers/platform/x86/intel_sgx_ioctl.c
+++ b/drivers/platform/x86/intel_sgx_ioctl.c
@@ -255,7 +255,10 @@  static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
 
 	mutex_lock(&encl->lock);
 
-	if (!encl->vma_cnt || sgx_find_encl(encl->mm, encl_page->addr, &vma))
+	if (encl->flags & SGX_ENCL_INVALIDATED)
+		goto out;
+
+	if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
 		goto out;
 
 	backing = sgx_get_backing(encl, encl_page);
@@ -317,7 +320,7 @@  static void sgx_add_page_worker(struct work_struct *work)
 	do {
 		schedule();
 
-		if (encl->flags & SGX_ENCL_SUSPEND)
+		if (encl->flags & SGX_ENCL_INVALIDATED)
 			skip_rest = true;
 
 		mutex_lock(&encl->lock);
@@ -578,7 +581,6 @@  static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
 		up_read(&current->mm->mmap_sem);
 		goto out;
 	}
-	encl->vma_cnt++;
 	vma->vm_private_data = encl;
 	up_read(&current->mm->mmap_sem);
 
@@ -682,7 +684,7 @@  static int __encl_add_page(struct sgx_encl *encl,
 
 	mutex_lock(&encl->lock);
 
-	if (encl->flags & SGX_ENCL_INITIALIZED) {
+	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_INVALIDATED)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -734,6 +736,7 @@  out:
 	} else {
 		ret = encl_rb_insert(&encl->encl_rb, encl_page);
 		WARN_ON(ret);
+		encl->flags |= SGX_ENCL_PAGES_ADDED;
 	}
 
 	mutex_unlock(&encl->lock);
diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
index 3878d9a..41ccc18 100644
--- a/drivers/platform/x86/intel_sgx_util.c
+++ b/drivers/platform/x86/intel_sgx_util.c
@@ -135,21 +135,18 @@  void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
 
 bool sgx_pin_mm(struct sgx_encl *encl)
 {
-	if (encl->flags & SGX_ENCL_SUSPEND)
-		return false;
-
 	mutex_lock(&encl->lock);
-	if (encl->vma_cnt) {
-		atomic_inc(&encl->mm->mm_count);
-	} else {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		mutex_unlock(&encl->lock);
 		return false;
 	}
+
+	atomic_inc(&encl->mm->mm_count);
 	mutex_unlock(&encl->lock);
 
 	down_read(&encl->mm->mmap_sem);
 
-	if (!encl->vma_cnt) {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		sgx_unpin_mm(encl);
 		return false;
 	}
@@ -177,7 +174,7 @@  void sgx_invalidate(struct sgx_encl *encl)
 			break;
 	}
 
-	encl->vma_cnt = 0;
+	encl->flags |= SGX_ENCL_INVALIDATED;
 }
 
 int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
index 0649978..54690de 100644
--- a/drivers/platform/x86/intel_sgx_vma.c
+++ b/drivers/platform/x86/intel_sgx_vma.c
@@ -70,48 +70,43 @@ 
 
 static void sgx_vma_open(struct vm_area_struct *vma)
 {
-	struct sgx_encl *encl;
+	struct sgx_encl *encl = vma->vm_private_data;
 
-	/* Was vm_private_data nullified as a result of the previous fork? */
-	encl = vma->vm_private_data;
-	if (!encl)
-		goto out_fork;
+	/* When forking for the second time vm_private_data is already set to
+	 * NULL.
+	 */
+	if (!encl) {
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+		return;
+	}
 
-	/* Was the process forked? mm_struct changes when the process is
-	 * forked.
+	/* Invalidate enclave when the process has been forked for the first
+	 * time or pages have been added because PTEs must bee in sync with
+	 * the EPCM entries.
 	 */
 	mutex_lock(&encl->lock);
-	if (encl->mm != vma->vm_mm) {
-		mutex_unlock(&encl->lock);
-		goto out_fork;
+	if (encl->mm != vma->vm_mm || (encl->flags & SGX_ENCL_PAGES_ADDED)) {
+		sgx_invalidate(encl);
+		vma->vm_private_data = NULL;
 	}
-	encl->vma_cnt++;
 	mutex_unlock(&encl->lock);
 
-	kref_get(&encl->refcount);
-	return;
-out_fork:
-	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-	vma->vm_private_data = NULL;
+	if (vma->vm_private_data)
+		kref_get(&encl->refcount);
 }
 
 static void sgx_vma_close(struct vm_area_struct *vma)
 {
 	struct sgx_encl *encl = vma->vm_private_data;
 
-	/* If process was forked, VMA is still there but
-	 * vm_private_data is set to NULL.
+	/* When forking for the second time vm_private_data is already set to
+	 * NULL.
 	 */
 	if (!encl)
 		return;
 
 	mutex_lock(&encl->lock);
-	encl->vma_cnt--;
-	vma->vm_private_data = NULL;
-
-	sgx_zap_tcs_ptes(encl, vma);
-	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-
+	sgx_invalidate(encl);
 	mutex_unlock(&encl->lock);
 
 	kref_put(&encl->refcount, sgx_encl_release);
@@ -187,7 +182,7 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 
 	mutex_lock(&encl->lock);
 
-	if (!encl->vma_cnt) {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		entry = ERR_PTR(-EFAULT);
 		goto out;
 	}
@@ -268,8 +263,6 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 	list_add_tail(&entry->load_list, &encl->load_list);
 out:
 	mutex_unlock(&encl->lock);
-	if (encl->flags & SGX_ENCL_SUSPEND)
-		free_flags |= SGX_FREE_SKIP_EREMOVE;
 	if (epc_page)
 		sgx_free_page(epc_page, encl, free_flags);
 	if (secs_epc_page)
@@ -370,7 +363,7 @@  static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
 
 	if (!(encl->flags & SGX_ENCL_DEBUG) ||
 	    !(encl->flags & SGX_ENCL_INITIALIZED) ||
-	    (encl->flags & SGX_ENCL_SUSPEND))
+	    (encl->flags & SGX_ENCL_INVALIDATED))
 		return -EFAULT;
 
 	sgx_dbg(encl, "%s addr=0x%lx, len=%d\n", op_str, addr, len);