[intel-sgx-kernel-dev,v4,5/8] intel_sgx: freeze VMAs after EPC pages have been added

Message ID 20161201205632.8593-6-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Jarkko Sakkinen Dec. 1, 2016, 8:56 p.m. UTC
Add the SGX_ENCL_INVALIDATED flag to signal that the enclave is no longer
usable. Once pages have been added to the enclave it does not make sense
to change the VMA configuration anymore, because page permissions cannot
be changed in the SGX1 architecture. Also, if any of the VMAs is closed,
the whole enclave is invalidated.

This makes sense for the SGX1 architecture, where page permissions cannot
be changed later on. For SGX2 we might be able to ease the requirement
because of EMODP.
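
For background, a generic and simplified sketch (not part of this patch;
the "my_obj" names are made up) of the vm_ops->open()/close() plus kref
pattern that the driver's VMA handling builds on:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_obj {
	struct kref refcount;
	unsigned long flags;		/* e.g. an "invalidated" bit */
};

static void my_obj_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_obj, refcount));
}

/* A new VMA (fork, split) shares vm_private_data; take a reference. */
static void my_vma_open(struct vm_area_struct *vma)
{
	struct my_obj *obj = vma->vm_private_data;

	if (!obj)
		return;			/* mapping already detached */

	kref_get(&obj->refcount);
}

/* The VMA goes away; drop its reference. */
static void my_vma_close(struct vm_area_struct *vma)
{
	struct my_obj *obj = vma->vm_private_data;

	if (!obj)
		return;

	vma->vm_private_data = NULL;
	kref_put(&obj->refcount, my_obj_release);
}

static const struct vm_operations_struct my_vm_ops = {
	.open	= my_vma_open,
	.close	= my_vma_close,
};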

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 drivers/platform/x86/intel_sgx.h       |  5 +++--
 drivers/platform/x86/intel_sgx_ioctl.c |  9 ++++++---
 drivers/platform/x86/intel_sgx_util.c  |  6 +++---
 drivers/platform/x86/intel_sgx_vma.c   | 32 ++++++++++++--------------------
 4 files changed, 24 insertions(+), 28 deletions(-)

Comments

Sean Christopherson Dec. 2, 2016, 7:49 p.m. UTC | #1
Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> wrote:
> b/drivers/platform/x86/intel_sgx_vma.c
> index 0649978..88a316a 100644
> --- a/drivers/platform/x86/intel_sgx_vma.c
> +++ b/drivers/platform/x86/intel_sgx_vma.c
> @@ -72,21 +72,20 @@ static void sgx_vma_open(struct vm_area_struct *vma)  {
>  	struct sgx_encl *encl;
>  
> -	/* Was vm_private_data nullified as a result of the previous fork? */
> +	/* fork after fork */
>  	encl = vma->vm_private_data;
>  	if (!encl)
>  		goto out_fork;
>  
> -	/* Was the process forked? mm_struct changes when the process is
> -	 * forked.
> -	 */
> -	mutex_lock(&encl->lock);
> -	if (encl->mm != vma->vm_mm) {
> -		mutex_unlock(&encl->lock);
> +	/* mm_struct changes when the process is forked */
> +	if (encl->mm != vma->vm_mm)
>  		goto out_fork;
> +
> +	if (test_bit(SGX_ENCL_PAGES_ADDED, &encl->flags)) {
> +		mutex_lock(&encl->lock);
> +		sgx_invalidate(encl);
> +		mutex_unlock(&encl->lock);
>  	}
> -	encl->vma_cnt++;
> -	mutex_unlock(&encl->lock);
>  
>  	kref_get(&encl->refcount);
>  	return;
> @@ -99,21 +98,13 @@ static void sgx_vma_close(struct vm_area_struct *vma)  {
>  	struct sgx_encl *encl = vma->vm_private_data;
>  
> -	/* If process was forked, VMA is still there but
> -	 * vm_private_data is set to NULL.
> -	 */
> +	/* fork */
>  	if (!encl)
>  		return;
>  
>  	mutex_lock(&encl->lock);
> -	encl->vma_cnt--;
> -	vma->vm_private_data = NULL;
> -
> -	sgx_zap_tcs_ptes(encl, vma);
> -	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> -
> +	sgx_invalidate(encl);
>  	mutex_unlock(&encl->lock);
> -
>  	kref_put(&encl->refcount, sgx_encl_release);  }
>  

Invoking sgx_invalidate in sgx_vma_close causes NULL pointer faults in find_vma when sgx_vma_close is called via do_exit->mmput->exit_mmap->remove_vma.  The exit_mmap flow doesn't dismantle mm->mm_rb, it simply nukes the vma structs, i.e. calling find_vma via sgx_vma_close results in walking mm->mm_rb when it contains one or more invalid nodes.
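
To make the failure mode concrete, a simplified, hypothetical illustration
(not actual driver code) of the pattern described above: looking up other
VMAs from a ->close() callback, which exit_mmap() invokes while mm->mm_rb
can no longer be trusted:

#include <linux/mm.h>

static void fragile_vma_close(struct vm_area_struct *vma)
{
	struct vm_area_struct *other;

	/*
	 * Unsafe on the do_exit()->mmput()->exit_mmap()->remove_vma()
	 * path: find_vma() walks mm->mm_rb, which may still reference
	 * VMAs that have already been torn down.
	 */
	other = find_vma(vma->vm_mm, vma->vm_start);
	if (other && other->vm_ops == vma->vm_ops)
		zap_vma_ptes(other, other->vm_start,
			     other->vm_end - other->vm_start);
}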

Below is my suggested version of sgx_vma_open/sgx_vma_close to eliminate the calls to sgx_invalidate.  I think it makes sense to remove sgx_invalidate from sgx_vma_open as well as sgx_vma_close to keep "invalidate" behavior consistent between the two functions, and within sgx_vma_open itself, i.e. jump to out_fork like the other failing cases.  This also saves us from grabbing an unnecessary reference to the encl and results in an early return in the subsequent sgx_vma_close since vma->vm_private_data is null.

static void sgx_vma_open(struct vm_area_struct *vma)
{
	struct sgx_encl *encl;

	/* fork after fork */
	encl = vma->vm_private_data;
	if (!encl)
		goto out_fork;

	/* mm_struct changes when the process is forked */
	if (encl->mm != vma->vm_mm)
		goto out_fork;

	if (test_bit(SGX_ENCL_PAGES_ADDED, &encl->flags)) {
		mutex_lock(&encl->lock);
		encl->flags |= SGX_ENCL_INVALIDATED;
		mutex_unlock(&encl->lock);
		goto out_fork;
	}

	kref_get(&encl->refcount);
	return;
out_fork:
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	vma->vm_private_data = NULL;
}

static void sgx_vma_close(struct vm_area_struct *vma)
{
	struct sgx_encl *encl = vma->vm_private_data;

	/* fork */
	if (!encl)
		return;

	mutex_lock(&encl->lock);
	encl->flags |= SGX_ENCL_INVALIDATED;
	mutex_unlock(&encl->lock);
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
	vma->vm_private_data = NULL;
	kref_put(&encl->refcount, sgx_encl_release);
}
Jarkko Sakkinen Dec. 2, 2016, 8:56 p.m. UTC | #2
On Fri, Dec 02, 2016 at 07:49:17PM +0000, Christopherson, Sean J wrote:
> Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> wrote:
> > b/drivers/platform/x86/intel_sgx_vma.c
> > index 0649978..88a316a 100644
> > --- a/drivers/platform/x86/intel_sgx_vma.c
> > +++ b/drivers/platform/x86/intel_sgx_vma.c
> > @@ -72,21 +72,20 @@ static void sgx_vma_open(struct vm_area_struct *vma)  {
> >  	struct sgx_encl *encl;
> >  
> > -	/* Was vm_private_data nullified as a result of the previous fork? */
> > +	/* fork after fork */
> >  	encl = vma->vm_private_data;
> >  	if (!encl)
> >  		goto out_fork;
> >  
> > -	/* Was the process forked? mm_struct changes when the process is
> > -	 * forked.
> > -	 */
> > -	mutex_lock(&encl->lock);
> > -	if (encl->mm != vma->vm_mm) {
> > -		mutex_unlock(&encl->lock);
> > +	/* mm_struct changes when the process is forked */
> > +	if (encl->mm != vma->vm_mm)
> >  		goto out_fork;
> > +
> > +	if (test_bit(SGX_ENCL_PAGES_ADDED, &encl->flags)) {
> > +		mutex_lock(&encl->lock);
> > +		sgx_invalidate(encl);
> > +		mutex_unlock(&encl->lock);
> >  	}
> > -	encl->vma_cnt++;
> > -	mutex_unlock(&encl->lock);
> >  
> >  	kref_get(&encl->refcount);
> >  	return;
> > @@ -99,21 +98,13 @@ static void sgx_vma_close(struct vm_area_struct *vma)  {
> >  	struct sgx_encl *encl = vma->vm_private_data;
> >  
> > -	/* If process was forked, VMA is still there but
> > -	 * vm_private_data is set to NULL.
> > -	 */
> > +	/* fork */
> >  	if (!encl)
> >  		return;
> >  
> >  	mutex_lock(&encl->lock);
> > -	encl->vma_cnt--;
> > -	vma->vm_private_data = NULL;
> > -
> > -	sgx_zap_tcs_ptes(encl, vma);
> > -	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> > -
> > +	sgx_invalidate(encl);
> >  	mutex_unlock(&encl->lock);
> > -
> >  	kref_put(&encl->refcount, sgx_encl_release);  }
> >  
> 
> Invoking sgx_invalidate in sgx_vma_close causes NULL pointer faults in find_vma when sgx_vma_close is called via do_exit->mmput->exit_mmap->remove_vma.  The exit_mmap flow doesn't dismantle mm->mm_rb, it simply nukes the vma structs, i.e. calling find_vma via sgx_vma_close results in walking mm->mm_rb when it contains one or more invalid nodes.
> 
> Below is my suggested version of sgx_vma_open/sgx_vma_close to
> eliminate the calls to sgx_invalidate.  I think it makes sense to
> remove sgx_invalidate from sgx_vma_open as well as sgx_vma_close to
> keep "invalidate" behavior consistent between the two functions, and
> within sgx_vma_open itself, i.e. jump to out_fork like the other
> failing cases.  This also saves us from grabbing an unnecessary
> reference to the encl and results in an early return in the subsequent
> sgx_vma_close since vma->vm_private_data is null.

Is the difference to the above that you set the flag instead of calling
sgx_invalidate()? Are there other differences?

/Jarkko

> static void sgx_vma_open(struct vm_area_struct *vma)
> {
> 	struct sgx_encl *encl;
> 
> 	/* fork after fork */
> 	encl = vma->vm_private_data;
> 	if (!encl)
> 		goto out_fork;
> 
> 	/* mm_struct changes when the process is forked */
> 	if (encl->mm != vma->vm_mm)
> 		goto out_fork;
> 
> 	if (test_bit(SGX_ENCL_PAGES_ADDED, &encl->flags)) {
> 		mutex_lock(&encl->lock);
> 		encl->flags |= SGX_ENCL_INVALIDATED;
> 		mutex_unlock(&encl->lock);
> 		goto out_fork;
> 	}
> 
> 	kref_get(&encl->refcount);
> 	return;
> out_fork:
> 	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> 	vma->vm_private_data = NULL;
> }
> 
> static void sgx_vma_close(struct vm_area_struct *vma)
> {
> 	struct sgx_encl *encl = vma->vm_private_data;
> 
> 	/* fork */
> 	if (!encl)
> 		return;
> 
> 	mutex_lock(&encl->lock);
> 	encl->flags |= SGX_ENCL_INVALIDATED;
> 	mutex_unlock(&encl->lock);
> 	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> 	vma->vm_private_data = NULL;
> 	kref_put(&encl->refcount, sgx_encl_release);
> }

Patch

diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
index 464763d..4430012 100644
--- a/drivers/platform/x86/intel_sgx.h
+++ b/drivers/platform/x86/intel_sgx.h
@@ -130,12 +130,13 @@  enum sgx_encl_flags {
 	SGX_ENCL_DEBUG		= BIT(1),
 	SGX_ENCL_SECS_EVICTED	= BIT(2),
 	SGX_ENCL_SUSPEND	= BIT(3),
+	SGX_ENCL_PAGES_ADDED	= BIT(4),
+	SGX_ENCL_INVALIDATED	= BIT(5),
 };
 
 struct sgx_encl {
-	unsigned int flags;
+	unsigned long flags;
 	unsigned int secs_child_cnt;
-	unsigned int vma_cnt;
 	struct mutex lock;
 	struct task_struct *owner;
 	struct mm_struct *mm;
diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
index b377200..5a68dbe 100644
--- a/drivers/platform/x86/intel_sgx_ioctl.c
+++ b/drivers/platform/x86/intel_sgx_ioctl.c
@@ -255,7 +255,10 @@  static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
 
 	mutex_lock(&encl->lock);
 
-	if (!encl->vma_cnt || sgx_find_encl(encl->mm, encl_page->addr, &vma))
+	if (encl->flags & SGX_ENCL_INVALIDATED)
+		goto out;
+
+	if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
 		goto out;
 
 	backing = sgx_get_backing(encl, encl_page);
@@ -578,7 +581,6 @@  static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
 		up_read(&current->mm->mmap_sem);
 		goto out;
 	}
-	encl->vma_cnt++;
 	vma->vm_private_data = encl;
 	up_read(&current->mm->mmap_sem);
 
@@ -682,7 +684,7 @@  static int __encl_add_page(struct sgx_encl *encl,
 
 	mutex_lock(&encl->lock);
 
-	if (encl->flags & SGX_ENCL_INITIALIZED) {
+	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_INVALIDATED)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -734,6 +736,7 @@  out:
 	} else {
 		ret = encl_rb_insert(&encl->encl_rb, encl_page);
 		WARN_ON(ret);
+		encl->flags |= SGX_ENCL_PAGES_ADDED;
 	}
 
 	mutex_unlock(&encl->lock);
diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
index 3878d9a..8ba2abd 100644
--- a/drivers/platform/x86/intel_sgx_util.c
+++ b/drivers/platform/x86/intel_sgx_util.c
@@ -139,7 +139,7 @@  bool sgx_pin_mm(struct sgx_encl *encl)
 		return false;
 
 	mutex_lock(&encl->lock);
-	if (encl->vma_cnt) {
+	if (!(encl->flags & SGX_ENCL_INVALIDATED)) {
 		atomic_inc(&encl->mm->mm_count);
 	} else {
 		mutex_unlock(&encl->lock);
@@ -149,7 +149,7 @@  bool sgx_pin_mm(struct sgx_encl *encl)
 
 	down_read(&encl->mm->mmap_sem);
 
-	if (!encl->vma_cnt) {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		sgx_unpin_mm(encl);
 		return false;
 	}
@@ -177,7 +177,7 @@  void sgx_invalidate(struct sgx_encl *encl)
 			break;
 	}
 
-	encl->vma_cnt = 0;
+	encl->flags |= SGX_ENCL_INVALIDATED;
 }
 
 int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
index 0649978..88a316a 100644
--- a/drivers/platform/x86/intel_sgx_vma.c
+++ b/drivers/platform/x86/intel_sgx_vma.c
@@ -72,21 +72,20 @@  static void sgx_vma_open(struct vm_area_struct *vma)
 {
 	struct sgx_encl *encl;
 
-	/* Was vm_private_data nullified as a result of the previous fork? */
+	/* fork after fork */
 	encl = vma->vm_private_data;
 	if (!encl)
 		goto out_fork;
 
-	/* Was the process forked? mm_struct changes when the process is
-	 * forked.
-	 */
-	mutex_lock(&encl->lock);
-	if (encl->mm != vma->vm_mm) {
-		mutex_unlock(&encl->lock);
+	/* mm_struct changes when the process is forked */
+	if (encl->mm != vma->vm_mm)
 		goto out_fork;
+
+	if (test_bit(SGX_ENCL_PAGES_ADDED, &encl->flags)) {
+		mutex_lock(&encl->lock);
+		sgx_invalidate(encl);
+		mutex_unlock(&encl->lock);
 	}
-	encl->vma_cnt++;
-	mutex_unlock(&encl->lock);
 
 	kref_get(&encl->refcount);
 	return;
@@ -99,21 +98,13 @@  static void sgx_vma_close(struct vm_area_struct *vma)
 {
 	struct sgx_encl *encl = vma->vm_private_data;
 
-	/* If process was forked, VMA is still there but
-	 * vm_private_data is set to NULL.
-	 */
+	/* fork */
 	if (!encl)
 		return;
 
 	mutex_lock(&encl->lock);
-	encl->vma_cnt--;
-	vma->vm_private_data = NULL;
-
-	sgx_zap_tcs_ptes(encl, vma);
-	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-
+	sgx_invalidate(encl);
 	mutex_unlock(&encl->lock);
-
 	kref_put(&encl->refcount, sgx_encl_release);
 }
 
@@ -187,7 +178,7 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 
 	mutex_lock(&encl->lock);
 
-	if (!encl->vma_cnt) {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		entry = ERR_PTR(-EFAULT);
 		goto out;
 	}
@@ -370,6 +361,7 @@  static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
 
 	if (!(encl->flags & SGX_ENCL_DEBUG) ||
 	    !(encl->flags & SGX_ENCL_INITIALIZED) ||
+	    (encl->flags & SGX_ENCL_INVALIDATED) ||
 	    (encl->flags & SGX_ENCL_SUSPEND))
 		return -EFAULT;