[intel-sgx-kernel-dev,v6,6/8] intel_sgx: disallow VMA reconfiguration after EPC pages have been added
diff mbox

Message ID 20161204184044.21031-7-jarkko.sakkinen@linux.intel.com
State New
Headers show

Commit Message

Jarkko Sakkinen Dec. 4, 2016, 6:40 p.m. UTC
Do not allow VMA reconfiguration after EPC pages are added because SGX1
permissions are static. The policy might be eased with SGX2 (EMODP)
but it is better to start with this because in SGX1 the PTE permissions
and EPCM permissions must be in-sync.

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 drivers/platform/x86/intel_sgx.h       |  3 ++-
 drivers/platform/x86/intel_sgx_ioctl.c | 11 +++++---
 drivers/platform/x86/intel_sgx_util.c  | 13 ++++-----
 drivers/platform/x86/intel_sgx_vma.c   | 49 +++++++++++++++-------------------
 4 files changed, 36 insertions(+), 40 deletions(-)

Comments

Jarkko Sakkinen Dec. 7, 2016, 8:31 a.m. UTC | #1
On Sun, Dec 04, 2016 at 08:40:42PM +0200, Jarkko Sakkinen wrote:
> Do not allow VMA reconfiguration after EPC pages are added because SGX1
> permissions are static. The policy might be easened with SGX2 (EMODP)
> but it is better to start with this because in SGX1 the PTE permissions
> and EPCM permissions must be in-sync.
> 
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> ---
>  drivers/platform/x86/intel_sgx.h       |  3 ++-
>  drivers/platform/x86/intel_sgx_ioctl.c | 11 +++++---
>  drivers/platform/x86/intel_sgx_util.c  | 13 ++++-----
>  drivers/platform/x86/intel_sgx_vma.c   | 49 +++++++++++++++-------------------
>  4 files changed, 36 insertions(+), 40 deletions(-)
> 
> diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
> index 464763d..35c03fc 100644
> --- a/drivers/platform/x86/intel_sgx.h
> +++ b/drivers/platform/x86/intel_sgx.h
> @@ -130,12 +130,13 @@ enum sgx_encl_flags {
>  	SGX_ENCL_DEBUG		= BIT(1),
>  	SGX_ENCL_SECS_EVICTED	= BIT(2),
>  	SGX_ENCL_SUSPEND	= BIT(3),
> +	SGX_ENCL_PAGES_ADDED	= BIT(4),
> +	SGX_ENCL_INVALIDATED	= BIT(5),
>  };
>  
>  struct sgx_encl {
>  	unsigned int flags;
>  	unsigned int secs_child_cnt;
> -	unsigned int vma_cnt;
>  	struct mutex lock;
>  	struct task_struct *owner;
>  	struct mm_struct *mm;
> diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
> index b377200..0c3fd29 100644
> --- a/drivers/platform/x86/intel_sgx_ioctl.c
> +++ b/drivers/platform/x86/intel_sgx_ioctl.c
> @@ -255,7 +255,10 @@ static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
>  
>  	mutex_lock(&encl->lock);
>  
> -	if (!encl->vma_cnt || sgx_find_encl(encl->mm, encl_page->addr, &vma))
> +	if (encl->flags & SGX_ENCL_INVALIDATED)
> +		goto out;
> +
> +	if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
>  		goto out;
>  
>  	backing = sgx_get_backing(encl, encl_page);
> @@ -317,7 +320,7 @@ static void sgx_add_page_worker(struct work_struct *work)
>  	do {
>  		schedule();
>  
> -		if (encl->flags & SGX_ENCL_SUSPEND)
> +		if (encl->flags & SGX_ENCL_INVALIDATED)
>  			skip_rest = true;
>  
>  		mutex_lock(&encl->lock);
> @@ -578,7 +581,6 @@ static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
>  		up_read(&current->mm->mmap_sem);
>  		goto out;
>  	}
> -	encl->vma_cnt++;
>  	vma->vm_private_data = encl;
>  	up_read(&current->mm->mmap_sem);
>  
> @@ -682,7 +684,7 @@ static int __encl_add_page(struct sgx_encl *encl,
>  
>  	mutex_lock(&encl->lock);
>  
> -	if (encl->flags & SGX_ENCL_INITIALIZED) {
> +	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_INVALIDATED)) {
>  		ret = -EINVAL;
>  		goto out;
>  	}
> @@ -734,6 +736,7 @@ out:
>  	} else {
>  		ret = encl_rb_insert(&encl->encl_rb, encl_page);
>  		WARN_ON(ret);
> +		encl->flags |= SGX_ENCL_PAGES_ADDED;
>  	}
>  
>  	mutex_unlock(&encl->lock);
> diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
> index 3878d9a..41ccc18 100644
> --- a/drivers/platform/x86/intel_sgx_util.c
> +++ b/drivers/platform/x86/intel_sgx_util.c
> @@ -135,21 +135,18 @@ void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
>  
>  bool sgx_pin_mm(struct sgx_encl *encl)
>  {
> -	if (encl->flags & SGX_ENCL_SUSPEND)
> -		return false;
> -
>  	mutex_lock(&encl->lock);
> -	if (encl->vma_cnt) {
> -		atomic_inc(&encl->mm->mm_count);
> -	} else {
> +	if (encl->flags & SGX_ENCL_INVALIDATED) {
>  		mutex_unlock(&encl->lock);
>  		return false;
>  	}
> +
> +	atomic_inc(&encl->mm->mm_count);
>  	mutex_unlock(&encl->lock);
>  
>  	down_read(&encl->mm->mmap_sem);
>  
> -	if (!encl->vma_cnt) {
> +	if (encl->flags & SGX_ENCL_INVALIDATED) {
>  		sgx_unpin_mm(encl);
>  		return false;
>  	}
> @@ -177,7 +174,7 @@ void sgx_invalidate(struct sgx_encl *encl)
>  			break;
>  	}
>  
> -	encl->vma_cnt = 0;
> +	encl->flags |= SGX_ENCL_INVALIDATED;
>  }
>  
>  int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
> diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
> index 0649978..4515cc3c 100644
> --- a/drivers/platform/x86/intel_sgx_vma.c
> +++ b/drivers/platform/x86/intel_sgx_vma.c
> @@ -70,48 +70,45 @@
>  
>  static void sgx_vma_open(struct vm_area_struct *vma)
>  {
> -	struct sgx_encl *encl;
> +	struct sgx_encl *encl = vma->vm_private_data;
>  
> -	/* Was vm_private_data nullified as a result of the previous fork? */
> -	encl = vma->vm_private_data;
> -	if (!encl)
> -		goto out_fork;
> +	/* When forking for the second time vm_private_data is already set to
> +	 * NULL.
> +	 */
> +	if (!encl) {
> +		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> +		return;
> +	}

Here calling zap_vma_ptes() is OK.

>  
> -	/* Was the process forked? mm_struct changes when the process is
> -	 * forked.
> +	/* Invalidate enclave when the process has been forked for the first
> +	 * time or pages have been added because PTEs must bee in sync with
> +	 * the EPCM entries.
>  	 */
>  	mutex_lock(&encl->lock);
> -	if (encl->mm != vma->vm_mm) {
> -		mutex_unlock(&encl->lock);
> -		goto out_fork;
> +	if (encl->mm != vma->vm_mm || (encl->flags & SGX_ENCL_PAGES_ADDED)) {
> +		encl->flags |= SGX_ENCL_INVALIDATED;
> +		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> +		vma->vm_private_data = NULL;

I'll change this back to using sgx_invalidate() as I had in earlier
patch version. It was a wrong choice to convert it to this. If the
enclave is killed, we want to first destroy TCS pages to prevent new
hardware threads entering and we want to do it for *all* VMAs when the
enclave becomes invalidated.

We do not want to start destroying regular pages before all new HW
threads have been blocked. This is just a more sensible and stable way
to roll back.

/Jarkko

Patch
diff mbox

diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
index 464763d..35c03fc 100644
--- a/drivers/platform/x86/intel_sgx.h
+++ b/drivers/platform/x86/intel_sgx.h
@@ -130,12 +130,13 @@  enum sgx_encl_flags {
 	SGX_ENCL_DEBUG		= BIT(1),
 	SGX_ENCL_SECS_EVICTED	= BIT(2),
 	SGX_ENCL_SUSPEND	= BIT(3),
+	SGX_ENCL_PAGES_ADDED	= BIT(4),
+	SGX_ENCL_INVALIDATED	= BIT(5),
 };
 
 struct sgx_encl {
 	unsigned int flags;
 	unsigned int secs_child_cnt;
-	unsigned int vma_cnt;
 	struct mutex lock;
 	struct task_struct *owner;
 	struct mm_struct *mm;
diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
index b377200..0c3fd29 100644
--- a/drivers/platform/x86/intel_sgx_ioctl.c
+++ b/drivers/platform/x86/intel_sgx_ioctl.c
@@ -255,7 +255,10 @@  static bool sgx_process_add_page_req(struct sgx_add_page_req *req)
 
 	mutex_lock(&encl->lock);
 
-	if (!encl->vma_cnt || sgx_find_encl(encl->mm, encl_page->addr, &vma))
+	if (encl->flags & SGX_ENCL_INVALIDATED)
+		goto out;
+
+	if (sgx_find_encl(encl->mm, encl_page->addr, &vma))
 		goto out;
 
 	backing = sgx_get_backing(encl, encl_page);
@@ -317,7 +320,7 @@  static void sgx_add_page_worker(struct work_struct *work)
 	do {
 		schedule();
 
-		if (encl->flags & SGX_ENCL_SUSPEND)
+		if (encl->flags & SGX_ENCL_INVALIDATED)
 			skip_rest = true;
 
 		mutex_lock(&encl->lock);
@@ -578,7 +581,6 @@  static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
 		up_read(&current->mm->mmap_sem);
 		goto out;
 	}
-	encl->vma_cnt++;
 	vma->vm_private_data = encl;
 	up_read(&current->mm->mmap_sem);
 
@@ -682,7 +684,7 @@  static int __encl_add_page(struct sgx_encl *encl,
 
 	mutex_lock(&encl->lock);
 
-	if (encl->flags & SGX_ENCL_INITIALIZED) {
+	if (encl->flags & (SGX_ENCL_INITIALIZED | SGX_ENCL_INVALIDATED)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -734,6 +736,7 @@  out:
 	} else {
 		ret = encl_rb_insert(&encl->encl_rb, encl_page);
 		WARN_ON(ret);
+		encl->flags |= SGX_ENCL_PAGES_ADDED;
 	}
 
 	mutex_unlock(&encl->lock);
diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
index 3878d9a..41ccc18 100644
--- a/drivers/platform/x86/intel_sgx_util.c
+++ b/drivers/platform/x86/intel_sgx_util.c
@@ -135,21 +135,18 @@  void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
 
 bool sgx_pin_mm(struct sgx_encl *encl)
 {
-	if (encl->flags & SGX_ENCL_SUSPEND)
-		return false;
-
 	mutex_lock(&encl->lock);
-	if (encl->vma_cnt) {
-		atomic_inc(&encl->mm->mm_count);
-	} else {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		mutex_unlock(&encl->lock);
 		return false;
 	}
+
+	atomic_inc(&encl->mm->mm_count);
 	mutex_unlock(&encl->lock);
 
 	down_read(&encl->mm->mmap_sem);
 
-	if (!encl->vma_cnt) {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		sgx_unpin_mm(encl);
 		return false;
 	}
@@ -177,7 +174,7 @@  void sgx_invalidate(struct sgx_encl *encl)
 			break;
 	}
 
-	encl->vma_cnt = 0;
+	encl->flags |= SGX_ENCL_INVALIDATED;
 }
 
 int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
index 0649978..4515cc3c 100644
--- a/drivers/platform/x86/intel_sgx_vma.c
+++ b/drivers/platform/x86/intel_sgx_vma.c
@@ -70,48 +70,45 @@ 
 
 static void sgx_vma_open(struct vm_area_struct *vma)
 {
-	struct sgx_encl *encl;
+	struct sgx_encl *encl = vma->vm_private_data;
 
-	/* Was vm_private_data nullified as a result of the previous fork? */
-	encl = vma->vm_private_data;
-	if (!encl)
-		goto out_fork;
+	/* When forking for the second time vm_private_data is already set to
+	 * NULL.
+	 */
+	if (!encl) {
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+		return;
+	}
 
-	/* Was the process forked? mm_struct changes when the process is
-	 * forked.
+	/* Invalidate enclave when the process has been forked for the first
+	 * time or pages have been added because PTEs must bee in sync with
+	 * the EPCM entries.
 	 */
 	mutex_lock(&encl->lock);
-	if (encl->mm != vma->vm_mm) {
-		mutex_unlock(&encl->lock);
-		goto out_fork;
+	if (encl->mm != vma->vm_mm || (encl->flags & SGX_ENCL_PAGES_ADDED)) {
+		encl->flags |= SGX_ENCL_INVALIDATED;
+		zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+		vma->vm_private_data = NULL;
 	}
-	encl->vma_cnt++;
 	mutex_unlock(&encl->lock);
 
-	kref_get(&encl->refcount);
-	return;
-out_fork:
-	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-	vma->vm_private_data = NULL;
+	if (vma->vm_private_data)
+		kref_get(&encl->refcount);
 }
 
 static void sgx_vma_close(struct vm_area_struct *vma)
 {
 	struct sgx_encl *encl = vma->vm_private_data;
 
-	/* If process was forked, VMA is still there but
-	 * vm_private_data is set to NULL.
+	/* When forking for the second time vm_private_data is already set to
+	 * NULL.
 	 */
 	if (!encl)
 		return;
 
 	mutex_lock(&encl->lock);
-	encl->vma_cnt--;
-	vma->vm_private_data = NULL;
-
-	sgx_zap_tcs_ptes(encl, vma);
+	encl->flags |= SGX_ENCL_INVALIDATED;
 	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
-
 	mutex_unlock(&encl->lock);
 
 	kref_put(&encl->refcount, sgx_encl_release);
@@ -187,7 +184,7 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 
 	mutex_lock(&encl->lock);
 
-	if (!encl->vma_cnt) {
+	if (encl->flags & SGX_ENCL_INVALIDATED) {
 		entry = ERR_PTR(-EFAULT);
 		goto out;
 	}
@@ -268,8 +265,6 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 	list_add_tail(&entry->load_list, &encl->load_list);
 out:
 	mutex_unlock(&encl->lock);
-	if (encl->flags & SGX_ENCL_SUSPEND)
-		free_flags |= SGX_FREE_SKIP_EREMOVE;
 	if (epc_page)
 		sgx_free_page(epc_page, encl, free_flags);
 	if (secs_epc_page)
@@ -370,7 +365,7 @@  static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
 
 	if (!(encl->flags & SGX_ENCL_DEBUG) ||
 	    !(encl->flags & SGX_ENCL_INITIALIZED) ||
-	    (encl->flags & SGX_ENCL_SUSPEND))
+	    (encl->flags & SGX_ENCL_INVALIDATED))
 		return -EFAULT;
 
 	sgx_dbg(encl, "%s addr=0x%lx, len=%d\n", op_str, addr, len);