[RFC,5/9] x86/sgx: Restrict mapping without an enclave page to PROT_NONE

Message ID 20190531233159.30992-6-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series security: x86/sgx: SGX vs. LSM

Commit Message

Sean Christopherson May 31, 2019, 11:31 p.m. UTC
To support LSM integration, SGX will require userspace to explicitly
specify the allowed protections for each page.  The allowed protections
will be supplied to and modified by LSMs (based on their policies).
To prevent userspace from circumventing the allowed protections, do not
allow PROT_{READ,WRITE,EXEC} mappings to an enclave without an
associated enclave page (which will track the allowed protections).

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/driver/main.c |  5 +++++
 arch/x86/kernel/cpu/sgx/encl.c        | 30 +++++++++++++++++++++++++++
 arch/x86/kernel/cpu/sgx/encl.h        |  3 +++
 3 files changed, 38 insertions(+)
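
To illustrate the intended userspace flow, here is a hypothetical sketch
(the fd, offsets, and the elided ioctl()s are placeholders): the enclave
range is reserved with PROT_NONE, and only ranges that have gained enclave
pages can then be mprotect()'d to stronger protections.

#include <sys/types.h>
#include <sys/mman.h>

/* Hypothetical flow under this patch: PROT_NONE mappings always succeed,
 * while mmap()/mprotect() with PROT_{READ,WRITE,EXEC} fails with EACCES
 * until every page in the range has an associated enclave page.
 */
int reserve_then_enable(int enclave_fd, size_t size, off_t code_off,
			size_t code_len)
{
	char *base = mmap(NULL, size, PROT_NONE, MAP_SHARED, enclave_fd, 0);

	if (base == MAP_FAILED)
		return -1;

	/* ... add enclave pages via the driver's ioctl()s here ... */

	/* Allowed only once the code pages exist in the enclave. */
	return mprotect(base + code_off, code_len, PROT_READ | PROT_EXEC);
}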

Comments

Xing, Cedric June 3, 2019, 6:28 a.m. UTC | #1
> From: Christopherson, Sean J
> Sent: Friday, May 31, 2019 4:32 PM
> 
> To support LSM integration, SGX will require userspace to explicitly specify the allowed
> protections for each page.  The allowed protections will be supplied to and modified by
> LSMs (based on their policies).
> To prevent userspace from circumventing the allowed protections, do not allow
> PROT_{READ,WRITE,EXEC} mappings to an enclave without an associated enclave page (which
> will track the allowed protections).

This is unnecessary.

For mprotect(), the LSM shall validate @prot against the existing pages, together with any applicable global flags (e.g. FILE__EXECMOD/PROCESS__EXECMEM in the case of SELinux).

For mmap(), the SGX driver could invoke security_file_mprotect() explicitly to have the LSM validate the requested protection.

In the case where there's no page associated with a VMA, security_file_mprotect() shall still dictate whether to allow or deny the request. The LSM is able to track the existence/nonexistence of enclave pages internally. If there's no page, there's no conflict, so the decision shall depend only on the global flags (if any). Afterwards, a #PF may trigger the SGX driver to EAUG, in which case security_enclave_load() will be invoked and the LSM can decide whether to approve or decline the EAUG request.
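
As a rough illustration of the suggestion above, the mmap() handler could
run the same LSM hook that mprotect() runs. A minimal sketch, assuming the
VM_* flags are passed straight through (security_file_mprotect() normally
receives PROT_* values, so a real version would translate the flags):

/* Sketch only: run the LSM's mprotect check at mmap() time so that
 * mmap(PROT_X) cannot bypass a policy that would reject mprotect(PROT_X).
 * Translation from vma->vm_flags (VM_*) to PROT_* is elided.
 */
static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	/* Let the LSM (e.g. SELinux) vet the requested protections. */
	ret = security_file_mprotect(vma, vma->vm_flags, vma->vm_flags);
	if (ret)
		return ret;

	vma->vm_ops = &sgx_vm_ops;
	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;

	return 0;
}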
Jarkko Sakkinen June 4, 2019, 3:32 p.m. UTC | #2
On Fri, May 31, 2019 at 04:31:55PM -0700, Sean Christopherson wrote:
> To support LSM integration, SGX will require userspace to explicitly
> specify the allowed protections for each page.  The allowed protections
> will be supplied to and modified by LSMs (based on their policies).

How are the allowed protections modified by LSMs? AFAIK they don't touch
the PROT_* flags, but I could be wrong.

> To prevent userspace from circumventing the allowed protections, do not
> allow PROT_{READ,WRITE,EXEC} mappings to an enclave without an
> associated enclave page (which will track the allowed protections).
> 
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/x86/kernel/cpu/sgx/driver/main.c |  5 +++++
>  arch/x86/kernel/cpu/sgx/encl.c        | 30 +++++++++++++++++++++++++++
>  arch/x86/kernel/cpu/sgx/encl.h        |  3 +++
>  3 files changed, 38 insertions(+)
> 
> diff --git a/arch/x86/kernel/cpu/sgx/driver/main.c b/arch/x86/kernel/cpu/sgx/driver/main.c
> index 129d356aff30..65a87c2fdf02 100644
> --- a/arch/x86/kernel/cpu/sgx/driver/main.c
> +++ b/arch/x86/kernel/cpu/sgx/driver/main.c
> @@ -63,6 +63,11 @@ static long sgx_compat_ioctl(struct file *filep, unsigned int cmd,
>  static int sgx_mmap(struct file *file, struct vm_area_struct *vma)
>  {
>  	struct sgx_encl *encl = file->private_data;
> +	int ret;
> +
> +	ret = sgx_map_allowed(encl, vma->vm_start, vma->vm_end, vma->vm_flags);
> +	if (ret)
> +		return ret;
>  
>  	vma->vm_ops = &sgx_vm_ops;
>  	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO;
> diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
> index f23ea0fbaa47..955d4f430adc 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.c
> +++ b/arch/x86/kernel/cpu/sgx/encl.c
> @@ -235,6 +235,35 @@ static void sgx_vma_close(struct vm_area_struct *vma)
>  	kref_put(&encl->refcount, sgx_encl_release);
>  }
>  
> +int sgx_map_allowed(struct sgx_encl *encl, unsigned long start,
> +		    unsigned long end, unsigned long prot)

Documentation missing.

> +{
> +	struct sgx_encl_page *page;
> +	unsigned long addr;
> +
> +	prot &= (VM_READ | VM_WRITE | VM_EXEC);
> +	if (!prot || !encl)
> +		return 0;
> +
> +	mutex_lock(&encl->lock);
> +
> +	for (addr = start; addr < end; addr += PAGE_SIZE) {
> +		page = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
> +		if (!page)
> +			return -EACCES;
> +	}
> +
> +	mutex_unlock(&encl->lock);
> +
> +	return 0;
> +}
> +
> +static int sgx_vma_mprotect(struct vm_area_struct *vma, unsigned long start,
> +			    unsigned long end, unsigned long prot)
> +{
> +	return sgx_map_allowed(vma->vm_private_data, start, end, prot);
> +}
> +
>  static unsigned int sgx_vma_fault(struct vm_fault *vmf)
>  {
>  	unsigned long addr = (unsigned long)vmf->address;
> @@ -372,6 +401,7 @@ static int sgx_vma_access(struct vm_area_struct *vma, unsigned long addr,
>  const struct vm_operations_struct sgx_vm_ops = {
>  	.close = sgx_vma_close,
>  	.open = sgx_vma_open,
> +	.mprotect = sgx_vma_mprotect,
>  	.fault = sgx_vma_fault,
>  	.access = sgx_vma_access,
>  };
> diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
> index c557f0374d74..6e310e3b3fff 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.h
> +++ b/arch/x86/kernel/cpu/sgx/encl.h
> @@ -106,6 +106,9 @@ static inline unsigned long sgx_pcmd_offset(pgoff_t page_index)
>  	       sizeof(struct sgx_pcmd);
>  }
>  
> +int sgx_map_allowed(struct sgx_encl *encl, unsigned long start,
> +		    unsigned long end, unsigned long prot);
> +
>  enum sgx_encl_mm_iter {
>  	SGX_ENCL_MM_ITER_DONE		= 0,
>  	SGX_ENCL_MM_ITER_NEXT		= 1,
> -- 
> 2.21.0

This is missing an explanation of why it is OK to have a mismatch between
the SECINFO flags and the VM_* flags. Maybe that could be explained in the
sgx_map_allowed() documentation.

/Jarkko
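
For reference, a sketch of sgx_map_allowed() with the kernel-doc Jarkko
asks for, and with encl->lock released on the error path (as posted, the
function returns -EACCES with the mutex still held). The SECINFO vs. VM_*
mismatch question is left to the author; the comment below only documents
what the code does.

/**
 * sgx_map_allowed() - Check that a mapping is fully backed by enclave pages
 * @encl:	enclave backing the mapping, may be NULL
 * @start:	start of the address range
 * @end:	end of the address range (exclusive)
 * @prot:	VM_{READ,WRITE,EXEC} flags requested for the mapping
 *
 * Reject PROT_{READ,WRITE,EXEC} mappings of ranges that lack an associated
 * enclave page, as the enclave page is what will carry the LSM-managed
 * allowed protections.
 *
 * Return: 0 if the mapping is allowed, -EACCES otherwise.
 */
int sgx_map_allowed(struct sgx_encl *encl, unsigned long start,
		    unsigned long end, unsigned long prot)
{
	struct sgx_encl_page *page;
	unsigned long addr;

	prot &= (VM_READ | VM_WRITE | VM_EXEC);
	if (!prot || !encl)
		return 0;

	mutex_lock(&encl->lock);

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		page = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
		if (!page) {
			/* Drop the lock on the error path as well. */
			mutex_unlock(&encl->lock);
			return -EACCES;
		}
	}

	mutex_unlock(&encl->lock);

	return 0;
}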