
x86/sgx: Fix sgx_encl_may_map locking

Message ID 20201005004120.105849-1-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived
Series x86/sgx: Fix sgx_encl_may_map locking

Commit Message

Jarkko Sakkinen Oct. 5, 2020, 12:41 a.m. UTC
Fix the locking in sgx_encl_may_map(): instead of walking encl->page_array
with xas_for_each() without any locking, look up each page with xa_load()
while holding encl->lock. The issue is discussed further in:

1. https://lore.kernel.org/linux-sgx/op.0rwbv916wjvjmi@mqcpg7oapc828.gar.corp.intel.com/
2. https://lore.kernel.org/linux-sgx/20201003195440.GD20115@casper.infradead.org/

Reported-by: Haitao Huang <haitao.huang@linux.intel.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Jethro Beekman <jethro@fortanix.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 arch/x86/kernel/cpu/sgx/encl.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

Comments

Haitao Huang Oct. 5, 2020, 3:27 a.m. UTC | #1
On Sun, 04 Oct 2020 19:41:20 -0500, Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com> wrote:

> Fix the locking in sgx_encl_may_map(): instead of walking encl->page_array
> with xas_for_each() without any locking, look up each page with xa_load()
> while holding encl->lock. The issue is discussed further in:
>
> 1. https://lore.kernel.org/linux-sgx/op.0rwbv916wjvjmi@mqcpg7oapc828.gar.corp.intel.com/
> 2. https://lore.kernel.org/linux-sgx/20201003195440.GD20115@casper.infradead.org/
>
> Reported-by: Haitao Huang <haitao.huang@linux.intel.com>
> Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> Cc: Jethro Beekman <jethro@fortanix.com>
> Cc: Matthew Wilcox <willy@infradead.org>
> Cc: Dave Hansen <dave.hansen@linux.intel.com>
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> ---
>  arch/x86/kernel/cpu/sgx/encl.c | 14 +++++++++-----
>  1 file changed, 9 insertions(+), 5 deletions(-)
>
> diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
> index ae45f8f0951e..a225e96c7a39 100644
> --- a/arch/x86/kernel/cpu/sgx/encl.c
> +++ b/arch/x86/kernel/cpu/sgx/encl.c
> @@ -304,11 +304,10 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
>  		     unsigned long end, unsigned long vm_flags)
>  {
>  	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
> -	unsigned long idx_start = PFN_DOWN(start);
> -	unsigned long idx_end = PFN_DOWN(end - 1);
> +	unsigned long start_i = PFN_DOWN(start);
> +	unsigned long end_i = PFN_DOWN(end - 1);
>  	struct sgx_encl_page *page;
> -
> -	XA_STATE(xas, &encl->page_array, idx_start);
> +	int i;
>  
>  	/*
>  	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
> @@ -317,9 +316,14 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
>  	if (current->personality & READ_IMPLIES_EXEC)
>  		return -EACCES;
>  
> -	xas_for_each(&xas, page, idx_end)
> +	for (i = start_i; i <= end_i; i++) {
> +		mutex_lock(&encl->lock);
> +		page = xa_load(&encl->page_array, i);
> +		mutex_unlock(&encl->lock);
> +
>  		if (!page || (~page->vm_max_prot_bits & vm_prot_bits))
>  			return -EACCES;
> +	}
>  
>  	return 0;
>  }

Works with no PROVE_LOCKING complaints.
Haitao
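
As a side note on the lockdep angle: the locking rule the patch relies on could also be made explicit and checkable under PROVE_LOCKING by funnelling the lookup through a small helper. This is a minimal sketch only; the helper name is hypothetical and not part of the patch, and it assumes the same struct sgx_encl fields (lock, page_array) used above.

/*
 * Hypothetical helper, sketch only: assert that the caller holds
 * encl->lock before consulting the page xarray, so lockdep can flag
 * any unlocked lookup when PROVE_LOCKING is enabled.
 */
static struct sgx_encl_page *sgx_encl_load_page_locked(struct sgx_encl *encl,
						       unsigned long idx)
{
	lockdep_assert_held(&encl->lock);

	return xa_load(&encl->page_array, idx);
}

With such a helper, the loop in sgx_encl_may_map() would call it between the mutex_lock()/mutex_unlock() pair exactly as in the patch.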
Jarkko Sakkinen Oct. 5, 2020, 3:43 a.m. UTC | #2
On Sun, Oct 04, 2020 at 10:27:43PM -0500, Haitao Huang wrote:
> 
> Works with no PROVE_LOCKING complaints.
> Haitao

Great. That is a good reference point. Thank you.

v2 should work too but needs to be checked.

/Jarkko
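
For comparison, here is one possible alternative shape for the check: encl->lock taken once around the whole walk instead of per page. It is illustrative only, not necessarily what v2 does, and it reuses the fields from the patch (encl->page_array, page->vm_max_prot_bits).

/*
 * Illustrative variant, sketch only: hold encl->lock across the whole
 * range check rather than taking and dropping it for every page.
 */
static int sgx_encl_may_map_locked_once(struct sgx_encl *encl,
					unsigned long start,
					unsigned long end,
					unsigned long vm_flags)
{
	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
	unsigned long idx_start = PFN_DOWN(start);
	unsigned long idx_end = PFN_DOWN(end - 1);
	struct sgx_encl_page *page;
	unsigned long idx;
	int ret = 0;

	if (current->personality & READ_IMPLIES_EXEC)
		return -EACCES;

	mutex_lock(&encl->lock);

	for (idx = idx_start; idx <= idx_end; idx++) {
		page = xa_load(&encl->page_array, idx);
		if (!page || (~page->vm_max_prot_bits & vm_prot_bits)) {
			ret = -EACCES;
			break;
		}
	}

	mutex_unlock(&encl->lock);

	return ret;
}

The trade-off is straightforward: per-page locking keeps each critical section tiny, while a single lock/unlock pair avoids the repeated mutex operations but holds encl->lock for the whole range being validated.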

Patch

diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index ae45f8f0951e..a225e96c7a39 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -304,11 +304,10 @@  int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
 		     unsigned long end, unsigned long vm_flags)
 {
 	unsigned long vm_prot_bits = vm_flags & (VM_READ | VM_WRITE | VM_EXEC);
-	unsigned long idx_start = PFN_DOWN(start);
-	unsigned long idx_end = PFN_DOWN(end - 1);
+	unsigned long start_i = PFN_DOWN(start);
+	unsigned long end_i = PFN_DOWN(end - 1);
 	struct sgx_encl_page *page;
-
-	XA_STATE(xas, &encl->page_array, idx_start);
+	int i;
 
 	/*
 	 * Disallow READ_IMPLIES_EXEC tasks as their VMA permissions might
@@ -317,9 +316,14 @@  int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
 	if (current->personality & READ_IMPLIES_EXEC)
 		return -EACCES;
 
-	xas_for_each(&xas, page, idx_end)
+	for (i = start_i; i <= end_i; i++) {
+		mutex_lock(&encl->lock);
+		page = xa_load(&encl->page_array, i);
+		mutex_unlock(&encl->lock);
+
 		if (!page || (~page->vm_max_prot_bits & vm_prot_bits))
 			return -EACCES;
+	}
 
 	return 0;
 }