
[v3,09/17] x86/sgx: Move SGX_ENCL_DEAD check to sgx_reclaimer_write()

Message ID 20190916101803.30726-10-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived
Series Fixes and updates for v23

Commit Message

Jarkko Sakkinen Sept. 16, 2019, 10:17 a.m. UTC
Do the enclave state check only in sgx_reclaimer_write(). Checking the
enclave state is not part of the sgx_encl_ewb() flow, and the check is
done differently for the SECS than for addressable pages.

Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Shay Katz-zamir <shay.katz-zamir@intel.com>
Cc: Serge Ayoun <serge.ayoun@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 arch/x86/kernel/cpu/sgx/reclaim.c | 69 +++++++++++++++----------------
 1 file changed, 34 insertions(+), 35 deletions(-)

Comments

Sean Christopherson Sept. 17, 2019, 11:13 p.m. UTC | #1
On Mon, Sep 16, 2019 at 01:17:55PM +0300, Jarkko Sakkinen wrote:
> @@ -359,10 +357,11 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
>  
>  	mutex_lock(&encl->lock);
>  
> -	sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
>  	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
>  		ret = __eremove(sgx_epc_addr(epc_page));
>  		WARN(ret, "EREMOVE returned %d\n", ret);

SGX_ENCL_PAGE_RECLAIMED is left dangling on encl_page in the SGX_ENCL_DEAD
case.  It doesn't affect functionality, but it could complicate debug if
anything goes awry.

> +	} else {
> +		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
>  	}
>  
>  	encl_page->epc_page = NULL;
> -- 
> 2.20.1
>
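For readers following the thread: below is a minimal sketch of how the dead
branch in sgx_reclaimer_write() could also drop the reclaim marker, per the
remark above. It is not taken from a later revision of the patch; the
placement of the clear is an assumption, and the live branch relies on
sgx_encl_ewb() clearing the flag itself, as in the hunks quoted earlier.

	mutex_lock(&encl->lock);

	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
		/*
		 * Assumed fix: clear the marker here too, so a dead enclave
		 * does not leave SGX_ENCL_PAGE_RECLAIMED dangling on the
		 * page descriptor.
		 */
		encl_page->desc &= ~SGX_ENCL_PAGE_RECLAIMED;

		ret = __eremove(sgx_epc_addr(epc_page));
		WARN(ret, "EREMOVE returned %d\n", ret);
	} else {
		/* The live path clears the flag inside sgx_encl_ewb(). */
		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
	}

	encl_page->epc_page = NULL;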
Sean Christopherson Sept. 17, 2019, 11:21 p.m. UTC | #2
On Mon, Sep 16, 2019 at 01:17:55PM +0300, Jarkko Sakkinen wrote:
> @@ -359,10 +357,11 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
>  
>  	mutex_lock(&encl->lock);
>  
> -	sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
>  	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
>  		ret = __eremove(sgx_epc_addr(epc_page));
>  		WARN(ret, "EREMOVE returned %d\n", ret);
> +	} else {
> +		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));

The sgx_encl_ewb() for SECS also needs to be skipped, otherwise we'll
attempt EWB on a dead enclave.  If the enclave is dead we can simply
free the SECS.

>  	}
>  
>  	encl_page->epc_page = NULL;
> -- 
> 2.20.1
>
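To make the suggestion concrete, here is a hedged sketch of how the SECS
reclaim step could branch on SGX_ENCL_DEAD. The secs_child_cnt check,
sgx_free_page(), and the secs_page_index argument are assumptions about the
surrounding driver code, not text from this patch.

	/*
	 * Hypothetical tail of the reclaimer: once the last child page is
	 * gone, write the SECS back only for a live enclave; for a dead
	 * enclave skip EWB entirely and just release the EPC page.
	 */
	if (!encl->secs_child_cnt && encl->secs.epc_page) {
		if (!(atomic_read(&encl->flags) & SGX_ENCL_DEAD))
			/* secs_page_index: placeholder for the SECS page index */
			sgx_encl_ewb(encl->secs.epc_page, secs_page_index);

		sgx_free_page(encl->secs.epc_page);
		encl->secs.epc_page = NULL;
	}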
Jarkko Sakkinen Sept. 18, 2019, 4:15 a.m. UTC | #3
On Tue, Sep 17, 2019 at 04:13:26PM -0700, Sean Christopherson wrote:
> SGX_ENCL_PAGE_RECLAIMED is left dangling on encl_page in the SGX_ENCL_DEAD
> case.  It doesn't affect functionality, but it could complicate debug if
> anything goes awry.

Agreed. It is actually a regression introduced by this patch, since my
intent was not to change the semantics.

/Jarkko
Jarkko Sakkinen Sept. 18, 2019, 4:16 a.m. UTC | #4
On Tue, Sep 17, 2019 at 04:21:19PM -0700, Sean Christopherson wrote:
> On Mon, Sep 16, 2019 at 01:17:55PM +0300, Jarkko Sakkinen wrote:
> > @@ -359,10 +357,11 @@ static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
> >  
> >  	mutex_lock(&encl->lock);
> >  
> > -	sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
> >  	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
> >  		ret = __eremove(sgx_epc_addr(epc_page));
> >  		WARN(ret, "EREMOVE returned %d\n", ret);
> > +	} else {
> > +		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
> 
> The sgx_encl_ewb() for SECS also needs to be skipped, otherwise we'll
> attempt EWB on a dead enclave.  If the enclave is dead we can simply
> free the SECS.

Thanks! I'll refine.

/Jarkko

Patch

diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
index e758a06919e4..a3e36f959c74 100644
--- a/arch/x86/kernel/cpu/sgx/reclaim.c
+++ b/arch/x86/kernel/cpu/sgx/reclaim.c
@@ -308,47 +308,45 @@  static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 
 	encl_page->desc &= ~SGX_ENCL_PAGE_RECLAIMED;
 
-	if (!(atomic_read(&encl->flags) & SGX_ENCL_DEAD)) {
-		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
-					   list);
-		va_offset = sgx_alloc_va_slot(va_page);
-		if (sgx_va_page_full(va_page))
-			list_move_tail(&va_page->list, &encl->va_pages);
+	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+				   list);
+	va_offset = sgx_alloc_va_slot(va_page);
+	if (sgx_va_page_full(va_page))
+		list_move_tail(&va_page->list, &encl->va_pages);
+
+	ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
+			     page_index);
+	if (ret == SGX_NOT_TRACKED) {
+		ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
+		if (ret) {
+			if (encls_failed(ret) ||
+			    encls_returned_code(ret))
+				ENCLS_WARN(ret, "ETRACK");
+		}
 
 		ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
 				     page_index);
 		if (ret == SGX_NOT_TRACKED) {
-			ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
-			if (ret) {
-				if (encls_failed(ret) ||
-				    encls_returned_code(ret))
-					ENCLS_WARN(ret, "ETRACK");
-			}
-
-			ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
-					     page_index);
-			if (ret == SGX_NOT_TRACKED) {
-				/*
-				 * Slow path, send IPIs to kick cpus out of the
-				 * enclave.  Note, it's imperative that the cpu
-				 * mask is generated *after* ETRACK, else we'll
-				 * miss cpus that entered the enclave between
-				 * generating the mask and incrementing epoch.
-				 */
-				on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
-						 sgx_ipi_cb, NULL, 1);
-				ret = __sgx_encl_ewb(encl, epc_page, va_page,
-						     va_offset, page_index);
-			}
+			/*
+			 * Slow path, send IPIs to kick cpus out of the
+			 * enclave.  Note, it's imperative that the cpu
+			 * mask is generated *after* ETRACK, else we'll
+			 * miss cpus that entered the enclave between
+			 * generating the mask and incrementing epoch.
+			 */
+			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+					 sgx_ipi_cb, NULL, 1);
+			ret = __sgx_encl_ewb(encl, epc_page, va_page,
+					     va_offset, page_index);
 		}
+	}
 
-		if (ret)
-			if (encls_failed(ret) || encls_returned_code(ret))
-				ENCLS_WARN(ret, "EWB");
+	if (ret)
+		if (encls_failed(ret) || encls_returned_code(ret))
+			ENCLS_WARN(ret, "EWB");
 
-		encl_page->desc |= va_offset;
-		encl_page->va_page = va_page;
-	}
+	encl_page->desc |= va_offset;
+	encl_page->va_page = va_page;
 }
 
 static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
@@ -359,10 +357,11 @@  static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
 
 	mutex_lock(&encl->lock);
 
-	sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
 	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
 		ret = __eremove(sgx_epc_addr(epc_page));
 		WARN(ret, "EREMOVE returned %d\n", ret);
+	} else {
+		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
 	}
 
 	encl_page->epc_page = NULL;
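Read as straight-line code, the write-back flow that results from this patch
is a three-attempt retry. The sketch below is assembled from the hunks above;
the declarations of the locals (encl, va_page, va_offset, ret) are assumed
from the surrounding function.

	/* 1. Fast path: try EWB against the current tracking epoch. */
	ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset, page_index);
	if (ret == SGX_NOT_TRACKED) {
		/* 2. Increment the tracking epoch with ETRACK and retry. */
		ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
		if (ret && (encls_failed(ret) || encls_returned_code(ret)))
			ENCLS_WARN(ret, "ETRACK");

		ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
				     page_index);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * 3. Slow path: IPI every CPU that may still be in
			 * the enclave, then retry once more.  The cpumask
			 * must be generated *after* ETRACK.
			 */
			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
					 sgx_ipi_cb, NULL, 1);
			ret = __sgx_encl_ewb(encl, epc_page, va_page,
					     va_offset, page_index);
		}
	}

	if (ret && (encls_failed(ret) || encls_returned_code(ret)))
		ENCLS_WARN(ret, "EWB");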