
[RESEND,09/11] x86/sgx: Move SGX_ENCL_DEAD check to sgx_reclaimer_write()

Message ID 20190912194720.7107-10-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived
Series v23 updates

Commit Message

Jarkko Sakkinen Sept. 12, 2019, 7:47 p.m. UTC
Do the enclave state check only in sgx_reclaimer_write(): checking the
enclave state is not part of the sgx_encl_ewb() flow, and the check is
done differently for the SECS page and for addressable pages.
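
As a rough sketch of the resulting flow (condensed from the hunks below,
with surrounding lines elided), sgx_reclaimer_write() now owns the
SGX_ENCL_DEAD decision and sgx_encl_ewb() is only reached for live
enclaves:

	mutex_lock(&encl->lock);

	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
		/* Dead enclave: just remove the page from the EPC. */
		ret = __eremove(sgx_epc_addr(epc_page));
		WARN(ret, "EREMOVE returned %d\n", ret);
	} else {
		/* Live enclave: write the page back with EWB. */
		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
	}

	encl_page->epc_page = NULL;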

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 arch/x86/kernel/cpu/sgx/reclaim.c | 69 +++++++++++++++----------------
 1 file changed, 34 insertions(+), 35 deletions(-)

Patch

diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
index 872c68bf04dd..f96f4c70f4a6 100644
--- a/arch/x86/kernel/cpu/sgx/reclaim.c
+++ b/arch/x86/kernel/cpu/sgx/reclaim.c
@@ -308,47 +308,45 @@  static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 
 	encl_page->desc &= ~SGX_ENCL_PAGE_RECLAIMED;
 
-	if (!(atomic_read(&encl->flags) & SGX_ENCL_DEAD)) {
-		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
-					   list);
-		va_offset = sgx_alloc_va_slot(va_page);
-		if (sgx_va_page_full(va_page))
-			list_move_tail(&va_page->list, &encl->va_pages);
+	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
+				   list);
+	va_offset = sgx_alloc_va_slot(va_page);
+	if (sgx_va_page_full(va_page))
+		list_move_tail(&va_page->list, &encl->va_pages);
+
+	ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
+			     page_index);
+	if (ret == SGX_NOT_TRACKED) {
+		ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
+		if (ret) {
+			if (encls_failed(ret) ||
+			    encls_returned_code(ret))
+				ENCLS_WARN(ret, "ETRACK");
+		}
 
 		ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
 				     page_index);
 		if (ret == SGX_NOT_TRACKED) {
-			ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
-			if (ret) {
-				if (encls_failed(ret) ||
-				    encls_returned_code(ret))
-					ENCLS_WARN(ret, "ETRACK");
-			}
-
-			ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
-					     page_index);
-			if (ret == SGX_NOT_TRACKED) {
-				/*
-				 * Slow path, send IPIs to kick cpus out of the
-				 * enclave.  Note, it's imperative that the cpu
-				 * mask is generated *after* ETRACK, else we'll
-				 * miss cpus that entered the enclave between
-				 * generating the mask and incrementing epoch.
-				 */
-				on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
-						 sgx_ipi_cb, NULL, 1);
-				ret = __sgx_encl_ewb(encl, epc_page, va_page,
-						     va_offset, page_index);
-			}
+			/*
+			 * Slow path, send IPIs to kick cpus out of the
+			 * enclave.  Note, it's imperative that the cpu
+			 * mask is generated *after* ETRACK, else we'll
+			 * miss cpus that entered the enclave between
+			 * generating the mask and incrementing epoch.
+			 */
+			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+					 sgx_ipi_cb, NULL, 1);
+			ret = __sgx_encl_ewb(encl, epc_page, va_page,
+					     va_offset, page_index);
 		}
+	}
 
-		if (ret)
-			if (encls_failed(ret) || encls_returned_code(ret))
-				ENCLS_WARN(ret, "EWB");
+	if (ret)
+		if (encls_failed(ret) || encls_returned_code(ret))
+			ENCLS_WARN(ret, "EWB");
 
-		encl_page->desc |= va_offset;
-		encl_page->va_page = va_page;
-	}
+	encl_page->desc |= va_offset;
+	encl_page->va_page = va_page;
 }
 
 static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
@@ -365,10 +363,11 @@  static void sgx_reclaimer_write(struct sgx_epc_page *epc_page)
 
 	mutex_lock(&encl->lock);
 
-	sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
 	if (atomic_read(&encl->flags) & SGX_ENCL_DEAD) {
 		ret = __eremove(sgx_epc_addr(epc_page));
 		WARN(ret, "EREMOVE returned %d\n", ret);
+	} else {
+		sgx_encl_ewb(epc_page, SGX_ENCL_PAGE_INDEX(encl_page));
 	}
 
 	encl_page->epc_page = NULL;
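
For reference, a condensed view of the EWB retry ladder in sgx_encl_ewb()
after this patch (assembled from the hunks above; the ENCLS_WARN error
reporting is elided):

	va_page = list_first_entry(&encl->va_pages, struct sgx_va_page, list);
	va_offset = sgx_alloc_va_slot(va_page);
	if (sgx_va_page_full(va_page))
		list_move_tail(&va_page->list, &encl->va_pages);

	/* First attempt: succeeds unless a new tracking epoch is required. */
	ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset, page_index);
	if (ret == SGX_NOT_TRACKED) {
		/* Second attempt: start a new tracking epoch with ETRACK. */
		__etrack(sgx_epc_addr(encl->secs.epc_page));
		ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
				     page_index);
		if (ret == SGX_NOT_TRACKED) {
			/*
			 * Third attempt: IPI CPUs out of the enclave first.
			 * The cpumask must be generated after ETRACK.
			 */
			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl), sgx_ipi_cb,
					 NULL, 1);
			ret = __sgx_encl_ewb(encl, epc_page, va_page, va_offset,
					     page_index);
		}
	}

	encl_page->desc |= va_offset;
	encl_page->va_page = va_page;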