[intel-sgx-kernel-dev] intel_sgx: simplify sgx_write_pages()

Message ID 1482327954-13747-1-git-send-email-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Jarkko Sakkinen Dec. 21, 2016, 1:45 p.m. UTC
Now that sgx_ewb() has a sane error recovery flow, we can simplify
sgx_write_pages() significantly by moving the pinning of the backing
page into sgx_ewb(). This was not possible before, as in some
situations pinning could legitimately fail.

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 drivers/platform/x86/intel_sgx_page_cache.c | 63 ++++++++++++-----------------
 1 file changed, 25 insertions(+), 38 deletions(-)
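
For readers skimming the thread, the shape of the change is easy to
summarize. Before this patch, sgx_write_pages() pinned every backing
page up front into a pages[] array and, when a pin failed, put the page
back on the load list instead of treating it as an error. After this
patch, __sgx_ewb() pins its own backing page and reports a failed pin
as an ordinary negative return, which feeds into the same error path
that invalidates the enclave. The sketch below is a condensed
paraphrase of the post-patch flow using identifiers from the diff;
do_ewb() is a hypothetical stand-in for the
kmap_atomic()/__ewb()/kunmap_atomic() sequence, not a real helper in
the driver.

static int __sgx_ewb(struct sgx_encl *encl, struct sgx_encl_page *encl_page)
{
	struct page *backing;
	int ret;

	/*
	 * Pinning moved here from sgx_write_pages(). A failed pin is
	 * logged once with sgx_warn() and returned as -errno, which is
	 * why sgx_ewb() now calls sgx_err() only for positive (SGX
	 * status) return codes.
	 */
	backing = sgx_get_backing(encl, encl_page);
	if (IS_ERR(backing)) {
		ret = PTR_ERR(backing);
		sgx_warn(encl, "pinning the backing page for EWB failed with %d\n",
			 ret);
		return ret;
	}

	ret = do_ewb(encl_page, backing);	/* kmap + __ewb() + kunmap */

	sgx_put_backing(backing, true);
	return ret;	/* 0 on success, SGX status (> 0), or -errno */
}

The trade-off is that each page is now pinned and unpinned individually
inside the EWB path instead of being batch-pinned up front; in
exchange, sgx_write_pages() drops the pages[] array, the cnt/i
bookkeeping, and the special-cased pinning of the SECS page.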

Comments

Sean Christopherson Dec. 21, 2016, 3:22 p.m. UTC | #1
On Wed, 2016-12-21 at 15:45 +0200, Jarkko Sakkinen wrote:
> Now that sgx_ewb() has a sane error recovery flow, we can simplify
> sgx_write_pages() significantly by moving the pinning of the backing
> page into sgx_ewb(). This was not possible before, as in some
> situations pinning could legitimately fail.
> 
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> ---
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Tested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Jarkko Sakkinen Dec. 22, 2016, 9:25 a.m. UTC | #2
On Wed, Dec 21, 2016 at 07:22:02AM -0800, Sean Christopherson wrote:
> On Wed, 2016-12-21 at 15:45 +0200, Jarkko Sakkinen wrote:
> > Now that sgx_ewb() has a sane error recovery flow, we can simplify
> > sgx_write_pages() significantly by moving the pinning of the backing
> > page into sgx_ewb(). This was not possible before, as in some
> > situations pinning could legitimately fail.
> > 
> > Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> > ---
> Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
> Tested-by: Sean Christopherson <sean.j.christopherson@intel.com>

Thanks Sean!

/Jarkko
Jarkko Sakkinen Dec. 22, 2016, 1:33 p.m. UTC | #3
On Thu, Dec 22, 2016 at 11:25:25AM +0200, Jarkko Sakkinen wrote:
> On Wed, Dec 21, 2016 at 07:22:02AM -0800, Sean Christopherson wrote:
> > On Wed, 2016-12-21 at 15:45 +0200, Jarkko Sakkinen wrote:
> > > Now that sgx_ewb() has a sane error recovery flow, we can simplify
> > > sgx_write_pages() significantly by moving the pinning of the backing
> > > page into sgx_ewb(). This was not possible before, as in some
> > > situations pinning could legitimately fail.
> > > 
> > > Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> > > ---
> > Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
> > Tested-by: Sean Christopherson <sean.j.christopherson@intel.com>
> 
> Thanks Sean!
> 
> /Jarkko

Applied.

/Jarkko

Patch

diff --git a/drivers/platform/x86/intel_sgx_page_cache.c b/drivers/platform/x86/intel_sgx_page_cache.c
index 36d4d54..d073057 100644
--- a/drivers/platform/x86/intel_sgx_page_cache.c
+++ b/drivers/platform/x86/intel_sgx_page_cache.c
@@ -233,48 +233,57 @@  static void sgx_etrack(struct sgx_epc_page *epc_page)
 }
 
 static int __sgx_ewb(struct sgx_encl *encl,
-		     struct sgx_encl_page *encl_page,
-		     struct page *backing)
+		     struct sgx_encl_page *encl_page)
 {
 	struct sgx_page_info pginfo;
+	struct page *backing;
 	void *epc;
 	void *va;
 	int ret;
 
-	pginfo.srcpge = (unsigned long)kmap_atomic(backing);
+	backing = sgx_get_backing(encl, encl_page);
+	if (IS_ERR(backing)) {
+		ret = PTR_ERR(backing);
+		sgx_warn(encl, "pinning the backing page for EWB failed with %d\n",
+			 ret);
+		return ret;
+	}
+
 	epc = sgx_get_epc_page(encl_page->epc_page);
 	va = sgx_get_epc_page(encl_page->va_page->epc_page);
 
+	pginfo.srcpge = (unsigned long)kmap_atomic(backing);
 	pginfo.pcmd = (unsigned long)&encl_page->pcmd;
 	pginfo.linaddr = 0;
 	pginfo.secs = 0;
 	ret = __ewb(&pginfo, epc,
 		    (void *)((unsigned long)va + encl_page->va_offset));
+	kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
 
 	sgx_put_epc_page(va);
 	sgx_put_epc_page(epc);
-	kunmap_atomic((void *)(unsigned long)pginfo.srcpge);
+	sgx_put_backing(backing, true);
 
 	return ret;
 }
 
 static bool sgx_ewb(struct sgx_encl *encl,
-		    struct sgx_encl_page *entry,
-		    struct page *backing)
+		    struct sgx_encl_page *entry)
 {
-	int ret = __sgx_ewb(encl, entry, backing);
+	int ret = __sgx_ewb(encl, entry);
 
 	if (ret == SGX_NOT_TRACKED) {
 		/* slow path, IPI needed */
 		smp_call_function(sgx_ipi_cb, NULL, 1);
-		ret = __sgx_ewb(encl, entry, backing);
+		ret = __sgx_ewb(encl, entry);
 	}
 
 	if (ret) {
 		/* make enclave inaccessible */
 		sgx_invalidate(encl);
 		smp_call_function(sgx_ipi_cb, NULL, 1);
-		sgx_err(encl, "EWB returned %d, enclave killed\n", ret);
+		if (ret > 0)
+			sgx_err(encl, "EWB returned %d, enclave killed\n", ret);
 		return false;
 	}
 
@@ -294,11 +303,8 @@  static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
 {
 	struct sgx_encl_page *entry;
 	struct sgx_encl_page *tmp;
-	struct page *pages[SGX_NR_SWAP_CLUSTER_MAX + 1];
 	struct vm_area_struct *evma;
 	unsigned int free_flags;
-	int cnt = 0;
-	int i = 0;
 
 	if (list_empty(src))
 		return;
@@ -316,25 +322,14 @@  static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
 			continue;
 		}
 
-		pages[cnt] = sgx_get_backing(encl, entry);
-		if (IS_ERR(pages[cnt])) {
-			list_del(&entry->load_list);
-			list_add_tail(&entry->load_list, &encl->load_list);
-			entry->flags &= ~SGX_ENCL_PAGE_RESERVED;
-			continue;
-		}
-
 		zap_vma_ptes(evma, entry->addr, PAGE_SIZE);
 		sgx_eblock(entry->epc_page);
-		cnt++;
 	}
 
 	/* ETRACK */
 	sgx_etrack(encl->secs_page.epc_page);
 
 	/* EWB */
-	i = 0;
-
 	while (!list_empty(src)) {
 		entry = list_first_entry(src, struct sgx_encl_page,
 					 load_list);
@@ -344,29 +339,21 @@  static void sgx_write_pages(struct sgx_encl *encl, struct list_head *src)
 
 		evma = sgx_find_vma(encl, entry->addr);
 		if (evma) {
-			if (sgx_ewb(encl, entry, pages[i]))
+			if (sgx_ewb(encl, entry))
 				free_flags = SGX_FREE_SKIP_EREMOVE;
 			encl->secs_child_cnt--;
 		}
 
 		sgx_free_encl_page(entry, encl, free_flags);
-		sgx_put_backing(pages[i++], evma);
 	}
 
-	/* Allow SECS page eviction only when the encl is initialized. */
-	if (!encl->secs_child_cnt &&
-	    (encl->flags & SGX_ENCL_INITIALIZED)) {
-		pages[cnt] = sgx_get_backing(encl, &encl->secs_page);
-		if (!IS_ERR(pages[cnt])) {
-			free_flags = 0;
-			if (sgx_ewb(encl, &encl->secs_page, pages[cnt]))
-				free_flags = SGX_FREE_SKIP_EREMOVE;
-
-			encl->flags |= SGX_ENCL_SECS_EVICTED;
+	if (!encl->secs_child_cnt && (encl->flags & SGX_ENCL_INITIALIZED)) {
+		free_flags = 0;
+		if (sgx_ewb(encl, &encl->secs_page))
+			free_flags = SGX_FREE_SKIP_EREMOVE;
 
-			sgx_free_encl_page(&encl->secs_page, encl, free_flags);
-			sgx_put_backing(pages[cnt], true);
-		}
+		encl->flags |= SGX_ENCL_SECS_EVICTED;
+		sgx_free_encl_page(&encl->secs_page, encl, free_flags);
 	}
 
 	mutex_unlock(&encl->lock);