[for_v23,v3,09/12] x86/sgx: Split second half of sgx_free_page() to a separate helper

Message ID 20191016183745.8226-10-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series x86/sgx: Bug fixes for v23

Commit Message

Sean Christopherson Oct. 16, 2019, 6:37 p.m. UTC
Move the post-reclaim half of sgx_free_page() to a standalone helper so
that it can be used in flows where the page is known to be
non-reclaimable.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/main.c | 42 ++++++++++++++++++++++++--------
 arch/x86/kernel/cpu/sgx/sgx.h  |  1 +
 2 files changed, 33 insertions(+), 10 deletions(-)
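
To illustrate the kind of flow the split targets, consider a teardown path
whose pages were never marked reclaimable; with the new helper it can free
them without any error handling. This is a hypothetical sketch, not code
from the patch: the list walk, the va_page layout and the encl->va_pages
name are assumptions.

	struct sgx_va_page *va_page, *tmp;

	/*
	 * Hypothetical teardown: every page on this list was allocated
	 * with reclaim disabled and never marked SGX_EPC_PAGE_RECLAIMABLE,
	 * so freeing cannot race with the reclaimer and cannot fail.
	 */
	list_for_each_entry_safe(va_page, tmp, &encl->va_pages, list) {
		list_del(&va_page->list);
		__sgx_free_page(va_page->epc_page);
		kfree(va_page);
	}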

Comments

Jarkko Sakkinen Oct. 18, 2019, 10:06 a.m. UTC | #1
On Wed, Oct 16, 2019 at 11:37:42AM -0700, Sean Christopherson wrote:
> Move the post-reclaim half of sgx_free_page() to a standalone helper so
> that it can be used in flows where the page is known to be
> non-reclaimable.

The call sites where it is known to be reclaimable should handle the
error instead of creating call-site-specific versions of the function.

/Jarkko
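
Concretely, the suggestion would have a racy call site handle -EBUSY itself,
roughly like this (a sketch of the idea, not code from the thread; the
back-off policy is caller-specific and assumed here):

	int ret;

	ret = sgx_free_page(epc_page);
	if (ret) {
		/*
		 * -EBUSY: the reclaimer currently owns the page.  Back
		 * off and let the caller decide whether to retry.
		 */
		WARN_ON_ONCE(ret != -EBUSY);
		return ret;
	}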
Sean Christopherson Oct. 22, 2019, 3:36 a.m. UTC | #2
On Fri, Oct 18, 2019 at 01:06:55PM +0300, Jarkko Sakkinen wrote:
> On Wed, Oct 16, 2019 at 11:37:42AM -0700, Sean Christopherson wrote:
> > Move the post-reclaim half of sgx_free_page() to a standalone helper so
> > that it can be used in flows where the page is known to be
> > non-reclaimable.
> 
> The call sites where it is known to be reclaimable should handle the
> error instead of creating call-site-specific versions of the function.

What if we completely split the function(s)?  The existing callers of
sgx_free_page() stay as is, there is one and only one "free_page()", we
don't take sgx_active_page_list_lock in most flows, and the one case
where failure is acceptable gets to do its thing.  I think this'd make
both of us happy.  E.g.:

/**
 * sgx_unmark_page_reclaimable() - Unmark a page as reclaimable
 * @page:	EPC page
 *
 * Clear the reclaimable flag from the page and remove the page from the active
 * page list.
 *
 * Return:
 *   0 on success,
 *   -EBUSY if a reclaim is in progress
 */
int sgx_unmark_page_reclaimable(struct sgx_epc_page *page)
{
	/*
	 * Remove the page from the active list if necessary.  If the page
	 * is actively being reclaimed, i.e. RECLAIMABLE is set but the
	 * page isn't on the active list, return -EBUSY as we can't free
	 * the page at this time since it is "owned" by the reclaimer.
	 */
	spin_lock(&sgx_active_page_list_lock);
	if (page->desc & SGX_EPC_PAGE_RECLAIMABLE) {
		if (list_empty(&page->list)) {
			spin_unlock(&sgx_active_page_list_lock);
			return -EBUSY;
		}
		list_del(&page->list);
		page->desc &= ~SGX_EPC_PAGE_RECLAIMABLE;
	}
	spin_unlock(&sgx_active_page_list_lock);

	return 0;
}

/**
 * sgx_free_page() - Free an EPC page
 * @page:	pointer to a previously allocated EPC page
 *
 * EREMOVE an EPC page and insert it back into the list of free pages. The
 * page must not be reclaimable.
 */
void sgx_free_page(struct sgx_epc_page *page)
{
	struct sgx_epc_section *section;
	int ret;

	/*
	 * Don't take sgx_active_page_list_lock when asserting the page isn't
	 * reclaimable; missing a WARN in the very rare case is preferable to
	 * unnecessarily taking a global lock in the common case.
	 */
	WARN_ON_ONCE(page->desc & SGX_EPC_PAGE_RECLAIMABLE);

	ret = __eremove(sgx_epc_addr(page));
	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
		return;

	section = sgx_epc_section(page);

	spin_lock(&section->lock);
	list_add_tail(&page->list, &section->page_list);
	section->free_cnt++;
	spin_unlock(&section->lock);
}
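
With this split, a caller that had marked its page reclaimable pairs the two
calls explicitly, roughly as below (a usage sketch; the encl_page container
holding the EPC page is an assumption):

	/* Pull the page off the active list before freeing it. */
	ret = sgx_unmark_page_reclaimable(encl_page->epc_page);
	if (ret)
		return ret;	/* -EBUSY: reclaimer owns the page */

	sgx_free_page(encl_page->epc_page);
	encl_page->epc_page = NULL;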
Jarkko Sakkinen Oct. 23, 2019, 11:59 a.m. UTC | #3
On Mon, Oct 21, 2019 at 08:36:17PM -0700, Sean Christopherson wrote:
> On Fri, Oct 18, 2019 at 01:06:55PM +0300, Jarkko Sakkinen wrote:
> > On Wed, Oct 16, 2019 at 11:37:42AM -0700, Sean Christopherson wrote:
> > > Move the post-reclaim half of sgx_free_page() to a standalone helper so
> > > that it can be used in flows where the page is known to be
> > > non-reclaimable.
> > 
> > The call sites where it is known to be reclaimable should handle the
> > error instead of creating call-site-specific versions of the function.
> 
> What if we completely split the function(s)?  The existing callers of
> sgx_free_page() stay as is, there is one and only one "free_page()", we
> don't take sgx_active_page_list_lock in most flows, and the one case
> where failure is acceptable gets to do its thing.  I think this'd make
> both of us happy.  E.g.:

The split is a clean way to sort this out.

Makes sense also from a "symmetry" perspective: we don't mark pages
reclaimable in sgx_alloc_page().

/Jarkko
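
The symmetry reads roughly as follows: making a page reclaimable is an
explicit step after allocation, so making it non-reclaimable again is an
explicit step before freeing (a sketch; sgx_mark_page_reclaimable() is
assumed as the marking-side counterpart):

	struct sgx_epc_page *epc_page;

	/* alloc side: the page starts out non-reclaimable */
	epc_page = sgx_alloc_page(encl_page, true);
	if (IS_ERR(epc_page))
		return PTR_ERR(epc_page);
	sgx_mark_page_reclaimable(epc_page);	/* explicit opt-in */

	/* free side mirrors it: explicit opt-out, then free */
	if (!sgx_unmark_page_reclaimable(epc_page))
		sgx_free_page(epc_page);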

Patch

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 718fd5590608..083d9a589882 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -103,6 +103,37 @@  struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
 	return entry;
 }
 
+/**
+ * __sgx_free_page() - Free an EPC page
+ * @page:	pointer to a previously allocated EPC page
+ *
+ * EREMOVE an EPC page and insert it back into the list of free pages. The
+ * page must not be reclaimable.
+ */
+void __sgx_free_page(struct sgx_epc_page *page)
+{
+	struct sgx_epc_section *section;
+	int ret;
+
+	/*
+	 * Don't take sgx_active_page_list_lock when asserting the page isn't
+	 * reclaimable; missing a WARN in the very rare case is preferable to
+	 * unnecessarily taking a global lock in the common case.
+	 */
+	WARN_ON_ONCE(page->desc & SGX_EPC_PAGE_RECLAIMABLE);
+
+	ret = __eremove(sgx_epc_addr(page));
+	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
+		return;
+
+	section = sgx_epc_section(page);
+
+	spin_lock(&section->lock);
+	list_add_tail(&page->list, &section->page_list);
+	sgx_nr_free_pages++;
+	spin_unlock(&section->lock);
+}
+
 /**
  * sgx_free_page() - Free an EPC page
  * @page:	pointer to a previously allocated EPC page
@@ -116,9 +147,6 @@  struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
  */
 int sgx_free_page(struct sgx_epc_page *page)
 {
-	struct sgx_epc_section *section = sgx_epc_section(page);
-	int ret;
-
 	/*
 	 * Remove the page from the active list if necessary.  If the page
 	 * is actively being reclaimed, i.e. RECLAIMABLE is set but the
@@ -136,13 +164,7 @@  int sgx_free_page(struct sgx_epc_page *page)
 	}
 	spin_unlock(&sgx_active_page_list_lock);
 
-	ret = __eremove(sgx_epc_addr(page));
-	WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret);
-
-	spin_lock(&section->lock);
-	list_add_tail(&page->list, &section->page_list);
-	sgx_nr_free_pages++;
-	spin_unlock(&section->lock);
+	__sgx_free_page(page);
 
 	return 0;
 }
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 160a3c996ef6..87e375e8c25e 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -85,6 +85,7 @@  void sgx_reclaim_pages(void);
 
 struct sgx_epc_page *sgx_try_alloc_page(void);
 struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim);
+void __sgx_free_page(struct sgx_epc_page *page);
 int sgx_free_page(struct sgx_epc_page *page);
 
 #endif /* _X86_SGX_H */