
[v3,4/5] x86/sgx: Replace section->page_list with a global free page list

Message ID 20210303150323.433207-5-jarkko@kernel.org (mailing list archive)
State New, archived
Series x86/sgx: NUMA

Commit Message

Jarkko Sakkinen March 3, 2021, 3:03 p.m. UTC
Background
==========

EPC section is covered by one or more SRAT entries that are associated with
one and only one PXM (NUMA node). The current implementation overheats a
single NUMA node, because sgx_alloc_epc_page() always starts looking for
pages from the same EPC section everytime.

Only within a section it does pick pages in FIFO fashion, i.e. the oldest
freed in that section is the EPC page given back to the caller.  That does
not do any good, as the pages in the same node are performance-wise equal.

Solution
========

Replace local lists with a single global free page list, from which pages
are borrowed in FIFO fashion.
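
The shape of the change, condensed into a minimal sketch (sketch_free()
and sketch_alloc() are illustrative names only; the real functions in
the diff below are sgx_free_epc_page() and __sgx_alloc_epc_page()):

  /* A single global free list and lock replace the per-section lists. */
  static LIST_HEAD(sgx_free_page_list);
  static DEFINE_SPINLOCK(sgx_free_page_list_lock);
  static unsigned long sgx_nr_free_pages;

  /* Freeing appends to the tail of the global list. */
  static void sketch_free(struct sgx_epc_page *page)
  {
  	spin_lock(&sgx_free_page_list_lock);
  	list_add_tail(&page->list, &sgx_free_page_list);
  	sgx_nr_free_pages++;
  	spin_unlock(&sgx_free_page_list_lock);
  }

  /* Allocation takes from the head, i.e. the oldest freed page (FIFO). */
  static struct sgx_epc_page *sketch_alloc(void)
  {
  	struct sgx_epc_page *page = NULL;

  	spin_lock(&sgx_free_page_list_lock);
  	if (!list_empty(&sgx_free_page_list)) {
  		page = list_first_entry(&sgx_free_page_list,
  					struct sgx_epc_page, list);
  		list_del_init(&page->list);
  		sgx_nr_free_pages--;
  	}
  	spin_unlock(&sgx_free_page_list_lock);

  	return page;
  }

The trade-off is that every alloc/free now serializes on one lock,
which the per-node split in 5/5 presumably relieves.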

Signed-off-by: Jarkko Sakkinen <jarkko@kernel.org>
---
 arch/x86/kernel/cpu/sgx/main.c | 84 +++++++++++-----------------------
 arch/x86/kernel/cpu/sgx/sgx.h  |  6 ---
 2 files changed, 27 insertions(+), 63 deletions(-)

Comments

Dave Hansen March 3, 2021, 11:48 p.m. UTC | #1
On 3/3/21 7:03 AM, Jarkko Sakkinen wrote:
> Background
> ==========
> 
> EPC section is covered by one or more SRAT entries that are associated with
> one and only one PXM (NUMA node). The current implementation overheats a

Overheats?

> single NUMA node, because sgx_alloc_epc_page() always starts looking for
> pages from the same EPC section everytime.

"every time"

> Only within a section it does pick pages in FIFO fashion, i.e. the oldest
> freed in that section is the EPC page given back to the caller.  That does
> not do any good, as the pages in the same node are performance-wise equal.

I'm not sure why all of this is relevant and it doesn't really tell me
anything about this patch's place in the *series*.

Why are we destroying all of the per-node structures just before adding
NUMA support?

> Replace local lists with a single global free page list, from which pages
> are borrowed in FIFO fashion.
Jarkko Sakkinen March 10, 2021, 10:54 a.m. UTC | #2
On Wed, Mar 03, 2021 at 03:48:46PM -0800, Dave Hansen wrote:
> On 3/3/21 7:03 AM, Jarkko Sakkinen wrote:
> > Background
> > ==========
> > 
> > EPC section is covered by one or more SRAT entries that are associated with
> > one and only one PXM (NUMA node). The current implementation overheats a
> 
> Overheats?
> 
> > single NUMA node, because sgx_alloc_epc_page() always starts looking for
> > pages from the same EPC section everytime.
> 
> "every time"
> 
> > Only within a section it does pick pages in FIFO fashion, i.e. the oldest
> > freed in that section is the EPC page given back to the caller.  That does
> > not do any good, as the pages in the same node are performance-wise equal.
> 
> I'm not sure why all of this is relevant and it doesn't really tell me
> anything about this patch's place in the *series*.
> 
> Why are we destroying all of the per-node structures just before adding
> NUMA support?

These are per-section structures, not per-node structures. Most of the
time, if not always, the two are equal, but conceptually I'm not
destroying per-node structures :-) I'm introducing them in 5/5.

/Jarkko
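
For context, the per-node bookkeeping that 5/5 introduces would look
roughly like this (a hypothetical sketch; the struct and field names
here are assumptions, not quotes from that patch):

  /* Hypothetical: one free list and lock per NUMA node. */
  struct sgx_numa_node {
  	struct list_head free_page_list;
  	spinlock_t lock;
  };

  static struct sgx_numa_node *sgx_numa_nodes;

  /* Allocation would then prefer the node returned by numa_node_id()
   * and fall back to the other nodes only when it is empty. */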

Patch

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index a649010949c2..58474480f5be 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -18,14 +18,15 @@  static int sgx_nr_epc_sections;
 static struct task_struct *ksgxd_tsk;
 static DECLARE_WAIT_QUEUE_HEAD(ksgxd_waitq);
 
-/*
- * These variables are part of the state of the reclaimer, and must be accessed
- * with sgx_reclaimer_lock acquired.
- */
+/* Variables protected by sgx_reclaimer_lock precede the lock. */
 static LIST_HEAD(sgx_active_page_list);
-
 static DEFINE_SPINLOCK(sgx_reclaimer_lock);
 
+/* Variables protected by sgx_free_page_list_lock precede the lock. */
+static unsigned long sgx_nr_free_pages;
+static LIST_HEAD(sgx_free_page_list);
+static DEFINE_SPINLOCK(sgx_free_page_list_lock);
+
 /*
  * Reset dirty EPC pages to uninitialized state. Laundry can be left with SECS
  * pages whose child pages blocked EREMOVE.
@@ -379,21 +380,9 @@  static void sgx_reclaim_pages(void)
 	}
 }
 
-static unsigned long sgx_nr_free_pages(void)
-{
-	unsigned long cnt = 0;
-	int i;
-
-	for (i = 0; i < sgx_nr_epc_sections; i++)
-		cnt += sgx_epc_sections[i].free_cnt;
-
-	return cnt;
-}
-
 static bool sgx_should_reclaim(unsigned long watermark)
 {
-	return sgx_nr_free_pages() < watermark &&
-	       !list_empty(&sgx_active_page_list);
+	return sgx_nr_free_pages < watermark && !list_empty(&sgx_active_page_list);
 }
 
 static int ksgxd(void *p)
@@ -445,50 +434,34 @@  static bool __init sgx_page_reclaimer_init(struct list_head *laundry)
 	return true;
 }
 
-static struct sgx_epc_page *__sgx_alloc_epc_page_from_section(struct sgx_epc_section *section)
-{
-	struct sgx_epc_page *page;
-
-	spin_lock(&section->lock);
-
-	if (list_empty(&section->page_list)) {
-		spin_unlock(&section->lock);
-		return NULL;
-	}
-
-	page = list_first_entry(&section->page_list, struct sgx_epc_page, list);
-	list_del_init(&page->list);
-	section->free_cnt--;
-
-	spin_unlock(&section->lock);
-	return page;
-}
-
 /**
  * __sgx_alloc_epc_page() - Allocate an EPC page
  *
- * Iterate through EPC sections and borrow a free EPC page to the caller. When a
- * page is no longer needed it must be released with sgx_free_epc_page().
+ * Lend a free EPC page to the caller in FIFO fashion: the caller gets the
+ * oldest freed page.
  *
  * Return:
- *   an EPC page,
- *   -errno on error
+ * - an EPC page:	Free EPC pages were available.
+ * - ERR_PTR(-ENOMEM):	Run out of EPC pages.
  */
 struct sgx_epc_page *__sgx_alloc_epc_page(void)
 {
-	struct sgx_epc_section *section;
 	struct sgx_epc_page *page;
-	int i;
 
-	for (i = 0; i < sgx_nr_epc_sections; i++) {
-		section = &sgx_epc_sections[i];
+	spin_lock(&sgx_free_page_list_lock);
 
-		page = __sgx_alloc_epc_page_from_section(section);
-		if (page)
-			return page;
+	if (list_empty(&sgx_free_page_list)) {
+		spin_unlock(&sgx_free_page_list_lock);
+		return ERR_PTR(-ENOMEM);
 	}
 
-	return ERR_PTR(-ENOMEM);
+	page = list_first_entry(&sgx_free_page_list, struct sgx_epc_page, list);
+	list_del_init(&page->list);
+	sgx_nr_free_pages--;
+
+	spin_unlock(&sgx_free_page_list_lock);
+
+	return page;
 }
 
 /**
@@ -593,7 +566,6 @@  struct sgx_epc_page *sgx_alloc_epc_page(void *owner, bool reclaim)
  */
 void sgx_free_epc_page(struct sgx_epc_page *page)
 {
-	struct sgx_epc_section *section = &sgx_epc_sections[page->section];
 	int ret;
 
 	WARN_ON_ONCE(page->flags & SGX_EPC_PAGE_RECLAIMER_TRACKED);
@@ -602,10 +574,10 @@  void sgx_free_epc_page(struct sgx_epc_page *page)
 	if (WARN_ONCE(ret, "EREMOVE returned %d (0x%x)", ret, ret))
 		return;
 
-	spin_lock(&section->lock);
-	list_add_tail(&page->list, &section->page_list);
-	section->free_cnt++;
-	spin_unlock(&section->lock);
+	spin_lock(&sgx_free_page_list_lock);
+	list_add_tail(&page->list, &sgx_free_page_list);
+	sgx_nr_free_pages++;
+	spin_unlock(&sgx_free_page_list_lock);
 }
 
 static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
@@ -627,8 +599,6 @@  static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
 	}
 
 	section->phys_addr = phys_addr;
-	spin_lock_init(&section->lock);
-	INIT_LIST_HEAD(&section->page_list);
 
 	for (i = 0; i < nr_pages; i++) {
 		section->pages[i].section = index;
@@ -637,7 +607,7 @@  static bool __init sgx_setup_epc_section(u64 phys_addr, u64 size,
 		list_add_tail(&section->pages[i].list, laundry);
 	}
 
-	section->free_cnt = nr_pages;
+	sgx_nr_free_pages += nr_pages;
 	return true;
 }
 
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index bc8af0428640..41ca045a574a 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -34,17 +34,11 @@  struct sgx_epc_page {
 * physical memory, e.g. for the memory areas of each node. This structure is
  * used to store EPC pages for one EPC section and virtual memory area where
  * the pages have been mapped.
- *
- * 'lock' must be held before accessing 'page_list' or 'free_cnt'.
  */
 struct sgx_epc_section {
 	unsigned long phys_addr;
 	void *virt_addr;
 	struct sgx_epc_page *pages;
-
-	spinlock_t lock;
-	struct list_head page_list;
-	unsigned long free_cnt;
 };
 
 extern struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
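
For reference, callers of the allocator are untouched by this patch;
the usage pattern stays what it is elsewhere in the driver (a sketch:
example_use_epc() is a made-up name, while sgx_alloc_epc_page(),
sgx_free_epc_page() and sgx_get_epc_virt_addr() are the real API):

  /* example_use_epc() is hypothetical; the calls inside it are not. */
  static int example_use_epc(void *owner)
  {
  	struct sgx_epc_page *epc_page;

  	/* Take the oldest freed page from the global free list; with
  	 * reclaim=true this may sleep and reclaim pages when EPC is
  	 * exhausted. */
  	epc_page = sgx_alloc_epc_page(owner, true);
  	if (IS_ERR(epc_page))
  		return PTR_ERR(epc_page);

  	/* ... use the page, e.g. via sgx_get_epc_virt_addr(epc_page) ... */

  	/* EREMOVE the page and put it back on the global free list. */
  	sgx_free_epc_page(epc_page);
  	return 0;
  }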