[v3,14/17] x86/sgx: Replace section->free_cnt with a global sgx_nr_free_pages
diff mbox series

Message ID 20190916101803.30726-15-jarkko.sakkinen@linux.intel.com
State New
Headers show
Series
  • Fixes and updates for v23
Related show

Commit Message

Jarkko Sakkinen Sept. 16, 2019, 10:18 a.m. UTC
Replace section-specific counters with a single global counter for free
pages. In effect, remove sgx_calc_free_cnt().

Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Shay Katz-zamir <shay.katz-zamir@intel.com>
Cc: Serge Ayoun <serge.ayoun@intel.com>
Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
 arch/x86/kernel/cpu/sgx/main.c    | 11 ++++++-----
 arch/x86/kernel/cpu/sgx/reclaim.c | 19 ++-----------------
 arch/x86/kernel/cpu/sgx/sgx.h     |  3 +--
 3 files changed, 9 insertions(+), 24 deletions(-)

Comments

Sean Christopherson Sept. 17, 2019, 10:50 p.m. UTC | #1
On Mon, Sep 16, 2019 at 01:18:00PM +0300, Jarkko Sakkinen wrote:
> Replace section specific counters with a single gloal counter for free
> pages. In effect, remove sgx_calc_free_cnt().
> 
> Cc: Sean Christopherson <sean.j.christopherson@intel.com>
> Cc: Shay Katz-zamir <shay.katz-zamir@intel.com>
> Cc: Serge Ayoun <serge.ayoun@intel.com>
> Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
> ---
>  arch/x86/kernel/cpu/sgx/main.c    | 11 ++++++-----
>  arch/x86/kernel/cpu/sgx/reclaim.c | 19 ++-----------------
>  arch/x86/kernel/cpu/sgx/sgx.h     |  3 +--
>  3 files changed, 9 insertions(+), 24 deletions(-)
> 
> diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
> index 4c03e5f33414..f37d28023b97 100644
> --- a/arch/x86/kernel/cpu/sgx/main.c
> +++ b/arch/x86/kernel/cpu/sgx/main.c
> @@ -14,19 +14,20 @@
>  
>  struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
>  int sgx_nr_epc_sections;
> +unsigned long sgx_nr_free_pages;
>  
>  static struct sgx_epc_page *sgx_section_try_take_page(
>  	struct sgx_epc_section *section)
>  {
>  	struct sgx_epc_page *page;
>  
> -	if (!section->free_cnt)
> +	if (list_empty(&section->page_list))
>  		return NULL;
>  
>  	page = list_first_entry(&section->page_list, struct sgx_epc_page,
>  				list);
>  	list_del_init(&page->list);
> -	section->free_cnt--;
> +	sgx_nr_free_pages--;
>  	return page;
>  }
>  
> @@ -90,7 +91,7 @@ struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
>  		schedule();
>  	}
>  
> -	if (sgx_calc_free_cnt() < SGX_NR_LOW_PAGES)
> +	if (sgx_nr_free_pages < SGX_NR_LOW_PAGES)
>  		wake_up(&ksgxswapd_waitq);
>  
>  	return entry;
> @@ -136,7 +137,7 @@ int __sgx_free_page(struct sgx_epc_page *page)
>  
>  	spin_lock(&section->lock);
>  	list_add_tail(&page->list, &section->page_list);
> -	section->free_cnt++;
> +	sgx_nr_free_pages++;

This isn't safe when there are multiple EPC sections as each section has
its own spinlock.

>  	spin_unlock(&section->lock);
>  
>  	return 0;
> @@ -202,7 +203,7 @@ static __init int sgx_init_epc_section(u64 addr, u64 size, unsigned long index,
>  			goto out;
>  		page->desc = (addr + (i << PAGE_SHIFT)) | index;
>  		list_add_tail(&page->list, &section->unsanitized_page_list);
> -		section->free_cnt++;
> +		sgx_nr_free_pages++;
>  	}
>  
>  	return 0;
> diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
> index cc3155b61513..2e04a923d8dc 100644
> --- a/arch/x86/kernel/cpu/sgx/reclaim.c
> +++ b/arch/x86/kernel/cpu/sgx/reclaim.c
> @@ -64,7 +64,7 @@ static void sgx_sanitize_section(struct sgx_epc_section *section)
>  
>  static inline bool sgx_should_reclaim(void)
>  {
> -	return sgx_calc_free_cnt() < SGX_NR_HIGH_PAGES &&
> +	return sgx_nr_free_pages < SGX_NR_HIGH_PAGES &&
>  	       !list_empty(&sgx_active_page_list);
>  }
>  
> @@ -432,7 +432,6 @@ void sgx_reclaim_pages(void)
>  		if (!epc_page)
>  			continue;
>  
> -
>  		encl_page = epc_page->owner;
>  		sgx_reclaimer_write(epc_page);
>  		kref_put(&encl_page->encl->refcount, sgx_encl_release);
> @@ -441,21 +440,7 @@ void sgx_reclaim_pages(void)
>  		section = sgx_epc_section(epc_page);
>  		spin_lock(&section->lock);
>  		list_add_tail(&epc_page->list, &section->page_list);
> -		section->free_cnt++;
> +		sgx_nr_free_pages++;
>  		spin_unlock(&section->lock);
>  	}
>  }
> -
> -unsigned long sgx_calc_free_cnt(void)
> -{
> -	struct sgx_epc_section *section;
> -	unsigned long free_cnt = 0;
> -	int i;
> -
> -	for (i = 0; i < sgx_nr_epc_sections; i++) {
> -		section = &sgx_epc_sections[i];
> -		free_cnt += section->free_cnt;
> -	}
> -
> -	return free_cnt;
> -}
> diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
> index 9b08690262b5..56d0bde3f4d8 100644
> --- a/arch/x86/kernel/cpu/sgx/sgx.h
> +++ b/arch/x86/kernel/cpu/sgx/sgx.h
> @@ -30,7 +30,6 @@ struct sgx_epc_section {
>  	void *va;
>  	struct list_head page_list;
>  	struct list_head unsanitized_page_list;
> -	unsigned long free_cnt;
>  	spinlock_t lock;
>  };
>  
> @@ -72,6 +71,7 @@ static inline void *sgx_epc_addr(struct sgx_epc_page *page)
>  #define SGX_NR_HIGH_PAGES	64
>  
>  extern int sgx_nr_epc_sections;
> +extern unsigned long sgx_nr_free_pages;
>  extern struct task_struct *ksgxswapd_tsk;
>  extern struct wait_queue_head(ksgxswapd_waitq);
>  extern struct list_head sgx_active_page_list;
> @@ -79,7 +79,6 @@ extern spinlock_t sgx_active_page_list_lock;
>  
>  int sgx_page_reclaimer_init(void);
>  void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
> -unsigned long sgx_calc_free_cnt(void);
>  void sgx_reclaim_pages(void);
>  
>  struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim);
> -- 
> 2.20.1
>
Jarkko Sakkinen Sept. 18, 2019, 4:07 a.m. UTC | #2
On Tue, Sep 17, 2019 at 03:50:28PM -0700, Sean Christopherson wrote:
> This isn't safe when there are multiple EPC sections as each section has
> its own spinlock.

The variable does not need to be exact as long as it settles eventually
to the correct value. It is only used to retrigger the reclaimer.

/Jarkko

Patch
diff mbox series

diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 4c03e5f33414..f37d28023b97 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -14,19 +14,20 @@ 
 
 struct sgx_epc_section sgx_epc_sections[SGX_MAX_EPC_SECTIONS];
 int sgx_nr_epc_sections;
+unsigned long sgx_nr_free_pages;
 
 static struct sgx_epc_page *sgx_section_try_take_page(
 	struct sgx_epc_section *section)
 {
 	struct sgx_epc_page *page;
 
-	if (!section->free_cnt)
+	if (list_empty(&section->page_list))
 		return NULL;
 
 	page = list_first_entry(&section->page_list, struct sgx_epc_page,
 				list);
 	list_del_init(&page->list);
-	section->free_cnt--;
+	sgx_nr_free_pages--;
 	return page;
 }
 
@@ -90,7 +91,7 @@  struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim)
 		schedule();
 	}
 
-	if (sgx_calc_free_cnt() < SGX_NR_LOW_PAGES)
+	if (sgx_nr_free_pages < SGX_NR_LOW_PAGES)
 		wake_up(&ksgxswapd_waitq);
 
 	return entry;
@@ -136,7 +137,7 @@  int __sgx_free_page(struct sgx_epc_page *page)
 
 	spin_lock(&section->lock);
 	list_add_tail(&page->list, &section->page_list);
-	section->free_cnt++;
+	sgx_nr_free_pages++;
 	spin_unlock(&section->lock);
 
 	return 0;
@@ -202,7 +203,7 @@  static __init int sgx_init_epc_section(u64 addr, u64 size, unsigned long index,
 			goto out;
 		page->desc = (addr + (i << PAGE_SHIFT)) | index;
 		list_add_tail(&page->list, &section->unsanitized_page_list);
-		section->free_cnt++;
+		sgx_nr_free_pages++;
 	}
 
 	return 0;
diff --git a/arch/x86/kernel/cpu/sgx/reclaim.c b/arch/x86/kernel/cpu/sgx/reclaim.c
index cc3155b61513..2e04a923d8dc 100644
--- a/arch/x86/kernel/cpu/sgx/reclaim.c
+++ b/arch/x86/kernel/cpu/sgx/reclaim.c
@@ -64,7 +64,7 @@  static void sgx_sanitize_section(struct sgx_epc_section *section)
 
 static inline bool sgx_should_reclaim(void)
 {
-	return sgx_calc_free_cnt() < SGX_NR_HIGH_PAGES &&
+	return sgx_nr_free_pages < SGX_NR_HIGH_PAGES &&
 	       !list_empty(&sgx_active_page_list);
 }
 
@@ -432,7 +432,6 @@  void sgx_reclaim_pages(void)
 		if (!epc_page)
 			continue;
 
-
 		encl_page = epc_page->owner;
 		sgx_reclaimer_write(epc_page);
 		kref_put(&encl_page->encl->refcount, sgx_encl_release);
@@ -441,21 +440,7 @@  void sgx_reclaim_pages(void)
 		section = sgx_epc_section(epc_page);
 		spin_lock(&section->lock);
 		list_add_tail(&epc_page->list, &section->page_list);
-		section->free_cnt++;
+		sgx_nr_free_pages++;
 		spin_unlock(&section->lock);
 	}
 }
-
-unsigned long sgx_calc_free_cnt(void)
-{
-	struct sgx_epc_section *section;
-	unsigned long free_cnt = 0;
-	int i;
-
-	for (i = 0; i < sgx_nr_epc_sections; i++) {
-		section = &sgx_epc_sections[i];
-		free_cnt += section->free_cnt;
-	}
-
-	return free_cnt;
-}
diff --git a/arch/x86/kernel/cpu/sgx/sgx.h b/arch/x86/kernel/cpu/sgx/sgx.h
index 9b08690262b5..56d0bde3f4d8 100644
--- a/arch/x86/kernel/cpu/sgx/sgx.h
+++ b/arch/x86/kernel/cpu/sgx/sgx.h
@@ -30,7 +30,6 @@  struct sgx_epc_section {
 	void *va;
 	struct list_head page_list;
 	struct list_head unsanitized_page_list;
-	unsigned long free_cnt;
 	spinlock_t lock;
 };
 
@@ -72,6 +71,7 @@  static inline void *sgx_epc_addr(struct sgx_epc_page *page)
 #define SGX_NR_HIGH_PAGES	64
 
 extern int sgx_nr_epc_sections;
+extern unsigned long sgx_nr_free_pages;
 extern struct task_struct *ksgxswapd_tsk;
 extern struct wait_queue_head(ksgxswapd_waitq);
 extern struct list_head sgx_active_page_list;
@@ -79,7 +79,6 @@  extern spinlock_t sgx_active_page_list_lock;
 
 int sgx_page_reclaimer_init(void);
 void sgx_mark_page_reclaimable(struct sgx_epc_page *page);
-unsigned long sgx_calc_free_cnt(void);
 void sgx_reclaim_pages(void);
 
 struct sgx_epc_page *sgx_alloc_page(void *owner, bool reclaim);