[V3,07/30] x86/sgx: Rename sgx_encl_ewb_cpumask() as sgx_encl_cpumask()

Message ID c42cb6298d69cacd48872f97d11df694074803d8.1648847675.git.reinette.chatre@intel.com
State New, archived
Series x86/sgx and selftests/sgx: Support SGX2

Commit Message

Reinette Chatre April 4, 2022, 4:49 p.m. UTC
sgx_encl_ewb_cpumask() will no longer be unique to the reclaimer. Today
it is used only during the EWB ENCLS leaf function, when EPC pages are
written out to main memory, to learn which CPUs might have executed the
enclave and thus need their TLBs flushed.

Upcoming SGX2 enabling will use sgx_encl_ewb_cpumask() during the
EMODPR and EMODT ENCLS leaf functions that make changes to enclave
pages. The function is needed for the same reason it is used now: to
learn which CPUs might have executed the enclave to ensure that TLBs
no longer point to the changed pages.

Rename sgx_encl_ewb_cpumask() to sgx_encl_cpumask() to reflect the
broader usage.

Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
---
No changes since V2

Changes since V1:
- New patch split from original "x86/sgx: Use more generic name for
  enclave cpumask function" (Jarkko).

 arch/x86/kernel/cpu/sgx/encl.c | 6 +++---
 arch/x86/kernel/cpu/sgx/encl.h | 2 +-
 arch/x86/kernel/cpu/sgx/main.c | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
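
For context, the flow the helper supports can be read as a short recipe.
Below is a minimal sketch, not part of this series: sgx_tlb_flush_sketch()
is a made-up name, error handling is elided, and the code is assumed to
live in arch/x86/kernel/cpu/sgx/main.c where sgx_ipi_cb() is defined.

	/*
	 * Illustrative sketch only (not from this patch): steps 1-3 of
	 * the flow documented at sgx_encl_cpumask().
	 */
	static void sgx_tlb_flush_sketch(struct sgx_encl *encl)
	{
		/* 1) Initiate hardware tracking of enclave threads. */
		if (__etrack(sgx_get_epc_virt_addr(encl->secs.epc_page)))
			return;	/* error handling elided */

		/*
		 * 2) Query which CPUs might be executing the enclave, then
		 * 3) IPI them, kicking them out of the enclave and thereby
		 *    flushing their cached linear-to-physical mappings.
		 */
		on_each_cpu_mask(sgx_encl_cpumask(encl), sgx_ipi_cb, NULL, 1);
	}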

Comments

Jarkko Sakkinen April 5, 2022, 6:57 a.m. UTC | #1
On Mon, Apr 04, 2022 at 09:49:15AM -0700, Reinette Chatre wrote:
> sgx_encl_ewb_cpumask() will no longer be unique to the reclaimer. Today
> it is used only during the EWB ENCLS leaf function, when EPC pages are
> written out to main memory, to learn which CPUs might have executed the
> enclave and thus need their TLBs flushed.
> 
> Upcoming SGX2 enabling will use sgx_encl_ewb_cpumask() during the
> EMODPR and EMODT ENCLS leaf functions that make changes to enclave
> pages. The function is needed for the same reason it is used now: to
> learn which CPUs might have executed the enclave to ensure that TLBs
> no longer point to the changed pages.
> 
> Rename sgx_encl_ewb_cpumask() to sgx_encl_cpumask() to reflect the
> broader usage.
> 
> Signed-off-by: Reinette Chatre <reinette.chatre@intel.com>
> [...]

Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>

BR, Jarkko

Patch

diff --git a/arch/x86/kernel/cpu/sgx/encl.c b/arch/x86/kernel/cpu/sgx/encl.c
index c6525eba74e8..8de9bebc4d81 100644
--- a/arch/x86/kernel/cpu/sgx/encl.c
+++ b/arch/x86/kernel/cpu/sgx/encl.c
@@ -614,7 +614,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
 }
 
 /**
- * sgx_encl_ewb_cpumask() - Query which CPUs might be accessing the enclave
+ * sgx_encl_cpumask() - Query which CPUs might be accessing the enclave
  * @encl: the enclave
  *
  * Some SGX functions require that no cached linear-to-physical address
@@ -639,7 +639,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
  * The following flow is used to support SGX functions that require that
  * no cached linear-to-physical address mappings are present:
  * 1) Execute ENCLS[ETRACK] to initiate hardware tracking.
- * 2) Use this function (sgx_encl_ewb_cpumask()) to query which CPUs might be
+ * 2) Use this function (sgx_encl_cpumask()) to query which CPUs might be
  *    accessing the enclave.
  * 3) Send IPI to identified CPUs, kicking them out of the enclave and
  *    thus flushing all locally cached linear-to-physical address mappings.
@@ -656,7 +656,7 @@ int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm)
  *
  * Return: cpumask of CPUs that might be accessing @encl
  */
-const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl)
+const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
 {
 	cpumask_t *cpumask = &encl->cpumask;
 	struct sgx_encl_mm *encl_mm;
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index d2acb4debde5..e59c2cbf71e2 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -105,7 +105,7 @@ int sgx_encl_may_map(struct sgx_encl *encl, unsigned long start,
 
 void sgx_encl_release(struct kref *ref);
 int sgx_encl_mm_add(struct sgx_encl *encl, struct mm_struct *mm);
-const cpumask_t *sgx_encl_ewb_cpumask(struct sgx_encl *encl);
+const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl);
 int sgx_encl_get_backing(struct sgx_encl *encl, unsigned long page_index,
 			 struct sgx_backing *backing);
 void sgx_encl_put_backing(struct sgx_backing *backing, bool do_write);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index 2de85f459492..fa33922879bf 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -249,7 +249,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 			 * miss cpus that entered the enclave between
 			 * generating the mask and incrementing epoch.
 			 */
-			on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
+			on_each_cpu_mask(sgx_encl_cpumask(encl),
 					 sgx_ipi_cb, NULL, 1);
 			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
 		}
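
For reference, the function body itself is untouched by the rename. As
reconstructed from the kernel tree at this point in the series (only the
name changes here), it ORs together the CPU mask of every mm that has
mapped the enclave, under SRCU protection:

	const cpumask_t *sgx_encl_cpumask(struct sgx_encl *encl)
	{
		cpumask_t *cpumask = &encl->cpumask;
		struct sgx_encl_mm *encl_mm;
		int idx;

		cpumask_clear(cpumask);

		idx = srcu_read_lock(&encl->srcu);

		/* Accumulate the CPUs of every mm that mapped the enclave. */
		list_for_each_entry_rcu(encl_mm, &encl->mm_list, list) {
			if (!mmget_not_zero(encl_mm->mm))
				continue;

			cpumask_or(cpumask, cpumask, mm_cpumask(encl_mm->mm));

			mmput_async(encl_mm->mm);
		}

		srcu_read_unlock(&encl->srcu, idx);

		return cpumask;
	}

A mask built this way can only over-approximate: a CPU in the mask may
have already left the enclave, in which case the IPI is merely a harmless
extra flush.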