diff mbox series

[v1,1/2] arm64/kpti: Move DAIF masking to C code

Message ID 20221123180209.634650-2-broonie@kernel.org (mailing list archive)
State New, archived
Headers show
Series arm64/asm: Remove DAIF save/restore macros | expand

Commit Message

Mark Brown Nov. 23, 2022, 6:02 p.m. UTC
We really don't want to take an exception while replacing TTBR1 so we mask
DAIF during the actual update. Currently this is done in the assembly
function idmap_cpu_replace_ttbr1() but it could equally be done in the only
caller of that function, cpu_replace_ttbr1(). This simplifies the assembly
code slightly and means that when working with the code around masking DAIF
flags there is one less piece of assembly code which needs to be considered.

While we're at it, add a comment which makes explicit why we are masking
DAIF in this code.

There should be no functional effect.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/mmu_context.h | 10 ++++++++++
 arch/arm64/mm/proc.S                 |  4 ----
 2 files changed, 10 insertions(+), 4 deletions(-)

Comments

Mark Rutland Nov. 24, 2022, 10:58 a.m. UTC | #1
On Wed, Nov 23, 2022 at 06:02:08PM +0000, Mark Brown wrote:
> We really don't want to take an exception while replacing TTBR1 so we mask
> DAIF during the actual update. Currently this is done in the assembly
> function idmap_cpu_replace_ttbr1() but it could equally be done in the only
> caller of that function, cpu_replace_ttbr1(). This simplifies the assembly
> code slightly and means that when working with the code around masking DAIF
> flags there is one less piece of assembly code which needs to be considered.
> 
> While we're at it add a comment which makes explicit why we are masking
> DAIF in this code.
> 
> There should be no functional effect.
> 
> Signed-off-by: Mark Brown <broonie@kernel.org>

When using GIC priority masking this means we'll also poke the PMR, but other
than that this is identical, and getting rid of the asm variation will make it
easier to clean up the DAIF / PMR manipulation for (p)NMI support.

So this makes sense to me; I don't see a problem with the additional PMR poking
in that case (it's consistent with other DAIF masking in C code), and we'll
still correctly mask and restore all the relevant DAIF bits.

FWIW:

  Reviewed-by: Mark Rutland <mark.rutland@arm.com>

Mark.


> ---
>  arch/arm64/include/asm/mmu_context.h | 10 ++++++++++
>  arch/arm64/mm/proc.S                 |  4 ----
>  2 files changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
> index d3f8b5df0c1f..72dbd6400549 100644
> --- a/arch/arm64/include/asm/mmu_context.h
> +++ b/arch/arm64/include/asm/mmu_context.h
> @@ -18,6 +18,7 @@
>  
>  #include <asm/cacheflush.h>
>  #include <asm/cpufeature.h>
> +#include <asm/daifflags.h>
>  #include <asm/proc-fns.h>
>  #include <asm-generic/mm_hooks.h>
>  #include <asm/cputype.h>
> @@ -152,6 +153,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
>  	typedef void (ttbr_replace_func)(phys_addr_t);
>  	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
>  	ttbr_replace_func *replace_phys;
> +	unsigned long daif;
>  
>  	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
>  	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
> @@ -171,7 +173,15 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
>  	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
>  
>  	__cpu_install_idmap(idmap);
> +
> +	/*
> +	 * We really don't want to take *any* exceptions while TTBR1 is
> +	 * in the process of being replaced so mask everything.
> +	 */
> +	daif = local_daif_save();
>  	replace_phys(ttbr1);
> +	local_daif_restore(daif);
> +
>  	cpu_uninstall_idmap();
>  }
>  
> diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
> index b9ecbbae1e1a..066fa60b93d2 100644
> --- a/arch/arm64/mm/proc.S
> +++ b/arch/arm64/mm/proc.S
> @@ -189,16 +189,12 @@ SYM_FUNC_END(cpu_do_resume)
>   * called by anything else. It can only be executed from a TTBR0 mapping.
>   */
>  SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
> -	save_and_disable_daif flags=x2
> -
>  	__idmap_cpu_set_reserved_ttbr1 x1, x3
>  
>  	offset_ttbr1 x0, x3
>  	msr	ttbr1_el1, x0
>  	isb
>  
> -	restore_daif x2
> -
>  	ret
>  SYM_FUNC_END(idmap_cpu_replace_ttbr1)
>  	.popsection
> -- 
> 2.30.2
>
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index d3f8b5df0c1f..72dbd6400549 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -18,6 +18,7 @@ 
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -152,6 +153,7 @@  static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
+	unsigned long daif;
 
 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -171,7 +173,15 @@  static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
 	__cpu_install_idmap(idmap);
+
+	/*
+	 * We really don't want to take *any* exceptions while TTBR1 is
+	 * in the process of being replaced so mask everything.
+	 */
+	daif = local_daif_save();
 	replace_phys(ttbr1);
+	local_daif_restore(daif);
+
 	cpu_uninstall_idmap();
 }
 
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index b9ecbbae1e1a..066fa60b93d2 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -189,16 +189,12 @@  SYM_FUNC_END(cpu_do_resume)
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
 SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
-	save_and_disable_daif flags=x2
-
 	__idmap_cpu_set_reserved_ttbr1 x1, x3
 
 	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb
 
-	restore_daif x2
-
 	ret
 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
 	.popsection