diff mbox series

[07/18] KVM: arm64: Move vcpu configuration flags into their own set

Message ID 20220528113829.1043361-8-maz@kernel.org (mailing list archive)
State New, archived
Headers show
Series KVM/arm64: Refactoring the vcpu flags | expand

Commit Message

Marc Zyngier May 28, 2022, 11:38 a.m. UTC
The KVM_ARM64_{GUEST_HAS_SVE,VCPU_SVE_FINALIZED,GUEST_HAS_PTRAUTH}
flags are purely configuration flags. Once set, they are never cleared,
but evaluated all over the code base.

Move these three flags into the configuration set in one go, using
the new accessors, and take this opportunity to drop the KVM_ARM64_
prefix which doesn't provide any help.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h | 17 ++++++++++-------
 arch/arm64/kvm/reset.c            |  6 +++---
 2 files changed, 13 insertions(+), 10 deletions(-)

Comments

Reiji Watanabe June 9, 2022, 6:15 a.m. UTC | #1
On Sat, May 28, 2022 at 4:38 AM Marc Zyngier <maz@kernel.org> wrote:
>
> The KVM_ARM64_{GUEST_HAS_SVE,VCPU_SVE_FINALIZED,GUEST_HAS_PTRAUTH}
> flags are purely configuration flags. Once set, they are never cleared,
> but evaluated all over the code base.
>
> Move these three flags into the configuration set in one go, using
> the new accessors, and take this opportunity to drop the KVM_ARM64_
> prefix which doesn't provide any help.
>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_host.h | 17 ++++++++++-------
>  arch/arm64/kvm/reset.c            |  6 +++---
>  2 files changed, 13 insertions(+), 10 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index c9dd0d4e22f2..2b8f1265eade 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -459,6 +459,13 @@ struct kvm_vcpu_arch {
>  #define __flag_unpack(_set, _f, _m)    _f
>  #define vcpu_flag_unpack(...)          __flag_unpack(__VA_ARGS__)
>
> +/* SVE exposed to guest */
> +#define GUEST_HAS_SVE          __vcpu_single_flag(cflags, BIT(0))
> +/* SVE config completed */
> +#define VCPU_SVE_FINALIZED     __vcpu_single_flag(cflags, BIT(1))
> +/* PTRAUTH exposed to guest */
> +#define GUEST_HAS_PTRAUTH      __vcpu_single_flag(cflags, BIT(2))
> +
>
>  /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
>  #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +     \
> @@ -483,9 +490,6 @@ struct kvm_vcpu_arch {
>  /* vcpu_arch flags field values: */
>  #define KVM_ARM64_DEBUG_DIRTY          (1 << 0)
>  #define KVM_ARM64_HOST_SVE_ENABLED     (1 << 4) /* SVE enabled for EL0 */
> -#define KVM_ARM64_GUEST_HAS_SVE                (1 << 5) /* SVE exposed to guest */
> -#define KVM_ARM64_VCPU_SVE_FINALIZED   (1 << 6) /* SVE config completed */
> -#define KVM_ARM64_GUEST_HAS_PTRAUTH    (1 << 7) /* PTRAUTH exposed to guest */
>  #define KVM_ARM64_PENDING_EXCEPTION    (1 << 8) /* Exception pending */
>  /*
>   * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
> @@ -522,13 +526,13 @@ struct kvm_vcpu_arch {
>                                  KVM_GUESTDBG_SINGLESTEP)
>
>  #define vcpu_has_sve(vcpu) (system_supports_sve() &&                   \
> -                           ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
> +                           vcpu_get_flag((vcpu), GUEST_HAS_SVE))

Minor nit: the parentheses around vcpu above are unnecessary
(they were omitted for vcpu_has_ptrauth/kvm_arm_vcpu_sve_finalized).

Reviewed-by: Reiji Watanabe <reijiw@google.com>

The new infrastructure for those flags looks nice.

Thanks!
Reiji



>
>  #ifdef CONFIG_ARM64_PTR_AUTH
>  #define vcpu_has_ptrauth(vcpu)                                         \
>         ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||                \
>           cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&               \
> -        (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
> +         vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
>  #else
>  #define vcpu_has_ptrauth(vcpu)         false
>  #endif
> @@ -885,8 +889,7 @@ void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
>  int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
>  bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
>
> -#define kvm_arm_vcpu_sve_finalized(vcpu) \
> -       ((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
> +#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
>
>  #define kvm_has_mte(kvm)                                       \
>         (system_supports_mte() &&                               \
> diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
> index 6c70c6f61c70..0e08fbe68715 100644
> --- a/arch/arm64/kvm/reset.c
> +++ b/arch/arm64/kvm/reset.c
> @@ -81,7 +81,7 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
>          * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
>          * kvm_arm_vcpu_finalize(), which freezes the configuration.
>          */
> -       vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
> +       vcpu_set_flag(vcpu, GUEST_HAS_SVE);
>
>         return 0;
>  }
> @@ -120,7 +120,7 @@ static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
>         }
>
>         vcpu->arch.sve_state = buf;
> -       vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
> +       vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
>         return 0;
>  }
>
> @@ -177,7 +177,7 @@ static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
>             !system_has_full_ptr_auth())
>                 return -EINVAL;
>
> -       vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
> +       vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
>         return 0;
>  }
>
> --
> 2.34.1
>
> _______________________________________________
> kvmarm mailing list
> kvmarm@lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c9dd0d4e22f2..2b8f1265eade 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -459,6 +459,13 @@  struct kvm_vcpu_arch {
 #define __flag_unpack(_set, _f, _m)	_f
 #define vcpu_flag_unpack(...)		__flag_unpack(__VA_ARGS__)
 
+/* SVE exposed to guest */
+#define GUEST_HAS_SVE		__vcpu_single_flag(cflags, BIT(0))
+/* SVE config completed */
+#define VCPU_SVE_FINALIZED	__vcpu_single_flag(cflags, BIT(1))
+/* PTRAUTH exposed to guest */
+#define GUEST_HAS_PTRAUTH	__vcpu_single_flag(cflags, BIT(2))
+
 
 /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
 #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) +	\
@@ -483,9 +490,6 @@  struct kvm_vcpu_arch {
 /* vcpu_arch flags field values: */
 #define KVM_ARM64_DEBUG_DIRTY		(1 << 0)
 #define KVM_ARM64_HOST_SVE_ENABLED	(1 << 4) /* SVE enabled for EL0 */
-#define KVM_ARM64_GUEST_HAS_SVE		(1 << 5) /* SVE exposed to guest */
-#define KVM_ARM64_VCPU_SVE_FINALIZED	(1 << 6) /* SVE config completed */
-#define KVM_ARM64_GUEST_HAS_PTRAUTH	(1 << 7) /* PTRAUTH exposed to guest */
 #define KVM_ARM64_PENDING_EXCEPTION	(1 << 8) /* Exception pending */
 /*
  * Overlaps with KVM_ARM64_EXCEPT_MASK on purpose so that it can't be
@@ -522,13 +526,13 @@  struct kvm_vcpu_arch {
 				 KVM_GUESTDBG_SINGLESTEP)
 
 #define vcpu_has_sve(vcpu) (system_supports_sve() &&			\
-			    ((vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_SVE))
+			    vcpu_get_flag((vcpu), GUEST_HAS_SVE))
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 #define vcpu_has_ptrauth(vcpu)						\
 	((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) ||		\
 	  cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) &&		\
-	 (vcpu)->arch.flags & KVM_ARM64_GUEST_HAS_PTRAUTH)
+	  vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
 #else
 #define vcpu_has_ptrauth(vcpu)		false
 #endif
@@ -885,8 +889,7 @@  void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 
-#define kvm_arm_vcpu_sve_finalized(vcpu) \
-	((vcpu)->arch.flags & KVM_ARM64_VCPU_SVE_FINALIZED)
+#define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
 
 #define kvm_has_mte(kvm)					\
 	(system_supports_mte() &&				\
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 6c70c6f61c70..0e08fbe68715 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -81,7 +81,7 @@  static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
 	 * KVM_REG_ARM64_SVE_VLS.  Allocation is deferred until
 	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
 	 */
-	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_SVE;
+	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
 
 	return 0;
 }
@@ -120,7 +120,7 @@  static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
 	}
 	
 	vcpu->arch.sve_state = buf;
-	vcpu->arch.flags |= KVM_ARM64_VCPU_SVE_FINALIZED;
+	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
 	return 0;
 }
 
@@ -177,7 +177,7 @@  static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
 	    !system_has_full_ptr_auth())
 		return -EINVAL;
 
-	vcpu->arch.flags |= KVM_ARM64_GUEST_HAS_PTRAUTH;
+	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
 	return 0;
 }