@@ -448,6 +448,10 @@ enum vcpu_sysreg {
POR_EL0, /* Permission Overlay Register 0 (EL0) */
+ /* Guarded Control Stack registers */
+ GCSCRE0_EL1, /* Guarded Control Stack Control (EL0) */
+ GCSPR_EL0, /* Guarded Control Stack Pointer (EL0) */
+
/* FP/SIMD/SVE */
SVCR,
FPMR,
@@ -525,6 +529,10 @@ enum vcpu_sysreg {
VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */
+ /* Guarded Control Stack registers */
+ VNCR(GCSPR_EL1), /* Guarded Control Stack Pointer (EL1) */
+ VNCR(GCSCR_EL1), /* Guarded Control Stack Control (EL1) */
+
VNCR(HFGRTR_EL2),
VNCR(HFGWTR_EL2),
VNCR(HFGITR_EL2),
@@ -1495,4 +1503,8 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
(system_supports_fpmr() && \
kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
+#define kvm_has_gcs(k) \
+ (system_supports_gcs() && \
+ kvm_has_feat((k), ID_AA64PFR1_EL1, GCS, IMP))
+
#endif /* __ARM64_KVM_HOST_H__ */
@@ -89,6 +89,8 @@
#define VNCR_PMSIRR_EL1 0x840
#define VNCR_PMSLATFR_EL1 0x848
#define VNCR_TRFCR_EL1 0x880
+#define VNCR_GCSPR_EL1 0x8C0
+#define VNCR_GCSCR_EL1 0x8D0
#define VNCR_MPAM1_EL1 0x900
#define VNCR_MPAMHCR_EL2 0x930
#define VNCR_MPAMVPMV_EL2 0x938
@@ -17,6 +17,7 @@
#include <asm/kvm_mmu.h>
static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
+static inline bool ctxt_has_gcs(struct kvm_cpu_context *ctxt);
static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
@@ -31,6 +32,12 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
+
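+ /* GCSCRE0_EL1 affects only EL0, so it is switched with the EL0 state */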
+ if (ctxt_has_gcs(ctxt)) {
+ ctxt_sys_reg(ctxt, GCSPR_EL0) = read_sysreg_s(SYS_GCSPR_EL0);
+ ctxt_sys_reg(ctxt, GCSCRE0_EL1) = read_sysreg_s(SYS_GCSCRE0_EL1);
+ }
}
static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
@@ -83,6 +89,18 @@ static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)
return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64MMFR3_EL1, S1POE, IMP);
}
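+/* True if the host implements GCS and the guest has it enabled */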
+static inline bool ctxt_has_gcs(struct kvm_cpu_context *ctxt)
+{
+ struct kvm_vcpu *vcpu;
+
+ if (!cpus_have_final_cap(ARM64_HAS_GCS))
+ return false;
+
+ vcpu = ctxt_to_vcpu(ctxt);
+ return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR1_EL1, GCS, IMP);
+}
+
static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, SCTLR_EL1) = read_sysreg_el1(SYS_SCTLR);
@@ -96,6 +113,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
if (ctxt_has_s1pie(ctxt)) {
ctxt_sys_reg(ctxt, PIR_EL1) = read_sysreg_el1(SYS_PIR);
ctxt_sys_reg(ctxt, PIRE0_EL1) = read_sysreg_el1(SYS_PIRE0);
}
+
+ /* Guest GCS is enabled independently of S1PIE, so don't nest the save */
+ if (ctxt_has_gcs(ctxt)) {
+ ctxt_sys_reg(ctxt, GCSPR_EL1) = read_sysreg_el1(SYS_GCSPR);
+ ctxt_sys_reg(ctxt, GCSCR_EL1) = read_sysreg_el1(SYS_GCSCR);
+ }
if (ctxt_has_s1poe(ctxt))
@@ -150,6 +171,11 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
+ if (ctxt_has_gcs(ctxt)) {
+ write_sysreg_s(ctxt_sys_reg(ctxt, GCSPR_EL0), SYS_GCSPR_EL0);
+ write_sysreg_s(ctxt_sys_reg(ctxt, GCSCRE0_EL1),
+ SYS_GCSCRE0_EL1);
+ }
}
static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
@@ -181,6 +207,11 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
if (ctxt_has_s1pie(ctxt)) {
write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1), SYS_PIR);
write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1), SYS_PIRE0);
}
+
+ if (ctxt_has_gcs(ctxt)) {
+ write_sysreg_el1(ctxt_sys_reg(ctxt, GCSPR_EL1), SYS_GCSPR);
+ write_sysreg_el1(ctxt_sys_reg(ctxt, GCSCR_EL1), SYS_GCSCR);
+ }
if (ctxt_has_s1poe(ctxt))
@@ -1641,6 +1641,16 @@ static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
return REG_RAZ;
}
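+/* The GCS registers are visible only when the VM has GCS */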
+static unsigned int gcs_visibility(const struct kvm_vcpu *vcpu,
+ const struct sys_reg_desc *r)
+{
+ if (kvm_has_gcs(vcpu->kvm))
+ return 0;
+
+ return REG_HIDDEN;
+}
+
/* cpufeature ID register access trap handlers */
static bool access_id_reg(struct kvm_vcpu *vcpu,
@@ -2376,7 +2385,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
ID_AA64PFR0_EL1_RAS |
ID_AA64PFR0_EL1_AdvSIMD |
ID_AA64PFR0_EL1_FP), },
- ID_SANITISED(ID_AA64PFR1_EL1),
+ ID_WRITABLE(ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_GCS),
ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
ID_UNALLOCATED(4,3),
ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
@@ -2461,6 +2470,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
PTRAUTH_KEY(APDB),
PTRAUTH_KEY(APGA),
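+ /* GCS control registers reset to 0, the pointer registers to UNKNOWN */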
+ { SYS_DESC(SYS_GCSCR_EL1), NULL, reset_val, GCSCR_EL1, 0,
+ .visibility = gcs_visibility },
+ { SYS_DESC(SYS_GCSPR_EL1), NULL, reset_unknown, GCSPR_EL1,
+ .visibility = gcs_visibility },
+ { SYS_DESC(SYS_GCSCRE0_EL1), NULL, reset_val, GCSCRE0_EL1, 0,
+ .visibility = gcs_visibility },
+
{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
{ SYS_DESC(SYS_ELR_EL1), access_elr},
@@ -2567,6 +2583,8 @@ static const struct sys_reg_desc sys_reg_descs[] = {
CTR_EL0_IDC_MASK |
CTR_EL0_DminLine_MASK |
CTR_EL0_IminLine_MASK),
+ { SYS_DESC(SYS_GCSPR_EL0), NULL, reset_unknown, GCSPR_EL0,
+ .visibility = gcs_visibility },
{ SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
{ SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
@@ -4661,6 +4679,10 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
if (kvm_has_fpmr(kvm))
vcpu->arch.hcrx_el2 |= HCRX_EL2_EnFPM;
+
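+ /* HCRX_EL2.GCSEn must be set for the guest to use GCS */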
+ if (kvm_has_gcs(kvm))
+ vcpu->arch.hcrx_el2 |= HCRX_EL2_GCSEn;
}
if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
@@ -4714,6 +4735,11 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPOR_EL1 |
HFGxTR_EL2_nPOR_EL0);
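+ /* With no GCS, make guest accesses to the GCS registers UNDEF */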
+ if (!kvm_has_gcs(kvm))
+ kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nGCS_EL0 |
+ HFGxTR_EL2_nGCS_EL1);
+
if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
HAFGRTR_EL2_RES1);