Message ID | 20240801-arm64-gcs-v10-14-699e2bd2190b@kernel.org
---|---
State | Superseded
Series | arm64/gcs: Provide support for GCS in userspace

Context | Check | Description
---|---|---
conchuod/vmtest-fixes-PR | fail | merge-conflict
On Thu, 01 Aug 2024 13:06:41 +0100, Mark Brown <broonie@kernel.org> wrote:
>
> GCS introduces a number of system registers for EL1 and EL0; on systems
> with GCS we need to context switch them and expose them to VMMs to allow
> guests to use GCS.
>
> In order to allow guests to use GCS we also need to configure
> HCRX_EL2.GCSEn; if this is not set, GCS instructions will be NOPs and
> CHKFEAT will report GCS as disabled. Also enable fine-grained traps for
> access to the GCS registers by guests which do not have the feature
> enabled.
>
> Signed-off-by: Mark Brown <broonie@kernel.org>
> ---
>  arch/arm64/include/asm/kvm_host.h          |  8 +++++
>  arch/arm64/include/asm/vncr_mapping.h      |  2 ++
>  arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 49 ++++++++++++++++++++++++------
>  arch/arm64/kvm/sys_regs.c                  | 12 ++++++++
>  4 files changed, 61 insertions(+), 10 deletions(-)

[...]

> +	{ SYS_DESC(SYS_GCSCR_EL1), NULL, reset_val, GCSCR_EL1, 0 },
> +	{ SYS_DESC(SYS_GCSPR_EL1), NULL, reset_unknown, GCSPR_EL1 },
> +	{ SYS_DESC(SYS_GCSCRE0_EL1), NULL, reset_val, GCSCRE0_EL1, 0 },

Global visibility for these registers? Why should we expose them to
userspace if the feature is neither present nor configured?

[...]

> +		if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
> +			kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nGCS_EL0 |
> +							HFGxTR_EL2_nGCS_EL1);

How can this work if you don't handle ID_AA64PFR1_EL1 being written to?
You are exposing GCS to all guests without giving the VMM an
opportunity to turn it off. This breaks A->B->A migration, which is
not acceptable.

	M.
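For context on the "global visibility" objection: register entries in sys_regs.c can carry a `.visibility` callback that hides them from both the guest and userspace when the feature is absent. A minimal sketch of that idiom, assuming the existing `kvm_has_feat()` and `REG_HIDDEN` helpers; the `gcs_visibility` name and its wiring into the GCS descriptors are illustrative, not part of the posted patch:

```c
/*
 * Sketch: hide the GCS registers when ID_AA64PFR1_EL1 reports GCS as
 * absent, following the pattern used by other optional features in
 * sys_regs.c. The function name here is hypothetical.
 */
static unsigned int gcs_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, GCS, IMP))
		return 0;

	return REG_HIDDEN;
}

/*
 * The descriptors would then reference the hook, e.g.:
 * { SYS_DESC(SYS_GCSCR_EL1), NULL, reset_val, GCSCR_EL1, 0,
 *   .visibility = gcs_visibility },
 */
```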
On Fri, Aug 16, 2024 at 03:15:19PM +0100, Marc Zyngier wrote:
> Mark Brown <broonie@kernel.org> wrote:

> > +	{ SYS_DESC(SYS_GCSCR_EL1), NULL, reset_val, GCSCR_EL1, 0 },
> > +	{ SYS_DESC(SYS_GCSPR_EL1), NULL, reset_unknown, GCSPR_EL1 },
> > +	{ SYS_DESC(SYS_GCSCRE0_EL1), NULL, reset_val, GCSCRE0_EL1, 0 },

> Global visibility for these registers? Why should we expose them to
> userspace if the feature is neither present nor configured?

...

> > +		if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
> > +			kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nGCS_EL0 |
> > +							HFGxTR_EL2_nGCS_EL1);

> How can this work if you don't handle ID_AA64PFR1_EL1 being written to?
> You are exposing GCS to all guests without giving the VMM an
> opportunity to turn it off. This breaks A->B->A migration, which is
> not acceptable.

This was done based on your positive review of the POE series, which
follows the same pattern:

   https://lore.kernel.org/linux-arm-kernel/20240503130147.1154804-8-joey.gouly@arm.com/
   https://lore.kernel.org/linux-arm-kernel/864jagmxn7.wl-maz@kernel.org/

in which you didn't note any concerns about the handling for the
sysregs.

If your decisions have changed then you'll need to withdraw your review
there. I'd figured that, given the current incompleteness of the
writability conversions and there being a bunch of existing registers
exposed unconditionally, you'd decided to defer until some more general
cleanup of the situation.
On Fri, 16 Aug 2024 15:40:33 +0100, Mark Brown <broonie@kernel.org> wrote:
>
> On Fri, Aug 16, 2024 at 03:15:19PM +0100, Marc Zyngier wrote:
>
> [...]
>
> > How can this work if you don't handle ID_AA64PFR1_EL1 being written to?
> > You are exposing GCS to all guests without giving the VMM an
> > opportunity to turn it off. This breaks A->B->A migration, which is
> > not acceptable.
>
> This was done based on your positive review of the POE series, which
> follows the same pattern:
>
> https://lore.kernel.org/linux-arm-kernel/20240503130147.1154804-8-joey.gouly@arm.com/
> https://lore.kernel.org/linux-arm-kernel/864jagmxn7.wl-maz@kernel.org/
>
> in which you didn't note any concerns about the handling for the
> sysregs.
>
> If your decisions have changed then you'll need to withdraw your review
> there. I'd figured that, given the current incompleteness of the
> writability conversions and there being a bunch of existing registers
> exposed unconditionally, you'd decided to defer until some more general
> cleanup of the situation.

Thanks for pointing out that I missed this crucial detail in the POE
series. I'll immediately go and point that out.

	M.
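The migration concern above comes down to ID-register writability: the VMM must be able to write ID_AA64PFR1_EL1 with the GCS field cleared, so a guest started on host A (with GCS) can be migrated to host B (without it) and back. In sys_regs.c this is expressed with the `ID_WRITABLE()` descriptor macro; a hedged sketch only, assuming the generated `ID_AA64PFR1_EL1_GCS_MASK` constant and ignoring the other fields of the register for brevity:

```c
/*
 * Sketch: allow userspace to write ID_AA64PFR1_EL1 so the VMM can turn
 * GCS off for a VM. In a real table this mask must be combined with
 * every other field intended to be writable for this register;
 * ID_AA64PFR1_EL1_GCS_MASK is assumed to come from the generated
 * sysreg definitions.
 */
ID_WRITABLE(ID_AA64PFR1_EL1, ID_AA64PFR1_EL1_GCS_MASK),
```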
```diff
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index a33f5996ca9f..5818e4a1c2d1 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -446,6 +446,10 @@ enum vcpu_sysreg {
 	GCR_EL1,	/* Tag Control Register */
 	TFSRE0_EL1,	/* Tag Fault Status Register (EL0) */
 
+	/* Guarded Control Stack registers */
+	GCSCRE0_EL1,	/* Guarded Control Stack Control (EL0) */
+	GCSPR_EL0,	/* Guarded Control Stack Pointer (EL0) */
+
 	/* 32bit specific registers. */
 	DACR32_EL2,	/* Domain Access Control Register */
 	IFSR32_EL2,	/* Instruction Fault Status Register */
@@ -517,6 +521,10 @@ enum vcpu_sysreg {
 	VNCR(PIR_EL1),	/* Permission Indirection Register 1 (EL1) */
 	VNCR(PIRE0_EL1),	/* Permission Indirection Register 0 (EL1) */
 
+	/* Guarded Control Stack registers */
+	VNCR(GCSPR_EL1),	/* Guarded Control Stack Pointer (EL1) */
+	VNCR(GCSCR_EL1),	/* Guarded Control Stack Control (EL1) */
+
 	VNCR(HFGRTR_EL2),
 	VNCR(HFGWTR_EL2),
 	VNCR(HFGITR_EL2),
diff --git a/arch/arm64/include/asm/vncr_mapping.h b/arch/arm64/include/asm/vncr_mapping.h
index df2c47c55972..5e83e6f579fd 100644
--- a/arch/arm64/include/asm/vncr_mapping.h
+++ b/arch/arm64/include/asm/vncr_mapping.h
@@ -88,6 +88,8 @@
 #define VNCR_PMSIRR_EL1         0x840
 #define VNCR_PMSLATFR_EL1       0x848
 #define VNCR_TRFCR_EL1          0x880
+#define VNCR_GCSPR_EL1          0x8C0
+#define VNCR_GCSCR_EL1          0x8D0
 #define VNCR_MPAM1_EL1          0x900
 #define VNCR_MPAMHCR_EL2        0x930
 #define VNCR_MPAMVPMV_EL2       0x938
diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
index 4c0fdabaf8ae..ac29352e225a 100644
--- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
+++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h
@@ -16,6 +16,27 @@
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
+static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
+
+	if (!vcpu)
+		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
+
+	return vcpu;
+}
+
+static inline bool ctxt_has_gcs(struct kvm_cpu_context *ctxt)
+{
+	struct kvm_vcpu *vcpu;
+
+	if (!cpus_have_final_cap(ARM64_HAS_GCS))
+		return false;
+
+	vcpu = ctxt_to_vcpu(ctxt);
+	return kvm_has_feat(kern_hyp_va(vcpu->kvm), ID_AA64PFR1_EL1, GCS, IMP);
+}
+
 static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, MDSCR_EL1)	= read_sysreg(mdscr_el1);
@@ -25,16 +46,10 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
 {
 	ctxt_sys_reg(ctxt, TPIDR_EL0)	= read_sysreg(tpidr_el0);
 	ctxt_sys_reg(ctxt, TPIDRRO_EL0)	= read_sysreg(tpidrro_el0);
-}
-
-static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)
-{
-	struct kvm_vcpu *vcpu = ctxt->__hyp_running_vcpu;
-
-	if (!vcpu)
-		vcpu = container_of(ctxt, struct kvm_vcpu, arch.ctxt);
-
-	return vcpu;
+	if (ctxt_has_gcs(ctxt)) {
+		ctxt_sys_reg(ctxt, GCSPR_EL0) = read_sysreg_s(SYS_GCSPR_EL0);
+		ctxt_sys_reg(ctxt, GCSCRE0_EL1) = read_sysreg_s(SYS_GCSCRE0_EL1);
+	}
 }
 
 static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
@@ -79,6 +94,10 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
 		if (ctxt_has_s1pie(ctxt)) {
 			ctxt_sys_reg(ctxt, PIR_EL1)	= read_sysreg_el1(SYS_PIR);
 			ctxt_sys_reg(ctxt, PIRE0_EL1)	= read_sysreg_el1(SYS_PIRE0);
+			if (ctxt_has_gcs(ctxt)) {
+				ctxt_sys_reg(ctxt, GCSPR_EL1) = read_sysreg_el1(SYS_GCSPR);
+				ctxt_sys_reg(ctxt, GCSCR_EL1) = read_sysreg_el1(SYS_GCSCR);
+			}
 		}
 	}
 	ctxt_sys_reg(ctxt, ESR_EL1)	= read_sysreg_el1(SYS_ESR);
@@ -126,6 +145,11 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
 {
 	write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0),	tpidr_el0);
 	write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0),	tpidrro_el0);
+	if (ctxt_has_gcs(ctxt)) {
+		write_sysreg_s(ctxt_sys_reg(ctxt, GCSPR_EL0), SYS_GCSPR_EL0);
+		write_sysreg_s(ctxt_sys_reg(ctxt, GCSCRE0_EL1),
+			       SYS_GCSCRE0_EL1);
+	}
 }
 
 static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
@@ -157,6 +181,11 @@ static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
 		if (ctxt_has_s1pie(ctxt)) {
 			write_sysreg_el1(ctxt_sys_reg(ctxt, PIR_EL1),	SYS_PIR);
 			write_sysreg_el1(ctxt_sys_reg(ctxt, PIRE0_EL1),	SYS_PIRE0);
+
+			if (ctxt_has_gcs(ctxt)) {
+				write_sysreg_el1(ctxt_sys_reg(ctxt, GCSPR_EL1), SYS_GCSPR);
+				write_sysreg_el1(ctxt_sys_reg(ctxt, GCSCR_EL1), SYS_GCSCR);
+			}
 		}
 	}
 	write_sysreg_el1(ctxt_sys_reg(ctxt, ESR_EL1),	SYS_ESR);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c90324060436..ac98d3237130 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2446,6 +2446,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	PTRAUTH_KEY(APDB),
 	PTRAUTH_KEY(APGA),
 
+	{ SYS_DESC(SYS_GCSCR_EL1), NULL, reset_val, GCSCR_EL1, 0 },
+	{ SYS_DESC(SYS_GCSPR_EL1), NULL, reset_unknown, GCSPR_EL1 },
+	{ SYS_DESC(SYS_GCSCRE0_EL1), NULL, reset_val, GCSCRE0_EL1, 0 },
+
 	{ SYS_DESC(SYS_SPSR_EL1), access_spsr},
 	{ SYS_DESC(SYS_ELR_EL1), access_elr},
 
@@ -2535,6 +2539,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 		   CTR_EL0_IDC_MASK |
 		   CTR_EL0_DminLine_MASK |
 		   CTR_EL0_IminLine_MASK),
+	{ SYS_DESC(SYS_GCSPR_EL0), NULL, reset_unknown, GCSPR_EL0 },
 	{ SYS_DESC(SYS_SVCR), undef_access },
 
 	{ PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
@@ -4560,6 +4565,9 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
 
 		if (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, TCRX, IMP))
 			vcpu->arch.hcrx_el2 |= HCRX_EL2_TCR2En;
+
+		if (kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+			vcpu->arch.hcrx_el2 |= HCRX_EL2_GCSEn;
 	}
 
 	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
@@ -4604,6 +4612,10 @@ void kvm_calculate_traps(struct kvm_vcpu *vcpu)
 		kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nPIRE0_EL1 |
 						HFGxTR_EL2_nPIR_EL1);
 
+		if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
+			kvm->arch.fgu[HFGxTR_GROUP] |= (HFGxTR_EL2_nGCS_EL0 |
+							HFGxTR_EL2_nGCS_EL1);
+
 		if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, AMU, IMP))
 			kvm->arch.fgu[HAFGRTR_GROUP] |= ~(HAFGRTR_EL2_RES0 |
 							  HAFGRTR_EL2_RES1);
```
GCS introduces a number of system registers for EL1 and EL0; on systems
with GCS we need to context switch them and expose them to VMMs to allow
guests to use GCS.

In order to allow guests to use GCS we also need to configure
HCRX_EL2.GCSEn; if this is not set, GCS instructions will be NOPs and
CHKFEAT will report GCS as disabled. Also enable fine-grained traps for
access to the GCS registers by guests which do not have the feature
enabled.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h          |  8 +++++
 arch/arm64/include/asm/vncr_mapping.h      |  2 ++
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 49 ++++++++++++++++++++++++------
 arch/arm64/kvm/sys_regs.c                  | 12 ++++++++
 4 files changed, 61 insertions(+), 10 deletions(-)
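The CHKFEAT behaviour mentioned in the commit message is what lets guest code observe the effect of HCRX_EL2.GCSEn without trapping. A hedged sketch of the probe from the guest's side, assuming the HINT #0x28 encoding for CHKFEAT X16 and bit 0 as the GCS feature bit (as used by the arm64 GCS selftests):

```c
/*
 * Sketch: probe whether GCS is currently enabled. Per the CHKFEAT
 * convention, set the feature bit in x16, execute CHKFEAT, and any bit
 * still set on return means that feature is disabled.
 */
static inline _Bool gcs_is_enabled(void)
{
	register unsigned long x16 __asm__("x16") = 1; /* bit 0: GCS */

	__asm__ volatile("hint #0x28" /* CHKFEAT X16 */ : "+r"(x16));

	return x16 == 0; /* bit cleared => GCS enabled */
}
```

With HCRX_EL2.GCSEn clear for the vcpu, this returns false and the GCS instructions execute as NOPs, which is exactly the behaviour a guest without the feature should see.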