@@ -347,6 +347,7 @@ enum vcpu_sysreg {
CONTEXTIDR_EL1, /* Context ID Register */
TPIDR_EL0, /* Thread ID, User R/W */
TPIDRRO_EL0, /* Thread ID, User R/O */
+ TPIDR2_EL0, /* Thread ID 2, User R/W */
TPIDR_EL1, /* Thread ID, Privileged */
AMAIR_EL1, /* Aux Memory Attribute Indirection Register */
CNTKCTL_EL1, /* Timer Control Register (EL1) */
@@ -885,6 +886,7 @@ static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break;
case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
+ case TPIDR2_EL0: *val = read_sysreg_s(SYS_TPIDR2_EL0); break;
case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
@@ -928,6 +930,7 @@ static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
+ case TPIDR2_EL0: write_sysreg_s(val, SYS_TPIDR2_EL0); break;
case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
@@ -93,7 +93,10 @@ static inline void __activate_traps_hfgxtr(struct kvm_vcpu *vcpu)
ctxt_sys_reg(hctxt, HFGWTR_EL2) = read_sysreg_s(SYS_HFGWTR_EL2);

if (cpus_have_final_cap(ARM64_SME)) {
- tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;
+ tmp = HFGxTR_EL2_nSMPRI_EL1_MASK;
+
+ if (!vcpu_has_sme(vcpu))
+ tmp |= HFGxTR_EL2_nTPIDR2_EL0_MASK;

r_clr |= tmp;
w_clr |= tmp;
@@ -35,6 +35,9 @@ static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)
{
ctxt_sys_reg(ctxt, TPIDR_EL0) = read_sysreg(tpidr_el0);
ctxt_sys_reg(ctxt, TPIDRRO_EL0) = read_sysreg(tpidrro_el0);
+
+ if (ctxt_has_sme(ctxt))
+ ctxt_sys_reg(ctxt, TPIDR2_EL0) = read_sysreg_s(SYS_TPIDR2_EL0);
}

static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)
@@ -105,6 +108,9 @@ static inline void __sysreg_restore_user_state(struct kvm_cpu_context *ctxt)
{
write_sysreg(ctxt_sys_reg(ctxt, TPIDR_EL0), tpidr_el0);
write_sysreg(ctxt_sys_reg(ctxt, TPIDRRO_EL0), tpidrro_el0);
+
+ if (ctxt_has_sme(ctxt))
+ write_sysreg_s(ctxt_sys_reg(ctxt, TPIDR2_EL0), SYS_TPIDR2_EL0);
}

static inline void __sysreg_restore_el1_state(struct kvm_cpu_context *ctxt)
@@ -2369,7 +2369,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
- { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
+ { SYS_DESC(SYS_TPIDR2_EL0), NULL, reset_unknown, TPIDR2_EL0, 0, .visibility = sme_visibility },
{ SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
SME adds a new user register, TPIDR2_EL0. Implement support for managing
this for guests: we provide system register access to it, disable the
relevant traps and context switch it along with the other TPIDRs for
guests with SME.

Signed-off-by: Mark Brown <broonie@kernel.org>
---
 arch/arm64/include/asm/kvm_host.h          | 3 +++
 arch/arm64/kvm/hyp/include/hyp/switch.h    | 5 ++++-
 arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h | 6 ++++++
 arch/arm64/kvm/sys_regs.c                  | 2 +-
 4 files changed, 14 insertions(+), 2 deletions(-)
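
As an illustrative aside, not part of the patch: with TPIDR2_EL0 now listed
in sys_reg_descs[] and gated by sme_visibility, a VMM should be able to
access the guest's copy through the standard KVM_GET_ONE_REG/KVM_SET_ONE_REG
interface once the vCPU has SME enabled. The sketch below uses made-up helper
names and the architectural encoding of TPIDR2_EL0 (op0=3, op1=3, CRn=13,
CRm=0, op2=5); it is a sketch of possible userspace usage, not code from this
series.

#include <linux/kvm.h>		/* pulls in asm/kvm.h for ARM64_SYS_REG() */
#include <sys/ioctl.h>
#include <stdint.h>

/* Hypothetical define: TPIDR2_EL0 is op0=3, op1=3, CRn=13, CRm=0, op2=5. */
#define REG_ID_TPIDR2_EL0	ARM64_SYS_REG(3, 3, 13, 0, 5)

/* Read the guest's TPIDR2_EL0; expected to fail if the vCPU lacks SME. */
static int get_guest_tpidr2(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id	= REG_ID_TPIDR2_EL0,
		.addr	= (uint64_t)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

/* Write the guest's TPIDR2_EL0. */
static int set_guest_tpidr2(int vcpu_fd, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id	= REG_ID_TPIDR2_EL0,
		.addr	= (uint64_t)&val,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Note that vcpu_has_sme() and ctxt_has_sme() used in the hunks above are
assumed to be provided by earlier patches in this series; they are not
introduced here.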