diff mbox series

[RFC,1/2] arm64: kvm: expose sanitised cache type register to guest

Message ID 20181217150205.27981-2-ard.biesheuvel@linaro.org (mailing list archive)
State RFC
Headers show
Series arm64: kvm: cache ID register trapping | expand

Commit Message

Ard Biesheuvel Dec. 17, 2018, 3:02 p.m. UTC
We currently permit CPUs in the same system to deviate in the exact
topology of the caches, and we subsequently hide this fact from user
space by exposing a sanitised value of the cache type register CTR_EL0.

However, guests running under KVM see the bare value of CTR_EL0, which
could potentially result in issues with, e.g., JITs or other pieces of
code that are sensitive to misreported cache line sizes.

So let's start trapping cache ID instructions, and expose the sanitised
version of CTR_EL0 to guests. Note that CTR_EL0 is treated as an invariant
to KVM user space, so update that part as well.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/kvm_arm.h |  3 +-
 arch/arm64/include/asm/sysreg.h  |  1 +
 arch/arm64/kvm/sys_regs.c        | 59 +++++++++++++++++++-
 3 files changed, 60 insertions(+), 3 deletions(-)

Comments

Marc Zyngier Jan. 31, 2019, 11:22 a.m. UTC | #1
Hi Ard,

On 17/12/2018 15:02, Ard Biesheuvel wrote:
> We currently permit CPUs in the same system to deviate in the exact
> topology of the caches, and we subsequently hide this fact from user
> space by exposing a sanitised value of the cache type register CTR_EL0.
> 
> However, guests running under KVM see the bare value of CTR_EL0, which
> could potentially result in issues with, e.g., JITs or other pieces of
> code that are sensitive to misreported cache line sizes.
> 
> So let's start trapping cache ID instructions, and expose the sanitised
> version of CTR_EL0 to guests. Note that CTR_EL0 is treated as an invariant
> to KVM user space, so update that part as well.

I'm a bit uneasy with this. We rely on the kernel to perform this
sanitization for userspace when absolutely required, and this is so far
the exception.

If we start trapping it unconditionally, we're likely to introduce
performance regressions on systems where there is no need to perform any
form of sanitization.

Could we instead only do this if ARM64_MISMATCHED_CACHE_TYPE is set?

Thanks,

	M.

> 
> Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> ---
>  arch/arm64/include/asm/kvm_arm.h |  3 +-
>  arch/arm64/include/asm/sysreg.h  |  1 +
>  arch/arm64/kvm/sys_regs.c        | 59 +++++++++++++++++++-
>  3 files changed, 60 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> index 6f602af5263c..628dcb0cfea3 100644
> --- a/arch/arm64/include/asm/kvm_arm.h
> +++ b/arch/arm64/include/asm/kvm_arm.h
> @@ -81,9 +81,10 @@
>   * IMO:		Override CPSR.I and enable signaling with VI
>   * FMO:		Override CPSR.F and enable signaling with VF
>   * SWIO:	Turn set/way invalidates into set/way clean+invalidate
> + * TID2:	Trap cache identification instructions
>   */
>  #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
> -			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
> +			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | HCR_TID2 | \
>  			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
>  			 HCR_FMO | HCR_IMO)
>  #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
> diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> index 842fb9572661..3b8e51874da4 100644
> --- a/arch/arm64/include/asm/sysreg.h
> +++ b/arch/arm64/include/asm/sysreg.h
> @@ -342,6 +342,7 @@
>  
>  #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)
>  
> +#define SYS_CCSIDR_EL1			sys_reg(3, 1, 0, 0, 0)
>  #define SYS_CLIDR_EL1			sys_reg(3, 1, 0, 0, 1)
>  #define SYS_AIDR_EL1			sys_reg(3, 1, 0, 0, 7)
>  
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index 22fbbdbece3c..464e794b5bc5 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -1140,6 +1140,49 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
>  	return __set_id_reg(rd, uaddr, true);
>  }
>  
> +static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> +		       const struct sys_reg_desc *r)
> +{
> +	if (p->is_write)
> +		return write_to_read_only(vcpu, p, r);
> +
> +	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
> +	return true;
> +}
> +
> +static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> +			 const struct sys_reg_desc *r)
> +{
> +	if (p->is_write)
> +		return write_to_read_only(vcpu, p, r);
> +
> +	p->regval = read_sysreg(clidr_el1);
> +	return true;
> +}
> +
> +static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> +			  const struct sys_reg_desc *r)
> +{
> +	if (p->is_write)
> +		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
> +	else
> +		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
> +	return true;
> +}
> +
> +static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> +			  const struct sys_reg_desc *r)
> +{
> +	u32 csselr;
> +
> +	if (p->is_write)
> +		return write_to_read_only(vcpu, p, r);
> +
> +	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
> +	p->regval = get_ccsidr(csselr);
> +	return true;
> +}
> +
>  /* sys_reg_desc initialiser for known cpufeature ID registers */
>  #define ID_SANITISED(name) {			\
>  	SYS_DESC(SYS_##name),			\
> @@ -1357,7 +1400,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
>  
>  	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
>  
> -	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
> +	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
> +	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
> +	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
> +	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
>  
>  	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
>  	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
> @@ -1657,6 +1703,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
>   * register).
>   */
>  static const struct sys_reg_desc cp15_regs[] = {
> +	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
>  	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
>  	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
>  	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
> @@ -1774,6 +1821,10 @@ static const struct sys_reg_desc cp15_regs[] = {
>  	PMU_PMEVTYPER(30),
>  	/* PMCCFILTR */
>  	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
> +
> +	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
> +	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
> +	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
>  };
>  
>  static const struct sys_reg_desc cp15_64_regs[] = {
> @@ -2196,11 +2247,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
>  	}
>  
>  FUNCTION_INVARIANT(midr_el1)
> -FUNCTION_INVARIANT(ctr_el0)
>  FUNCTION_INVARIANT(revidr_el1)
>  FUNCTION_INVARIANT(clidr_el1)
>  FUNCTION_INVARIANT(aidr_el1)
>  
> +static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
> +{
> +	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
> +}
> +
>  /* ->val is filled in by kvm_sys_reg_table_init() */
>  static struct sys_reg_desc invariant_sys_regs[] = {
>  	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
>
Ard Biesheuvel Jan. 31, 2019, 11:24 a.m. UTC | #2
On Thu, 31 Jan 2019 at 12:22, Marc Zyngier <marc.zyngier@arm.com> wrote:
>
> Hi Ard,
>
> On 17/12/2018 15:02, Ard Biesheuvel wrote:
> > We currently permit CPUs in the same system to deviate in the exact
> > topology of the caches, and we subsequently hide this fact from user
> > space by exposing a sanitised value of the cache type register CTR_EL0.
> >
> > However, guests running under KVM see the bare value of CTR_EL0, which
> > could potentially result in issues with, e.g., JITs or other pieces of
> > code that are sensitive to misreported cache line sizes.
> >
> > So let's start trapping cache ID instructions, and expose the sanitised
> > version of CTR_EL0 to guests. Note that CTR_EL0 is treated as an invariant
> > to KVM user space, so update that part as well.
>
> I'm a bit uneasy with this. We rely on the kernel to perform this
> sanitization for userspace when absolutely required, and this is so far
> the exception.
>
> If we start trapping it unconditionally, we're likely to introduce
> performance regressions on systems where there is no need to perform any
> form of sanitization.
>
> Could we instead only do this if ARM64_MISMATCHED_CACHE_TYPE is set?
>

I suppose. Note that the next patch relies on the trapping as well,
but we could enable that piece for only 32-bit guests.


> >
> > Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
> > ---
> >  arch/arm64/include/asm/kvm_arm.h |  3 +-
> >  arch/arm64/include/asm/sysreg.h  |  1 +
> >  arch/arm64/kvm/sys_regs.c        | 59 +++++++++++++++++++-
> >  3 files changed, 60 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
> > index 6f602af5263c..628dcb0cfea3 100644
> > --- a/arch/arm64/include/asm/kvm_arm.h
> > +++ b/arch/arm64/include/asm/kvm_arm.h
> > @@ -81,9 +81,10 @@
> >   * IMO:              Override CPSR.I and enable signaling with VI
> >   * FMO:              Override CPSR.F and enable signaling with VF
> >   * SWIO:     Turn set/way invalidates into set/way clean+invalidate
> > + * TID2:     Trap cache identification instructions
> >   */
> >  #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
> > -                      HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
> > +                      HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | HCR_TID2 | \
> >                        HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
> >                        HCR_FMO | HCR_IMO)
> >  #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
> > diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
> > index 842fb9572661..3b8e51874da4 100644
> > --- a/arch/arm64/include/asm/sysreg.h
> > +++ b/arch/arm64/include/asm/sysreg.h
> > @@ -342,6 +342,7 @@
> >
> >  #define SYS_CNTKCTL_EL1                      sys_reg(3, 0, 14, 1, 0)
> >
> > +#define SYS_CCSIDR_EL1                       sys_reg(3, 1, 0, 0, 0)
> >  #define SYS_CLIDR_EL1                        sys_reg(3, 1, 0, 0, 1)
> >  #define SYS_AIDR_EL1                 sys_reg(3, 1, 0, 0, 7)
> >
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index 22fbbdbece3c..464e794b5bc5 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -1140,6 +1140,49 @@ static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
> >       return __set_id_reg(rd, uaddr, true);
> >  }
> >
> > +static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> > +                    const struct sys_reg_desc *r)
> > +{
> > +     if (p->is_write)
> > +             return write_to_read_only(vcpu, p, r);
> > +
> > +     p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
> > +     return true;
> > +}
> > +
> > +static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> > +                      const struct sys_reg_desc *r)
> > +{
> > +     if (p->is_write)
> > +             return write_to_read_only(vcpu, p, r);
> > +
> > +     p->regval = read_sysreg(clidr_el1);
> > +     return true;
> > +}
> > +
> > +static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> > +                       const struct sys_reg_desc *r)
> > +{
> > +     if (p->is_write)
> > +             vcpu_write_sys_reg(vcpu, p->regval, r->reg);
> > +     else
> > +             p->regval = vcpu_read_sys_reg(vcpu, r->reg);
> > +     return true;
> > +}
> > +
> > +static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
> > +                       const struct sys_reg_desc *r)
> > +{
> > +     u32 csselr;
> > +
> > +     if (p->is_write)
> > +             return write_to_read_only(vcpu, p, r);
> > +
> > +     csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
> > +     p->regval = get_ccsidr(csselr);
> > +     return true;
> > +}
> > +
> >  /* sys_reg_desc initialiser for known cpufeature ID registers */
> >  #define ID_SANITISED(name) {                 \
> >       SYS_DESC(SYS_##name),                   \
> > @@ -1357,7 +1400,10 @@ static const struct sys_reg_desc sys_reg_descs[] = {
> >
> >       { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
> >
> > -     { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
> > +     { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
> > +     { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
> > +     { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
> > +     { SYS_DESC(SYS_CTR_EL0), access_ctr },
> >
> >       { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
> >       { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
> > @@ -1657,6 +1703,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
> >   * register).
> >   */
> >  static const struct sys_reg_desc cp15_regs[] = {
> > +     { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
> >       { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
> >       { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
> >       { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
> > @@ -1774,6 +1821,10 @@ static const struct sys_reg_desc cp15_regs[] = {
> >       PMU_PMEVTYPER(30),
> >       /* PMCCFILTR */
> >       { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
> > +
> > +     { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
> > +     { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
> > +     { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
> >  };
> >
> >  static const struct sys_reg_desc cp15_64_regs[] = {
> > @@ -2196,11 +2247,15 @@ static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
> >       }
> >
> >  FUNCTION_INVARIANT(midr_el1)
> > -FUNCTION_INVARIANT(ctr_el0)
> >  FUNCTION_INVARIANT(revidr_el1)
> >  FUNCTION_INVARIANT(clidr_el1)
> >  FUNCTION_INVARIANT(aidr_el1)
> >
> > +static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
> > +{
> > +     ((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
> > +}
> > +
> >  /* ->val is filled in by kvm_sys_reg_table_init() */
> >  static struct sys_reg_desc invariant_sys_regs[] = {
> >       { SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
> >
>
>
> --
> Jazz is not dead. It just smells funny...
Marc Zyngier Jan. 31, 2019, 11:44 a.m. UTC | #3
On 31/01/2019 11:24, Ard Biesheuvel wrote:
> On Thu, 31 Jan 2019 at 12:22, Marc Zyngier <marc.zyngier@arm.com> wrote:
>>
>> Hi Ard,
>>
>> On 17/12/2018 15:02, Ard Biesheuvel wrote:
>>> We currently permit CPUs in the same system to deviate in the exact
>>> topology of the caches, and we subsequently hide this fact from user
>>> space by exposing a sanitised value of the cache type register CTR_EL0.
>>>
>>> However, guests running under KVM see the bare value of CTR_EL0, which
>>> could potentially result in issues with, e.g., JITs or other pieces of
>>> code that are sensitive to misreported cache line sizes.
>>>
>>> So let's start trapping cache ID instructions, and expose the sanitised
>>> version of CTR_EL0 to guests. Note that CTR_EL0 is treated as an invariant
>>> to KVM user space, so update that part as well.
>>
>> I'm a bit uneasy with this. We rely on the kernel to perform this
>> sanitization for userspace when absolutely required, and this is so far
>> the exception.
>>
>> If we start trapping it unconditionally, we're likely to introduce
>> performance regressions on systems where there is no need to perform any
>> form of sanitization.
>>
>> Could we instead only do this if ARM64_MISMATCHED_CACHE_TYPE is set?
>>
> 
> I suppose. Note that the next patch relies on the trapping as well,
> but we could enable that piece for only 32-bit guests.

I think that'd be fine. 32bit has no EL0 access to this, so it shouldn't
see any major hit (nor should it use S/W ops, but hey, odd fixes).

Do you mind trying to have a go at that?

Thanks,

	M.
Ard Biesheuvel Jan. 31, 2019, 11:45 a.m. UTC | #4
On Thu, 31 Jan 2019 at 12:44, Marc Zyngier <marc.zyngier@arm.com> wrote:
>
> On 31/01/2019 11:24, Ard Biesheuvel wrote:
> > On Thu, 31 Jan 2019 at 12:22, Marc Zyngier <marc.zyngier@arm.com> wrote:
> >>
> >> Hi Ard,
> >>
> >> On 17/12/2018 15:02, Ard Biesheuvel wrote:
> >>> We currently permit CPUs in the same system to deviate in the exact
> >>> topology of the caches, and we subsequently hide this fact from user
> >>> space by exposing a sanitised value of the cache type register CTR_EL0.
> >>>
> >>> However, guests running under KVM see the bare value of CTR_EL0, which
> >>> could potentially result in issues with, e.g., JITs or other pieces of
> >>> code that are sensitive to misreported cache line sizes.
> >>>
> >>> So let's start trapping cache ID instructions, and expose the sanitised
> >>> version of CTR_EL0 to guests. Note that CTR_EL0 is treated as an invariant
> >>> to KVM user space, so update that part as well.
> >>
> >> I'm a bit uneasy with this. We rely on the kernel to perform this
> >> sanitization for userspace when absolutely required, and this is so far
> >> the exception.
> >>
> >> If we start trapping it unconditionally, we're likely to introduce
> >> performance regressions on systems where there is no need to perform any
> >> form of sanitization.
> >>
> >> Could we instead only do this if ARM64_MISMATCHED_CACHE_TYPE is set?
> >>
> >
> > I suppose. Note that the next patch relies on the trapping as well,
> > but we could enable that piece for only 32-bit guests.
>
> I think that'd be fine. 32bit has no EL0 access to this, so it shouldn't
> see any major hit (nor should it use S/W ops, but hey, odd fixes).
>
> Do you mind trying to have a go at that?
>

Not at all.
diff mbox series

Patch

diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 6f602af5263c..628dcb0cfea3 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -81,9 +81,10 @@ 
  * IMO:		Override CPSR.I and enable signaling with VI
  * FMO:		Override CPSR.F and enable signaling with VF
  * SWIO:	Turn set/way invalidates into set/way clean+invalidate
+ * TID2:	Trap cache identification instructions
  */
 #define HCR_GUEST_FLAGS (HCR_TSC | HCR_TSW | HCR_TWE | HCR_TWI | HCR_VM | \
-			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | \
+			 HCR_TVM | HCR_BSU_IS | HCR_FB | HCR_TAC | HCR_TID2 | \
 			 HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW | HCR_TLOR | \
 			 HCR_FMO | HCR_IMO)
 #define HCR_VIRT_EXCP_MASK (HCR_VSE | HCR_VI | HCR_VF)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 842fb9572661..3b8e51874da4 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -342,6 +342,7 @@ 
 
 #define SYS_CNTKCTL_EL1			sys_reg(3, 0, 14, 1, 0)
 
+#define SYS_CCSIDR_EL1			sys_reg(3, 1, 0, 0, 0)
 #define SYS_CLIDR_EL1			sys_reg(3, 1, 0, 0, 1)
 #define SYS_AIDR_EL1			sys_reg(3, 1, 0, 0, 7)
 
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 22fbbdbece3c..464e794b5bc5 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1140,6 +1140,49 @@  static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
 	return __set_id_reg(rd, uaddr, true);
 }
 
+static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+		       const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return write_to_read_only(vcpu, p, r);
+
+	p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
+	return true;
+}
+
+static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		return write_to_read_only(vcpu, p, r);
+
+	p->regval = read_sysreg(clidr_el1);
+	return true;
+}
+
+static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	if (p->is_write)
+		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
+	else
+		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
+	return true;
+}
+
+static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+			  const struct sys_reg_desc *r)
+{
+	u32 csselr;
+
+	if (p->is_write)
+		return write_to_read_only(vcpu, p, r);
+
+	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
+	p->regval = get_ccsidr(csselr);
+	return true;
+}
+
 /* sys_reg_desc initialiser for known cpufeature ID registers */
 #define ID_SANITISED(name) {			\
 	SYS_DESC(SYS_##name),			\
@@ -1357,7 +1400,10 @@  static const struct sys_reg_desc sys_reg_descs[] = {
 
 	{ SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
 
-	{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
+	{ SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
+	{ SYS_DESC(SYS_CLIDR_EL1), access_clidr },
+	{ SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
+	{ SYS_DESC(SYS_CTR_EL0), access_ctr },
 
 	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
 	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
@@ -1657,6 +1703,7 @@  static const struct sys_reg_desc cp14_64_regs[] = {
  * register).
  */
 static const struct sys_reg_desc cp15_regs[] = {
+	{ Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
 	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
 	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
@@ -1774,6 +1821,10 @@  static const struct sys_reg_desc cp15_regs[] = {
 	PMU_PMEVTYPER(30),
 	/* PMCCFILTR */
 	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
+
+	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
+	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
+	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, c0_CSSELR },
 };
 
 static const struct sys_reg_desc cp15_64_regs[] = {
@@ -2196,11 +2247,15 @@  static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
 	}
 
 FUNCTION_INVARIANT(midr_el1)
-FUNCTION_INVARIANT(ctr_el0)
 FUNCTION_INVARIANT(revidr_el1)
 FUNCTION_INVARIANT(clidr_el1)
 FUNCTION_INVARIANT(aidr_el1)
 
+static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
+{
+	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
+}
+
 /* ->val is filled in by kvm_sys_reg_table_init() */
 static struct sys_reg_desc invariant_sys_regs[] = {
 	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },