
[v6,4/6] KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer

Message ID 20230404035344.4043856-5-jingzhangos@google.com (mailing list archive)
State New, archived
Series Support writable CPU ID registers from userspace

Commit Message

Jing Zhang April 4, 2023, 3:53 a.m. UTC
With per-guest ID registers, PMUver settings from userspace
can be stored in the corresponding ID register.

No functional change intended.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
---
 arch/arm64/include/asm/kvm_host.h | 11 +++----
 arch/arm64/kvm/arm.c              |  6 ----
 arch/arm64/kvm/id_regs.c          | 50 ++++++++++++++++++++++++-------
 include/kvm/arm_pmu.h             |  5 ++--
 4 files changed, 49 insertions(+), 23 deletions(-)

Comments

kernel test robot April 4, 2023, 4:59 p.m. UTC | #1
Hi Jing,

kernel test robot noticed the following build errors:

[auto build test ERROR on 7e364e56293bb98cae1b55fd835f5991c4e96e7d]

url:    https://github.com/intel-lab-lkp/linux/commits/Jing-Zhang/KVM-arm64-Move-CPU-ID-feature-registers-emulation-into-a-separate-file/20230404-115612
base:   7e364e56293bb98cae1b55fd835f5991c4e96e7d
patch link:    https://lore.kernel.org/r/20230404035344.4043856-5-jingzhangos%40google.com
patch subject: [PATCH v6 4/6] KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer
config: arm64-allyesconfig (https://download.01.org/0day-ci/archive/20230405/202304050006.49TmDWF6-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 12.1.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/66ece3020c02ab1206bb9478e8cb0172e125bbfc
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Jing-Zhang/KVM-arm64-Move-CPU-ID-feature-registers-emulation-into-a-separate-file/20230404-115612
        git checkout 66ece3020c02ab1206bb9478e8cb0172e125bbfc
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=arm64 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-12.1.0 make.cross W=1 O=build_dir ARCH=arm64 SHELL=/bin/bash arch/arm64/

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202304050006.49TmDWF6-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from include/linux/rhashtable-types.h:14,
                    from include/linux/ipc.h:7,
                    from include/uapi/linux/sem.h:5,
                    from include/linux/sem.h:5,
                    from include/linux/sched.h:15,
                    from include/linux/hardirq.h:9,
                    from include/linux/kvm_host.h:7,
                    from arch/arm64/kvm/id_regs.c:13:
   arch/arm64/kvm/id_regs.c: In function 'set_id_aa64dfr0_el1':
>> arch/arm64/kvm/id_regs.c:261:44: error: 'struct kvm_arch' has no member named 'config_lock'
     261 |                 mutex_lock(&vcpu->kvm->arch.config_lock);
         |                                            ^
   include/linux/mutex.h:187:44: note: in definition of macro 'mutex_lock'
     187 | #define mutex_lock(lock) mutex_lock_nested(lock, 0)
         |                                            ^~~~
   arch/arm64/kvm/id_regs.c:269:46: error: 'struct kvm_arch' has no member named 'config_lock'
     269 |                 mutex_unlock(&vcpu->kvm->arch.config_lock);
         |                                              ^
   arch/arm64/kvm/id_regs.c: In function 'set_id_dfr0_el1':
   arch/arm64/kvm/id_regs.c:311:44: error: 'struct kvm_arch' has no member named 'config_lock'
     311 |                 mutex_lock(&vcpu->kvm->arch.config_lock);
         |                                            ^
   include/linux/mutex.h:187:44: note: in definition of macro 'mutex_lock'
     187 | #define mutex_lock(lock) mutex_lock_nested(lock, 0)
         |                                            ^~~~
   arch/arm64/kvm/id_regs.c:318:46: error: 'struct kvm_arch' has no member named 'config_lock'
     318 |                 mutex_unlock(&vcpu->kvm->arch.config_lock);
         |                                              ^


vim +261 arch/arm64/kvm/id_regs.c

   228	
   229	static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
   230				       const struct sys_reg_desc *rd,
   231				       u64 val)
   232	{
   233		u8 pmuver, host_pmuver;
   234		bool valid_pmu;
   235	
   236		host_pmuver = kvm_arm_pmu_get_pmuver_limit();
   237	
   238		/*
   239		 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
   240		 * as it doesn't promise more than what the HW gives us. We
   241		 * allow an IMPDEF PMU though, only if no PMU is supported
   242		 * (KVM backward compatibility handling).
   243		 */
   244		pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
   245		if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
   246			return -EINVAL;
   247	
   248		valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
   249	
   250		/* Make sure view register and PMU support do match */
   251		if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
   252			return -EINVAL;
   253	
   254		/* We can only differ with PMUver, and anything else is an error */
   255		val ^= read_id_reg(vcpu, rd);
   256		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
   257		if (val)
   258			return -EINVAL;
   259	
   260		if (valid_pmu) {
 > 261			mutex_lock(&vcpu->kvm->arch.config_lock);
   262			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
   263			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
   264									    pmuver);
   265	
   266			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
   267			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
   268									pmuver_to_perfmon(pmuver));
   269			mutex_unlock(&vcpu->kvm->arch.config_lock);
   270		} else {
   271			assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
   272				   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
   273		}
   274	
   275		return 0;
   276	}
   277
kernel test robot April 11, 2023, 6:47 a.m. UTC | #2
Hi Jing,

kernel test robot noticed the following build errors:

[auto build test ERROR on 7e364e56293bb98cae1b55fd835f5991c4e96e7d]

url:    https://github.com/intel-lab-lkp/linux/commits/Jing-Zhang/KVM-arm64-Move-CPU-ID-feature-registers-emulation-into-a-separate-file/20230404-115612
base:   7e364e56293bb98cae1b55fd835f5991c4e96e7d
patch link:    https://lore.kernel.org/r/20230404035344.4043856-5-jingzhangos%40google.com
patch subject: [PATCH v6 4/6] KVM: arm64: Use per guest ID register for ID_AA64DFR0_EL1.PMUVer
config: arm64-randconfig-r031-20230410 (https://download.01.org/0day-ci/archive/20230411/202304111418.tQGXPpze-lkp@intel.com/config)
compiler: clang version 17.0.0 (https://github.com/llvm/llvm-project 2c57868e2e877f73c339796c3374ae660bb77f0d)
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # install arm64 cross compiling tool for clang build
        # apt-get install binutils-aarch64-linux-gnu
        # https://github.com/intel-lab-lkp/linux/commit/66ece3020c02ab1206bb9478e8cb0172e125bbfc
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Jing-Zhang/KVM-arm64-Move-CPU-ID-feature-registers-emulation-into-a-separate-file/20230404-115612
        git checkout 66ece3020c02ab1206bb9478e8cb0172e125bbfc
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm64 olddefconfig
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross W=1 O=build_dir ARCH=arm64 SHELL=/bin/bash arch/arm64/kvm/

If you fix the issue, kindly add the following tag where applicable
| Reported-by: kernel test robot <lkp@intel.com>
| Link: https://lore.kernel.org/oe-kbuild-all/202304111418.tQGXPpze-lkp@intel.com/

All errors (new ones prefixed by >>):

>> arch/arm64/kvm/id_regs.c:261:31: error: no member named 'config_lock' in 'struct kvm_arch'
                   mutex_lock(&vcpu->kvm->arch.config_lock);
                               ~~~~~~~~~~~~~~~ ^
   include/linux/mutex.h:187:44: note: expanded from macro 'mutex_lock'
   #define mutex_lock(lock) mutex_lock_nested(lock, 0)
                                              ^~~~
   arch/arm64/kvm/id_regs.c:269:33: error: no member named 'config_lock' in 'struct kvm_arch'
                   mutex_unlock(&vcpu->kvm->arch.config_lock);
                                 ~~~~~~~~~~~~~~~ ^
   arch/arm64/kvm/id_regs.c:311:31: error: no member named 'config_lock' in 'struct kvm_arch'
                   mutex_lock(&vcpu->kvm->arch.config_lock);
                               ~~~~~~~~~~~~~~~ ^
   include/linux/mutex.h:187:44: note: expanded from macro 'mutex_lock'
   #define mutex_lock(lock) mutex_lock_nested(lock, 0)
                                              ^~~~
   arch/arm64/kvm/id_regs.c:318:33: error: no member named 'config_lock' in 'struct kvm_arch'
                   mutex_unlock(&vcpu->kvm->arch.config_lock);
                                 ~~~~~~~~~~~~~~~ ^
   4 errors generated.


vim +261 arch/arm64/kvm/id_regs.c

   228	
   229	static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
   230				       const struct sys_reg_desc *rd,
   231				       u64 val)
   232	{
   233		u8 pmuver, host_pmuver;
   234		bool valid_pmu;
   235	
   236		host_pmuver = kvm_arm_pmu_get_pmuver_limit();
   237	
   238		/*
   239		 * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
   240		 * as it doesn't promise more than what the HW gives us. We
   241		 * allow an IMPDEF PMU though, only if no PMU is supported
   242		 * (KVM backward compatibility handling).
   243		 */
   244		pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
   245		if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
   246			return -EINVAL;
   247	
   248		valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
   249	
   250		/* Make sure view register and PMU support do match */
   251		if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
   252			return -EINVAL;
   253	
   254		/* We can only differ with PMUver, and anything else is an error */
   255		val ^= read_id_reg(vcpu, rd);
   256		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
   257		if (val)
   258			return -EINVAL;
   259	
   260		if (valid_pmu) {
 > 261			mutex_lock(&vcpu->kvm->arch.config_lock);
   262			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
   263			IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
   264									    pmuver);
   265	
   266			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
   267			IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
   268									pmuver_to_perfmon(pmuver));
   269			mutex_unlock(&vcpu->kvm->arch.config_lock);
   270		} else {
   271			assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
   272				   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
   273		}
   274	
   275		return 0;
   276	}
   277
Reiji Watanabe April 19, 2023, 3:40 a.m. UTC | #3
Hi Jing,

On Tue, Apr 04, 2023 at 03:53:42AM +0000, Jing Zhang wrote:
> With per guest ID registers, PMUver settings from userspace
> can be stored in its corresponding ID register.
> 
> No functional change intended.
> 
> Signed-off-by: Jing Zhang <jingzhangos@google.com>
> ---
>  arch/arm64/include/asm/kvm_host.h | 11 +++----
>  arch/arm64/kvm/arm.c              |  6 ----
>  arch/arm64/kvm/id_regs.c          | 50 ++++++++++++++++++++++++-------
>  include/kvm/arm_pmu.h             |  5 ++--
>  4 files changed, 49 insertions(+), 23 deletions(-)
> 
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 67a55177fd83..da46a2729581 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -237,6 +237,12 @@ struct kvm_arch {
>  #define KVM_ARCH_FLAG_EL1_32BIT				4
>  	/* PSCI SYSTEM_SUSPEND enabled for the guest */
>  #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5
> +	/*
> +	 * AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
> +	 * or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
> +	 * userspace for VCPUs without PMU.
> +	 */
> +#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU		6
>  
>  	unsigned long flags;
>  
> @@ -249,11 +255,6 @@ struct kvm_arch {
>  
>  	cpumask_var_t supported_cpus;
>  
> -	struct {
> -		u8 imp:4;
> -		u8 unimp:4;
> -	} dfr0_pmuver;
> -
>  	/* Hypercall features firmware registers' descriptor */
>  	struct kvm_smccc_features smccc_feat;
>  
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 64e1c19e5a9b..3fe28d545b54 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -138,12 +138,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
>  	kvm_arm_init_hypercalls(kvm);
>  	kvm_arm_init_id_regs(kvm);
>  
> -	/*
> -	 * Initialise the default PMUver before there is a chance to
> -	 * create an actual PMU.
> -	 */
> -	kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
> -
>  	return 0;
>  
>  err_free_cpumask:
> diff --git a/arch/arm64/kvm/id_regs.c b/arch/arm64/kvm/id_regs.c
> index 291311b1ecca..6f65d30693fe 100644
> --- a/arch/arm64/kvm/id_regs.c
> +++ b/arch/arm64/kvm/id_regs.c
> @@ -21,9 +21,12 @@
>  static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
>  {
>  	if (kvm_vcpu_has_pmu(vcpu))
> -		return vcpu->kvm->arch.dfr0_pmuver.imp;
> +		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> +				 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> +	else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
> +		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
>  
> -	return vcpu->kvm->arch.dfr0_pmuver.unimp;
> +	return 0;
>  }
>  
>  static u8 perfmon_to_pmuver(u8 perfmon)
> @@ -254,10 +257,20 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
>  	if (val)
>  		return -EINVAL;
>  
> -	if (valid_pmu)
> -		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
> -	else
> -		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
> +	if (valid_pmu) {
> +		mutex_lock(&vcpu->kvm->arch.config_lock);
> +		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> +		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
> +								    pmuver);
> +
> +		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
> +		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
> +								pmuver_to_perfmon(pmuver));

As those could be read without acquiring the lock, I don't think
we should expose the intermediate state of the register values.
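
Concretely, vcpu_pmuver() and kvm_pmu_is_3p5() read this register with
no lock held, so a concurrent reader can hit the window between the
clear and the set (a sketch, using the field names from the patch):

	/* writer, holding config_lock */
	IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	/* a lockless reader at this point sees PMUVer == 0 */
	IDREG(kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
						      pmuver);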


> +		mutex_unlock(&vcpu->kvm->arch.config_lock);
> +	} else {
> +		assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> +			   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> +	}
>  
>  	return 0;
>  }
> @@ -294,10 +307,19 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
>  	if (val)
>  		return -EINVAL;
>  
> -	if (valid_pmu)
> -		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
> -	else
> -		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
> +	if (valid_pmu) {
> +		mutex_lock(&vcpu->kvm->arch.config_lock);
> +		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
> +		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> +
> +		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> +		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
> +								    perfmon_to_pmuver(perfmon));

I have the same comment as set_id_aa64dfr0_el1().

Thank you,
Reiji

> +		mutex_unlock(&vcpu->kvm->arch.config_lock);
> +	} else {
> +		assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> +			   perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> +	}
>  
>  	return 0;
>  }
> @@ -503,4 +525,12 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
>  	}
>  
>  	IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> +
> +	/*
> +	 * Initialise the default PMUver before there is a chance to
> +	 * create an actual PMU.
> +	 */
> +	IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> +	IDREG(kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> +						      kvm_arm_pmu_get_pmuver_limit());
>  }
> diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> index 628775334d5e..856ac59b6821 100644
> --- a/include/kvm/arm_pmu.h
> +++ b/include/kvm/arm_pmu.h
> @@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
>  /*
>   * Evaluates as true when emulating PMUv3p5, and false otherwise.
>   */
> -#define kvm_pmu_is_3p5(vcpu)						\
> -	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> +#define kvm_pmu_is_3p5(vcpu)									\
> +	 (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),					\
> +		 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
>  
>  u8 kvm_arm_pmu_get_pmuver_limit(void);
>  
> -- 
> 2.40.0.348.gf938b09366-goog
>
Jing Zhang April 24, 2023, 7:07 p.m. UTC | #4
Hi Reiji,

On Tue, Apr 18, 2023 at 8:40 PM Reiji Watanabe <reijiw@google.com> wrote:
>
> Hi Jing,
>
> On Tue, Apr 04, 2023 at 03:53:42AM +0000, Jing Zhang wrote:
> > With per guest ID registers, PMUver settings from userspace
> > can be stored in its corresponding ID register.
> >
> > No functional change intended.
> >
> > Signed-off-by: Jing Zhang <jingzhangos@google.com>
> > ---
> >  arch/arm64/include/asm/kvm_host.h | 11 +++----
> >  arch/arm64/kvm/arm.c              |  6 ----
> >  arch/arm64/kvm/id_regs.c          | 50 ++++++++++++++++++++++++-------
> >  include/kvm/arm_pmu.h             |  5 ++--
> >  4 files changed, 49 insertions(+), 23 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 67a55177fd83..da46a2729581 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -237,6 +237,12 @@ struct kvm_arch {
> >  #define KVM_ARCH_FLAG_EL1_32BIT                              4
> >       /* PSCI SYSTEM_SUSPEND enabled for the guest */
> >  #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED         5
> > +     /*
> > +      * AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
> > +      * or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
> > +      * userspace for VCPUs without PMU.
> > +      */
> > +#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU           6
> >
> >       unsigned long flags;
> >
> > @@ -249,11 +255,6 @@ struct kvm_arch {
> >
> >       cpumask_var_t supported_cpus;
> >
> > -     struct {
> > -             u8 imp:4;
> > -             u8 unimp:4;
> > -     } dfr0_pmuver;
> > -
> >       /* Hypercall features firmware registers' descriptor */
> >       struct kvm_smccc_features smccc_feat;
> >
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 64e1c19e5a9b..3fe28d545b54 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -138,12 +138,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> >       kvm_arm_init_hypercalls(kvm);
> >       kvm_arm_init_id_regs(kvm);
> >
> > -     /*
> > -      * Initialise the default PMUver before there is a chance to
> > -      * create an actual PMU.
> > -      */
> > -     kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
> > -
> >       return 0;
> >
> >  err_free_cpumask:
> > diff --git a/arch/arm64/kvm/id_regs.c b/arch/arm64/kvm/id_regs.c
> > index 291311b1ecca..6f65d30693fe 100644
> > --- a/arch/arm64/kvm/id_regs.c
> > +++ b/arch/arm64/kvm/id_regs.c
> > @@ -21,9 +21,12 @@
> >  static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
> >  {
> >       if (kvm_vcpu_has_pmu(vcpu))
> > -             return vcpu->kvm->arch.dfr0_pmuver.imp;
> > +             return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                              IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> > +     else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
> > +             return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
> >
> > -     return vcpu->kvm->arch.dfr0_pmuver.unimp;
> > +     return 0;
> >  }
> >
> >  static u8 perfmon_to_pmuver(u8 perfmon)
> > @@ -254,10 +257,20 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> >       if (val)
> >               return -EINVAL;
> >
> > -     if (valid_pmu)
> > -             vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
> > -     else
> > -             vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
> > +     if (valid_pmu) {
> > +             mutex_lock(&vcpu->kvm->arch.config_lock);
> > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
> > +                                                                 pmuver);
> > +
> > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
> > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
> > +                                                             pmuver_to_perfmon(pmuver));
>
> As those could be read without acquiring the lock, I don't think
> we should expose the intermediate state of the register values.
I will protect all reads/writes to the KVM-scoped emulated ID registers
with the lock.
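
A minimal sketch of such a locked read path (the helper name is
illustrative, not taken from this series):

	/* Illustrative helper: serialise ID register reads on config_lock. */
	static u64 kvm_vm_id_reg_read(struct kvm *kvm, u32 id)
	{
		u64 val;

		mutex_lock(&kvm->arch.config_lock);
		val = IDREG(kvm, id);
		mutex_unlock(&kvm->arch.config_lock);

		return val;
	}
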
>
>
> > +             mutex_unlock(&vcpu->kvm->arch.config_lock);
> > +     } else {
> > +             assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > +                        pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> > +     }
> >
> >       return 0;
> >  }
> > @@ -294,10 +307,19 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> >       if (val)
> >               return -EINVAL;
> >
> > -     if (valid_pmu)
> > -             vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
> > -     else
> > -             vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
> > +     if (valid_pmu) {
> > +             mutex_lock(&vcpu->kvm->arch.config_lock);
> > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
> > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> > +
> > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
> > +                                                                 perfmon_to_pmuver(perfmon));
>
> I have the same comment as set_id_aa64dfr0_el1().
>
> Thank you,
> Reiji
>
> > +             mutex_unlock(&vcpu->kvm->arch.config_lock);
> > +     } else {
> > +             assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > +                        perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> > +     }
> >
> >       return 0;
> >  }
> > @@ -503,4 +525,12 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
> >       }
> >
> >       IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> > +
> > +     /*
> > +      * Initialise the default PMUver before there is a chance to
> > +      * create an actual PMU.
> > +      */
> > +     IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > +     IDREG(kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > +                                                   kvm_arm_pmu_get_pmuver_limit());
> >  }
> > diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> > index 628775334d5e..856ac59b6821 100644
> > --- a/include/kvm/arm_pmu.h
> > +++ b/include/kvm/arm_pmu.h
> > @@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
> >  /*
> >   * Evaluates as true when emulating PMUv3p5, and false otherwise.
> >   */
> > -#define kvm_pmu_is_3p5(vcpu)                                         \
> > -     (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> > +#define kvm_pmu_is_3p5(vcpu)                                                                 \
> > +      (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),                                 \
> > +              IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> >
> >  u8 kvm_arm_pmu_get_pmuver_limit(void);
> >
> > --
> > 2.40.0.348.gf938b09366-goog
> >
Thanks,
Jing
Reiji Watanabe April 25, 2023, 1:45 a.m. UTC | #5
Hi Jing,

On Mon, Apr 24, 2023 at 12:07:31PM -0700, Jing Zhang wrote:
> Hi Reiji,
> 
> On Tue, Apr 18, 2023 at 8:40 PM Reiji Watanabe <reijiw@google.com> wrote:
> >
> > Hi Jing,
> >
> > On Tue, Apr 04, 2023 at 03:53:42AM +0000, Jing Zhang wrote:
> > > With per guest ID registers, PMUver settings from userspace
> > > can be stored in its corresponding ID register.
> > >
> > > No functional change intended.
> > >
> > > Signed-off-by: Jing Zhang <jingzhangos@google.com>
> > > ---
> > >  arch/arm64/include/asm/kvm_host.h | 11 +++----
> > >  arch/arm64/kvm/arm.c              |  6 ----
> > >  arch/arm64/kvm/id_regs.c          | 50 ++++++++++++++++++++++++-------
> > >  include/kvm/arm_pmu.h             |  5 ++--
> > >  4 files changed, 49 insertions(+), 23 deletions(-)
> > >
> > > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > > index 67a55177fd83..da46a2729581 100644
> > > --- a/arch/arm64/include/asm/kvm_host.h
> > > +++ b/arch/arm64/include/asm/kvm_host.h
> > > @@ -237,6 +237,12 @@ struct kvm_arch {
> > >  #define KVM_ARCH_FLAG_EL1_32BIT                              4
> > >       /* PSCI SYSTEM_SUSPEND enabled for the guest */
> > >  #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED         5
> > > +     /*
> > > +      * AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
> > > +      * or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
> > > +      * userspace for VCPUs without PMU.
> > > +      */
> > > +#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU           6
> > >
> > >       unsigned long flags;
> > >
> > > @@ -249,11 +255,6 @@ struct kvm_arch {
> > >
> > >       cpumask_var_t supported_cpus;
> > >
> > > -     struct {
> > > -             u8 imp:4;
> > > -             u8 unimp:4;
> > > -     } dfr0_pmuver;
> > > -
> > >       /* Hypercall features firmware registers' descriptor */
> > >       struct kvm_smccc_features smccc_feat;
> > >
> > > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > > index 64e1c19e5a9b..3fe28d545b54 100644
> > > --- a/arch/arm64/kvm/arm.c
> > > +++ b/arch/arm64/kvm/arm.c
> > > @@ -138,12 +138,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
> > >       kvm_arm_init_hypercalls(kvm);
> > >       kvm_arm_init_id_regs(kvm);
> > >
> > > -     /*
> > > -      * Initialise the default PMUver before there is a chance to
> > > -      * create an actual PMU.
> > > -      */
> > > -     kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
> > > -
> > >       return 0;
> > >
> > >  err_free_cpumask:
> > > diff --git a/arch/arm64/kvm/id_regs.c b/arch/arm64/kvm/id_regs.c
> > > index 291311b1ecca..6f65d30693fe 100644
> > > --- a/arch/arm64/kvm/id_regs.c
> > > +++ b/arch/arm64/kvm/id_regs.c
> > > @@ -21,9 +21,12 @@
> > >  static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
> > >  {
> > >       if (kvm_vcpu_has_pmu(vcpu))
> > > -             return vcpu->kvm->arch.dfr0_pmuver.imp;
> > > +             return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > > +                              IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
> > > +     else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
> > > +             return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
> > >
> > > -     return vcpu->kvm->arch.dfr0_pmuver.unimp;
> > > +     return 0;
> > >  }
> > >
> > >  static u8 perfmon_to_pmuver(u8 perfmon)
> > > @@ -254,10 +257,20 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
> > >       if (val)
> > >               return -EINVAL;
> > >
> > > -     if (valid_pmu)
> > > -             vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
> > > -     else
> > > -             vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
> > > +     if (valid_pmu) {
> > > +             mutex_lock(&vcpu->kvm->arch.config_lock);
> > > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
> > > +                                                                 pmuver);
> > > +
> > > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
> > > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
> > > +                                                             pmuver_to_perfmon(pmuver));
> >
> > As those could be read without acquiring the lock, I don't think
> > we should expose the intermediate state of the register values.
> I will protect all reads/writes to KVM scope emulated ID registers
> with the lock.

Or I think we could resolve it by writing the new value atomically
(copy the value to a local variable, set the local variable to the
new value, and update the ID_REG value with WRITE_ONCE).
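
A minimal sketch of that approach (assuming IDREG() expands to an
lvalue on the per-VM ID register storage, as its other uses in this
series suggest):

	u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);

	/* Build the new value in a local variable first... */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	val |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK, pmuver);

	/*
	 * ...then publish it with a single store so that lockless
	 * readers never observe the cleared-but-not-yet-set state.
	 */
	WRITE_ONCE(IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1), val);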

Thank you,
Reiji


> >
> >
> > > +             mutex_unlock(&vcpu->kvm->arch.config_lock);
> > > +     } else {
> > > +             assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > > +                        pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
> > > +     }
> > >
> > >       return 0;
> > >  }
> > > @@ -294,10 +307,19 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
> > >       if (val)
> > >               return -EINVAL;
> > >
> > > -     if (valid_pmu)
> > > -             vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
> > > -     else
> > > -             vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
> > > +     if (valid_pmu) {
> > > +             mutex_lock(&vcpu->kvm->arch.config_lock);
> > > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
> > > +             IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
> > > +
> > > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
> > > +             IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
> > > +                                                                 perfmon_to_pmuver(perfmon));
> >
> > I have the same comment as set_id_aa64dfr0_el1().
> >
> > Thank you,
> > Reiji
> >
> > > +             mutex_unlock(&vcpu->kvm->arch.config_lock);
> > > +     } else {
> > > +             assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
> > > +                        perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
> > > +     }
> > >
> > >       return 0;
> > >  }
> > > @@ -503,4 +525,12 @@ void kvm_arm_init_id_regs(struct kvm *kvm)
> > >       }
> > >
> > >       IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
> > > +
> > > +     /*
> > > +      * Initialise the default PMUver before there is a chance to
> > > +      * create an actual PMU.
> > > +      */
> > > +     IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
> > > +     IDREG(kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
> > > +                                                   kvm_arm_pmu_get_pmuver_limit());
> > >  }
> > > diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
> > > index 628775334d5e..856ac59b6821 100644
> > > --- a/include/kvm/arm_pmu.h
> > > +++ b/include/kvm/arm_pmu.h
> > > @@ -92,8 +92,9 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
> > >  /*
> > >   * Evaluates as true when emulating PMUv3p5, and false otherwise.
> > >   */
> > > -#define kvm_pmu_is_3p5(vcpu)                                         \
> > > -     (vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> > > +#define kvm_pmu_is_3p5(vcpu)                                                                 \
> > > +      (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),                                 \
> > > +              IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
> > >
> > >  u8 kvm_arm_pmu_get_pmuver_limit(void);
> > >
> > > --
> > > 2.40.0.348.gf938b09366-goog
> > >
> Thanks,
> Jing

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 67a55177fd83..da46a2729581 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -237,6 +237,12 @@  struct kvm_arch {
 #define KVM_ARCH_FLAG_EL1_32BIT				4
 	/* PSCI SYSTEM_SUSPEND enabled for the guest */
 #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED		5
+	/*
+	 * AA64DFR0_EL1.PMUver was set as ID_AA64DFR0_EL1_PMUVer_IMP_DEF
+	 * or DFR0_EL1.PerfMon was set as ID_DFR0_EL1_PerfMon_IMPDEF from
+	 * userspace for VCPUs without PMU.
+	 */
+#define KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU		6
 
 	unsigned long flags;
 
@@ -249,11 +255,6 @@  struct kvm_arch {
 
 	cpumask_var_t supported_cpus;
 
-	struct {
-		u8 imp:4;
-		u8 unimp:4;
-	} dfr0_pmuver;
-
 	/* Hypercall features firmware registers' descriptor */
 	struct kvm_smccc_features smccc_feat;
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 64e1c19e5a9b..3fe28d545b54 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -138,12 +138,6 @@  int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 	kvm_arm_init_hypercalls(kvm);
 	kvm_arm_init_id_regs(kvm);
 
-	/*
-	 * Initialise the default PMUver before there is a chance to
-	 * create an actual PMU.
-	 */
-	kvm->arch.dfr0_pmuver.imp = kvm_arm_pmu_get_pmuver_limit();
-
 	return 0;
 
 err_free_cpumask:
diff --git a/arch/arm64/kvm/id_regs.c b/arch/arm64/kvm/id_regs.c
index 291311b1ecca..6f65d30693fe 100644
--- a/arch/arm64/kvm/id_regs.c
+++ b/arch/arm64/kvm/id_regs.c
@@ -21,9 +21,12 @@ 
 static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
 {
 	if (kvm_vcpu_has_pmu(vcpu))
-		return vcpu->kvm->arch.dfr0_pmuver.imp;
+		return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+				 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));
+	else if (test_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags))
+		return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
 
-	return vcpu->kvm->arch.dfr0_pmuver.unimp;
+	return 0;
 }
 
 static u8 perfmon_to_pmuver(u8 perfmon)
@@ -254,10 +257,20 @@  static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
 	if (val)
 		return -EINVAL;
 
-	if (valid_pmu)
-		vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
-	else
-		vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;
+	if (valid_pmu) {
+		mutex_lock(&vcpu->kvm->arch.config_lock);
+		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
+								    pmuver);
+
+		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
+		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK,
+								pmuver_to_perfmon(pmuver));
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
+	} else {
+		assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
+			   pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF);
+	}
 
 	return 0;
 }
@@ -294,10 +307,19 @@  static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	if (val)
 		return -EINVAL;
 
-	if (valid_pmu)
-		vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
-	else
-		vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);
+	if (valid_pmu) {
+		mutex_lock(&vcpu->kvm->arch.config_lock);
+		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) &= ~ID_DFR0_EL1_PerfMon_MASK;
+		IDREG(vcpu->kvm, SYS_ID_DFR0_EL1) |= FIELD_PREP(ID_DFR0_EL1_PerfMon_MASK, perfmon);
+
+		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
+		IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ID_AA64DFR0_EL1_PMUVer_MASK,
+								    perfmon_to_pmuver(perfmon));
+		mutex_unlock(&vcpu->kvm->arch.config_lock);
+	} else {
+		assign_bit(KVM_ARCH_FLAG_VCPU_HAS_IMP_DEF_PMU, &vcpu->kvm->arch.flags,
+			   perfmon == ID_DFR0_EL1_PerfMon_IMPDEF);
+	}
 
 	return 0;
 }
@@ -503,4 +525,12 @@  void kvm_arm_init_id_regs(struct kvm *kvm)
 	}
 
 	IDREG(kvm, SYS_ID_AA64PFR0_EL1) = val;
+
+	/*
+	 * Initialise the default PMUver before there is a chance to
+	 * create an actual PMU.
+	 */
+	IDREG(kvm, SYS_ID_AA64DFR0_EL1) &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
+	IDREG(kvm, SYS_ID_AA64DFR0_EL1) |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
+						      kvm_arm_pmu_get_pmuver_limit());
 }
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 628775334d5e..856ac59b6821 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -92,8 +92,9 @@  void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 /*
  * Evaluates as true when emulating PMUv3p5, and false otherwise.
  */
-#define kvm_pmu_is_3p5(vcpu)						\
-	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)
+#define kvm_pmu_is_3p5(vcpu)									\
+	 (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),					\
+		 IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1)) >= ID_AA64DFR0_EL1_PMUVer_V3P5)
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);