[v7,10/12] KVM: selftests: aarch64: Introduce vpmu_counter_access test

Message ID 20231009230858.3444834-11-rananta@google.com (mailing list archive)
State New, archived
Series KVM: arm64: PMU: Allow userspace to limit the number of PMCs on vCPU

Commit Message

Raghavendra Rao Ananta Oct. 9, 2023, 11:08 p.m. UTC
From: Reiji Watanabe <reijiw@google.com>

Introduce vpmu_counter_access test for arm64 platforms.
The test configures PMUv3 for a vCPU, sets PMCR_EL0.N for the vCPU,
and checks whether the guest consistently sees the same number of
PMU event counters (PMCR_EL0.N) that userspace sets.
The test is run with each of the PMCR_EL0.N values from 0 to 31
(for PMCR_EL0.N values greater than the host value, the test expects
the KVM_SET_ONE_REG write to succeed while leaving PMCR_EL0.N
unmodified).

Signed-off-by: Reiji Watanabe <reijiw@google.com>
Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../kvm/aarch64/vpmu_counter_access.c         | 247 ++++++++++++++++++
 2 files changed, 248 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c

Comments

Sebastian Ott Oct. 12, 2023, 11:24 a.m. UTC | #1
On Mon, 9 Oct 2023, Raghavendra Rao Ananta wrote:
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +static void create_vpmu_vm(void *guest_code)
> +{
> +	struct kvm_vcpu_init init;
> +	uint8_t pmuver, ec;
> +	uint64_t dfr0, irq = 23;
> +	struct kvm_device_attr irq_attr = {
> +		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
> +		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
> +		.addr = (uint64_t)&irq,
> +	};
> +	struct kvm_device_attr init_attr = {
> +		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
> +		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
> +	};
> +
> +	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
> +	memset(&vpmu_vm, 0, sizeof(vpmu_vm));
> +
> +	vpmu_vm.vm = vm_create(1);
> +	vm_init_descriptor_tables(vpmu_vm.vm);
> +	for (ec = 0; ec < ESR_EC_NUM; ec++) {
> +		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
> +					guest_sync_handler);
> +	}
> +
> +	/* Create vCPU with PMUv3 */
> +	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> +	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> +	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
> +	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
> +	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
> +					GICD_BASE_GPA, GICR_BASE_GPA);
> +
> +	/* Make sure that PMUv3 support is indicated in the ID register */
> +	vcpu_get_reg(vpmu_vm.vcpu,
> +		     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
> +	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
> +	TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> +		    pmuver >= ID_AA64DFR0_PMUVER_8_0,
> +		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
> +
> +	/* Initialize vPMU */
> +	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> +	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
> +}

This one fails to build for me:
aarch64/vpmu_counter_access.c: In function ‘create_vpmu_vm’:
aarch64/vpmu_counter_access.c:456:47: error: ‘ID_AA64DFR0_PMUVER_MASK’ undeclared (first use in this function); did you mean ‘ID_AA64DFR0_EL1_PMUVer_MASK’?
   456 |         pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);

Regards,
Sebastian
Sebastian Ott Oct. 12, 2023, 3:01 p.m. UTC | #2
On Thu, 12 Oct 2023, Sebastian Ott wrote:
> On Mon, 9 Oct 2023, Raghavendra Rao Ananta wrote:
>>  +/* Create a VM that has one vCPU with PMUv3 configured. */
>>  +static void create_vpmu_vm(void *guest_code)
>>  +{
>>  +	struct kvm_vcpu_init init;
>>  +	uint8_t pmuver, ec;
>>  +	uint64_t dfr0, irq = 23;
>>  +	struct kvm_device_attr irq_attr = {
>>  +		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
>>  +		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
>>  +		.addr = (uint64_t)&irq,
>>  +	};
>>  +	struct kvm_device_attr init_attr = {
>>  +		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
>>  +		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
>>  +	};
>>  +
>>  +	/* The test creates the vpmu_vm multiple times. Ensure a clean state
>>  */
>>  +	memset(&vpmu_vm, 0, sizeof(vpmu_vm));
>>  +
>>  +	vpmu_vm.vm = vm_create(1);
>>  +	vm_init_descriptor_tables(vpmu_vm.vm);
>>  +	for (ec = 0; ec < ESR_EC_NUM; ec++) {
>>  +		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
>>  +					guest_sync_handler);
>>  +	}
>>  +
>>  +	/* Create vCPU with PMUv3 */
>>  +	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
>>  +	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
>>  +	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
>>  +	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
>>  +	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
>>  +					GICD_BASE_GPA, GICR_BASE_GPA);
>>  +
>>  +	/* Make sure that PMUv3 support is indicated in the ID register */
>>  +	vcpu_get_reg(vpmu_vm.vcpu,
>>  +		     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
>>  +	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
>>  +	TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
>>  +		    pmuver >= ID_AA64DFR0_PMUVER_8_0,
>>  +		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3",
>>  pmuver);
>>  +
>>  +	/* Initialize vPMU */
>>  +	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
>>  +	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>>  +}
>
> This one fails to build for me:
> aarch64/vpmu_counter_access.c: In function ‘create_vpmu_vm’:
> aarch64/vpmu_counter_access.c:456:47: error: ‘ID_AA64DFR0_PMUVER_MASK’ 
> undeclared (first use in this function); did you mean 
> ‘ID_AA64DFR0_EL1_PMUVer_MASK’?
>   456 |         pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER),
>   dfr0);

Looks like there's a clash with
"KVM: arm64: selftests: Import automatic generation of sysreg defs"
from:
 	https://lore.kernel.org/r/20231003230408.3405722-12-oliver.upton@linux.dev
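
For reference, against the generated sysreg defs the check would
presumably need the renamed identifiers, something like (untested
sketch; names taken from the generator's scheme, per the compiler
hint above):

	pmuver = FIELD_GET(ID_AA64DFR0_EL1_PMUVer_MASK, dfr0);
	TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF &&
		    pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP,
		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);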
Raghavendra Rao Ananta Oct. 13, 2023, 9:05 p.m. UTC | #3
On Thu, Oct 12, 2023 at 8:02 AM Sebastian Ott <sebott@redhat.com> wrote:
>
> On Thu, 12 Oct 2023, Sebastian Ott wrote:
> > On Mon, 9 Oct 2023, Raghavendra Rao Ananta wrote:
> >>  +/* Create a VM that has one vCPU with PMUv3 configured. */
> >>  +static void create_vpmu_vm(void *guest_code)
> >>  +{
> >>  +   struct kvm_vcpu_init init;
> >>  +   uint8_t pmuver, ec;
> >>  +   uint64_t dfr0, irq = 23;
> >>  +   struct kvm_device_attr irq_attr = {
> >>  +           .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> >>  +           .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
> >>  +           .addr = (uint64_t)&irq,
> >>  +   };
> >>  +   struct kvm_device_attr init_attr = {
> >>  +           .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> >>  +           .attr = KVM_ARM_VCPU_PMU_V3_INIT,
> >>  +   };
> >>  +
> >>  +   /* The test creates the vpmu_vm multiple times. Ensure a clean state
> >>  */
> >>  +   memset(&vpmu_vm, 0, sizeof(vpmu_vm));
> >>  +
> >>  +   vpmu_vm.vm = vm_create(1);
> >>  +   vm_init_descriptor_tables(vpmu_vm.vm);
> >>  +   for (ec = 0; ec < ESR_EC_NUM; ec++) {
> >>  +           vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
> >>  +                                   guest_sync_handler);
> >>  +   }
> >>  +
> >>  +   /* Create vCPU with PMUv3 */
> >>  +   vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> >>  +   init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> >>  +   vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
> >>  +   vcpu_init_descriptor_tables(vpmu_vm.vcpu);
> >>  +   vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
> >>  +                                   GICD_BASE_GPA, GICR_BASE_GPA);
> >>  +
> >>  +   /* Make sure that PMUv3 support is indicated in the ID register */
> >>  +   vcpu_get_reg(vpmu_vm.vcpu,
> >>  +                KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
> >>  +   pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
> >>  +   TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> >>  +               pmuver >= ID_AA64DFR0_PMUVER_8_0,
> >>  +               "Unexpected PMUVER (0x%x) on the vCPU with PMUv3",
> >>  pmuver);
> >>  +
> >>  +   /* Initialize vPMU */
> >>  +   vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> >>  +   vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
> >>  +}
> >
> > This one fails to build for me:
> > aarch64/vpmu_counter_access.c: In function ‘create_vpmu_vm’:
> > aarch64/vpmu_counter_access.c:456:47: error: ‘ID_AA64DFR0_PMUVER_MASK’
> > undeclared (first use in this function); did you mean
> > ‘ID_AA64DFR0_EL1_PMUVer_MASK’?
> >   456 |         pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER),
> >   dfr0);
>
> Looks like there's a clash with
> "KVM: arm64: selftests: Import automatic generation of sysreg defs"
> from:
>         https://lore.kernel.org/r/20231003230408.3405722-12-oliver.upton@linux.dev
Thanks for the pointer, Sebastian! Surprisingly, I don't see the patch
when I sync to kvmarm/next.

Oliver,

Aren't the selftest patches from the 'Enable writable ID regs' series
[1] merged into kvmarm/next? Looking at the log, I couldn't find them;
the last patch from the series that went in was [2]. Am I missing
something?

Thank you.
Raghavendra

[1]: https://lore.kernel.org/all/169644154288.3677537.15121340860793882283.b4-ty@linux.dev/
[2]: https://lore.kernel.org/all/20231003230408.3405722-11-oliver.upton@linux.dev/
Sebastian Ott Oct. 16, 2023, 10:01 a.m. UTC | #4
On Fri, 13 Oct 2023, Raghavendra Rao Ananta wrote:
> On Thu, Oct 12, 2023 at 8:02 AM Sebastian Ott <sebott@redhat.com> wrote:
>>
>> On Thu, 12 Oct 2023, Sebastian Ott wrote:
>>> On Mon, 9 Oct 2023, Raghavendra Rao Ananta wrote:
>>>>  +/* Create a VM that has one vCPU with PMUv3 configured. */
>>>>  +static void create_vpmu_vm(void *guest_code)
>>>>  +{
>>>>  +   struct kvm_vcpu_init init;
>>>>  +   uint8_t pmuver, ec;
>>>>  +   uint64_t dfr0, irq = 23;
>>>>  +   struct kvm_device_attr irq_attr = {
>>>>  +           .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>>>>  +           .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
>>>>  +           .addr = (uint64_t)&irq,
>>>>  +   };
>>>>  +   struct kvm_device_attr init_attr = {
>>>>  +           .group = KVM_ARM_VCPU_PMU_V3_CTRL,
>>>>  +           .attr = KVM_ARM_VCPU_PMU_V3_INIT,
>>>>  +   };
>>>>  +
>>>>  +   /* The test creates the vpmu_vm multiple times. Ensure a clean state
>>>>  */
>>>>  +   memset(&vpmu_vm, 0, sizeof(vpmu_vm));
>>>>  +
>>>>  +   vpmu_vm.vm = vm_create(1);
>>>>  +   vm_init_descriptor_tables(vpmu_vm.vm);
>>>>  +   for (ec = 0; ec < ESR_EC_NUM; ec++) {
>>>>  +           vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
>>>>  +                                   guest_sync_handler);
>>>>  +   }
>>>>  +
>>>>  +   /* Create vCPU with PMUv3 */
>>>>  +   vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
>>>>  +   init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
>>>>  +   vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
>>>>  +   vcpu_init_descriptor_tables(vpmu_vm.vcpu);
>>>>  +   vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
>>>>  +                                   GICD_BASE_GPA, GICR_BASE_GPA);
>>>>  +
>>>>  +   /* Make sure that PMUv3 support is indicated in the ID register */
>>>>  +   vcpu_get_reg(vpmu_vm.vcpu,
>>>>  +                KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
>>>>  +   pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
>>>>  +   TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
>>>>  +               pmuver >= ID_AA64DFR0_PMUVER_8_0,
>>>>  +               "Unexpected PMUVER (0x%x) on the vCPU with PMUv3",
>>>>  pmuver);
>>>>  +
>>>>  +   /* Initialize vPMU */
>>>>  +   vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
>>>>  +   vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
>>>>  +}
>>>
>>> This one fails to build for me:
>>> aarch64/vpmu_counter_access.c: In function ‘create_vpmu_vm’:
>>> aarch64/vpmu_counter_access.c:456:47: error: ‘ID_AA64DFR0_PMUVER_MASK’
>>> undeclared (first use in this function); did you mean
>>> ‘ID_AA64DFR0_EL1_PMUVer_MASK’?
>>>   456 |         pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER),
>>>   dfr0);
>>
>> Looks like there's a clash with
>> "KVM: arm64: selftests: Import automatic generation of sysreg defs"
>> from:
>>         https://lore.kernel.org/r/20231003230408.3405722-12-oliver.upton@linux.dev
> Thanks for the pointer, Sebastian! Surprisingly, I don't see the patch
> when I sync to kvmarm/next.
>

Yeah, sorry - I've had both of these series applied locally.

Sebastian
Oliver Upton Oct. 16, 2023, 6:56 p.m. UTC | #5
On Fri, Oct 13, 2023 at 02:05:29PM -0700, Raghavendra Rao Ananta wrote:
> Oliver,
> 
> Aren't the selftest patches from the 'Enable writable ID regs' series
> [1] merged into kvmarm/next? Looking at the log, I couldn't find them
> and the last patch that went from the series was [2]. Am I missing
> something?
> 
> Thank you.
> Raghavendra
> 
> [1]: https://lore.kernel.org/all/169644154288.3677537.15121340860793882283.b4-ty@linux.dev/
> [2]: https://lore.kernel.org/all/20231003230408.3405722-11-oliver.upton@linux.dev/

This is intentional: updating the tools headers as was done in the
original series broke the perf tool build. I backed out the selftest
patches, but took the rest of the kernel changes into kvmarm/next so
they could soak while we sort out the selftests mess. Hopefully we can
get the fix reviewed in time [*]...

[*] https://lore.kernel.org/kvmarm/20231011195740.3349631-1-oliver.upton@linux.dev/
Raghavendra Rao Ananta Oct. 16, 2023, 7:05 p.m. UTC | #6
On Mon, Oct 16, 2023 at 11:56 AM Oliver Upton <oliver.upton@linux.dev> wrote:
>
> On Fri, Oct 13, 2023 at 02:05:29PM -0700, Raghavendra Rao Ananta wrote:
> > Oliver,
> >
> > Aren't the selftest patches from the 'Enable writable ID regs' series
> > [1] merged into kvmarm/next? Looking at the log, I couldn't find them
> > and the last patch that went from the series was [2]. Am I missing
> > something?
> >
> > Thank you.
> > Raghavendra
> >
> > [1]: https://lore.kernel.org/all/169644154288.3677537.15121340860793882283.b4-ty@linux.dev/
> > [2]: https://lore.kernel.org/all/20231003230408.3405722-11-oliver.upton@linux.dev/
>
> This is intentional, updating the tools headers as it was done in the
> original series broke the perftool build. I backed out the selftest
> patches, but took the rest of the kernel changes into kvmarm/next so
> they could soak while we sort out the selftests mess. Hopefully we can
> get the fix reviewed in time [*]...
>
> [*] https://lore.kernel.org/kvmarm/20231011195740.3349631-1-oliver.upton@linux.dev/
>
> --
Ah, I see. In that case, since it impacts this series, do you want me
to rebase my series on top of your selftests series for v8?

Thank you.
Raghavendra
> Thanks,
> Oliver
Oliver Upton Oct. 16, 2023, 7:07 p.m. UTC | #7
On Mon, Oct 16, 2023 at 12:05:16PM -0700, Raghavendra Rao Ananta wrote:
> On Mon, Oct 16, 2023 at 11:56 AM Oliver Upton <oliver.upton@linux.dev> wrote:
> >
> > On Fri, Oct 13, 2023 at 02:05:29PM -0700, Raghavendra Rao Ananta wrote:
> > > Oliver,
> > >
> > > Aren't the selftest patches from the 'Enable writable ID regs' series
> > > [1] merged into kvmarm/next? Looking at the log, I couldn't find them
> > > and the last patch that went from the series was [2]. Am I missing
> > > something?
> > >
> > > Thank you.
> > > Raghavendra
> > >
> > > [1]: https://lore.kernel.org/all/169644154288.3677537.15121340860793882283.b4-ty@linux.dev/
> > > [2]: https://lore.kernel.org/all/20231003230408.3405722-11-oliver.upton@linux.dev/
> >
> > This is intentional, updating the tools headers as it was done in the
> > original series broke the perftool build. I backed out the selftest
> > patches, but took the rest of the kernel changes into kvmarm/next so
> > they could soak while we sort out the selftests mess. Hopefully we can
> > get the fix reviewed in time [*]...
> >
> > [*] https://lore.kernel.org/kvmarm/20231011195740.3349631-1-oliver.upton@linux.dev/
> >
> > --
> Ah, I see. In that case, since it impacts this series, do you want me
> to rebase my series on top of your selftests series for v8?

No, please keep the two independent for now. I can fix it up when
applying the series.
Eric Auger Oct. 17, 2023, 2:51 p.m. UTC | #8
Hi Raghavendra,
On 10/10/23 01:08, Raghavendra Rao Ananta wrote:
> From: Reiji Watanabe <reijiw@google.com>
> 
> Introduce vpmu_counter_access test for arm64 platforms.
> The test configures PMUv3 for a vCPU, sets PMCR_EL0.N for the vCPU,
> and check if the guest can consistently see the same number of the
> PMU event counters (PMCR_EL0.N) that userspace sets.
> This test case is done with each of the PMCR_EL0.N values from
> 0 to 31 (With the PMCR_EL0.N values greater than the host value,
> the test expects KVM_SET_ONE_REG for the PMCR_EL0 to fail).
> 
> Signed-off-by: Reiji Watanabe <reijiw@google.com>
> Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../kvm/aarch64/vpmu_counter_access.c         | 247 ++++++++++++++++++
>  2 files changed, 248 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> 
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index a3bb36fb3cfc..416700aa196c 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -149,6 +149,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
>  TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
>  TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
>  TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
> +TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
>  TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
>  TEST_GEN_PROGS_aarch64 += demand_paging_test
>  TEST_GEN_PROGS_aarch64 += dirty_log_test
> diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> new file mode 100644
> index 000000000000..58949b17d76e
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> @@ -0,0 +1,247 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * vpmu_counter_access - Test vPMU event counter access
> + *
> + * Copyright (c) 2022 Google LLC.
2023 ;-)
> + *
> + * This test checks if the guest can see the same number of the PMU event
> + * counters (PMCR_EL0.N) that userspace sets.
> + * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
> + */
> +#include <kvm_util.h>
> +#include <processor.h>
> +#include <test_util.h>
> +#include <vgic.h>
> +#include <perf/arm_pmuv3.h>
> +#include <linux/bitfield.h>
> +
> +/* The max number of the PMU event counters (excluding the cycle counter) */
> +#define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)
> +
> +struct vpmu_vm {
> +	struct kvm_vm *vm;
> +	struct kvm_vcpu *vcpu;
> +	int gic_fd;
> +};
> +
> +static struct vpmu_vm vpmu_vm;
> +
> +static uint64_t get_pmcr_n(uint64_t pmcr)
> +{
> +	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
> +}
> +
> +static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
> +{
> +	*pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
> +	*pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
> +}
> +
> +static void guest_sync_handler(struct ex_regs *regs)
> +{
> +	uint64_t esr, ec;
> +
> +	esr = read_sysreg(esr_el1);
> +	ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
> +	__GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec);
> +}
> +
> +/*
> + * The guest is configured with PMUv3 with @expected_pmcr_n number of
> + * event counters.
> + * Check if @expected_pmcr_n is consistent with PMCR_EL0.N.
> + */
> +static void guest_code(uint64_t expected_pmcr_n)
> +{
> +	uint64_t pmcr, pmcr_n;
> +
> +	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
> +			"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
> +			expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
> +
> +	pmcr = read_sysreg(pmcr_el0);
> +	pmcr_n = get_pmcr_n(pmcr);
> +
> +	/* Make sure that PMCR_EL0.N indicates the value userspace set */
> +	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
> +			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
> +			pmcr_n, expected_pmcr_n);
> +
> +	GUEST_DONE();
> +}
> +
> +#define GICD_BASE_GPA	0x8000000ULL
> +#define GICR_BASE_GPA	0x80A0000ULL
> +
> +/* Create a VM that has one vCPU with PMUv3 configured. */
> +static void create_vpmu_vm(void *guest_code)
> +{
> +	struct kvm_vcpu_init init;
> +	uint8_t pmuver, ec;
> +	uint64_t dfr0, irq = 23;
> +	struct kvm_device_attr irq_attr = {
> +		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
> +		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
> +		.addr = (uint64_t)&irq,
> +	};
> +	struct kvm_device_attr init_attr = {
> +		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
> +		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
> +	};
> +
> +	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
> +	memset(&vpmu_vm, 0, sizeof(vpmu_vm));
> +
> +	vpmu_vm.vm = vm_create(1);
> +	vm_init_descriptor_tables(vpmu_vm.vm);
> +	for (ec = 0; ec < ESR_EC_NUM; ec++) {
> +		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
> +					guest_sync_handler);
> +	}
> +
> +	/* Create vCPU with PMUv3 */
> +	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> +	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> +	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
> +	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
> +	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
> +					GICD_BASE_GPA, GICR_BASE_GPA);
__TEST_REQUIRE(vpmu_vm.gic_fd >= 0, "Failed to create vgic-v3, skipping");
as done in some other tests

> +
> +	/* Make sure that PMUv3 support is indicated in the ID register */
> +	vcpu_get_reg(vpmu_vm.vcpu,
> +		     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
> +	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
> +	TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> +		    pmuver >= ID_AA64DFR0_PMUVER_8_0,
> +		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
> +
> +	/* Initialize vPMU */
> +	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> +	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
> +}
> +
> +static void destroy_vpmu_vm(void)
> +{
> +	close(vpmu_vm.gic_fd);
> +	kvm_vm_free(vpmu_vm.vm);
> +}
> +
> +static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
> +{
> +	struct ucall uc;
> +
> +	vcpu_args_set(vcpu, 1, pmcr_n);
> +	vcpu_run(vcpu);
> +	switch (get_ucall(vcpu, &uc)) {
> +	case UCALL_ABORT:
> +		REPORT_GUEST_ASSERT(uc);
> +		break;
> +	case UCALL_DONE:
> +		break;
> +	default:
> +		TEST_FAIL("Unknown ucall %lu", uc.cmd);
> +		break;
> +	}
> +}
> +
> +/*
> + * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
> + * and run the test.
> + */
> +static void run_test(uint64_t pmcr_n)
> +{
> +	struct kvm_vcpu *vcpu;
> +	uint64_t sp, pmcr;
> +	struct kvm_vcpu_init init;
> +
> +	pr_debug("Test with pmcr_n %lu\n", pmcr_n);
> +	create_vpmu_vm(guest_code);
> +
> +	vcpu = vpmu_vm.vcpu;
> +
> +	/* Save the initial sp to restore them later to run the guest again */
> +	vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
> +
> +	/* Update the PMCR_EL0.N with @pmcr_n */
> +	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> +	set_pmcr_n(&pmcr, pmcr_n);
> +	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
> +
> +	run_vcpu(vcpu, pmcr_n);
> +
> +	/*
> +	 * Reset and re-initialize the vCPU, and run the guest code again to
> +	 * check if PMCR_EL0.N is preserved.
> +	 */
> +	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> +	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> +	aarch64_vcpu_setup(vcpu, &init);
> +	vcpu_init_descriptor_tables(vcpu);
> +	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
> +	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
> +
> +	run_vcpu(vcpu, pmcr_n);
> +
> +	destroy_vpmu_vm();
> +}
> +
> +/*
> + * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
> + * the vCPU to @pmcr_n, which is larger than the host value.
> + * The attempt should fail as @pmcr_n is too big to set for the vCPU.
> + */
> +static void run_error_test(uint64_t pmcr_n)
> +{
> +	struct kvm_vcpu *vcpu;
> +	uint64_t pmcr, pmcr_orig;
> +
> +	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
> +	create_vpmu_vm(guest_code);
> +	vcpu = vpmu_vm.vcpu;
> +
> +	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
> +	pmcr = pmcr_orig;
> +
> +	/*
> +	 * Setting a larger value of PMCR.N should not modify the field, and
> +	 * return a success.
> +	 */
> +	set_pmcr_n(&pmcr, pmcr_n);
> +	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
> +	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> +	TEST_ASSERT(pmcr_orig == pmcr,
> +		    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
> +		    pmcr, pmcr_n);
nit: you could introduce a set_pmcr_n()-style routine which creates the
vpmu_vm, sets PMCR.N, and checks whether the setting is applied. An
argument could tell the helper whether the setting is supposed to fail.
This could be used in both run_error_test and run_test, which mostly
share the same code. A sketch of such a helper is below.
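
A minimal sketch of what that shared helper could look like (the name
vcpu_set_pmcr_n() and the expect_fail flag are hypothetical, chosen to
avoid clashing with the existing set_pmcr_n() bit helper; it only uses
helpers already present in the patch):

static struct kvm_vcpu *vcpu_set_pmcr_n(uint64_t pmcr_n, bool expect_fail)
{
	struct kvm_vcpu *vcpu;
	uint64_t pmcr, pmcr_orig;

	create_vpmu_vm(guest_code);
	vcpu = vpmu_vm.vcpu;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
	pmcr = pmcr_orig;

	/* Try to write @pmcr_n into PMCR_EL0.N and read the result back */
	set_pmcr_n(&pmcr, pmcr_n);
	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);

	if (expect_fail)
		/* KVM must ignore the write and preserve the original value */
		TEST_ASSERT(pmcr_orig == pmcr,
			    "PMCR.N modified by KVM (PMCR: 0x%lx) for pmcr_n: 0x%lx",
			    pmcr, pmcr_n);
	else
		/* The write must take effect and read back as @pmcr_n */
		TEST_ASSERT(get_pmcr_n(pmcr) == pmcr_n,
			    "Failed to update PMCR.N to %lu (read back: %lu)",
			    pmcr_n, get_pmcr_n(pmcr));

	return vcpu;
}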
> +
> +	destroy_vpmu_vm();
> +}
> +
> +/*
> + * Return the default number of implemented PMU event counters excluding
> + * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
> + */
> +static uint64_t get_pmcr_n_limit(void)
> +{
> +	uint64_t pmcr;
> +
> +	create_vpmu_vm(guest_code);
> +	vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> +	destroy_vpmu_vm();
> +	return get_pmcr_n(pmcr);
> +}
> +
> +int main(void)
> +{
> +	uint64_t i, pmcr_n;
> +
> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
> +
> +	pmcr_n = get_pmcr_n_limit();
> +	for (i = 0; i <= pmcr_n; i++)
> +		run_test(i);
> +
> +	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
> +		run_error_test(i);
> +
> +	return 0;
> +}

Besides this, looks good to me.

Thanks

Eric
Sebastian Ott Oct. 17, 2023, 3:48 p.m. UTC | #9
On Mon, 9 Oct 2023, Raghavendra Rao Ananta wrote:
> +static void guest_code(uint64_t expected_pmcr_n)
> +{
> +	uint64_t pmcr, pmcr_n;
> +
> +	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
> +			"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
> +			expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
> +
> +	pmcr = read_sysreg(pmcr_el0);
> +	pmcr_n = get_pmcr_n(pmcr);
> +
> +	/* Make sure that PMCR_EL0.N indicates the value userspace set */
> +	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
> +			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
> +			pmcr_n, expected_pmcr_n);

Expected vs read value is swapped.
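
I.e. swapping the last two arguments makes the message match the
values:

	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
			expected_pmcr_n, pmcr_n);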


Also, since the kernel has special handling for this, should we add a
test like below?

+static void immutable_test(void)
+{
+	struct kvm_vcpu *vcpu;
+	uint64_t sp, pmcr, pmcr_n;
+	struct kvm_vcpu_init init;
+
+	create_vpmu_vm(guest_code);
+
+	vcpu = vpmu_vm.vcpu;
+
+	/* Save the initial sp to restore it later to run the guest again */
+	vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
+
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+	pmcr_n = get_pmcr_n(pmcr);
+
+	run_vcpu(vcpu, pmcr_n);
+
+	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
+	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
+	aarch64_vcpu_setup(vcpu, &init);
+	vcpu_init_descriptor_tables(vcpu);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+
+	/* Update the PMCR_EL0.N after the VM ran once */
+	set_pmcr_n(&pmcr, 0);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
+
+	/* Verify that the guest still gets the unmodified value */
+	run_vcpu(vcpu, pmcr_n);
+
+	destroy_vpmu_vm();
+}
Raghavendra Rao Ananta Oct. 17, 2023, 5:07 p.m. UTC | #10
Hi Eric,

On Tue, Oct 17, 2023 at 7:51 AM Eric Auger <eauger@redhat.com> wrote:
>
> Hi Raghavendra,
> On 10/10/23 01:08, Raghavendra Rao Ananta wrote:
> > From: Reiji Watanabe <reijiw@google.com>
> >
> > Introduce vpmu_counter_access test for arm64 platforms.
> > The test configures PMUv3 for a vCPU, sets PMCR_EL0.N for the vCPU,
> > and check if the guest can consistently see the same number of the
> > PMU event counters (PMCR_EL0.N) that userspace sets.
> > This test case is done with each of the PMCR_EL0.N values from
> > 0 to 31 (With the PMCR_EL0.N values greater than the host value,
> > the test expects KVM_SET_ONE_REG for the PMCR_EL0 to fail).
> >
> > Signed-off-by: Reiji Watanabe <reijiw@google.com>
> > Signed-off-by: Raghavendra Rao Ananta <rananta@google.com>
> > ---
> >  tools/testing/selftests/kvm/Makefile          |   1 +
> >  .../kvm/aarch64/vpmu_counter_access.c         | 247 ++++++++++++++++++
> >  2 files changed, 248 insertions(+)
> >  create mode 100644 tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> >
> > diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> > index a3bb36fb3cfc..416700aa196c 100644
> > --- a/tools/testing/selftests/kvm/Makefile
> > +++ b/tools/testing/selftests/kvm/Makefile
> > @@ -149,6 +149,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
> >  TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
> >  TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
> >  TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
> > +TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
> >  TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
> >  TEST_GEN_PROGS_aarch64 += demand_paging_test
> >  TEST_GEN_PROGS_aarch64 += dirty_log_test
> > diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> > new file mode 100644
> > index 000000000000..58949b17d76e
> > --- /dev/null
> > +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
> > @@ -0,0 +1,247 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +/*
> > + * vpmu_counter_access - Test vPMU event counter access
> > + *
> > + * Copyright (c) 2022 Google LLC.
> 2023 ;-)
Will fix in v8.
> > + *
> > + * This test checks if the guest can see the same number of the PMU event
> > + * counters (PMCR_EL0.N) that userspace sets.
> > + * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
> > + */
> > +#include <kvm_util.h>
> > +#include <processor.h>
> > +#include <test_util.h>
> > +#include <vgic.h>
> > +#include <perf/arm_pmuv3.h>
> > +#include <linux/bitfield.h>
> > +
> > +/* The max number of the PMU event counters (excluding the cycle counter) */
> > +#define ARMV8_PMU_MAX_GENERAL_COUNTERS       (ARMV8_PMU_MAX_COUNTERS - 1)
> > +
> > +struct vpmu_vm {
> > +     struct kvm_vm *vm;
> > +     struct kvm_vcpu *vcpu;
> > +     int gic_fd;
> > +};
> > +
> > +static struct vpmu_vm vpmu_vm;
> > +
> > +static uint64_t get_pmcr_n(uint64_t pmcr)
> > +{
> > +     return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
> > +}
> > +
> > +static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
> > +{
> > +     *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
> > +     *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
> > +}
> > +
> > +static void guest_sync_handler(struct ex_regs *regs)
> > +{
> > +     uint64_t esr, ec;
> > +
> > +     esr = read_sysreg(esr_el1);
> > +     ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
> > +     __GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec);
> > +}
> > +
> > +/*
> > + * The guest is configured with PMUv3 with @expected_pmcr_n number of
> > + * event counters.
> > + * Check if @expected_pmcr_n is consistent with PMCR_EL0.N.
> > + */
> > +static void guest_code(uint64_t expected_pmcr_n)
> > +{
> > +     uint64_t pmcr, pmcr_n;
> > +
> > +     __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
> > +                     "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
> > +                     expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
> > +
> > +     pmcr = read_sysreg(pmcr_el0);
> > +     pmcr_n = get_pmcr_n(pmcr);
> > +
> > +     /* Make sure that PMCR_EL0.N indicates the value userspace set */
> > +     __GUEST_ASSERT(pmcr_n == expected_pmcr_n,
> > +                     "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
> > +                     pmcr_n, expected_pmcr_n);
> > +
> > +     GUEST_DONE();
> > +}
> > +
> > +#define GICD_BASE_GPA        0x8000000ULL
> > +#define GICR_BASE_GPA        0x80A0000ULL
> > +
> > +/* Create a VM that has one vCPU with PMUv3 configured. */
> > +static void create_vpmu_vm(void *guest_code)
> > +{
> > +     struct kvm_vcpu_init init;
> > +     uint8_t pmuver, ec;
> > +     uint64_t dfr0, irq = 23;
> > +     struct kvm_device_attr irq_attr = {
> > +             .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> > +             .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
> > +             .addr = (uint64_t)&irq,
> > +     };
> > +     struct kvm_device_attr init_attr = {
> > +             .group = KVM_ARM_VCPU_PMU_V3_CTRL,
> > +             .attr = KVM_ARM_VCPU_PMU_V3_INIT,
> > +     };
> > +
> > +     /* The test creates the vpmu_vm multiple times. Ensure a clean state */
> > +     memset(&vpmu_vm, 0, sizeof(vpmu_vm));
> > +
> > +     vpmu_vm.vm = vm_create(1);
> > +     vm_init_descriptor_tables(vpmu_vm.vm);
> > +     for (ec = 0; ec < ESR_EC_NUM; ec++) {
> > +             vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
> > +                                     guest_sync_handler);
> > +     }
> > +
> > +     /* Create vCPU with PMUv3 */
> > +     vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> > +     init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> > +     vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
> > +     vcpu_init_descriptor_tables(vpmu_vm.vcpu);
> > +     vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
> > +                                     GICD_BASE_GPA, GICR_BASE_GPA);
> __TEST_REQUIRE(vpmu_vm.gic_fd >= 0, "Failed to create vgic-v3, skipping");
> as done in some other tests
>
I'll add this in v8.
> > +
> > +     /* Make sure that PMUv3 support is indicated in the ID register */
> > +     vcpu_get_reg(vpmu_vm.vcpu,
> > +                  KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
> > +     pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
> > +     TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
> > +                 pmuver >= ID_AA64DFR0_PMUVER_8_0,
> > +                 "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
> > +
> > +     /* Initialize vPMU */
> > +     vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
> > +     vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
> > +}
> > +
> > +static void destroy_vpmu_vm(void)
> > +{
> > +     close(vpmu_vm.gic_fd);
> > +     kvm_vm_free(vpmu_vm.vm);
> > +}
> > +
> > +static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
> > +{
> > +     struct ucall uc;
> > +
> > +     vcpu_args_set(vcpu, 1, pmcr_n);
> > +     vcpu_run(vcpu);
> > +     switch (get_ucall(vcpu, &uc)) {
> > +     case UCALL_ABORT:
> > +             REPORT_GUEST_ASSERT(uc);
> > +             break;
> > +     case UCALL_DONE:
> > +             break;
> > +     default:
> > +             TEST_FAIL("Unknown ucall %lu", uc.cmd);
> > +             break;
> > +     }
> > +}
> > +
> > +/*
> > + * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
> > + * and run the test.
> > + */
> > +static void run_test(uint64_t pmcr_n)
> > +{
> > +     struct kvm_vcpu *vcpu;
> > +     uint64_t sp, pmcr;
> > +     struct kvm_vcpu_init init;
> > +
> > +     pr_debug("Test with pmcr_n %lu\n", pmcr_n);
> > +     create_vpmu_vm(guest_code);
> > +
> > +     vcpu = vpmu_vm.vcpu;
> > +
> > +     /* Save the initial sp to restore them later to run the guest again */
> > +     vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
> > +
> > +     /* Update the PMCR_EL0.N with @pmcr_n */
> > +     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> > +     set_pmcr_n(&pmcr, pmcr_n);
> > +     vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
> > +
> > +     run_vcpu(vcpu, pmcr_n);
> > +
> > +     /*
> > +      * Reset and re-initialize the vCPU, and run the guest code again to
> > +      * check if PMCR_EL0.N is preserved.
> > +      */
> > +     vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> > +     init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> > +     aarch64_vcpu_setup(vcpu, &init);
> > +     vcpu_init_descriptor_tables(vcpu);
> > +     vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
> > +     vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
> > +
> > +     run_vcpu(vcpu, pmcr_n);
> > +
> > +     destroy_vpmu_vm();
> > +}
> > +
> > +/*
> > + * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
> > + * the vCPU to @pmcr_n, which is larger than the host value.
> > + * The attempt should fail as @pmcr_n is too big to set for the vCPU.
> > + */
> > +static void run_error_test(uint64_t pmcr_n)
> > +{
> > +     struct kvm_vcpu *vcpu;
> > +     uint64_t pmcr, pmcr_orig;
> > +
> > +     pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
> > +     create_vpmu_vm(guest_code);
> > +     vcpu = vpmu_vm.vcpu;
> > +
> > +     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
> > +     pmcr = pmcr_orig;
> > +
> > +     /*
> > +      * Setting a larger value of PMCR.N should not modify the field, and
> > +      * return a success.
> > +      */
> > +     set_pmcr_n(&pmcr, pmcr_n);
> > +     vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
> > +     vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> > +     TEST_ASSERT(pmcr_orig == pmcr,
> > +                 "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
> > +                 pmcr, pmcr_n);
> nit: you could introduce a set_pmcr_n() routine  which creates the
> vpmu_vm and set the PMCR.N and check whether the setting is applied. An
> arg could tell the helper whether this is supposed to fail. This could
> be used in both run_error_test and run_test which both mostly use the
> same code.
Good idea. I'll think about it.

Thank you.
Raghavendra
> > +
> > +     destroy_vpmu_vm();
> > +}
> > +
> > +/*
> > + * Return the default number of implemented PMU event counters excluding
> > + * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
> > + */
> > +static uint64_t get_pmcr_n_limit(void)
> > +{
> > +     uint64_t pmcr;
> > +
> > +     create_vpmu_vm(guest_code);
> > +     vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> > +     destroy_vpmu_vm();
> > +     return get_pmcr_n(pmcr);
> > +}
> > +
> > +int main(void)
> > +{
> > +     uint64_t i, pmcr_n;
> > +
> > +     TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
> > +
> > +     pmcr_n = get_pmcr_n_limit();
> > +     for (i = 0; i <= pmcr_n; i++)
> > +             run_test(i);
> > +
> > +     for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
> > +             run_error_test(i);
> > +
> > +     return 0;
> > +}
>
> Besides this looks good to me.
>
> Thanks
>
> Eric
>
Raghavendra Rao Ananta Oct. 17, 2023, 5:10 p.m. UTC | #11
On Tue, Oct 17, 2023 at 8:48 AM Sebastian Ott <sebott@redhat.com> wrote:
>
> On Mon, 9 Oct 2023, Raghavendra Rao Ananta wrote:
> > +static void guest_code(uint64_t expected_pmcr_n)
> > +{
> > +     uint64_t pmcr, pmcr_n;
> > +
> > +     __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
> > +                     "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
> > +                     expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
> > +
> > +     pmcr = read_sysreg(pmcr_el0);
> > +     pmcr_n = get_pmcr_n(pmcr);
> > +
> > +     /* Make sure that PMCR_EL0.N indicates the value userspace set */
> > +     __GUEST_ASSERT(pmcr_n == expected_pmcr_n,
> > +                     "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
> > +                     pmcr_n, expected_pmcr_n);
>
> Expected vs read value is swapped.
>
Good catch! I'll fix this in v8.
>
> Also, since the kernel has special handling for this, should we add a
> test like below?
>
> +static void immutable_test(void)
> +{
> +       struct kvm_vcpu *vcpu;
> +       uint64_t sp, pmcr, pmcr_n;
> +       struct kvm_vcpu_init init;
> +
> +       create_vpmu_vm(guest_code);
> +
> +       vcpu = vpmu_vm.vcpu;
> +
> +       /* Save the initial sp to restore them later to run the guest again */
> +       vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
> +
> +       vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
> +       pmcr_n = get_pmcr_n(pmcr);
> +
> +       run_vcpu(vcpu, pmcr_n);
> +
> +       vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
> +       init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
> +       aarch64_vcpu_setup(vcpu, &init);
> +       vcpu_init_descriptor_tables(vcpu);
> +       vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
> +       vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
> +
> +       /* Update the PMCR_EL0.N after the VM ran once */
> +       set_pmcr_n(&pmcr, 0);
> +       vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
> +
> +       /* Verify that the guest still gets the unmodified value */
> +       run_vcpu(vcpu, pmcr_n);
> +
> +       destroy_vpmu_vm();
> +}
Thanks for the suggestion! I'll add this test case in v8.

- Raghavendra
>

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index a3bb36fb3cfc..416700aa196c 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -149,6 +149,7 @@  TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq
+TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access
 TEST_GEN_PROGS_aarch64 += access_tracking_perf_test
 TEST_GEN_PROGS_aarch64 += demand_paging_test
 TEST_GEN_PROGS_aarch64 += dirty_log_test
diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
new file mode 100644
index 000000000000..58949b17d76e
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c
@@ -0,0 +1,247 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vpmu_counter_access - Test vPMU event counter access
+ *
+ * Copyright (c) 2022 Google LLC.
+ *
+ * This test checks if the guest can see the same number of PMU event
+ * counters (PMCR_EL0.N) that userspace sets.
+ * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host.
+ */
+#include <kvm_util.h>
+#include <processor.h>
+#include <test_util.h>
+#include <vgic.h>
+#include <perf/arm_pmuv3.h>
+#include <linux/bitfield.h>
+
+/* The max number of the PMU event counters (excluding the cycle counter) */
+#define ARMV8_PMU_MAX_GENERAL_COUNTERS	(ARMV8_PMU_MAX_COUNTERS - 1)
+
+struct vpmu_vm {
+	struct kvm_vm *vm;
+	struct kvm_vcpu *vcpu;
+	int gic_fd;
+};
+
+static struct vpmu_vm vpmu_vm;
+
+static uint64_t get_pmcr_n(uint64_t pmcr)
+{
+	return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+}
+
+static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n)
+{
+	*pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
+	*pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT);
+}
+
+static void guest_sync_handler(struct ex_regs *regs)
+{
+	uint64_t esr, ec;
+
+	esr = read_sysreg(esr_el1);
+	ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK;
+	__GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec);
+}
+
+/*
+ * The guest is configured with PMUv3 with @expected_pmcr_n number of
+ * event counters.
+ * Check if @expected_pmcr_n is consistent with PMCR_EL0.N.
+ */
+static void guest_code(uint64_t expected_pmcr_n)
+{
+	uint64_t pmcr, pmcr_n;
+
+	__GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS,
+			"Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx",
+			expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS);
+
+	pmcr = read_sysreg(pmcr_el0);
+	pmcr_n = get_pmcr_n(pmcr);
+
+	/* Make sure that PMCR_EL0.N indicates the value userspace set */
+	__GUEST_ASSERT(pmcr_n == expected_pmcr_n,
+			"Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx",
+			pmcr_n, expected_pmcr_n);
+
+	GUEST_DONE();
+}
+
+#define GICD_BASE_GPA	0x8000000ULL
+#define GICR_BASE_GPA	0x80A0000ULL
+
+/* Create a VM that has one vCPU with PMUv3 configured. */
+static void create_vpmu_vm(void *guest_code)
+{
+	struct kvm_vcpu_init init;
+	uint8_t pmuver, ec;
+	uint64_t dfr0, irq = 23;
+	struct kvm_device_attr irq_attr = {
+		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
+		.attr = KVM_ARM_VCPU_PMU_V3_IRQ,
+		.addr = (uint64_t)&irq,
+	};
+	struct kvm_device_attr init_attr = {
+		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
+		.attr = KVM_ARM_VCPU_PMU_V3_INIT,
+	};
+
+	/* The test creates the vpmu_vm multiple times. Ensure a clean state */
+	memset(&vpmu_vm, 0, sizeof(vpmu_vm));
+
+	vpmu_vm.vm = vm_create(1);
+	vm_init_descriptor_tables(vpmu_vm.vm);
+	for (ec = 0; ec < ESR_EC_NUM; ec++) {
+		vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec,
+					guest_sync_handler);
+	}
+
+	/* Create vCPU with PMUv3 */
+	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
+	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
+	vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code);
+	vcpu_init_descriptor_tables(vpmu_vm.vcpu);
+	vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64,
+					GICD_BASE_GPA, GICR_BASE_GPA);
+
+	/* Make sure that PMUv3 support is indicated in the ID register */
+	vcpu_get_reg(vpmu_vm.vcpu,
+		     KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0);
+	pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), dfr0);
+	TEST_ASSERT(pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+		    pmuver >= ID_AA64DFR0_PMUVER_8_0,
+		    "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver);
+
+	/* Initialize vPMU */
+	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr);
+	vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr);
+}
+
+static void destroy_vpmu_vm(void)
+{
+	close(vpmu_vm.gic_fd);
+	kvm_vm_free(vpmu_vm.vm);
+}
+
+static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n)
+{
+	struct ucall uc;
+
+	vcpu_args_set(vcpu, 1, pmcr_n);
+	vcpu_run(vcpu);
+	switch (get_ucall(vcpu, &uc)) {
+	case UCALL_ABORT:
+		REPORT_GUEST_ASSERT(uc);
+		break;
+	case UCALL_DONE:
+		break;
+	default:
+		TEST_FAIL("Unknown ucall %lu", uc.cmd);
+		break;
+	}
+}
+
+/*
+ * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n,
+ * and run the test.
+ */
+static void run_test(uint64_t pmcr_n)
+{
+	struct kvm_vcpu *vcpu;
+	uint64_t sp, pmcr;
+	struct kvm_vcpu_init init;
+
+	pr_debug("Test with pmcr_n %lu\n", pmcr_n);
+	create_vpmu_vm(guest_code);
+
+	vcpu = vpmu_vm.vcpu;
+
+	/* Save the initial sp to restore it later to run the guest again */
+	vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp);
+
+	/* Update the PMCR_EL0.N with @pmcr_n */
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+	set_pmcr_n(&pmcr, pmcr_n);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
+
+	run_vcpu(vcpu, pmcr_n);
+
+	/*
+	 * Reset and re-initialize the vCPU, and run the guest code again to
+	 * check if PMCR_EL0.N is preserved.
+	 */
+	vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init);
+	init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3);
+	aarch64_vcpu_setup(vcpu, &init);
+	vcpu_init_descriptor_tables(vcpu);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp);
+	vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
+
+	run_vcpu(vcpu, pmcr_n);
+
+	destroy_vpmu_vm();
+}
+
+/*
+ * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for
+ * the vCPU to @pmcr_n, which is larger than the host value.
+ * The write should succeed, but since @pmcr_n is too big for the vCPU,
+ * KVM must leave PMCR_EL0.N unmodified.
+ */
+static void run_error_test(uint64_t pmcr_n)
+{
+	struct kvm_vcpu *vcpu;
+	uint64_t pmcr, pmcr_orig;
+
+	pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n);
+	create_vpmu_vm(guest_code);
+	vcpu = vpmu_vm.vcpu;
+
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig);
+	pmcr = pmcr_orig;
+
+	/*
+	 * Setting a larger value of PMCR.N should not modify the field and
+	 * should return success.
+	 */
+	set_pmcr_n(&pmcr, pmcr_n);
+	vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr);
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+	TEST_ASSERT(pmcr_orig == pmcr,
+		    "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n",
+		    pmcr, pmcr_n);
+
+	destroy_vpmu_vm();
+}
+
+/*
+ * Return the default number of implemented PMU event counters excluding
+ * the cycle counter (i.e. PMCR_EL0.N value) for the guest.
+ */
+static uint64_t get_pmcr_n_limit(void)
+{
+	uint64_t pmcr;
+
+	create_vpmu_vm(guest_code);
+	vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr);
+	destroy_vpmu_vm();
+	return get_pmcr_n(pmcr);
+}
+
+int main(void)
+{
+	uint64_t i, pmcr_n;
+
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3));
+
+	pmcr_n = get_pmcr_n_limit();
+	for (i = 0; i <= pmcr_n; i++)
+		run_test(i);
+
+	for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++)
+		run_error_test(i);
+
+	return 0;
+}