
[v3,5/5] KVM: arm64: selftests: Test for setting ID register from userspace

Message ID 20231011195740.3349631-6-oliver.upton@linux.dev (mailing list archive)
State New, archived
Series KVM: selftests: Add ID reg test, update headers

Commit Message

Oliver Upton Oct. 11, 2023, 7:57 p.m. UTC
From: Jing Zhang <jingzhangos@google.com>

Add tests to verify setting ID registers from userspace is handled
correctly by KVM. Also add a test case to use ioctl
KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks.

Signed-off-by: Jing Zhang <jingzhangos@google.com>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
---
 tools/testing/selftests/kvm/Makefile          |   1 +
 .../selftests/kvm/aarch64/set_id_regs.c       | 479 ++++++++++++++++++
 2 files changed, 480 insertions(+)
 create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c

Comments

Cornelia Huck Oct. 16, 2023, 3:30 p.m. UTC | #1
On Wed, Oct 11 2023, Oliver Upton <oliver.upton@linux.dev> wrote:

> From: Jing Zhang <jingzhangos@google.com>
>
> Add tests to verify setting ID registers from userspace is handled
> correctly by KVM. Also add a test case to use ioctl
> KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks.
>
> Signed-off-by: Jing Zhang <jingzhangos@google.com>
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../selftests/kvm/aarch64/set_id_regs.c       | 479 ++++++++++++++++++
>  2 files changed, 480 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c

(...)

> +static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
> +{
> +	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
> +	struct reg_mask_range range = {
> +		.addr = (__u64)masks,
> +	};
> +	int ret;
> +
> +	/* KVM should return error when reserved field is not zero */
> +	range.reserved[0] = 1;
> +	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
> +	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");

I think the code should first check for
KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES -- newer kselftests are supposed
to be able to run on older kernels, and we should just skip all of this
if the API isn't there.
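
Something along these lines at the start of main() should be enough (sketch
only, mirroring how other selftests gate on optional capabilities):

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));

so the test is reported as skipped instead of failing on kernels without the
new ioctl.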

> +
> +	/* Get writable masks for feature ID registers */
> +	memset(range.reserved, 0, sizeof(range.reserved));
> +	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
Oliver Upton Oct. 17, 2023, 8:03 a.m. UTC | #2
On Mon, Oct 16, 2023 at 05:30:06PM +0200, Cornelia Huck wrote:
> On Wed, Oct 11 2023, Oliver Upton <oliver.upton@linux.dev> wrote:
> 
> > From: Jing Zhang <jingzhangos@google.com>
> >
> > Add tests to verify setting ID registers from userspace is handled
> > correctly by KVM. Also add a test case to use ioctl
> > KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks.
> >
> > Signed-off-by: Jing Zhang <jingzhangos@google.com>
> > Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> > ---
> >  tools/testing/selftests/kvm/Makefile          |   1 +
> >  .../selftests/kvm/aarch64/set_id_regs.c       | 479 ++++++++++++++++++
> >  2 files changed, 480 insertions(+)
> >  create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c
> 
> (...)
> 
> > +static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
> > +{
> > +	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
> > +	struct reg_mask_range range = {
> > +		.addr = (__u64)masks,
> > +	};
> > +	int ret;
> > +
> > +	/* KVM should return error when reserved field is not zero */
> > +	range.reserved[0] = 1;
> > +	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
> > +	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
> 
> I think the code should first check for
> KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES -- newer kselftests are supposed
> to be able to run on older kernels, and we should just skip all of this
> if the API isn't there.

Ah, thanks! I'll apply the following on top:

diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
index 5c0718fd1705..bac05210b539 100644
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -452,6 +452,8 @@ int main(void)
 	uint64_t val, el0;
 	int ftr_cnt;
 
+	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
+
 	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 
 	/* Check for AARCH64 only system */
Cornelia Huck Oct. 18, 2023, 12:35 p.m. UTC | #3
On Tue, Oct 17 2023, Oliver Upton <oliver.upton@linux.dev> wrote:

> On Mon, Oct 16, 2023 at 05:30:06PM +0200, Cornelia Huck wrote:
>> On Wed, Oct 11 2023, Oliver Upton <oliver.upton@linux.dev> wrote:
>> 
>> > From: Jing Zhang <jingzhangos@google.com>
>> >
>> > Add tests to verify setting ID registers from userspace is handled
>> > correctly by KVM. Also add a test case to use ioctl
>> > KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks.
>> >
>> > Signed-off-by: Jing Zhang <jingzhangos@google.com>
>> > Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
>> > ---
>> >  tools/testing/selftests/kvm/Makefile          |   1 +
>> >  .../selftests/kvm/aarch64/set_id_regs.c       | 479 ++++++++++++++++++
>> >  2 files changed, 480 insertions(+)
>> >  create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c
>> 
>> (...)
>> 
>> > +static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
>> > +{
>> > +	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
>> > +	struct reg_mask_range range = {
>> > +		.addr = (__u64)masks,
>> > +	};
>> > +	int ret;
>> > +
>> > +	/* KVM should return error when reserved field is not zero */
>> > +	range.reserved[0] = 1;
>> > +	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
>> > +	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
>> 
>> I think the code should first check for
>> KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES -- newer kselftests are supposed
>> to be able to run on older kernels, and we should just skip all of this
>> if the API isn't there.
>
> Ah, thanks! I'll apply the following on top:
>
> diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> index 5c0718fd1705..bac05210b539 100644
> --- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> +++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> @@ -452,6 +452,8 @@ int main(void)
>  	uint64_t val, el0;
>  	int ftr_cnt;
>  
> +	TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
> +
>  	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
>  
>  	/* Check for AARCH64 only system */

Thanks, LGTM.
Eric Auger Oct. 19, 2023, 8:38 a.m. UTC | #4
Hi,

On 10/11/23 21:57, Oliver Upton wrote:
> From: Jing Zhang <jingzhangos@google.com>

nit: typo in the title
> 
> Add tests to verify setting ID registers from userspace is handled
> correctly by KVM. Also add a test case to use ioctl
> KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks.
> 
> Signed-off-by: Jing Zhang <jingzhangos@google.com>
> Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
> ---
>  tools/testing/selftests/kvm/Makefile          |   1 +
>  .../selftests/kvm/aarch64/set_id_regs.c       | 479 ++++++++++++++++++
>  2 files changed, 480 insertions(+)
>  create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c
> 
> diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
> index 07b3f4dc1a77..4f4f6ad025f4 100644
> --- a/tools/testing/selftests/kvm/Makefile
> +++ b/tools/testing/selftests/kvm/Makefile
> @@ -156,6 +156,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
>  TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
>  TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
>  TEST_GEN_PROGS_aarch64 += aarch64/psci_test
> +TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
>  TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
>  TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
>  TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
> diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> new file mode 100644
> index 000000000000..5c0718fd1705
> --- /dev/null
> +++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
> @@ -0,0 +1,479 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * set_id_regs - Test for setting ID register from usersapce.
> + *
> + * Copyright (c) 2023 Google LLC.
> + *
> + *
> + * Test that KVM supports setting ID registers from userspace and handles the
> + * feature set correctly.
> + */
> +
> +#include <stdint.h>
> +#include "kvm_util.h"
> +#include "processor.h"
> +#include "test_util.h"
> +#include <linux/bitfield.h>
> +
> +enum ftr_type {
> +	FTR_EXACT,			/* Use a predefined safe value */
In practice FTR_EXACT is not used anywhere in this patch. Is it worth keeping?
Same question for the associated logic in get_safe_value()/get_invalid_value().
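
(If it were kept, the only plausible user I can think of is an entry for a
field where a single predefined value is the only safe one, something like
the hypothetical line below -- field and safe value purely illustrative:

	S_REG_FTR_BITS(FTR_EXACT, ID_AA64DFR0_EL1, DoubleLock, 0),

Otherwise the dead handling could simply be dropped.)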
> +	FTR_LOWER_SAFE,			/* Smaller value is safe */
> +	FTR_HIGHER_SAFE,		/* Bigger value is safe */
> +	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
> +	FTR_END,			/* Mark the last ftr bits */
> +};
> +
> +#define FTR_SIGNED	true	/* Value should be treated as signed */
> +#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
> +
> +struct reg_ftr_bits {
> +	char *name;
> +	bool sign;
> +	enum ftr_type type;
> +	uint8_t shift;
> +	uint64_t mask;
> +	int64_t safe_val;
> +};
> +
> +struct test_feature_reg {
> +	uint32_t reg;
> +	const struct reg_ftr_bits *ftr_bits;
> +};
> +
> +#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)	\
> +	{								\
> +		.name = #NAME,						\
> +		.sign = SIGNED,						\
> +		.type = TYPE,						\
> +		.shift = SHIFT,						\
> +		.mask = MASK,						\
> +		.safe_val = SAFE_VAL,					\
> +	}
> +
> +#define REG_FTR_BITS(type, reg, field, safe_val) \
> +	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
> +		       reg##_##field##_MASK, safe_val)
> +
> +#define S_REG_FTR_BITS(type, reg, field, safe_val) \
> +	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
> +		       reg##_##field##_MASK, safe_val)
> +
> +#define REG_FTR_END					\
> +	{						\
> +		.type = FTR_END,			\
> +	}
> +
> +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
Strictly speaking it is not always safe to have a lower value. For
instance: from Armv8.1, if FEAT_PMUv3 is implemented, the value 0b0001
is not permitted. But I guess this consistency is to be taken into
account by userspace. It may be worth a comment, here and below.

You may at least clarify what 'safe' means.
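
Even a short comment above the enum would help, something like (wording is
only a suggestion):

	/*
	 * A 'safe' value is one KVM is expected to accept, i.e. it never
	 * advertises more capability than the host actually provides.
	 */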
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
> +	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
> +	REG_FTR_END,
> +};
> +
> +static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
> +	REG_FTR_END,
> +};
> +
> +#define TEST_REG(id, table)			\
> +	{					\
> +		.reg = id,			\
> +		.ftr_bits = &((table)[0]),	\
> +	}
> +
> +static struct test_feature_reg test_regs[] = {
> +	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
> +	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
> +	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
> +	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
> +	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
> +	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
> +	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
> +	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
> +	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
> +	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
> +};
> +
> +#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
> +
> +static void guest_code(void)
> +{
> +	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
> +	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
> +
> +	GUEST_DONE();
> +}
> +
> +/* Return a safe value to a given ftr_bits an ftr value */
and ftr value
> +uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
> +{
> +	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
> +
> +	if (ftr_bits->type == FTR_UNSIGNED) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = ftr_bits->safe_val;
> +			break;
> +		case FTR_LOWER_SAFE:
> +			if (ftr > 0)
> +				ftr--;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			if (ftr < ftr_max)
> +				ftr++;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr == ftr_max)
> +				ftr = 0;
> +			else if (ftr != 0)
> +				ftr++;
> +			break;
> +		default:
> +			break;
> +		}
> +	} else if (ftr != ftr_max) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = ftr_bits->safe_val;
> +			break;
> +		case FTR_LOWER_SAFE:
> +			if (ftr > 0)
> +				ftr--;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			if (ftr < ftr_max - 1)
> +				ftr++;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr != 0 && ftr != ftr_max - 1)
> +				ftr++;
> +			break;
> +		default:
> +			break;
> +		}
> +	}
> +
> +	return ftr;
in some cases we are going to return the same value as ftr and the test
won't do much. Shouldn't we return an error value in that case and skip
the test instead?
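
Something like the below in test_reg_set_success() perhaps (rough sketch;
orig_ftr and the boolean return are hypothetical, the exact interface is up
for grabs):

	ftr = get_safe_value(ftr_bits, orig_ftr);
	if (ftr == orig_ftr)
		return false;	/* nothing new to write, let the caller skip */

with the caller doing ksft_test_result_skip() instead of _pass() for that
field.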
> +}
> +
> +/* Return an invalid value to a given ftr_bits an ftr value */
> +uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
> +{
> +	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
> +
> +	if (ftr_bits->type == FTR_UNSIGNED) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
> +			break;
> +		case FTR_LOWER_SAFE:
> +			ftr++;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			ftr--;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr == 0)
> +				ftr = ftr_max;
isn't it invalid as ftr_max >= ftr?
> +			else
> +				ftr--;
> +			break;
> +		default:
> +			break;
> +		}
> +	} else if (ftr != ftr_max) {
> +		switch (ftr_bits->type) {
> +		case FTR_EXACT:
> +			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
> +			break;
> +		case FTR_LOWER_SAFE:
> +			ftr++;
> +			break;
> +		case FTR_HIGHER_SAFE:
> +			ftr--;
> +			break;
> +		case FTR_HIGHER_OR_ZERO_SAFE:
> +			if (ftr == 0)
> +				ftr = ftr_max - 1;
> +			else
> +				ftr--;
> +			break;
> +		default:
> +			break;
> +		}
> +	} else {
> +		ftr = 0;
> +	}
> +
> +	return ftr;
> +}
> +
> +static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
> +				 const struct reg_ftr_bits *ftr_bits)
> +{
> +	uint8_t shift = ftr_bits->shift;
> +	uint64_t mask = ftr_bits->mask;
> +	uint64_t val, new_val, ftr;
> +
> +	vcpu_get_reg(vcpu, reg, &val);
> +	ftr = (val & mask) >> shift;
> +
> +	ftr = get_safe_value(ftr_bits, ftr);
> +
> +	ftr <<= shift;
> +	val &= ~mask;
> +	val |= ftr;
> +
> +	vcpu_set_reg(vcpu, reg, val);
> +	vcpu_get_reg(vcpu, reg, &new_val);
> +	TEST_ASSERT_EQ(new_val, val);
> +}
> +
> +static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
> +			      const struct reg_ftr_bits *ftr_bits)
> +{
> +	uint8_t shift = ftr_bits->shift;
> +	uint64_t mask = ftr_bits->mask;
> +	uint64_t val, old_val, ftr;
> +	int r;
> +
> +	vcpu_get_reg(vcpu, reg, &val);
> +	ftr = (val & mask) >> shift;
> +
> +	ftr = get_invalid_value(ftr_bits, ftr);
> +
> +	old_val = val;
> +	ftr <<= shift;
> +	val &= ~mask;
> +	val |= ftr;
> +
> +	r = __vcpu_set_reg(vcpu, reg, val);
> +	TEST_ASSERT(r < 0 && errno == EINVAL,
> +		    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
> +
> +	vcpu_get_reg(vcpu, reg, &val);
> +	TEST_ASSERT_EQ(val, old_val);
> +}
> +
> +static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
> +{
> +	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
> +	struct reg_mask_range range = {
> +		.addr = (__u64)masks,
> +	};
> +	int ret;
> +
> +	/* KVM should return error when reserved field is not zero */
> +	range.reserved[0] = 1;
> +	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
> +	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
> +
> +	/* Get writable masks for feature ID registers */
> +	memset(range.reserved, 0, sizeof(range.reserved));
> +	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
> +
> +	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
> +		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
> +		uint32_t reg_id = test_regs[i].reg;
> +		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
> +		int idx;
> +
> +		/* Get the index to masks array for the idreg */
> +		idx = KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(reg_id), sys_reg_Op1(reg_id),
> +						   sys_reg_CRn(reg_id), sys_reg_CRm(reg_id),
> +						   sys_reg_Op2(reg_id));
> +
> +		for (int j = 0;  ftr_bits[j].type != FTR_END; j++) {
> +			/* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
> +			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
> +				ksft_test_result_skip("%s on AARCH64 only system\n",
> +						      ftr_bits[j].name);
> +				continue;
> +			}
> +
> +			/* Make sure the feature field is writable */
> +			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
> +
> +			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
> +			test_reg_set_success(vcpu, reg, &ftr_bits[j]);
> +
> +			ksft_test_result_pass("%s\n", ftr_bits[j].name);
> +		}
> +	}
> +}
> +
> +static void test_guest_reg_read(struct kvm_vcpu *vcpu)
> +{
> +	bool done = false;
> +	struct ucall uc;
> +	uint64_t val;
> +
> +	while (!done) {
> +		vcpu_run(vcpu);
> +
> +		switch (get_ucall(vcpu, &uc)) {
> +		case UCALL_ABORT:
> +			REPORT_GUEST_ASSERT(uc);
> +			break;
> +		case UCALL_SYNC:
> +			/* Make sure the written values are seen by guest */
> +			vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(uc.args[2]), &val);
> +			TEST_ASSERT_EQ(val, uc.args[3]);
> +			break;
> +		case UCALL_DONE:
> +			done = true;
> +			break;
> +		default:
> +			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
> +		}
> +	}
> +}
> +
> +int main(void)
> +{
> +	struct kvm_vcpu *vcpu;
> +	struct kvm_vm *vm;
> +	bool aarch64_only;
> +	uint64_t val, el0;
> +	int ftr_cnt;
> +
> +	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
> +
> +	/* Check for AARCH64 only system */
> +	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
> +	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
> +	aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
> +
> +	ksft_print_header();
> +
> +	ftr_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
> +		  ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
> +		  ARRAY_SIZE(test_regs);
> +
> +	ksft_set_plan(ftr_cnt);
> +
> +	test_user_set_reg(vcpu, aarch64_only);
> +	test_guest_reg_read(vcpu);
> +
> +	kvm_vm_free(vm);
> +
> +	ksft_finished();
> +}

Thanks

Eric
Zenghui Yu Jan. 5, 2024, 9:07 a.m. UTC | #5
On 2023/10/19 16:38, Eric Auger wrote:

>> +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
>> +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
 >
> Strictly speaking this is not always safe to have a lower value. For
> instance: From Armv8.1, if FEAT_PMUv3 is implemented, the value 0b0001
> is not permitted. But I guess this consistency is to be taken into
> account by the user space. But may be wort a comment. Here and below
> 
> You may at least clarify what does mean 'safe'
 >
>> +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),

I've seen the following failure on Cortex A72 where
ID_AA64DFR0_EL1.DebugVer is 6.

# ./aarch64/set_id_regs
TAP version 13
1..79
ok 1 ID_AA64DFR0_EL1_PMUVer
==== Test Assertion Failure ====
   include/kvm_util_base.h:553: !ret
   pid=2288505 tid=2288505 errno=22 - Invalid argument
      1	0x0000000000402787: vcpu_set_reg at kvm_util_base.h:553 (discriminator 6)
      2	 (inlined by) test_reg_set_success at set_id_regs.c:342 (discriminator 6)
      3	 (inlined by) test_user_set_reg at set_id_regs.c:413 (discriminator 6)
      4	0x0000000000401943: main at set_id_regs.c:475
      5	0x0000ffffbdd5d03b: ?? ??:0
      6	0x0000ffffbdd5d113: ?? ??:0
      7	0x0000000000401a2f: _start at ??:?
   KVM_SET_ONE_REG failed, rc: -1 errno: 22 (Invalid argument)
Oliver Upton Jan. 8, 2024, 10:40 p.m. UTC | #6
Hi Zenghui,

On Fri, Jan 05, 2024 at 05:07:08PM +0800, Zenghui Yu wrote:
> On 2023/10/19 16:38, Eric Auger wrote:
> 
> > > +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
> > > +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
> >
> > Strictly speaking this is not always safe to have a lower value. For
> > instance: From Armv8.1, if FEAT_PMUv3 is implemented, the value 0b0001
> > is not permitted. But I guess this consistency is to be taken into
> > account by the user space. But may be wort a comment. Here and below
> > 
> > You may at least clarify what does mean 'safe'
> >
> > > +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
> 
> I've seen the following failure on Cortex A72 where
> ID_AA64DFR0_EL1.DebugVer is 6.

Ah, yes, the test is wrong. KVM enforces a minimum value of 0x6 on this
field, yet get_safe_value() returns 0x5 for the field.

Jing, do you have time to check this test for similar failures and send
out a fix for Zenghui's observations?

> # ./aarch64/set_id_regs
> TAP version 13
> 1..79
> ok 1 ID_AA64DFR0_EL1_PMUVer
> ==== Test Assertion Failure ====
>   include/kvm_util_base.h:553: !ret
>   pid=2288505 tid=2288505 errno=22 - Invalid argument
>      1	0x0000000000402787: vcpu_set_reg at kvm_util_base.h:553 (discriminator 6)
>      2	 (inlined by) test_reg_set_success at set_id_regs.c:342 (discriminator 6)
>      3	 (inlined by) test_user_set_reg at set_id_regs.c:413 (discriminator 6)
>      4	0x0000000000401943: main at set_id_regs.c:475
>      5	0x0000ffffbdd5d03b: ?? ??:0
>      6	0x0000ffffbdd5d113: ?? ??:0
>      7	0x0000000000401a2f: _start at ??:?
>   KVM_SET_ONE_REG failed, rc: -1 errno: 22 (Invalid argument)
Jing Zhang Jan. 9, 2024, 1:31 a.m. UTC | #7
Hi Zenghui,

I don't have a Cortex A72 to fully verify the fix. Could you help
verify the following change?

diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
index bac05210b539..f17454dc6d9e 100644
--- a/tools/testing/selftests/kvm/aarch64/set_id_regs.c
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -224,13 +224,20 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 {
        uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

-       if (ftr_bits->type == FTR_UNSIGNED) {
+       if (ftr_bits->sign == FTR_UNSIGNED) {
                switch (ftr_bits->type) {
                case FTR_EXACT:
                        ftr = ftr_bits->safe_val;
                        break;
                case FTR_LOWER_SAFE:
-                       if (ftr > 0)
+                       uint64_t min_safe = 0;
+
+                       if (!strcmp(ftr_bits->name, "ID_AA64DFR0_EL1_DebugVer"))
+                               min_safe = ID_AA64DFR0_EL1_DebugVer_IMP;
+                       else if (!strcmp(ftr_bits->name, "ID_DFR0_EL1_CopDbg"))
+                               min_safe = ID_DFR0_EL1_CopDbg_Armv8;
+
+                       if (ftr > min_safe)
                                ftr--;
                        break;
                case FTR_HIGHER_SAFE:
@@ -252,7 +259,12 @@ uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
                        ftr = ftr_bits->safe_val;
                        break;
                case FTR_LOWER_SAFE:
-                       if (ftr > 0)
+                       uint64_t min_safe = 0;
+
+                       if (!strcmp(ftr_bits->name, "ID_DFR0_EL1_PerfMon"))
+                               min_safe = ID_DFR0_EL1_PerfMon_PMUv3;
+
+                       if (ftr > min_safe)
                                ftr--;
                        break;
                case FTR_HIGHER_SAFE:
@@ -276,7 +288,7 @@ uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 {
        uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);

-       if (ftr_bits->type == FTR_UNSIGNED) {
+       if (ftr_bits->sign == FTR_UNSIGNED) {
                switch (ftr_bits->type) {
                case FTR_EXACT:
                        ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);

On Mon, Jan 8, 2024 at 2:40 PM Oliver Upton <oliver.upton@linux.dev> wrote:
>
> Hi Zenghui,
>
> On Fri, Jan 05, 2024 at 05:07:08PM +0800, Zenghui Yu wrote:
> > On 2023/10/19 16:38, Eric Auger wrote:
> >
> > > > +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
> > > > + S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
> > >
> > > Strictly speaking this is not always safe to have a lower value. For
> > > instance: From Armv8.1, if FEAT_PMUv3 is implemented, the value 0b0001
> > > is not permitted. But I guess this consistency is to be taken into
> > > account by the user space. But may be wort a comment. Here and below
> > >
> > > You may at least clarify what does mean 'safe'
> > >
> > > > + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
> >
> > I've seen the following failure on Cortex A72 where
> > ID_AA64DFR0_EL1.DebugVer is 6.
>
> Ah, yes, the test is wrong. KVM enforces a minimum value of 0x6 on this
> field, yet get_safe_value() returns 0x5 for the field.
>
> Jing, do you have time to check this test for similar failures and send
> out a fix for Zenghui's observations?
>
> > # ./aarch64/set_id_regs
> > TAP version 13
> > 1..79
> > ok 1 ID_AA64DFR0_EL1_PMUVer
> > ==== Test Assertion Failure ====
> >   include/kvm_util_base.h:553: !ret
> >   pid=2288505 tid=2288505 errno=22 - Invalid argument
> >      1	0x0000000000402787: vcpu_set_reg at kvm_util_base.h:553 (discriminator 6)
> >      2	 (inlined by) test_reg_set_success at set_id_regs.c:342 (discriminator 6)
> >      3	 (inlined by) test_user_set_reg at set_id_regs.c:413 (discriminator 6)
> >      4        0x0000000000401943: main at set_id_regs.c:475
> >      5        0x0000ffffbdd5d03b: ?? ??:0
> >      6        0x0000ffffbdd5d113: ?? ??:0
> >      7        0x0000000000401a2f: _start at ??:?
> >   KVM_SET_ONE_REG failed, rc: -1 errno: 22 (Invalid argument)
>
> --
> Thanks,
> Oliver

Thanks,
Jing
Itaru Kitayama Jan. 9, 2024, 7:50 a.m. UTC | #8
On Mon, Jan 08, 2024 at 10:40:11PM +0000, Oliver Upton wrote:
> Hi Zenghui,
> 
> On Fri, Jan 05, 2024 at 05:07:08PM +0800, Zenghui Yu wrote:
> > On 2023/10/19 16:38, Eric Auger wrote:
> > 
> > > > +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
> > > > +	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
> > >
> > > Strictly speaking this is not always safe to have a lower value. For
> > > instance: From Armv8.1, if FEAT_PMUv3 is implemented, the value 0b0001
> > > is not permitted. But I guess this consistency is to be taken into
> > > account by the user space. But may be wort a comment. Here and below
> > > 
> > > You may at least clarify what does mean 'safe'
> > >
> > > > +	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
> > 
> > I've seen the following failure on Cortex A72 where
> > ID_AA64DFR0_EL1.DebugVer is 6.
> 
> Ah, yes, the test is wrong. KVM enforces a minimum value of 0x6 on this
> field, yet get_safe_value() returns 0x5 for the field.

This is observed with the RevC AEM FVP as well.

Thanks,
Itaru.

> 
> Jing, do you have time to check this test for similar failures and send
> out a fix for Zenghui's observations?
> 
> > # ./aarch64/set_id_regs
> > TAP version 13
> > 1..79
> > ok 1 ID_AA64DFR0_EL1_PMUVer
> > ==== Test Assertion Failure ====
> >   include/kvm_util_base.h:553: !ret
> >   pid=2288505 tid=2288505 errno=22 - Invalid argument
> >      1	0x0000000000402787: vcpu_set_reg at kvm_util_base.h:553 (discriminator 6)
> >      2	 (inlined by) test_reg_set_success at set_id_regs.c:342 (discriminator 6)
> >      3	 (inlined by) test_user_set_reg at set_id_regs.c:413 (discriminator 6)
> >      4	0x0000000000401943: main at set_id_regs.c:475
> >      5	0x0000ffffbdd5d03b: ?? ??:0
> >      6	0x0000ffffbdd5d113: ?? ??:0
> >      7	0x0000000000401a2f: _start at ??:?
> >   KVM_SET_ONE_REG failed, rc: -1 errno: 22 (Invalid argument)
> 
> -- 
> Thanks,
> Oliver
Zenghui Yu Jan. 9, 2024, 3:36 p.m. UTC | #9
On 2024/1/9 09:31, Jing Zhang wrote:
> Hi Zenghui,
> 
> I don't have a Cortex A72 to fully verify the fix. Could you help
> verify the following change?

It works for me (after fixing a compilation error locally ;-) ), thanks!
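
(For reference, the declaration placed directly after the `case` label does
not build with pre-C23 compilers; wrapping the new case body in braces is
enough, e.g.:

	case FTR_LOWER_SAFE: {
		uint64_t min_safe = 0;

		if (!strcmp(ftr_bits->name, "ID_AA64DFR0_EL1_DebugVer"))
			min_safe = ID_AA64DFR0_EL1_DebugVer_IMP;

		if (ftr > min_safe)
			ftr--;
		break;
	}
)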

Zenghui
Jing Zhang Jan. 9, 2024, 4:23 p.m. UTC | #10
Thanks Zenghui.

On Tue, Jan 9, 2024 at 7:37 AM Zenghui Yu <zenghui.yu@linux.dev> wrote:
>
> On 2024/1/9 09:31, Jing Zhang wrote:
> > Hi Zenghui,
> >
> > I don't have a Cortex A72 to fully verify the fix. Could you help
> > verify the following change?
>
> It works for me (after fixing a compilation error locally ;-) ), thanks!
>
> Zenghui

Jing

Patch

diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 07b3f4dc1a77..4f4f6ad025f4 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -156,6 +156,7 @@  TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions
 TEST_GEN_PROGS_aarch64 += aarch64/hypercalls
 TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test
 TEST_GEN_PROGS_aarch64 += aarch64/psci_test
+TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs
 TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter
 TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config
 TEST_GEN_PROGS_aarch64 += aarch64/vgic_init
diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
new file mode 100644
index 000000000000..5c0718fd1705
--- /dev/null
+++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c
@@ -0,0 +1,479 @@ 
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * set_id_regs - Test for setting ID register from userspace.
+ *
+ * Copyright (c) 2023 Google LLC.
+ *
+ *
+ * Test that KVM supports setting ID registers from userspace and handles the
+ * feature set correctly.
+ */
+
+#include <stdint.h>
+#include "kvm_util.h"
+#include "processor.h"
+#include "test_util.h"
+#include <linux/bitfield.h>
+
+enum ftr_type {
+	FTR_EXACT,			/* Use a predefined safe value */
+	FTR_LOWER_SAFE,			/* Smaller value is safe */
+	FTR_HIGHER_SAFE,		/* Bigger value is safe */
+	FTR_HIGHER_OR_ZERO_SAFE,	/* Bigger value is safe, but 0 is biggest */
+	FTR_END,			/* Mark the last ftr bits */
+};
+
+#define FTR_SIGNED	true	/* Value should be treated as signed */
+#define FTR_UNSIGNED	false	/* Value should be treated as unsigned */
+
+struct reg_ftr_bits {
+	char *name;
+	bool sign;
+	enum ftr_type type;
+	uint8_t shift;
+	uint64_t mask;
+	int64_t safe_val;
+};
+
+struct test_feature_reg {
+	uint32_t reg;
+	const struct reg_ftr_bits *ftr_bits;
+};
+
+#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)	\
+	{								\
+		.name = #NAME,						\
+		.sign = SIGNED,						\
+		.type = TYPE,						\
+		.shift = SHIFT,						\
+		.mask = MASK,						\
+		.safe_val = SAFE_VAL,					\
+	}
+
+#define REG_FTR_BITS(type, reg, field, safe_val) \
+	__REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
+		       reg##_##field##_MASK, safe_val)
+
+#define S_REG_FTR_BITS(type, reg, field, safe_val) \
+	__REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
+		       reg##_##field##_MASK, safe_val)
+
+#define REG_FTR_END					\
+	{						\
+		.type = FTR_END,			\
+	}
+
+static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
+	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
+	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
+	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
+	S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
+	REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
+	REG_FTR_END,
+};
+
+static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
+	REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
+	REG_FTR_END,
+};
+
+#define TEST_REG(id, table)			\
+	{					\
+		.reg = id,			\
+		.ftr_bits = &((table)[0]),	\
+	}
+
+static struct test_feature_reg test_regs[] = {
+	TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
+	TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
+	TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
+	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
+	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
+	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
+	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
+	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
+	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
+	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
+};
+
+#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
+
+static void guest_code(void)
+{
+	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
+
+	GUEST_DONE();
+}
+
+/* Return a safe value for a given ftr_bits and ftr value */
+uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+{
+	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+
+	if (ftr_bits->type == FTR_UNSIGNED) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = ftr_bits->safe_val;
+			break;
+		case FTR_LOWER_SAFE:
+			if (ftr > 0)
+				ftr--;
+			break;
+		case FTR_HIGHER_SAFE:
+			if (ftr < ftr_max)
+				ftr++;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr == ftr_max)
+				ftr = 0;
+			else if (ftr != 0)
+				ftr++;
+			break;
+		default:
+			break;
+		}
+	} else if (ftr != ftr_max) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = ftr_bits->safe_val;
+			break;
+		case FTR_LOWER_SAFE:
+			if (ftr > 0)
+				ftr--;
+			break;
+		case FTR_HIGHER_SAFE:
+			if (ftr < ftr_max - 1)
+				ftr++;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr != 0 && ftr != ftr_max - 1)
+				ftr++;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return ftr;
+}
+
+/* Return an invalid value for a given ftr_bits and ftr value */
+uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+{
+	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+
+	if (ftr_bits->type == FTR_UNSIGNED) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+			break;
+		case FTR_LOWER_SAFE:
+			ftr++;
+			break;
+		case FTR_HIGHER_SAFE:
+			ftr--;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr == 0)
+				ftr = ftr_max;
+			else
+				ftr--;
+			break;
+		default:
+			break;
+		}
+	} else if (ftr != ftr_max) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+			break;
+		case FTR_LOWER_SAFE:
+			ftr++;
+			break;
+		case FTR_HIGHER_SAFE:
+			ftr--;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr == 0)
+				ftr = ftr_max - 1;
+			else
+				ftr--;
+			break;
+		default:
+			break;
+		}
+	} else {
+		ftr = 0;
+	}
+
+	return ftr;
+}
+
+static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
+				 const struct reg_ftr_bits *ftr_bits)
+{
+	uint8_t shift = ftr_bits->shift;
+	uint64_t mask = ftr_bits->mask;
+	uint64_t val, new_val, ftr;
+
+	vcpu_get_reg(vcpu, reg, &val);
+	ftr = (val & mask) >> shift;
+
+	ftr = get_safe_value(ftr_bits, ftr);
+
+	ftr <<= shift;
+	val &= ~mask;
+	val |= ftr;
+
+	vcpu_set_reg(vcpu, reg, val);
+	vcpu_get_reg(vcpu, reg, &new_val);
+	TEST_ASSERT_EQ(new_val, val);
+}
+
+static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
+			      const struct reg_ftr_bits *ftr_bits)
+{
+	uint8_t shift = ftr_bits->shift;
+	uint64_t mask = ftr_bits->mask;
+	uint64_t val, old_val, ftr;
+	int r;
+
+	vcpu_get_reg(vcpu, reg, &val);
+	ftr = (val & mask) >> shift;
+
+	ftr = get_invalid_value(ftr_bits, ftr);
+
+	old_val = val;
+	ftr <<= shift;
+	val &= ~mask;
+	val |= ftr;
+
+	r = __vcpu_set_reg(vcpu, reg, val);
+	TEST_ASSERT(r < 0 && errno == EINVAL,
+		    "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
+
+	vcpu_get_reg(vcpu, reg, &val);
+	TEST_ASSERT_EQ(val, old_val);
+}
+
+static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only)
+{
+	uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
+	struct reg_mask_range range = {
+		.addr = (__u64)masks,
+	};
+	int ret;
+
+	/* KVM should return error when reserved field is not zero */
+	range.reserved[0] = 1;
+	ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
+	TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
+
+	/* Get writable masks for feature ID registers */
+	memset(range.reserved, 0, sizeof(range.reserved));
+	vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
+
+	for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
+		const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
+		uint32_t reg_id = test_regs[i].reg;
+		uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
+		int idx;
+
+		/* Get the index to masks array for the idreg */
+		idx = KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(reg_id), sys_reg_Op1(reg_id),
+						   sys_reg_CRn(reg_id), sys_reg_CRm(reg_id),
+						   sys_reg_Op2(reg_id));
+
+		for (int j = 0;  ftr_bits[j].type != FTR_END; j++) {
+			/* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
+			if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
+				ksft_test_result_skip("%s on AARCH64 only system\n",
+						      ftr_bits[j].name);
+				continue;
+			}
+
+			/* Make sure the feature field is writable */
+			TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
+
+			test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
+			test_reg_set_success(vcpu, reg, &ftr_bits[j]);
+
+			ksft_test_result_pass("%s\n", ftr_bits[j].name);
+		}
+	}
+}
+
+static void test_guest_reg_read(struct kvm_vcpu *vcpu)
+{
+	bool done = false;
+	struct ucall uc;
+	uint64_t val;
+
+	while (!done) {
+		vcpu_run(vcpu);
+
+		switch (get_ucall(vcpu, &uc)) {
+		case UCALL_ABORT:
+			REPORT_GUEST_ASSERT(uc);
+			break;
+		case UCALL_SYNC:
+			/* Make sure the written values are seen by guest */
+			vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(uc.args[2]), &val);
+			TEST_ASSERT_EQ(val, uc.args[3]);
+			break;
+		case UCALL_DONE:
+			done = true;
+			break;
+		default:
+			TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
+		}
+	}
+}
+
+int main(void)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vm *vm;
+	bool aarch64_only;
+	uint64_t val, el0;
+	int ftr_cnt;
+
+	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+	/* Check for AARCH64 only system */
+	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val);
+	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
+	aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+
+	ksft_print_header();
+
+	ftr_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
+		  ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
+		  ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
+		  ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) +
+		  ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) -
+		  ARRAY_SIZE(test_regs);
+
+	ksft_set_plan(ftr_cnt);
+
+	test_user_set_reg(vcpu, aarch64_only);
+	test_guest_reg_read(vcpu);
+
+	kvm_vm_free(vm);
+
+	ksft_finished();
+}