@@ -837,6 +837,7 @@
#define ID_AA64PFR0_GIC3 0x1
/* id_aa64pfr1 */
+#define ID_AA64PFR1_CSV2FRAC_SHIFT 32
#define ID_AA64PFR1_MPAMFRAC_SHIFT 16
#define ID_AA64PFR1_RASFRAC_SHIFT 12
#define ID_AA64PFR1_MTE_SHIFT 8
@@ -410,6 +410,21 @@ static int validate_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
return 0;
}
+static int validate_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *id_reg, u64 val)
+{
+ bool kvm_mte = kvm_has_mte(vcpu->kvm);
+ unsigned int mte;
+
+ mte = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR1_MTE_SHIFT);
+
+ /* Check if there is a conflict with a request via KVM_ARM_VCPU_INIT. */
+ if (kvm_mte ^ (mte > 0))
+ return -EPERM;
+
+ return 0;
+}
+
static void init_id_aa64pfr0_el1_desc(struct id_reg_desc *id_reg)
{
u64 limit = id_reg->vcpu_limit_val;
@@ -441,12 +456,24 @@ static void init_id_aa64pfr0_el1_desc(struct id_reg_desc *id_reg)
id_reg->vcpu_limit_val = limit;
}
+static void init_id_aa64pfr1_el1_desc(struct id_reg_desc *id_reg)
+{
+ if (!system_supports_mte())
+ id_reg->vcpu_limit_val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+}
+
static u64 vcpu_mask_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu,
const struct id_reg_desc *idr)
{
return vcpu_has_sve(vcpu) ? 0 : ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
}
+static u64 vcpu_mask_id_aa64pfr1_el1(const struct kvm_vcpu *vcpu,
+ const struct id_reg_desc *idr)
+{
+ return kvm_has_mte(vcpu->kvm) ? 0 : (ARM64_FEATURE_MASK(ID_AA64PFR1_MTE));
+}
+
static int validate_id_reg(struct kvm_vcpu *vcpu,
const struct id_reg_desc *id_reg, u64 val)
{
@@ -1423,10 +1450,6 @@ static u64 read_id_reg_with_encoding(const struct kvm_vcpu *vcpu, u32 id)
val = read_kvm_id_reg(vcpu->kvm, id);
switch (id) {
- case SYS_ID_AA64PFR1_EL1:
- if (!kvm_has_mte(vcpu->kvm))
- val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
- break;
case SYS_ID_AA64ISAR1_EL1:
if (!vcpu_has_ptrauth(vcpu))
val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
@@ -3223,6 +3246,16 @@ static struct id_reg_desc id_aa64pfr0_el1_desc = {
}
};
+static struct id_reg_desc id_aa64pfr1_el1_desc = {
+ .reg_desc = ID_SANITISED(ID_AA64PFR1_EL1),
+ .ignore_mask = ARM64_FEATURE_MASK(ID_AA64PFR1_RASFRAC) |
+ ARM64_FEATURE_MASK(ID_AA64PFR1_MPAMFRAC) |
+ ARM64_FEATURE_MASK(ID_AA64PFR1_CSV2FRAC),
+ .init = init_id_aa64pfr1_el1_desc,
+ .validate = validate_id_aa64pfr1_el1,
+ .vcpu_mask = vcpu_mask_id_aa64pfr1_el1,
+};
+
#define ID_DESC(id_reg_name, id_reg_desc) \
[IDREG_IDX(SYS_##id_reg_name)] = (id_reg_desc)
@@ -3230,6 +3263,7 @@ static struct id_reg_desc id_aa64pfr0_el1_desc = {
static struct id_reg_desc *id_reg_desc_table[KVM_ARM_ID_REG_MAX_NUM] = {
/* CRm=4 */
ID_DESC(ID_AA64PFR0_EL1, &id_aa64pfr0_el1_desc),
+ ID_DESC(ID_AA64PFR1_EL1, &id_aa64pfr1_el1_desc),
};
static inline struct id_reg_desc *get_id_reg_desc(u32 id)
Add id_reg_desc for ID_AA64PFR1_EL1 to make it writable by userspace. Return an error if userspace tries to set the MTE field of the register to a value that conflicts with the KVM_CAP_ARM_MTE configuration for the guest. Validation of the fractional feature fields is skipped for now; it will be handled by the following patches. Signed-off-by: Reiji Watanabe <reijiw@google.com> --- arch/arm64/include/asm/sysreg.h | 1 + arch/arm64/kvm/sys_regs.c | 42 +++++++++++++++++++++++++++++---- 2 files changed, 39 insertions(+), 4 deletions(-)