
[RFC,v4,14/26] KVM: arm64: Add consistency checking for frac fields of ID registers

Message ID 20220106042708.2869332-15-reijiw@google.com (mailing list archive)
State New, archived
Series KVM: arm64: Make CPU ID registers writable by userspace

Commit Message

Reiji Watanabe Jan. 6, 2022, 4:26 a.m. UTC
A feature's fractional field in an ID register cannot simply be
validated at KVM_SET_ONE_REG time because its validity depends on the
value of its (main) feature field, which could live in a different ID
register (and might be set later).
Validate fractional fields at the first KVM_RUN instead.

Signed-off-by: Reiji Watanabe <reijiw@google.com>
---
 arch/arm64/include/asm/kvm_host.h |   1 +
 arch/arm64/kvm/arm.c              |   3 +
 arch/arm64/kvm/sys_regs.c         | 116 +++++++++++++++++++++++++++++-
 3 files changed, 117 insertions(+), 3 deletions(-)
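
For readers less familiar with the ordering problem described above, here is a minimal userspace sketch (not part of the patch; the ID_AA64PFR*_EL1_ID macros are local helpers, with encodings taken from the Arm ARM) of how the new check is hit: the fractional field is written before the main feature field it depends on, so KVM can only reject an unsupported combination at the first KVM_RUN, where this patch returns -EPERM.

#include <asm/kvm.h>
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Encodings per the Arm ARM: op0=3, op1=0, CRn=0, CRm=4, op2={0,1}. */
#define ID_AA64PFR0_EL1_ID	ARM64_SYS_REG(3, 0, 0, 4, 0)
#define ID_AA64PFR1_EL1_ID	ARM64_SYS_REG(3, 0, 0, 4, 1)

static int set_one_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = { .id = id, .addr = (uint64_t)&val };

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

static int configure_and_run(int vcpu_fd, uint64_t pfr1, uint64_t pfr0)
{
	/* RASfrac (in ID_AA64PFR1_EL1) is written first... */
	if (set_one_reg(vcpu_fd, ID_AA64PFR1_EL1_ID, pfr1))
		return -1;
	/* ...so the final RAS value (in ID_AA64PFR0_EL1) isn't known yet. */
	if (set_one_reg(vcpu_fd, ID_AA64PFR0_EL1_ID, pfr0))
		return -1;

	/* An unsupported RAS/RASfrac pair is rejected here with -EPERM. */
	return ioctl(vcpu_fd, KVM_RUN, 0);
}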

Comments

Fuad Tabba Jan. 24, 2022, 5 p.m. UTC | #1
Hi Reiji,

On Thu, Jan 6, 2022 at 4:29 AM Reiji Watanabe <reijiw@google.com> wrote:
>
> A feature's fractional field in an ID register cannot simply be
> validated at KVM_SET_ONE_REG time because its validity depends on the
> value of its (main) feature field, which could live in a different ID
> register (and might be set later).
> Validate fractional fields at the first KVM_RUN instead.
>
> Signed-off-by: Reiji Watanabe <reijiw@google.com>
> ---
>  arch/arm64/include/asm/kvm_host.h |   1 +
>  arch/arm64/kvm/arm.c              |   3 +
>  arch/arm64/kvm/sys_regs.c         | 116 +++++++++++++++++++++++++++++-
>  3 files changed, 117 insertions(+), 3 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 4509f9e7472d..7b3f86bd6a6b 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -750,6 +750,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
>
>  void set_default_id_regs(struct kvm *kvm);
>  int kvm_set_id_reg_feature(struct kvm *kvm, u32 id, u8 field_shift, u8 fval);
> +int kvm_id_regs_consistency_check(const struct kvm_vcpu *vcpu);
>
>  /* Guest/host FPSIMD coordination helpers */
>  int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 5f497a0af254..16fc2ce32069 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -596,6 +596,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
>         if (!kvm_arm_vcpu_is_finalized(vcpu))
>                 return -EPERM;
>
> +       if (!kvm_vm_is_protected(kvm) && kvm_id_regs_consistency_check(vcpu))
> +               return -EPERM;
> +
>         vcpu->arch.has_run_once = true;
>
>         kvm_arm_vcpu_init_debug(vcpu);
> diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> index ddbeefc3881c..6adb7b04620c 100644
> --- a/arch/arm64/kvm/sys_regs.c
> +++ b/arch/arm64/kvm/sys_regs.c
> @@ -756,9 +756,6 @@ static struct id_reg_info id_aa64pfr0_el1_info = {
>
>  static struct id_reg_info id_aa64pfr1_el1_info = {
>         .sys_reg = SYS_ID_AA64PFR1_EL1,
> -       .ignore_mask = ARM64_FEATURE_MASK(ID_AA64PFR1_RASFRAC) |
> -                      ARM64_FEATURE_MASK(ID_AA64PFR1_MPAMFRAC) |
> -                      ARM64_FEATURE_MASK(ID_AA64PFR1_CSV2FRAC),
>         .init = init_id_aa64pfr1_el1_info,
>         .validate = validate_id_aa64pfr1_el1,
>         .vcpu_mask = vcpu_mask_id_aa64pfr1_el1,
> @@ -3434,10 +3431,109 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
>         return write_demux_regids(uindices);
>  }
>
> +/* ID register's fractional field information with its feature field. */
> +struct feature_frac {
> +       u32     id;
> +       u32     shift;
> +       u32     frac_id;
> +       u32     frac_shift;
> +       u8      frac_ftr_check;
> +};

frac_ftr_check doesn't seem to be used. Also, it would be easier to
read if the ordering of the fields matched the order in which you
initialize them below.

> +
> +static struct feature_frac feature_frac_table[] = {
> +       {
> +               .frac_id = SYS_ID_AA64PFR1_EL1,
> +               .frac_shift = ID_AA64PFR1_RASFRAC_SHIFT,
> +               .id = SYS_ID_AA64PFR0_EL1,
> +               .shift = ID_AA64PFR0_RAS_SHIFT,
> +       },
> +       {
> +               .frac_id = SYS_ID_AA64PFR1_EL1,
> +               .frac_shift = ID_AA64PFR1_MPAMFRAC_SHIFT,
> +               .id = SYS_ID_AA64PFR0_EL1,
> +               .shift = ID_AA64PFR0_MPAM_SHIFT,
> +       },
> +       {
> +               .frac_id = SYS_ID_AA64PFR1_EL1,
> +               .frac_shift = ID_AA64PFR1_CSV2FRAC_SHIFT,
> +               .id = SYS_ID_AA64PFR0_EL1,
> +               .shift = ID_AA64PFR0_CSV2_SHIFT,
> +       },
> +};
> +
> +/*
> + * Return non-zero if the feature/fractional field pair is not
> + * supported. Return zero otherwise.
> + * This function validates only the fractional feature field,
> + * and relies on the (main) feature field having already been
> + * validated through arm64_check_features.
> + */
> +static int vcpu_id_reg_feature_frac_check(const struct kvm_vcpu *vcpu,
> +                                         const struct feature_frac *ftr_frac)
> +{
> +       const struct id_reg_info *id_reg;
> +       u32 id;
> +       u64 val, lim, mask;
> +
> +       /* Check if the feature field value is the same as the limit */
> +       id = ftr_frac->id;
> +       id_reg = GET_ID_REG_INFO(id);
> +
> +       mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->shift;
> +       val = __read_id_reg(vcpu, id) & mask;
> +       lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
> +       lim &= mask;
> +
> +       if (val != lim)
> +               /*
> +                * The feature level is lower than the limit.
> +                * Any fractional version should be fine.
> +                */
> +               return 0;
> +
> +       /* Check the fractional feature field */
> +       id = ftr_frac->frac_id;
> +       id_reg = GET_ID_REG_INFO(id);
> +
> +       mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->frac_shift;
> +       val = __read_id_reg(vcpu, id) & mask;
> +       lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
> +       lim &= mask;
> +
> +       if (val == lim)
> +               /*
> +                * Both the feature and fractional fields are the same
> +                * as the limit.
> +                */
> +               return 0;
> +
> +       return arm64_check_features(id, val, lim);
> +}
> +
> +int kvm_id_regs_consistency_check(const struct kvm_vcpu *vcpu)

Nit: considering that this is only checking the fractional fields,
should the function name reflect that?

> +{
> +       int i, err;
> +       const struct feature_frac *frac;
> +
> +       /*
> +        * Check ID registers' fractional fields, which aren't checked
> +        * at KVM_SET_ONE_REG.
> +        */
> +       for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
> +               frac = &feature_frac_table[i];
> +               err = vcpu_id_reg_feature_frac_check(vcpu, frac);
> +               if (err)
> +                       return err;
> +       }
> +       return 0;
> +}
> +
>  static void id_reg_info_init_all(void)
>  {
>         int i;
>         struct id_reg_info *id_reg;
> +       struct feature_frac *frac;
> +       u64 ftr_mask = ARM64_FEATURE_FIELD_MASK;
>
>         for (i = 0; i < ARRAY_SIZE(id_reg_info_table); i++) {
>                 id_reg = (struct id_reg_info *)id_reg_info_table[i];
> @@ -3446,6 +3542,20 @@ static void id_reg_info_init_all(void)
>
>                 id_reg_info_init(id_reg);
>         }
> +
> +       /*
> +        * Update ignore_mask of ID registers based on fractional fields
> +        * information.  Any ID register that has fractional fields
> +        * is expected to have its own id_reg_info.
> +        */
> +       for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
> +               frac = &feature_frac_table[i];
> +               id_reg = GET_ID_REG_INFO(frac->frac_id);
> +               if (WARN_ON_ONCE(!id_reg))
> +                       continue;
> +
> +               id_reg->ignore_mask |= ftr_mask << frac->frac_shift;
> +       }
>  }

Thanks,
/fuad


>
>  void kvm_sys_reg_table_init(void)
> --
> 2.34.1.448.ga2b2bfdf31-goog
>
> _______________________________________________
> kvmarm mailing list
> kvmarm@lists.cs.columbia.edu
> https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
Reiji Watanabe Jan. 27, 2022, 5:03 a.m. UTC | #2
Hi Fuad,

On Mon, Jan 24, 2022 at 9:01 AM Fuad Tabba <tabba@google.com> wrote:
>
> Hi Reiji,
>
> On Thu, Jan 6, 2022 at 4:29 AM Reiji Watanabe <reijiw@google.com> wrote:
> >
> > A feature's fractional field in an ID register cannot simply be
> > validated at KVM_SET_ONE_REG time because its validity depends on the
> > value of its (main) feature field, which could live in a different ID
> > register (and might be set later).
> > Validate fractional fields at the first KVM_RUN instead.
> >
> > Signed-off-by: Reiji Watanabe <reijiw@google.com>
> > ---
> >  arch/arm64/include/asm/kvm_host.h |   1 +
> >  arch/arm64/kvm/arm.c              |   3 +
> >  arch/arm64/kvm/sys_regs.c         | 116 +++++++++++++++++++++++++++++-
> >  3 files changed, 117 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> > index 4509f9e7472d..7b3f86bd6a6b 100644
> > --- a/arch/arm64/include/asm/kvm_host.h
> > +++ b/arch/arm64/include/asm/kvm_host.h
> > @@ -750,6 +750,7 @@ long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
> >
> >  void set_default_id_regs(struct kvm *kvm);
> >  int kvm_set_id_reg_feature(struct kvm *kvm, u32 id, u8 field_shift, u8 fval);
> > +int kvm_id_regs_consistency_check(const struct kvm_vcpu *vcpu);
> >
> >  /* Guest/host FPSIMD coordination helpers */
> >  int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
> > diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> > index 5f497a0af254..16fc2ce32069 100644
> > --- a/arch/arm64/kvm/arm.c
> > +++ b/arch/arm64/kvm/arm.c
> > @@ -596,6 +596,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
> >         if (!kvm_arm_vcpu_is_finalized(vcpu))
> >                 return -EPERM;
> >
> > +       if (!kvm_vm_is_protected(kvm) && kvm_id_regs_consistency_check(vcpu))
> > +               return -EPERM;
> > +
> >         vcpu->arch.has_run_once = true;
> >
> >         kvm_arm_vcpu_init_debug(vcpu);
> > diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
> > index ddbeefc3881c..6adb7b04620c 100644
> > --- a/arch/arm64/kvm/sys_regs.c
> > +++ b/arch/arm64/kvm/sys_regs.c
> > @@ -756,9 +756,6 @@ static struct id_reg_info id_aa64pfr0_el1_info = {
> >
> >  static struct id_reg_info id_aa64pfr1_el1_info = {
> >         .sys_reg = SYS_ID_AA64PFR1_EL1,
> > -       .ignore_mask = ARM64_FEATURE_MASK(ID_AA64PFR1_RASFRAC) |
> > -                      ARM64_FEATURE_MASK(ID_AA64PFR1_MPAMFRAC) |
> > -                      ARM64_FEATURE_MASK(ID_AA64PFR1_CSV2FRAC),
> >         .init = init_id_aa64pfr1_el1_info,
> >         .validate = validate_id_aa64pfr1_el1,
> >         .vcpu_mask = vcpu_mask_id_aa64pfr1_el1,
> > @@ -3434,10 +3431,109 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
> >         return write_demux_regids(uindices);
> >  }
> >
> > +/* ID register's fractional field information with its feature field. */
> > +struct feature_frac {
> > +       u32     id;
> > +       u32     shift;
> > +       u32     frac_id;
> > +       u32     frac_shift;
> > +       u8      frac_ftr_check;
> > +};
>
> frac_ftr_check doesn't seem to be used. Also, it would be easier to
> read if the ordering of the fields matched the order in which you
> initialize them below.

Thank you for catching this.
I will remove frac_ftr_check and change the ordering.
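
For illustration, the reworked structure might then look like this (just a sketch of the two changes mentioned above: the unused member dropped and the fields listed in the same order as the feature_frac_table initializers):

/* ID register's fractional field information with its feature field. */
struct feature_frac {
	u32	frac_id;
	u32	frac_shift;
	u32	id;
	u32	shift;
};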

>
> > +
> > +static struct feature_frac feature_frac_table[] = {
> > +       {
> > +               .frac_id = SYS_ID_AA64PFR1_EL1,
> > +               .frac_shift = ID_AA64PFR1_RASFRAC_SHIFT,
> > +               .id = SYS_ID_AA64PFR0_EL1,
> > +               .shift = ID_AA64PFR0_RAS_SHIFT,
> > +       },
> > +       {
> > +               .frac_id = SYS_ID_AA64PFR1_EL1,
> > +               .frac_shift = ID_AA64PFR1_MPAMFRAC_SHIFT,
> > +               .id = SYS_ID_AA64PFR0_EL1,
> > +               .shift = ID_AA64PFR0_MPAM_SHIFT,
> > +       },
> > +       {
> > +               .frac_id = SYS_ID_AA64PFR1_EL1,
> > +               .frac_shift = ID_AA64PFR1_CSV2FRAC_SHIFT,
> > +               .id = SYS_ID_AA64PFR0_EL1,
> > +               .shift = ID_AA64PFR0_CSV2_SHIFT,
> > +       },
> > +};
> > +
> > +/*
> > + * Return non-zero if the feature/fractional field pair is not
> > + * supported. Return zero otherwise.
> > + * This function validates only the fractional feature field,
> > + * and relies on the (main) feature field having already been
> > + * validated through arm64_check_features.
> > + */
> > +static int vcpu_id_reg_feature_frac_check(const struct kvm_vcpu *vcpu,
> > +                                         const struct feature_frac *ftr_frac)
> > +{
> > +       const struct id_reg_info *id_reg;
> > +       u32 id;
> > +       u64 val, lim, mask;
> > +
> > +       /* Check if the feature field value is the same as the limit */
> > +       id = ftr_frac->id;
> > +       id_reg = GET_ID_REG_INFO(id);
> > +
> > +       mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->shift;
> > +       val = __read_id_reg(vcpu, id) & mask;
> > +       lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
> > +       lim &= mask;
> > +
> > +       if (val != lim)
> > +               /*
> > +                * The feature level is lower than the limit.
> > +                * Any fractional version should be fine.
> > +                */
> > +               return 0;
> > +
> > +       /* Check the fractional feature field */
> > +       id = ftr_frac->frac_id;
> > +       id_reg = GET_ID_REG_INFO(id);
> > +
> > +       mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->frac_shift;
> > +       val = __read_id_reg(vcpu, id) & mask;
> > +       lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
> > +       lim &= mask;
> > +
> > +       if (val == lim)
> > +               /*
> > +                * Both the feature and fractional fields are the same
> > +                * as the limit.
> > +                */
> > +               return 0;
> > +
> > +       return arm64_check_features(id, val, lim);
> > +}
> > +
> > +int kvm_id_regs_consistency_check(const struct kvm_vcpu *vcpu)
>
> Nit: considering that this is only checking the fractional fields,
> should the function name reflect that?

Thank you for the suggestion.
I will change the function name to reflect that.
(There were more checks in an older version and I forgot to
change the name...)

Thanks,
Reiji
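
As a sketch of the rename discussed above (the name below is only illustrative, not necessarily what a later revision uses), the entry point could become:

int kvm_id_regs_check_frac_fields(const struct kvm_vcpu *vcpu)
{
	int i, err;
	const struct feature_frac *frac;

	/*
	 * Check ID registers' fractional fields, which aren't checked
	 * at KVM_SET_ONE_REG.
	 */
	for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
		frac = &feature_frac_table[i];
		err = vcpu_id_reg_feature_frac_check(vcpu, frac);
		if (err)
			return err;
	}
	return 0;
}

with the declaration in kvm_host.h and the caller in kvm_vcpu_first_run_init() updated to match.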

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 4509f9e7472d..7b3f86bd6a6b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -750,6 +750,7 @@  long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
 
 void set_default_id_regs(struct kvm *kvm);
 int kvm_set_id_reg_feature(struct kvm *kvm, u32 id, u8 field_shift, u8 fval);
+int kvm_id_regs_consistency_check(const struct kvm_vcpu *vcpu);
 
 /* Guest/host FPSIMD coordination helpers */
 int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 5f497a0af254..16fc2ce32069 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -596,6 +596,9 @@  static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	if (!kvm_arm_vcpu_is_finalized(vcpu))
 		return -EPERM;
 
+	if (!kvm_vm_is_protected(kvm) && kvm_id_regs_consistency_check(vcpu))
+		return -EPERM;
+
 	vcpu->arch.has_run_once = true;
 
 	kvm_arm_vcpu_init_debug(vcpu);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index ddbeefc3881c..6adb7b04620c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -756,9 +756,6 @@  static struct id_reg_info id_aa64pfr0_el1_info = {
 
 static struct id_reg_info id_aa64pfr1_el1_info = {
 	.sys_reg = SYS_ID_AA64PFR1_EL1,
-	.ignore_mask = ARM64_FEATURE_MASK(ID_AA64PFR1_RASFRAC) |
-		       ARM64_FEATURE_MASK(ID_AA64PFR1_MPAMFRAC) |
-		       ARM64_FEATURE_MASK(ID_AA64PFR1_CSV2FRAC),
 	.init = init_id_aa64pfr1_el1_info,
 	.validate = validate_id_aa64pfr1_el1,
 	.vcpu_mask = vcpu_mask_id_aa64pfr1_el1,
@@ -3434,10 +3431,109 @@  int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
 	return write_demux_regids(uindices);
 }
 
+/* ID register's fractional field information with its feature field. */
+struct feature_frac {
+	u32	id;
+	u32	shift;
+	u32	frac_id;
+	u32	frac_shift;
+	u8	frac_ftr_check;
+};
+
+static struct feature_frac feature_frac_table[] = {
+	{
+		.frac_id = SYS_ID_AA64PFR1_EL1,
+		.frac_shift = ID_AA64PFR1_RASFRAC_SHIFT,
+		.id = SYS_ID_AA64PFR0_EL1,
+		.shift = ID_AA64PFR0_RAS_SHIFT,
+	},
+	{
+		.frac_id = SYS_ID_AA64PFR1_EL1,
+		.frac_shift = ID_AA64PFR1_MPAMFRAC_SHIFT,
+		.id = SYS_ID_AA64PFR0_EL1,
+		.shift = ID_AA64PFR0_MPAM_SHIFT,
+	},
+	{
+		.frac_id = SYS_ID_AA64PFR1_EL1,
+		.frac_shift = ID_AA64PFR1_CSV2FRAC_SHIFT,
+		.id = SYS_ID_AA64PFR0_EL1,
+		.shift = ID_AA64PFR0_CSV2_SHIFT,
+	},
+};
+
+/*
+ * Return non-zero if the feature/fractional field pair is not
+ * supported. Return zero otherwise.
+ * This function validates only the fractional feature field,
+ * and relies on the (main) feature field having already been
+ * validated through arm64_check_features.
+ */
+static int vcpu_id_reg_feature_frac_check(const struct kvm_vcpu *vcpu,
+					  const struct feature_frac *ftr_frac)
+{
+	const struct id_reg_info *id_reg;
+	u32 id;
+	u64 val, lim, mask;
+
+	/* Check if the feature field value is the same as the limit */
+	id = ftr_frac->id;
+	id_reg = GET_ID_REG_INFO(id);
+
+	mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->shift;
+	val = __read_id_reg(vcpu, id) & mask;
+	lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
+	lim &= mask;
+
+	if (val != lim)
+		/*
+		 * The feature level is lower than the limit.
+		 * Any fractional version should be fine.
+		 */
+		return 0;
+
+	/* Check the fractional feature field */
+	id = ftr_frac->frac_id;
+	id_reg = GET_ID_REG_INFO(id);
+
+	mask = (u64)ARM64_FEATURE_FIELD_MASK << ftr_frac->frac_shift;
+	val = __read_id_reg(vcpu, id) & mask;
+	lim = id_reg ? id_reg->vcpu_limit_val : read_sanitised_ftr_reg(id);
+	lim &= mask;
+
+	if (val == lim)
+		/*
+		 * Both the feature and fractional fields are the same
+		 * as the limit.
+		 */
+		return 0;
+
+	return arm64_check_features(id, val, lim);
+}
+
+int kvm_id_regs_consistency_check(const struct kvm_vcpu *vcpu)
+{
+	int i, err;
+	const struct feature_frac *frac;
+
+	/*
+	 * Check ID registers' fractional fields, which aren't checked
+	 * at KVM_SET_ONE_REG.
+	 */
+	for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
+		frac = &feature_frac_table[i];
+		err = vcpu_id_reg_feature_frac_check(vcpu, frac);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 static void id_reg_info_init_all(void)
 {
 	int i;
 	struct id_reg_info *id_reg;
+	struct feature_frac *frac;
+	u64 ftr_mask = ARM64_FEATURE_FIELD_MASK;
 
 	for (i = 0; i < ARRAY_SIZE(id_reg_info_table); i++) {
 		id_reg = (struct id_reg_info *)id_reg_info_table[i];
@@ -3446,6 +3542,20 @@  static void id_reg_info_init_all(void)
 
 		id_reg_info_init(id_reg);
 	}
+
+	/*
+	 * Update ignore_mask of ID registers based on fractional fields
+	 * information.  Any ID register that has fractional fields
+	 * is expected to have its own id_reg_info.
+	 */
+	for (i = 0; i < ARRAY_SIZE(feature_frac_table); i++) {
+		frac = &feature_frac_table[i];
+		id_reg = GET_ID_REG_INFO(frac->frac_id);
+		if (WARN_ON_ONCE(!id_reg))
+			continue;
+
+		id_reg->ignore_mask |= ftr_mask << frac->frac_shift;
+	}
 }
 
 void kvm_sys_reg_table_init(void)