
[RFC,1/2] arm/kvm: enable MTE if available

Message ID 20220512131146.78457-2-cohuck@redhat.com (mailing list archive)
State New, archived
Series arm: enable MTE for QEMU + kvm

Commit Message

Cornelia Huck May 12, 2022, 1:11 p.m. UTC
We need to disable migration, as we do not yet have a way to migrate
the tags as well.

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
 target/arm/cpu.c     | 18 ++++------
 target/arm/cpu.h     |  4 +++
 target/arm/cpu64.c   | 78 ++++++++++++++++++++++++++++++++++++++++++++
 target/arm/kvm64.c   |  5 +++
 target/arm/kvm_arm.h | 12 +++++++
 target/arm/monitor.c |  1 +
 6 files changed, 106 insertions(+), 12 deletions(-)

Comments

Eric Auger June 10, 2022, 8:48 p.m. UTC | #1
Hi Connie,
On 5/12/22 15:11, Cornelia Huck wrote:
> We need to disable migration, as we do not yet have a way to migrate
> the tags as well.

This patch does much more than adding a migration blocker ;-) you may
describe the new cpu option and how it works.
> 
> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
> ---
>  target/arm/cpu.c     | 18 ++++------
>  target/arm/cpu.h     |  4 +++
>  target/arm/cpu64.c   | 78 ++++++++++++++++++++++++++++++++++++++++++++
>  target/arm/kvm64.c   |  5 +++
>  target/arm/kvm_arm.h | 12 +++++++
>  target/arm/monitor.c |  1 +
>  6 files changed, 106 insertions(+), 12 deletions(-)
> 
> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> index 029f644768b1..f0505815b1e7 100644
> --- a/target/arm/cpu.c
> +++ b/target/arm/cpu.c
> @@ -1435,6 +1435,11 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
>              error_propagate(errp, local_err);
>              return;
>          }
> +        arm_cpu_mte_finalize(cpu, &local_err);
> +        if (local_err != NULL) {
> +            error_propagate(errp, local_err);
> +            return;
> +        }
>      }
>  
>      if (kvm_enabled()) {
> @@ -1504,7 +1509,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
>          }
>          if (cpu->tag_memory) {
>              error_setg(errp,
> -                       "Cannot enable KVM when guest CPUs has MTE enabled");
> +                       "Cannot enable KVM when guest CPUs has tag memory enabled");
Before this series, tag_memory was used to detect whether MTE was enabled at
the machine level, and that was not compatible with KVM.

Hasn't that changed now with this series? Sorry, I don't know much about
tag_memory and the KVM use case. Can you describe it in the cover letter as
well?
>              return;
>          }
>      }
> @@ -1882,17 +1887,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
>                                         ID_PFR1, VIRTUALIZATION, 0);
>      }
>  
> -#ifndef CONFIG_USER_ONLY
> -    if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
> -        /*
> -         * Disable the MTE feature bits if we do not have tag-memory
> -         * provided by the machine.
> -         */
> -        cpu->isar.id_aa64pfr1 =
> -            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
> -    }
> -#endif
> -
>      /* MPU can be configured out of a PMSA CPU either by setting has-mpu
>       * to false or by setting pmsav7-dregion to 0.
>       */
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index 18ca61e8e25b..183506713e96 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -208,11 +208,13 @@ typedef struct {
>  void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
>  void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
>  void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp);
>  #else
>  # define ARM_MAX_VQ    1
>  static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
>  static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { }
>  static inline void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) { }
> +static inline void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp) { }
>  #endif
>  
>  typedef struct ARMVectorReg {
> @@ -993,6 +995,7 @@ struct ArchCPU {
>      bool prop_pauth;
>      bool prop_pauth_impdef;
>      bool prop_lpa2;
> +    bool prop_mte;
>  
>      /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
>      uint32_t dcz_blocksize;
> @@ -1091,6 +1094,7 @@ void aarch64_sve_change_el(CPUARMState *env, int old_el,
>                             int new_el, bool el0_a64);
>  void aarch64_add_sve_properties(Object *obj);
>  void aarch64_add_pauth_properties(Object *obj);
> +void aarch64_add_mte_properties(Object *obj);
>  
>  /*
>   * SVE registers are encoded in KVM's memory in an endianness-invariant format.
> diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
> index 04427e073f17..eea9ad195470 100644
> --- a/target/arm/cpu64.c
> +++ b/target/arm/cpu64.c
> @@ -35,7 +35,11 @@
>  #include "qapi/visitor.h"
>  #include "hw/qdev-properties.h"
>  #include "internals.h"
> +#include "migration/blocker.h"
>  
> +#ifdef CONFIG_KVM
> +static Error *mte_migration_blocker;
> +#endif
>  
>  static void aarch64_a57_initfn(Object *obj)
>  {
> @@ -785,6 +789,78 @@ void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
>      cpu->isar.id_aa64mmfr0 = t;
>  }
>  
> +static Property arm_cpu_mte_property =
> +    DEFINE_PROP_BOOL("mte", ARMCPU, prop_mte, true);
> +
> +void aarch64_add_mte_properties(Object *obj)
> +{
> +    ARMCPU *cpu = ARM_CPU(obj);
> +
> +    /*
> +     * For tcg, the machine type may provide tag memory for MTE emulation.
s/machine type/machine?
> +     * We do not know whether that is the case at this point in time, so
> +     * default MTE to on and check later.
> +     * This preserves pre-existing behaviour, but is really a bit awkward.
> +     */
> +    qdev_property_add_static(DEVICE(obj), &arm_cpu_mte_property);
> +    if (kvm_enabled()) {
> +        /*
> +         * Default MTE to off, as long as migration support is not
> +         * yet implemented.
> +         * TODO: implement migration support for kvm
> +         */
> +        cpu->prop_mte = false;
> +    }
> +}
> +
> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
> +{
> +    if (!cpu->prop_mte) {
> +        /* Disable MTE feature bits. */
> +        cpu->isar.id_aa64pfr1 =
> +            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
> +        return;
> +    }
> +#ifndef CONFIG_USER_ONLY
> +    if (!kvm_enabled()) {
> +        if (cpu_isar_feature(aa64_mte, cpu) && !cpu->tag_memory) {
> +            /*
> +             * Disable the MTE feature bits, unless we have tag-memory
> +             * provided by the machine.
> +             * This silent downgrade is not really nice if the user had
> +             * explicitly requested MTE to be enabled by the cpu, but it
> +             * preserves pre-existing behaviour. In an ideal world, we


Can't we "simply" prevent the end-user from using the prop_mte option
with a TCG CPU? and have something like

For TCG, MTE depends on the CPU feature availability + machine tag memory
For KVM, MTE depends on the user opt-in + CPU feature avail (if
relevant) + host VM capability (?)
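
In rough pseudo-code (illustration only, not actual patch code, reusing
helpers from the tree and from this series):

    if (tcg_enabled()) {
        /* MTE only if the CPU has the feature and the machine provides tag memory */
        enable_mte = cpu_isar_feature(aa64_mte, cpu) && cpu->tag_memory;
    } else if (kvm_enabled()) {
        /* MTE only if the user opted in and the host/KVM supports it */
        enable_mte = cpu->prop_mte && kvm_arm_mte_supported();
    }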

But maybe I miss the point ...
> +             * would fail if MTE was requested, but no tag memory has
> +             * been provided.
> +             */
> +            cpu->isar.id_aa64pfr1 =
> +                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
> +        }
> +        if (!cpu_isar_feature(aa64_mte, cpu)) {
> +            cpu->prop_mte = false;
> +        }
> +        return;
> +    }
> +    if (kvm_arm_mte_supported()) {
> +#ifdef CONFIG_KVM
> +        if (kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0)) {
> +            error_setg(errp, "Failed to enable KVM_CAP_ARM_MTE");
> +        } else {
> +            /* TODO: add proper migration support with MTE enabled */
> +            if (!mte_migration_blocker) {
> +                error_setg(&mte_migration_blocker,
> +                           "Live migration disabled due to MTE enabled");
> +                if (migrate_add_blocker(mte_migration_blocker, NULL)) {
error_free(mte_migration_blocker);
mte_migration_blocker = NULL;
> +                    error_setg(errp, "Failed to add MTE migration blocker");
> +                }
> +            }
> +        }
> +#endif
> +    }
> +    /* When HVF provides support for MTE, add it here */
> +#endif
> +}
> +
>  static void aarch64_host_initfn(Object *obj)
>  {
>  #if defined(CONFIG_KVM)
> @@ -793,6 +869,7 @@ static void aarch64_host_initfn(Object *obj)
>      if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
>          aarch64_add_sve_properties(obj);
>          aarch64_add_pauth_properties(obj);
> +        aarch64_add_mte_properties(obj);
>      }
>  #elif defined(CONFIG_HVF)
>      ARMCPU *cpu = ARM_CPU(obj);
> @@ -958,6 +1035,7 @@ static void aarch64_max_initfn(Object *obj)
>      object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
>                          cpu_max_set_sve_max_vq, NULL, NULL);
>      qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
> +    aarch64_add_mte_properties(obj);
>  }
>  
>  static void aarch64_a64fx_initfn(Object *obj)
> diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
> index b8cfaf5782ac..d129a264a3f6 100644
> --- a/target/arm/kvm64.c
> +++ b/target/arm/kvm64.c
> @@ -746,6 +746,11 @@ bool kvm_arm_steal_time_supported(void)
>      return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
>  }
>  
> +bool kvm_arm_mte_supported(void)
> +{
> +    return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
> +}
> +
>  QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
>  
>  void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
> diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
> index b7f78b521545..13f06ed5e0ea 100644
> --- a/target/arm/kvm_arm.h
> +++ b/target/arm/kvm_arm.h
> @@ -306,6 +306,13 @@ bool kvm_arm_pmu_supported(void);
>   */
>  bool kvm_arm_sve_supported(void);
>  
> +/**
> + * kvm_arm_mte_supported:
> + *
> + * Returns: true if KVM can enable MTE, and false otherwise.
> + */
> +bool kvm_arm_mte_supported(void);
> +
>  /**
>   * kvm_arm_get_max_vm_ipa_size:
>   * @ms: Machine state handle
> @@ -396,6 +403,11 @@ static inline bool kvm_arm_steal_time_supported(void)
>      return false;
>  }
>  
> +static inline bool kvm_arm_mte_supported(void)
> +{
> +    return false;
> +}
> +
>  /*
>   * These functions should never actually be called without KVM support.
>   */
> diff --git a/target/arm/monitor.c b/target/arm/monitor.c
> index 80c64fa3556d..f13ff2664b67 100644
> --- a/target/arm/monitor.c
> +++ b/target/arm/monitor.c
> @@ -96,6 +96,7 @@ static const char *cpu_model_advertised_features[] = {
>      "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
>      "kvm-no-adjvtime", "kvm-steal-time",
>      "pauth", "pauth-impdef",
> +    "mte",
>      NULL
>  };
>  
Eric
Cornelia Huck June 14, 2022, 8:40 a.m. UTC | #2
On Fri, Jun 10 2022, Eric Auger <eauger@redhat.com> wrote:

> Hi Connie,
> On 5/12/22 15:11, Cornelia Huck wrote:
>> We need to disable migration, as we do not yet have a way to migrate
>> the tags as well.
>
> This patch does much more than adding a migration blocker ;-) you may
> describe the new cpu option and how it works.

I admit this is a bit terse ;) The idea is to control mte at the cpu
level directly (and not indirectly via tag memory at the machine
level). I.e. the user gets whatever is available given the constraints
(host support etc.) if they don't specify anything, and they can
explicitly turn it off/on.
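
For example, with this series applied, I'd expect something like the
following to work (untested command-line sketches, just to illustrate):

    -accel kvm -cpu host            # default: MTE off (no migration support yet)
    -accel kvm -cpu host,mte=on     # explicitly request MTE from KVM
    -accel tcg -cpu max,mte=off     # explicitly disable MTE under tcg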

>> 
>> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
>> ---
>>  target/arm/cpu.c     | 18 ++++------
>>  target/arm/cpu.h     |  4 +++
>>  target/arm/cpu64.c   | 78 ++++++++++++++++++++++++++++++++++++++++++++
>>  target/arm/kvm64.c   |  5 +++
>>  target/arm/kvm_arm.h | 12 +++++++
>>  target/arm/monitor.c |  1 +
>>  6 files changed, 106 insertions(+), 12 deletions(-)
>> 
>> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
>> index 029f644768b1..f0505815b1e7 100644
>> --- a/target/arm/cpu.c
>> +++ b/target/arm/cpu.c
>> @@ -1435,6 +1435,11 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
>>              error_propagate(errp, local_err);
>>              return;
>>          }
>> +        arm_cpu_mte_finalize(cpu, &local_err);
>> +        if (local_err != NULL) {
>> +            error_propagate(errp, local_err);
>> +            return;
>> +        }
>>      }
>>  
>>      if (kvm_enabled()) {
>> @@ -1504,7 +1509,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
>>          }
>>          if (cpu->tag_memory) {
>>              error_setg(errp,
>> -                       "Cannot enable KVM when guest CPUs has MTE enabled");
>> +                       "Cannot enable KVM when guest CPUs has tag memory enabled");
> before this series, tag_memory was used to detect MTE was enabled at
> machine level. And this was not compatible with KVM.
>
> Hasn't it changed now with this series? Sorry I don't know much about
> that tag_memory along with the KVM use case? Can you describe it as well
> in the cover letter.

If I understand the current code correctly, the purpose of tag_memory is
twofold:
- control whether mte should be available in the first place
- provide a place where a memory region used by the tcg implementation can
  be linked

The latter part (extra memory region) is not compatible with
kvm. "Presence of extra memory for the implementation" as the knob to
configure mte for tcg makes sense, but it didn't seem right to me to use
it for kvm to control something which is basically a cpu property.
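
(For reference: with tcg today, MTE is enabled via the machine, e.g.
"-machine virt,mte=on -cpu max", which makes the virt machine allocate the
tag memory region used by the tcg implementation.)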

>>              return;
>>          }
>>      }

(...)

>> +void aarch64_add_mte_properties(Object *obj)
>> +{
>> +    ARMCPU *cpu = ARM_CPU(obj);
>> +
>> +    /*
>> +     * For tcg, the machine type may provide tag memory for MTE emulation.
> s/machine type/machine?

Either, I guess, as only the virt machine type provides tag memory in
the first place.

>> +     * We do not know whether that is the case at this point in time, so
>> +     * default MTE to on and check later.
>> +     * This preserves pre-existing behaviour, but is really a bit awkward.
>> +     */
>> +    qdev_property_add_static(DEVICE(obj), &arm_cpu_mte_property);
>> +    if (kvm_enabled()) {
>> +        /*
>> +         * Default MTE to off, as long as migration support is not
>> +         * yet implemented.
>> +         * TODO: implement migration support for kvm
>> +         */
>> +        cpu->prop_mte = false;
>> +    }
>> +}
>> +
>> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
>> +{
>> +    if (!cpu->prop_mte) {
>> +        /* Disable MTE feature bits. */
>> +        cpu->isar.id_aa64pfr1 =
>> +            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
>> +        return;
>> +    }
>> +#ifndef CONFIG_USER_ONLY
>> +    if (!kvm_enabled()) {
>> +        if (cpu_isar_feature(aa64_mte, cpu) && !cpu->tag_memory) {
>> +            /*
>> +             * Disable the MTE feature bits, unless we have tag-memory
>> +             * provided by the machine.
>> +             * This silent downgrade is not really nice if the user had
>> +             * explicitly requested MTE to be enabled by the cpu, but it
>> +             * preserves pre-existing behaviour. In an ideal world, we
>
>
> Can't we "simply" prevent the end-user from using the prop_mte option
> with a TCG CPU? and have something like
>
> For TCG, MTE depends on the CPU feature availability + machine tag memory
> For KVM, MTE depends on the user opt-in + CPU feature avail (if
> relevant) + host VM capability (?)

I don't like kvm and tcg cpus behaving too differently... but then, tcg
is already different as it needs tag_memory.

Thinking about it, maybe we could repurpose tag_memory in the kvm case
(e.g. for a temporary buffer for migration purposes) and require it in
all cases (making kvm fail if the user specified tag memory, but the
host doesn't support it). A cpu feature still looks more natural to me,
but I'm not yet quite used to how things are done in arm :)

The big elephant in the room is how migration will end up
working... after reading the discussions in
https://lore.kernel.org/all/CAJc+Z1FZxSYB_zJit4+0uTR-88VqQL+-01XNMSEfua-dXDy6Wg@mail.gmail.com/
I don't think it will be as "easy" as I thought, and we probably require
some further fiddling on the kernel side.

>
> But maybe I miss the point ...
>> +             * would fail if MTE was requested, but no tag memory has
>> +             * been provided.
>> +             */
>> +            cpu->isar.id_aa64pfr1 =
>> +                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
>> +        }
>> +        if (!cpu_isar_feature(aa64_mte, cpu)) {
>> +            cpu->prop_mte = false;
>> +        }
>> +        return;
>> +    }
>> +    if (kvm_arm_mte_supported()) {
>> +#ifdef CONFIG_KVM
>> +        if (kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0)) {
>> +            error_setg(errp, "Failed to enable KVM_CAP_ARM_MTE");
>> +        } else {
>> +            /* TODO: add proper migration support with MTE enabled */
>> +            if (!mte_migration_blocker) {
>> +                error_setg(&mte_migration_blocker,
>> +                           "Live migration disabled due to MTE enabled");
>> +                if (migrate_add_blocker(mte_migration_blocker, NULL)) {
> error_free(mte_migration_blocker);
> mte_migration_blocker = NULL;

Ah, I missed that, thanks.

>> +                    error_setg(errp, "Failed to add MTE migration blocker");
>> +                }
>> +            }
>> +        }
>> +#endif
>> +    }
>> +    /* When HVF provides support for MTE, add it here */
>> +#endif
>> +}
>> +
Eric Auger June 29, 2022, 10:38 a.m. UTC | #3
Hi Connie,

On 6/14/22 10:40, Cornelia Huck wrote:
> On Fri, Jun 10 2022, Eric Auger <eauger@redhat.com> wrote:
> 
>> Hi Connie,
>> On 5/12/22 15:11, Cornelia Huck wrote:
>>> We need to disable migration, as we do not yet have a way to migrate
>>> the tags as well.
>>
>> This patch does much more than adding a migration blocker ;-) you may
>> describe the new cpu option and how it works.
> 
> I admit this is a bit terse ;) The idea is to control mte at the cpu
> level directly (and not indirectly via tag memory at the machine
> level). I.e. the user gets whatever is available given the constraints
> (host support etc.) if they don't specify anything, and they can
> explicitly turn it off/on.

Could the OnOffAuto property value be helpful?
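
Something like this, maybe (a rough, untested sketch just to illustrate,
assuming prop_mte is turned into an OnOffAuto field):

    static Property arm_cpu_mte_property =
        DEFINE_PROP_ON_OFF_AUTO("mte", ARMCPU, prop_mte, ON_OFF_AUTO_AUTO);

    /* and in arm_cpu_mte_finalize(): */
    if (cpu->prop_mte == ON_OFF_AUTO_OFF) {
        /* clear the MTE feature bits */
    } else if (cpu->prop_mte == ON_OFF_AUTO_AUTO) {
        /* enable MTE only if the accelerator/host can provide it */
    } else {
        /* explicit "on": fail if MTE cannot be provided */
    }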
> 
>>>
>>> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
>>> ---
>>>  target/arm/cpu.c     | 18 ++++------
>>>  target/arm/cpu.h     |  4 +++
>>>  target/arm/cpu64.c   | 78 ++++++++++++++++++++++++++++++++++++++++++++
>>>  target/arm/kvm64.c   |  5 +++
>>>  target/arm/kvm_arm.h | 12 +++++++
>>>  target/arm/monitor.c |  1 +
>>>  6 files changed, 106 insertions(+), 12 deletions(-)
>>>
>>> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
>>> index 029f644768b1..f0505815b1e7 100644
>>> --- a/target/arm/cpu.c
>>> +++ b/target/arm/cpu.c
>>> @@ -1435,6 +1435,11 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
>>>              error_propagate(errp, local_err);
>>>              return;
>>>          }
>>> +        arm_cpu_mte_finalize(cpu, &local_err);
>>> +        if (local_err != NULL) {
>>> +            error_propagate(errp, local_err);
>>> +            return;
>>> +        }
>>>      }
>>>  
>>>      if (kvm_enabled()) {
>>> @@ -1504,7 +1509,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
>>>          }
>>>          if (cpu->tag_memory) {
>>>              error_setg(errp,
>>> -                       "Cannot enable KVM when guest CPUs has MTE enabled");
>>> +                       "Cannot enable KVM when guest CPUs has tag memory enabled");
>> before this series, tag_memory was used to detect MTE was enabled at
>> machine level. And this was not compatible with KVM.
>>
>> Hasn't it changed now with this series? Sorry I don't know much about
>> that tag_memory along with the KVM use case? Can you describe it as well
>> in the cover letter.
> 
> If I understand the current code correctly, the purpose of tag_memory is
> twofold:
> - control whether mte should be available in the first place
> - provide a place where a memory region used by the tcg implementation can
>   be linked

OK, I now understand that the tag memory is a pure TCG thing, so setting it
along with KVM should indeed be invalid.
> 
> The latter part (extra memory region) is not compatible with
> kvm. "Presence of extra memory for the implementation" as the knob to
> configure mte for tcg makes sense, but it didn't seem right to me to use
> it for kvm while controlling something which is basically a cpu property.


> 
>>>              return;
>>>          }
>>>      }
> 
> (...)
> 
>>> +void aarch64_add_mte_properties(Object *obj)
>>> +{
>>> +    ARMCPU *cpu = ARM_CPU(obj);
>>> +
>>> +    /*
>>> +     * For tcg, the machine type may provide tag memory for MTE emulation.
>> s/machine type/machine?
> 
> Either, I guess, as only the virt machine type provides tag memory in
> the first place.
Yeah, that was just a nit about the terminology.
> 
>>> +     * We do not know whether that is the case at this point in time, so
>>> +     * default MTE to on and check later.
>>> +     * This preserves pre-existing behaviour, but is really a bit awkward.
>>> +     */
>>> +    qdev_property_add_static(DEVICE(obj), &arm_cpu_mte_property);
>>> +    if (kvm_enabled()) {
>>> +        /*
>>> +         * Default MTE to off, as long as migration support is not
>>> +         * yet implemented.
>>> +         * TODO: implement migration support for kvm
>>> +         */
>>> +        cpu->prop_mte = false;
>>> +    }
>>> +}
>>> +
>>> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
>>> +{
>>> +    if (!cpu->prop_mte) {
>>> +        /* Disable MTE feature bits. */
>>> +        cpu->isar.id_aa64pfr1 =
>>> +            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
>>> +        return;
>>> +    }
>>> +#ifndef CONFIG_USER_ONLY
>>> +    if (!kvm_enabled()) {
>>> +        if (cpu_isar_feature(aa64_mte, cpu) && !cpu->tag_memory) {
>>> +            /*
>>> +             * Disable the MTE feature bits, unless we have tag-memory
>>> +             * provided by the machine.
>>> +             * This silent downgrade is not really nice if the user had
>>> +             * explicitly requested MTE to be enabled by the cpu, but it
>>> +             * preserves pre-existing behaviour. In an ideal world, we
>>
>>
>> Can't we "simply" prevent the end-user from using the prop_mte option
>> with a TCG CPU? and have something like
>>
>> For TCG, MTE depends on the CPU feature availability + machine tag memory
>> For KVM, MTE depends on the user opt-in + CPU feature avail (if
>> relevant) + host VM capability (?)
> 
> I don't like kvm and tcg cpus behaving too differently... but then, tcg
> is already different as it needs tag_memory.
> 
> Thinking about it, maybe we could repurpose tag_memory in the kvm case
> (e.g. for a temporary buffer for migration purposes) and require it in
> all cases (making kvm fail if the user specified tag memory, but the
> host doesn't support it). A cpu feature still looks more natural to me,
> but I'm not yet quite used to how things are done in arm :)
If the tag memory is an implementation "detail" for TCG, then I agree
with you that a CPU property would be better suited for KVM.
> 
> The big elephant in the room is how migration will end up
> working... after reading the discussions in
> https://lore.kernel.org/all/CAJc+Z1FZxSYB_zJit4+0uTR-88VqQL+-01XNMSEfua-dXDy6Wg@mail.gmail.com/
> I don't think it will be as "easy" as I thought, and we probably require
> some further fiddling on the kernel side.
Yes, maybe the MTE migration process should be documented and discussed
separately on the ML? Is Haibo Xu's address bouncing?

Eric
> 
>>
>> But maybe I miss the point ...
>>> +             * would fail if MTE was requested, but no tag memory has
>>> +             * been provided.
>>> +             */
>>> +            cpu->isar.id_aa64pfr1 =
>>> +                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
>>> +        }
>>> +        if (!cpu_isar_feature(aa64_mte, cpu)) {
>>> +            cpu->prop_mte = false;
>>> +        }
>>> +        return;
>>> +    }
>>> +    if (kvm_arm_mte_supported()) {
>>> +#ifdef CONFIG_KVM
>>> +        if (kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0)) {
>>> +            error_setg(errp, "Failed to enable KVM_CAP_ARM_MTE");
>>> +        } else {
>>> +            /* TODO: add proper migration support with MTE enabled */
>>> +            if (!mte_migration_blocker) {
>>> +                error_setg(&mte_migration_blocker,
>>> +                           "Live migration disabled due to MTE enabled");
>>> +                if (migrate_add_blocker(mte_migration_blocker, NULL)) {
>> error_free(mte_migration_blocker);
>> mte_migration_blocker = NULL;
> 
> Ah, I missed that, thanks.
> 
>>> +                    error_setg(errp, "Failed to add MTE migration blocker");
>>> +                }
>>> +            }
>>> +        }
>>> +#endif
>>> +    }
>>> +    /* When HVF provides support for MTE, add it here */
>>> +#endif
>>> +}
>>> +
>
Cornelia Huck June 30, 2022, 3:55 p.m. UTC | #4
On Wed, Jun 29 2022, Eric Auger <eauger@redhat.com> wrote:

> Hi Connie,
>
> On 6/14/22 10:40, Cornelia Huck wrote:
>> On Fri, Jun 10 2022, Eric Auger <eauger@redhat.com> wrote:
>> 
>>> Hi Connie,
>>> On 5/12/22 15:11, Cornelia Huck wrote:
>>>> We need to disable migration, as we do not yet have a way to migrate
>>>> the tags as well.
>>>
>>> This patch does much more than adding a migration blocker ;-) you may
>>> describe the new cpu option and how it works.
>> 
>> I admit this is a bit terse ;) The idea is to control mte at the cpu
>> level directly (and not indirectly via tag memory at the machine
>> level). I.e. the user gets whatever is available given the constraints
>> (host support etc.) if they don't specify anything, and they can
>> explicitly turn it off/on.
>
> Could the OnOffAuto property value be helpful?

I completely forgot that this exists; I hacked up something (still
untested), and it seems to be able to do what I want.

I'll post it after I've verified that it actually works :)

>> The big elephant in the room is how migration will end up
>> working... after reading the discussions in
>> https://lore.kernel.org/all/CAJc+Z1FZxSYB_zJit4+0uTR-88VqQL+-01XNMSEfua-dXDy6Wg@mail.gmail.com/
>> I don't think it will be as "easy" as I thought, and we probably require
>> some further fiddling on the kernel side.
> Yes, maybe the MTE migration process should be documented and discussed
> separately on the ML? Is Haibo Xu's address bouncing?

Yes, that address is bouncing...

I've piggybacked onto a recent kvm discussion in
https://lore.kernel.org/all/875ykmcd8q.fsf@redhat.com/ -- I guess there
has not been any change for migration in the meantime; we need to find a
way to tie page data + metadata together.

Patch

diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 029f644768b1..f0505815b1e7 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1435,6 +1435,11 @@  void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
             error_propagate(errp, local_err);
             return;
         }
+        arm_cpu_mte_finalize(cpu, &local_err);
+        if (local_err != NULL) {
+            error_propagate(errp, local_err);
+            return;
+        }
     }
 
     if (kvm_enabled()) {
@@ -1504,7 +1509,7 @@  static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         }
         if (cpu->tag_memory) {
             error_setg(errp,
-                       "Cannot enable KVM when guest CPUs has MTE enabled");
+                       "Cannot enable KVM when guest CPUs has tag memory enabled");
             return;
         }
     }
@@ -1882,17 +1887,6 @@  static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
                                        ID_PFR1, VIRTUALIZATION, 0);
     }
 
-#ifndef CONFIG_USER_ONLY
-    if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
-        /*
-         * Disable the MTE feature bits if we do not have tag-memory
-         * provided by the machine.
-         */
-        cpu->isar.id_aa64pfr1 =
-            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
-    }
-#endif
-
     /* MPU can be configured out of a PMSA CPU either by setting has-mpu
      * to false or by setting pmsav7-dregion to 0.
      */
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 18ca61e8e25b..183506713e96 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -208,11 +208,13 @@  typedef struct {
 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp);
 #else
 # define ARM_MAX_VQ    1
 static inline void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp) { }
 static inline void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp) { }
 static inline void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp) { }
+static inline void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp) { }
 #endif
 
 typedef struct ARMVectorReg {
@@ -993,6 +995,7 @@  struct ArchCPU {
     bool prop_pauth;
     bool prop_pauth_impdef;
     bool prop_lpa2;
+    bool prop_mte;
 
     /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
     uint32_t dcz_blocksize;
@@ -1091,6 +1094,7 @@  void aarch64_sve_change_el(CPUARMState *env, int old_el,
                            int new_el, bool el0_a64);
 void aarch64_add_sve_properties(Object *obj);
 void aarch64_add_pauth_properties(Object *obj);
+void aarch64_add_mte_properties(Object *obj);
 
 /*
  * SVE registers are encoded in KVM's memory in an endianness-invariant format.
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 04427e073f17..eea9ad195470 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -35,7 +35,11 @@ 
 #include "qapi/visitor.h"
 #include "hw/qdev-properties.h"
 #include "internals.h"
+#include "migration/blocker.h"
 
+#ifdef CONFIG_KVM
+static Error *mte_migration_blocker;
+#endif
 
 static void aarch64_a57_initfn(Object *obj)
 {
@@ -785,6 +789,78 @@  void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp)
     cpu->isar.id_aa64mmfr0 = t;
 }
 
+static Property arm_cpu_mte_property =
+    DEFINE_PROP_BOOL("mte", ARMCPU, prop_mte, true);
+
+void aarch64_add_mte_properties(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    /*
+     * For tcg, the machine type may provide tag memory for MTE emulation.
+     * We do not know whether that is the case at this point in time, so
+     * default MTE to on and check later.
+     * This preserves pre-existing behaviour, but is really a bit awkward.
+     */
+    qdev_property_add_static(DEVICE(obj), &arm_cpu_mte_property);
+    if (kvm_enabled()) {
+        /*
+         * Default MTE to off, as long as migration support is not
+         * yet implemented.
+         * TODO: implement migration support for kvm
+         */
+        cpu->prop_mte = false;
+    }
+}
+
+void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
+{
+    if (!cpu->prop_mte) {
+        /* Disable MTE feature bits. */
+        cpu->isar.id_aa64pfr1 =
+            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+        return;
+    }
+#ifndef CONFIG_USER_ONLY
+    if (!kvm_enabled()) {
+        if (cpu_isar_feature(aa64_mte, cpu) && !cpu->tag_memory) {
+            /*
+             * Disable the MTE feature bits, unless we have tag-memory
+             * provided by the machine.
+             * This silent downgrade is not really nice if the user had
+             * explicitly requested MTE to be enabled by the cpu, but it
+             * preserves pre-existing behaviour. In an ideal world, we
+             * would fail if MTE was requested, but no tag memory has
+             * been provided.
+             */
+            cpu->isar.id_aa64pfr1 =
+                FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+        }
+        if (!cpu_isar_feature(aa64_mte, cpu)) {
+            cpu->prop_mte = false;
+        }
+        return;
+    }
+    if (kvm_arm_mte_supported()) {
+#ifdef CONFIG_KVM
+        if (kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0)) {
+            error_setg(errp, "Failed to enable KVM_CAP_ARM_MTE");
+        } else {
+            /* TODO: add proper migration support with MTE enabled */
+            if (!mte_migration_blocker) {
+                error_setg(&mte_migration_blocker,
+                           "Live migration disabled due to MTE enabled");
+                if (migrate_add_blocker(mte_migration_blocker, NULL)) {
+                    error_setg(errp, "Failed to add MTE migration blocker");
+                }
+            }
+        }
+#endif
+    }
+    /* When HVF provides support for MTE, add it here */
+#endif
+}
+
 static void aarch64_host_initfn(Object *obj)
 {
 #if defined(CONFIG_KVM)
@@ -793,6 +869,7 @@  static void aarch64_host_initfn(Object *obj)
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         aarch64_add_sve_properties(obj);
         aarch64_add_pauth_properties(obj);
+        aarch64_add_mte_properties(obj);
     }
 #elif defined(CONFIG_HVF)
     ARMCPU *cpu = ARM_CPU(obj);
@@ -958,6 +1035,7 @@  static void aarch64_max_initfn(Object *obj)
     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                         cpu_max_set_sve_max_vq, NULL, NULL);
     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
+    aarch64_add_mte_properties(obj);
 }
 
 static void aarch64_a64fx_initfn(Object *obj)
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index b8cfaf5782ac..d129a264a3f6 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -746,6 +746,11 @@  bool kvm_arm_steal_time_supported(void)
     return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
 }
 
+bool kvm_arm_mte_supported(void)
+{
+    return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
 QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
 
 void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map)
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index b7f78b521545..13f06ed5e0ea 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -306,6 +306,13 @@  bool kvm_arm_pmu_supported(void);
  */
 bool kvm_arm_sve_supported(void);
 
+/**
+ * kvm_arm_mte_supported:
+ *
+ * Returns: true if KVM can enable MTE, and false otherwise.
+ */
+bool kvm_arm_mte_supported(void);
+
 /**
  * kvm_arm_get_max_vm_ipa_size:
  * @ms: Machine state handle
@@ -396,6 +403,11 @@  static inline bool kvm_arm_steal_time_supported(void)
     return false;
 }
 
+static inline bool kvm_arm_mte_supported(void)
+{
+    return false;
+}
+
 /*
  * These functions should never actually be called without KVM support.
  */
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
index 80c64fa3556d..f13ff2664b67 100644
--- a/target/arm/monitor.c
+++ b/target/arm/monitor.c
@@ -96,6 +96,7 @@  static const char *cpu_model_advertised_features[] = {
     "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
     "kvm-no-adjvtime", "kvm-steal-time",
     "pauth", "pauth-impdef",
+    "mte",
     NULL
 };