
[v5,2/3] arm/kvm: add support for MTE

Message ID 20230203134433.31513-3-cohuck@redhat.com (mailing list archive)
State New, archived
Series: arm: enable MTE for QEMU + kvm

Commit Message

Cornelia Huck Feb. 3, 2023, 1:44 p.m. UTC
Introduce a new cpu feature flag to control MTE support. To preserve
backwards compatibility for tcg, MTE will continue to be enabled as
long as tag memory has been provided.

If MTE has been enabled, we need to disable migration, as we do not
yet have a way to migrate the tags as well. Therefore, MTE will stay
off with KVM unless requested explicitly.
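
For example, with KVM, MTE could then be requested explicitly along these
lines (illustrative command line only):

  qemu-system-aarch64 -accel kvm -machine virt -cpu host,mte=on ...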

Signed-off-by: Cornelia Huck <cohuck@redhat.com>
---
 docs/system/arm/cpu-features.rst |  21 ++++++
 hw/arm/virt.c                    |   2 +-
 target/arm/cpu.c                 |  18 ++---
 target/arm/cpu.h                 |   1 +
 target/arm/cpu64.c               | 114 +++++++++++++++++++++++++++++++
 target/arm/internals.h           |   1 +
 target/arm/kvm.c                 |  29 ++++++++
 target/arm/kvm64.c               |   5 ++
 target/arm/kvm_arm.h             |  19 ++++++
 target/arm/monitor.c             |   1 +
 10 files changed, 198 insertions(+), 13 deletions(-)

Comments

Richard Henderson Feb. 3, 2023, 8:40 p.m. UTC | #1
On 2/3/23 03:44, Cornelia Huck wrote:
> +static void aarch64_cpu_get_mte(Object *obj, Visitor *v, const char *name,
> +                                void *opaque, Error **errp)
> +{
> +    ARMCPU *cpu = ARM_CPU(obj);
> +    OnOffAuto mte = cpu->prop_mte;
> +
> +    visit_type_OnOffAuto(v, name, &mte, errp);
> +}

You don't need to copy to a local variable here.

> +
> +static void aarch64_cpu_set_mte(Object *obj, Visitor *v, const char *name,
> +                                void *opaque, Error **errp)
> +{
> +    ARMCPU *cpu = ARM_CPU(obj);
> +
> +    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
> +}

... which makes get and set functions identical.
No need for both.
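
Concretely (just a sketch, with a made-up name), the setter body could be
registered as both getter and setter:

static void aarch64_cpu_mte_accessor(Object *obj, Visitor *v, const char *name,
                                     void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* works both ways: output visitors read the field, input visitors write it */
    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
}

[...]
    object_property_add(obj, "mte", "OnOffAuto", aarch64_cpu_mte_accessor,
                        aarch64_cpu_mte_accessor, NULL, NULL);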

> +static inline bool arm_machine_has_tag_memory(void)
> +{
> +#ifndef CONFIG_USER_ONLY
> +    Object *obj = object_dynamic_cast(qdev_get_machine(), TYPE_VIRT_MACHINE);
> +
> +    /* so far, only the virt machine has support for tag memory */
> +    if (obj) {
> +        VirtMachineState *vms = VIRT_MACHINE(obj);

VIRT_MACHINE() does object_dynamic_cast_assert, and we've just done that.

As this is startup, it's not the speed that matters.  But it does look unfortunate.  Not 
for this patch set, but perhaps we ought to add TRY_OBJ_NAME to DECLARE_INSTANCE_CHECKER?
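
Something like the following, perhaps (purely hypothetical, neither the macro
shape nor the name exists today), generated next to the existing OBJ_NAME()
checker:

/* hypothetical OBJ_NAME##_TRY companion emitted by DECLARE_INSTANCE_CHECKER */
static inline G_GNUC_UNUSED InstanceType *
OBJ_NAME##_TRY(const void *obj)
{
    return (InstanceType *)object_dynamic_cast((Object *)obj, TYPENAME);
}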

> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
> +{
> +    bool enable_mte;
> +
> +    switch (cpu->prop_mte) {
> +    case ON_OFF_AUTO_OFF:
> +        enable_mte = false;
> +        break;
> +    case ON_OFF_AUTO_ON:
> +        if (tcg_enabled()) {
> +            if (cpu_isar_feature(aa64_mte, cpu)) {
> +                if (!arm_machine_has_tag_memory()) {
> +                    error_setg(errp, "mte=on requires tag memory");
> +                    return;
> +                }
> +            } else {
> +                error_setg(errp, "mte not supported by this CPU type");
> +                return;
> +            }
> +        }
> +        if (kvm_enabled() && !kvm_arm_mte_supported()) {
> +            error_setg(errp, "mte not supported by kvm");
> +            return;
> +        }
> +        enable_mte = true;
> +        break;

What's here is not wrong, but maybe better structured as

    enable_mte = true;
    if (qtest_enabled()) {
        break;
    }
    if (tcg_enabled()) {
        if (arm_machine_tag_mem) {
            break;
        }
        error;
        return;
    }
    if (kvm_enabled() && kvm_arm_mte_supported) {
        break;
    }
    error("mte not supported by %s", current_accel_type());
    return;

We only add the property for tcg via -cpu max, so the isar check is redundant.
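
Spelled out (sketch only, reusing the error messages from the patch and
current_accel_name() as used elsewhere in it), the ON case would then read:

    case ON_OFF_AUTO_ON:
        enable_mte = true;
        if (qtest_enabled()) {
            break;
        }
        if (tcg_enabled()) {
            if (arm_machine_has_tag_memory()) {
                break;
            }
            error_setg(errp, "mte=on requires tag memory");
            return;
        }
        if (kvm_enabled() && kvm_arm_mte_supported()) {
            break;
        }
        error_setg(errp, "mte not supported by %s", current_accel_name());
        return;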


r~
Eric Auger Feb. 6, 2023, 1:10 p.m. UTC | #2
Hi,

On 2/3/23 21:40, Richard Henderson wrote:
> On 2/3/23 03:44, Cornelia Huck wrote:
>> +static void aarch64_cpu_get_mte(Object *obj, Visitor *v, const char
>> *name,
>> +                                void *opaque, Error **errp)
>> +{
>> +    ARMCPU *cpu = ARM_CPU(obj);
>> +    OnOffAuto mte = cpu->prop_mte;
>> +
>> +    visit_type_OnOffAuto(v, name, &mte, errp);
>> +}
> 
> You don't need to copy to a local variable here.
> 
>> +
>> +static void aarch64_cpu_set_mte(Object *obj, Visitor *v, const char
>> *name,
>> +                                void *opaque, Error **errp)
>> +{
>> +    ARMCPU *cpu = ARM_CPU(obj);
>> +
>> +    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
>> +}
> 
> ... which makes get and set functions identical.
> No need for both.
This looks like a common pattern though. virt_get_acpi/set_acpi in
virt.c or pc_machine_get_vmport/set_vmport in i386/pc.c and many other
places (microvm ...). Do those other callers also need some simplifications?

Eric
> 
>> +static inline bool arm_machine_has_tag_memory(void)
>> +{
>> +#ifndef CONFIG_USER_ONLY
>> +    Object *obj = object_dynamic_cast(qdev_get_machine(),
>> TYPE_VIRT_MACHINE);
>> +
>> +    /* so far, only the virt machine has support for tag memory */
>> +    if (obj) {
>> +        VirtMachineState *vms = VIRT_MACHINE(obj);
> 
> VIRT_MACHINE() does object_dynamic_cast_assert, and we've just done that.
> 
> As this is startup, it's not the speed that matters.  But it does look
> unfortunate.  Not for this patch set, but perhaps we ought to add
> TRY_OBJ_NAME to DECLARE_INSTANCE_CHECKER?
> 
>> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
>> +{
>> +    bool enable_mte;
>> +
>> +    switch (cpu->prop_mte) {
>> +    case ON_OFF_AUTO_OFF:
>> +        enable_mte = false;
>> +        break;
>> +    case ON_OFF_AUTO_ON:
>> +        if (tcg_enabled()) {
>> +            if (cpu_isar_feature(aa64_mte, cpu)) {
>> +                if (!arm_machine_has_tag_memory()) {
>> +                    error_setg(errp, "mte=on requires tag memory");
>> +                    return;
>> +                }
>> +            } else {
>> +                error_setg(errp, "mte not supported by this CPU type");
>> +                return;
>> +            }
>> +        }
>> +        if (kvm_enabled() && !kvm_arm_mte_supported()) {
>> +            error_setg(errp, "mte not supported by kvm");
>> +            return;
>> +        }
>> +        enable_mte = true;
>> +        break;
> 
> What's here is not wrong, but maybe better structured as
> 
>     enable_mte = true;
>         if (qtest_enabled()) {
>             break;
>         }
>         if (tcg_enabled()) {
>             if (arm_machine_tag_mem) {
>                 break;
>             }
>             error;
>             return;
>         }
>         if (kvm_enabled() && kvm_arm_mte_supported) {
>             break;
>         }
>         error("mte not supported by %s", current_accel_type());
>         return;
> 
> We only add the property for tcg via -cpu max, so the isar check is
> redundant.
> 
> 
> r~
>
Eric Auger Feb. 6, 2023, 1:32 p.m. UTC | #3
Hi Connie,

On 2/3/23 14:44, Cornelia Huck wrote:
> Introduce a new cpu feature flag to control MTE support. To preserve
> backwards compatibility for tcg, MTE will continue to be enabled as
> long as tag memory has been provided.
> 
> If MTE has been enabled, we need to disable migration, as we do not
> yet have a way to migrate the tags as well. Therefore, MTE will stay
> off with KVM unless requested explicitly.
> 
> Signed-off-by: Cornelia Huck <cohuck@redhat.com>
> ---
>  docs/system/arm/cpu-features.rst |  21 ++++++
>  hw/arm/virt.c                    |   2 +-
>  target/arm/cpu.c                 |  18 ++---
>  target/arm/cpu.h                 |   1 +
>  target/arm/cpu64.c               | 114 +++++++++++++++++++++++++++++++
>  target/arm/internals.h           |   1 +
>  target/arm/kvm.c                 |  29 ++++++++
>  target/arm/kvm64.c               |   5 ++
>  target/arm/kvm_arm.h             |  19 ++++++
>  target/arm/monitor.c             |   1 +
>  10 files changed, 198 insertions(+), 13 deletions(-)
> 
> diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
> index 00c444042ff5..f8b0f339d32d 100644
> --- a/docs/system/arm/cpu-features.rst
> +++ b/docs/system/arm/cpu-features.rst
> @@ -443,3 +443,24 @@ As with ``sve-default-vector-length``, if the default length is larger
>  than the maximum vector length enabled, the actual vector length will
>  be reduced.  If this property is set to ``-1`` then the default vector
>  length is set to the maximum possible length.
> +
> +MTE CPU Property
> +================
> +
> +The ``mte`` property controls the Memory Tagging Extension. For TCG, it requires
> +presence of tag memory (which can be turned on for the ``virt`` machine via
> +``mte=on``). For KVM, it requires the ``KVM_CAP_ARM_MTE`` capability; until
> +proper migration support is implemented, enabling MTE will install a migration
> +blocker.
> +
> +If not specified explicitly via ``on`` or ``off``, MTE will be available
> +according to the following rules:
> +
> +* When TCG is used, MTE will be available if and only if tag memory is available;
> +  i.e. it preserves the behaviour prior to the introduction of the feature.
> +
> +* When KVM is used, MTE will default to off, so that migration will not
> +  unintentionally be blocked. This might change in a future QEMU version.
> +
> +* Other accelerators currently don't support MTE.
> +
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index bdc297a4570c..3aff0b8425f4 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -2136,7 +2136,7 @@ static void machvirt_init(MachineState *machine)
>  
>      if (vms->mte && (kvm_enabled() || hvf_enabled())) {
>          error_report("mach-virt: %s does not support providing "
> -                     "MTE to the guest CPU",
> +                     "emulated MTE to the guest CPU (tag memory not supported)",
>                       current_accel_name());
>          exit(1);
>      }
> diff --git a/target/arm/cpu.c b/target/arm/cpu.c
> index 5f63316dbf22..decab743d0d5 100644
> --- a/target/arm/cpu.c
> +++ b/target/arm/cpu.c
> @@ -1529,6 +1529,11 @@ void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
>              error_propagate(errp, local_err);
>              return;
>          }
> +        arm_cpu_mte_finalize(cpu, &local_err);
> +        if (local_err != NULL) {
> +            error_propagate(errp, local_err);
> +            return;
> +        }
>      }
>  #endif
>  
> @@ -1605,7 +1610,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
>          }
>          if (cpu->tag_memory) {
>              error_setg(errp,
> -                       "Cannot enable %s when guest CPUs has MTE enabled",
> +                       "Cannot enable %s when guest CPUs has tag memory enabled",
>                         current_accel_name());
>              return;
>          }
> @@ -1984,17 +1989,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
>                                         ID_PFR1, VIRTUALIZATION, 0);
>      }
>  
> -#ifndef CONFIG_USER_ONLY
> -    if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
> -        /*
> -         * Disable the MTE feature bits if we do not have tag-memory
> -         * provided by the machine.
> -         */
> -        cpu->isar.id_aa64pfr1 =
> -            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
> -    }
> -#endif
> -
>      if (tcg_enabled()) {
>          /*
>           * Don't report the Statistical Profiling Extension in the ID
> diff --git a/target/arm/cpu.h b/target/arm/cpu.h
> index 8cf70693be41..6cfaba30e30f 100644
> --- a/target/arm/cpu.h
> +++ b/target/arm/cpu.h
> @@ -1039,6 +1039,7 @@ struct ArchCPU {
>      bool prop_pauth;
>      bool prop_pauth_impdef;
>      bool prop_lpa2;
> +    OnOffAuto prop_mte;
>  
>      /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
>      uint32_t dcz_blocksize;
> diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
> index 0e021960fb5b..20c18b607b0b 100644
> --- a/target/arm/cpu64.c
> +++ b/target/arm/cpu64.c
> @@ -24,11 +24,16 @@
>  #include "qemu/module.h"
>  #include "sysemu/kvm.h"
>  #include "sysemu/hvf.h"
> +#include "sysemu/tcg.h"
>  #include "kvm_arm.h"
>  #include "hvf_arm.h"
>  #include "qapi/visitor.h"
>  #include "hw/qdev-properties.h"
>  #include "internals.h"
> +#include "qapi/qapi-visit-common.h"
> +#if !defined(CONFIG_USER_ONLY)
> +#include "hw/arm/virt.h"
> +#endif
>  
>  static void aarch64_a35_initfn(Object *obj)
>  {
> @@ -1096,6 +1101,113 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
>      cpu->isar.reset_pmcr_el0 = 0x410c3000;
>  }
>  
> +static void aarch64_cpu_get_mte(Object *obj, Visitor *v, const char *name,
> +                                void *opaque, Error **errp)
> +{
> +    ARMCPU *cpu = ARM_CPU(obj);
> +    OnOffAuto mte = cpu->prop_mte;
> +
> +    visit_type_OnOffAuto(v, name, &mte, errp);
> +}
> +
> +static void aarch64_cpu_set_mte(Object *obj, Visitor *v, const char *name,
> +                                void *opaque, Error **errp)
> +{
> +    ARMCPU *cpu = ARM_CPU(obj);
> +
> +    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
> +}
> +
> +static void aarch64_add_mte_properties(Object *obj)
> +{
> +    /*
> +     * For tcg, "AUTO" means turn on mte if tag memory has been provided, and
> +     * turn it off (without error) if not.
> +     * For kvm, "AUTO" currently means mte off, as migration is not supported
> +     * yet.
> +     * For all others, "AUTO" means mte off.
> +     */
> +    object_property_add(obj, "mte", "OnOffAuto", aarch64_cpu_get_mte,
> +                        aarch64_cpu_set_mte, NULL, NULL);
> +}
> +
> +static inline bool arm_machine_has_tag_memory(void)
> +{
> +#ifndef CONFIG_USER_ONLY
> +    Object *obj = object_dynamic_cast(qdev_get_machine(), TYPE_VIRT_MACHINE);
> +
> +    /* so far, only the virt machine has support for tag memory */
> +    if (obj) {
> +        VirtMachineState *vms = VIRT_MACHINE(obj);
> +
> +        return vms->mte;
> +    }
> +    return false;
> +#endif
> +    return true;
> +}
> +
> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
> +{
> +    bool enable_mte;
> +
> +    switch (cpu->prop_mte) {
> +    case ON_OFF_AUTO_OFF:
> +        enable_mte = false;
> +        break;
> +    case ON_OFF_AUTO_ON:
> +        if (tcg_enabled()) {
> +            if (cpu_isar_feature(aa64_mte, cpu)) {
> +                if (!arm_machine_has_tag_memory()) {
> +                    error_setg(errp, "mte=on requires tag memory");
> +                    return;
> +                }
> +            } else {
> +                error_setg(errp, "mte not supported by this CPU type");
> +                return;
> +            }
> +        }
> +        if (kvm_enabled() && !kvm_arm_mte_supported()) {
> +            error_setg(errp, "mte not supported by kvm");
> +            return;
> +        }
> +        enable_mte = true;
> +        break;
> +    default: /* AUTO */
> +        if (tcg_enabled()) {
> +            if (cpu_isar_feature(aa64_mte, cpu)) {
> +                /*
> +                 * Tie mte enablement to presence of tag memory, in order to
> +                 * preserve pre-existing behaviour.
> +                 */
> +                enable_mte = arm_machine_has_tag_memory();
> +            } else {
> +                enable_mte = false;
> +            }
> +            break;
> +        } else {
> +            /*
> +             * This cannot yet be
> +             * enable_mte = kvm_arm_mte_supported();
> +             * as we don't support migration yet.
> +             */
> +            enable_mte = false;
> +        }
> +    }
> +
> +    if (!enable_mte) {
> +        /* Disable MTE feature bits. */
> +        cpu->isar.id_aa64pfr1 =
> +            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
> +        return;
> +    }
> +
> +    /* accelerator-specific enablement */
> +    if (kvm_enabled()) {
> +        kvm_arm_enable_mte(errp);
> +    }
> +}
> +
>  static void aarch64_host_initfn(Object *obj)
>  {
>  #if defined(CONFIG_KVM)
> @@ -1104,6 +1216,7 @@ static void aarch64_host_initfn(Object *obj)
>      if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
>          aarch64_add_sve_properties(obj);
>          aarch64_add_pauth_properties(obj);
> +        aarch64_add_mte_properties(obj);
>      }
>  #elif defined(CONFIG_HVF)
>      ARMCPU *cpu = ARM_CPU(obj);
> @@ -1300,6 +1413,7 @@ static void aarch64_max_initfn(Object *obj)
>      object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
>                          cpu_max_set_sve_max_vq, NULL, NULL);
>      qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
> +    aarch64_add_mte_properties(obj);
>  }
>  
>  static const ARMCPUInfo aarch64_cpus[] = {
> diff --git a/target/arm/internals.h b/target/arm/internals.h
> index d9555309df0f..4dc6d19be42b 100644
> --- a/target/arm/internals.h
> +++ b/target/arm/internals.h
> @@ -1348,6 +1348,7 @@ void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
>  void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
>  void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
>  void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp);
>  #endif
>  
>  #ifdef CONFIG_USER_ONLY
> diff --git a/target/arm/kvm.c b/target/arm/kvm.c
> index f022c644d2ff..e6f2cb807bde 100644
> --- a/target/arm/kvm.c
> +++ b/target/arm/kvm.c
> @@ -31,6 +31,7 @@
>  #include "hw/boards.h"
>  #include "hw/irq.h"
>  #include "qemu/log.h"
> +#include "migration/blocker.h"
>  
>  const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
>      KVM_CAP_LAST_INFO
> @@ -1062,3 +1063,31 @@ bool kvm_arch_cpu_check_are_resettable(void)
>  void kvm_arch_accel_class_init(ObjectClass *oc)
>  {
>  }
> +
> +void kvm_arm_enable_mte(Error **errp)
> +{
> +    static bool tried_to_enable = false;
> +    Error *mte_migration_blocker = NULL;
can't you make the mte_migration_blocker static instead?

Thanks

Eric
> +    int ret;
> +
> +    if (tried_to_enable) {
> +        /*
> +         * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
> +         * sense), and we only want a single migration blocker as well.
> +         */
> +        return;
> +    }
> +    tried_to_enable = true;
> +
> +    if ((ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0))) {
> +        error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
> +        return;
> +    }
> +
> +    /* TODO: add proper migration support with MTE enabled */
> +    error_setg(&mte_migration_blocker,
> +               "Live migration disabled due to MTE enabled");
> +    if (migrate_add_blocker(mte_migration_blocker, errp)) {
> +        error_free(mte_migration_blocker);
> +    }
> +}
> diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
> index 1197253d12f7..b777bd0a11d2 100644
> --- a/target/arm/kvm64.c
> +++ b/target/arm/kvm64.c
> @@ -764,6 +764,11 @@ bool kvm_arm_steal_time_supported(void)
>      return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
>  }
>  
> +bool kvm_arm_mte_supported(void)
> +{
> +    return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
> +}
> +
>  QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
>  
>  uint32_t kvm_arm_sve_get_vls(CPUState *cs)
> diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
> index 99017b635ce4..9f88b0722293 100644
> --- a/target/arm/kvm_arm.h
> +++ b/target/arm/kvm_arm.h
> @@ -305,6 +305,13 @@ bool kvm_arm_pmu_supported(void);
>   */
>  bool kvm_arm_sve_supported(void);
>  
> +/**
> + * kvm_arm_mte_supported:
> + *
> + * Returns: true if KVM can enable MTE, and false otherwise.
> + */
> +bool kvm_arm_mte_supported(void);
> +
>  /**
>   * kvm_arm_get_max_vm_ipa_size:
>   * @ms: Machine state handle
> @@ -369,6 +376,8 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
>  
>  int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
>  
> +void kvm_arm_enable_mte(Error **errp);
> +
>  #else
>  
>  /*
> @@ -395,6 +404,11 @@ static inline bool kvm_arm_steal_time_supported(void)
>      return false;
>  }
>  
> +static inline bool kvm_arm_mte_supported(void)
> +{
> +    return false;
> +}
> +
>  /*
>   * These functions should never actually be called without KVM support.
>   */
> @@ -443,6 +457,11 @@ static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs)
>      g_assert_not_reached();
>  }
>  
> +static inline void kvm_arm_enable_mte(Error **errp)
> +{
> +    g_assert_not_reached();
> +}
> +
>  #endif
>  
>  static inline const char *gic_class_name(void)
> diff --git a/target/arm/monitor.c b/target/arm/monitor.c
> index ecdd5ee81742..c419c81612ed 100644
> --- a/target/arm/monitor.c
> +++ b/target/arm/monitor.c
> @@ -96,6 +96,7 @@ static const char *cpu_model_advertised_features[] = {
>      "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
>      "kvm-no-adjvtime", "kvm-steal-time",
>      "pauth", "pauth-impdef",
> +    "mte",
>      NULL
>  };
>
Cornelia Huck Feb. 6, 2023, 4:15 p.m. UTC | #4
On Mon, Feb 06 2023, Eric Auger <eauger@redhat.com> wrote:

> Hi,
>
> On 2/3/23 21:40, Richard Henderson wrote:
>> On 2/3/23 03:44, Cornelia Huck wrote:
>>> +static void aarch64_cpu_get_mte(Object *obj, Visitor *v, const char
>>> *name,
>>> +                                void *opaque, Error **errp)
>>> +{
>>> +    ARMCPU *cpu = ARM_CPU(obj);
>>> +    OnOffAuto mte = cpu->prop_mte;
>>> +
>>> +    visit_type_OnOffAuto(v, name, &mte, errp);
>>> +}
>> 
>> You don't need to copy to a local variable here.
>> 
>>> +
>>> +static void aarch64_cpu_set_mte(Object *obj, Visitor *v, const char
>>> *name,
>>> +                                void *opaque, Error **errp)
>>> +{
>>> +    ARMCPU *cpu = ARM_CPU(obj);
>>> +
>>> +    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
>>> +}
>> 
>> ... which makes get and set functions identical.
>> No need for both.
> This looks like a common pattern though. virt_get_acpi/set_acpi in
> virt.c or pc_machine_get_vmport/set_vmport in i386/pc.c and many other
> places (microvm ...). Do those other callers also need some simplifications?

Indeed, I'm pretty sure that I copied + adapted it from somewhere :)

Should we clean up all instances in one go instead? (Probably on top of
this series, in order to minimize conflicts with other changes.)
Cornelia Huck Feb. 6, 2023, 4:41 p.m. UTC | #5
On Fri, Feb 03 2023, Richard Henderson <richard.henderson@linaro.org> wrote:

> On 2/3/23 03:44, Cornelia Huck wrote:
>> +static inline bool arm_machine_has_tag_memory(void)
>> +{
>> +#ifndef CONFIG_USER_ONLY
>> +    Object *obj = object_dynamic_cast(qdev_get_machine(), TYPE_VIRT_MACHINE);
>> +
>> +    /* so far, only the virt machine has support for tag memory */
>> +    if (obj) {
>> +        VirtMachineState *vms = VIRT_MACHINE(obj);
>
> VIRT_MACHINE() does object_dynamic_cast_assert, and we've just done that.
>
> As this is startup, it's not the speed that matters.  But it does look unfortunate.  Not 
> for this patch set, but perhaps we ought to add TRY_OBJ_NAME to DECLARE_INSTANCE_CHECKER?

Instead of the pattern above, we could also do

VirtMachineState *vms = (VirtMachineState *) object_dynamic_cast(...);
if (vms) {
(...)
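
For the !CONFIG_USER_ONLY branch, spelled out as a sketch:

VirtMachineState *vms =
    (VirtMachineState *)object_dynamic_cast(qdev_get_machine(),
                                            TYPE_VIRT_MACHINE);

/* so far, only the virt machine has support for tag memory */
if (vms) {
    return vms->mte;
}
return false;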


>
>> +void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
>> +{
>> +    bool enable_mte;
>> +
>> +    switch (cpu->prop_mte) {
>> +    case ON_OFF_AUTO_OFF:
>> +        enable_mte = false;
>> +        break;
>> +    case ON_OFF_AUTO_ON:
>> +        if (tcg_enabled()) {
>> +            if (cpu_isar_feature(aa64_mte, cpu)) {
>> +                if (!arm_machine_has_tag_memory()) {
>> +                    error_setg(errp, "mte=on requires tag memory");
>> +                    return;
>> +                }
>> +            } else {
>> +                error_setg(errp, "mte not supported by this CPU type");
>> +                return;
>> +            }
>> +        }
>> +        if (kvm_enabled() && !kvm_arm_mte_supported()) {
>> +            error_setg(errp, "mte not supported by kvm");
>> +            return;
>> +        }
>> +        enable_mte = true;
>> +        break;
>
> What's here is not wrong, but maybe better structured as
>
> 	enable_mte = true;
>          if (qtest_enabled()) {
>              break;
>          }
>          if (tcg_enabled()) {
>              if (arm_machine_tag_mem) {
>                  break;
>              }
>              error;
>              return;
>          }
>          if (kvm_enabled() && kvm_arm_mte_supported) {
>              break;
>          }
>          error("mte not supported by %s", current_accel_type());
>          return;

That's indeed better, as we also see what's going on for the different
accelerators.

> We only add the property for tcg via -cpu max, so the isar check is redundant.
Richard Henderson Feb. 6, 2023, 6:27 p.m. UTC | #6
On 2/6/23 03:32, Eric Auger wrote:
>> +void kvm_arm_enable_mte(Error **errp)
>> +{
>> +    static bool tried_to_enable = false;
>> +    Error *mte_migration_blocker = NULL;
> can't you make the mte_migration_blocker static instead?
> 
>> +    int ret;
>> +
>> +    if (tried_to_enable) {
>> +        /*
>> +         * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
>> +         * sense), and we only want a single migration blocker as well.
>> +         */
>> +        return;
>> +    }
>> +    tried_to_enable = true;
>> +
>> +    if ((ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0))) {
>> +        error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
>> +        return;
>> +    }
>> +
>> +    /* TODO: add proper migration support with MTE enabled */
>> +    error_setg(&mte_migration_blocker,
>> +               "Live migration disabled due to MTE enabled");

Making the blocker static wouldn't stop multiple errors from kvm_vm_enable_cap.


r~
Eric Auger Feb. 15, 2023, 10:36 a.m. UTC | #7
Hi Richard,
On 2/6/23 19:27, Richard Henderson wrote:
> On 2/6/23 03:32, Eric Auger wrote:
>>> +void kvm_arm_enable_mte(Error **errp)
>>> +{
>>> +    static bool tried_to_enable = false;
>>> +    Error *mte_migration_blocker = NULL;
>> can't you make the mte_migration_blocker static instead?
>>
>>> +    int ret;
>>> +
>>> +    if (tried_to_enable) {
>>> +        /*
>>> +         * MTE on KVM is enabled on a per-VM basis (and retrying
>>> doesn't make
>>> +         * sense), and we only want a single migration blocker as well.
>>> +         */
>>> +        return;
>>> +    }
>>> +    tried_to_enable = true;
>>> +
>>> +    if ((ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0))) {
>>> +        error_setg_errno(errp, -ret, "Failed to enable
>>> KVM_CAP_ARM_MTE");
>>> +        return;
>>> +    }
>>> +
>>> +    /* TODO: add proper migration support with MTE enabled */
>>> +    error_setg(&mte_migration_blocker,
>>> +               "Live migration disabled due to MTE enabled");
> 
> Making the blocker static wouldn't stop multiple errors from
> kvm_vm_enable_cap.
Sorry, I don't get what you mean. Instead of checking tried_to_enable, why
can't we check !mte_migration_blocker?

Eric
> 
> 
> r~
>
Cornelia Huck Feb. 27, 2023, 3:11 p.m. UTC | #8
On Wed, Feb 15 2023, Eric Auger <eauger@redhat.com> wrote:

> Hi Richard,
> On 2/6/23 19:27, Richard Henderson wrote:
>> On 2/6/23 03:32, Eric Auger wrote:
>>>> +void kvm_arm_enable_mte(Error **errp)
>>>> +{
>>>> +    static bool tried_to_enable = false;
>>>> +    Error *mte_migration_blocker = NULL;
>>> can't you make the mte_migration_blocker static instead?
>>>
>>>> +    int ret;
>>>> +
>>>> +    if (tried_to_enable) {
>>>> +        /*
>>>> +         * MTE on KVM is enabled on a per-VM basis (and retrying
>>>> doesn't make
>>>> +         * sense), and we only want a single migration blocker as well.
>>>> +         */
>>>> +        return;
>>>> +    }
>>>> +    tried_to_enable = true;
>>>> +
>>>> +    if ((ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0))) {
>>>> +        error_setg_errno(errp, -ret, "Failed to enable
>>>> KVM_CAP_ARM_MTE");
>>>> +        return;
>>>> +    }
>>>> +
>>>> +    /* TODO: add proper migration support with MTE enabled */
>>>> +    error_setg(&mte_migration_blocker,
>>>> +               "Live migration disabled due to MTE enabled");
>> 
>> Making the blocker static wouldn't stop multiple errors from
>> kvm_vm_enable_cap.
> Sorry I don't get what you mean. instead of checking tried_to_enable why
> can't we check !mte_migration_blocker?

[missed this one]

Do you mean

if (mte_migration_blocker) {
    return;
}

error_setg(&mte_migration_blocker, ...);

if ((ret = kvm_vm_enable_cap(...))) {
    return;
}

if (migrate_add_blocker(...)) {
    error_free(mte_migration_blocker);
    // is mte_migration_blocker guaranteed != NULL here?
}
Cornelia Huck Feb. 27, 2023, 3:12 p.m. UTC | #9
On Mon, Feb 06 2023, Cornelia Huck <cohuck@redhat.com> wrote:

> On Mon, Feb 06 2023, Eric Auger <eauger@redhat.com> wrote:
>
>> Hi,
>>
>> On 2/3/23 21:40, Richard Henderson wrote:
>>> On 2/3/23 03:44, Cornelia Huck wrote:
>>>> +static void aarch64_cpu_get_mte(Object *obj, Visitor *v, const char
>>>> *name,
>>>> +                                void *opaque, Error **errp)
>>>> +{
>>>> +    ARMCPU *cpu = ARM_CPU(obj);
>>>> +    OnOffAuto mte = cpu->prop_mte;
>>>> +
>>>> +    visit_type_OnOffAuto(v, name, &mte, errp);
>>>> +}
>>> 
>>> You don't need to copy to a local variable here.
>>> 
>>>> +
>>>> +static void aarch64_cpu_set_mte(Object *obj, Visitor *v, const char
>>>> *name,
>>>> +                                void *opaque, Error **errp)
>>>> +{
>>>> +    ARMCPU *cpu = ARM_CPU(obj);
>>>> +
>>>> +    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
>>>> +}
>>> 
>>> ... which makes get and set functions identical.
>>> No need for both.
>> This looks like a common pattern though. virt_get_acpi/set_acpi in
>> virt.c or pc_machine_get_vmport/set_vmport in i386/pc.c and many other
>> places (microvm ...). Do those other callers also need some simplifications?
>
> Indeed, I'm pretty sure that I copied + adapted it from somewhere :)
>
> Should we clean up all instances in one go instead? (Probably on top of
> this series, in order to minimize conflicts with other changes.)

Any objections to going with the code above and just doing a general
cleanup on top?

Patch

diff --git a/docs/system/arm/cpu-features.rst b/docs/system/arm/cpu-features.rst
index 00c444042ff5..f8b0f339d32d 100644
--- a/docs/system/arm/cpu-features.rst
+++ b/docs/system/arm/cpu-features.rst
@@ -443,3 +443,24 @@  As with ``sve-default-vector-length``, if the default length is larger
 than the maximum vector length enabled, the actual vector length will
 be reduced.  If this property is set to ``-1`` then the default vector
 length is set to the maximum possible length.
+
+MTE CPU Property
+================
+
+The ``mte`` property controls the Memory Tagging Extension. For TCG, it requires
+presence of tag memory (which can be turned on for the ``virt`` machine via
+``mte=on``). For KVM, it requires the ``KVM_CAP_ARM_MTE`` capability; until
+proper migration support is implemented, enabling MTE will install a migration
+blocker.
+
+If not specified explicitly via ``on`` or ``off``, MTE will be available
+according to the following rules:
+
+* When TCG is used, MTE will be available if and only if tag memory is available;
+  i.e. it preserves the behaviour prior to the introduction of the feature.
+
+* When KVM is used, MTE will default to off, so that migration will not
+  unintentionally be blocked. This might change in a future QEMU version.
+
+* Other accelerators currently don't support MTE.
+
diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index bdc297a4570c..3aff0b8425f4 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -2136,7 +2136,7 @@  static void machvirt_init(MachineState *machine)
 
     if (vms->mte && (kvm_enabled() || hvf_enabled())) {
         error_report("mach-virt: %s does not support providing "
-                     "MTE to the guest CPU",
+                     "emulated MTE to the guest CPU (tag memory not supported)",
                      current_accel_name());
         exit(1);
     }
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 5f63316dbf22..decab743d0d5 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1529,6 +1529,11 @@  void arm_cpu_finalize_features(ARMCPU *cpu, Error **errp)
             error_propagate(errp, local_err);
             return;
         }
+        arm_cpu_mte_finalize(cpu, &local_err);
+        if (local_err != NULL) {
+            error_propagate(errp, local_err);
+            return;
+        }
     }
 #endif
 
@@ -1605,7 +1610,7 @@  static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         }
         if (cpu->tag_memory) {
             error_setg(errp,
-                       "Cannot enable %s when guest CPUs has MTE enabled",
+                       "Cannot enable %s when guest CPUs has tag memory enabled",
                        current_accel_name());
             return;
         }
@@ -1984,17 +1989,6 @@  static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
                                        ID_PFR1, VIRTUALIZATION, 0);
     }
 
-#ifndef CONFIG_USER_ONLY
-    if (cpu->tag_memory == NULL && cpu_isar_feature(aa64_mte, cpu)) {
-        /*
-         * Disable the MTE feature bits if we do not have tag-memory
-         * provided by the machine.
-         */
-        cpu->isar.id_aa64pfr1 =
-            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
-    }
-#endif
-
     if (tcg_enabled()) {
         /*
          * Don't report the Statistical Profiling Extension in the ID
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 8cf70693be41..6cfaba30e30f 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1039,6 +1039,7 @@  struct ArchCPU {
     bool prop_pauth;
     bool prop_pauth_impdef;
     bool prop_lpa2;
+    OnOffAuto prop_mte;
 
     /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
     uint32_t dcz_blocksize;
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 0e021960fb5b..20c18b607b0b 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -24,11 +24,16 @@ 
 #include "qemu/module.h"
 #include "sysemu/kvm.h"
 #include "sysemu/hvf.h"
+#include "sysemu/tcg.h"
 #include "kvm_arm.h"
 #include "hvf_arm.h"
 #include "qapi/visitor.h"
 #include "hw/qdev-properties.h"
 #include "internals.h"
+#include "qapi/qapi-visit-common.h"
+#if !defined(CONFIG_USER_ONLY)
+#include "hw/arm/virt.h"
+#endif
 
 static void aarch64_a35_initfn(Object *obj)
 {
@@ -1096,6 +1101,113 @@  static void aarch64_neoverse_n1_initfn(Object *obj)
     cpu->isar.reset_pmcr_el0 = 0x410c3000;
 }
 
+static void aarch64_cpu_get_mte(Object *obj, Visitor *v, const char *name,
+                                void *opaque, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+    OnOffAuto mte = cpu->prop_mte;
+
+    visit_type_OnOffAuto(v, name, &mte, errp);
+}
+
+static void aarch64_cpu_set_mte(Object *obj, Visitor *v, const char *name,
+                                void *opaque, Error **errp)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    visit_type_OnOffAuto(v, name, &cpu->prop_mte, errp);
+}
+
+static void aarch64_add_mte_properties(Object *obj)
+{
+    /*
+     * For tcg, "AUTO" means turn on mte if tag memory has been provided, and
+     * turn it off (without error) if not.
+     * For kvm, "AUTO" currently means mte off, as migration is not supported
+     * yet.
+     * For all others, "AUTO" means mte off.
+     */
+    object_property_add(obj, "mte", "OnOffAuto", aarch64_cpu_get_mte,
+                        aarch64_cpu_set_mte, NULL, NULL);
+}
+
+static inline bool arm_machine_has_tag_memory(void)
+{
+#ifndef CONFIG_USER_ONLY
+    Object *obj = object_dynamic_cast(qdev_get_machine(), TYPE_VIRT_MACHINE);
+
+    /* so far, only the virt machine has support for tag memory */
+    if (obj) {
+        VirtMachineState *vms = VIRT_MACHINE(obj);
+
+        return vms->mte;
+    }
+    return false;
+#endif
+    return true;
+}
+
+void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp)
+{
+    bool enable_mte;
+
+    switch (cpu->prop_mte) {
+    case ON_OFF_AUTO_OFF:
+        enable_mte = false;
+        break;
+    case ON_OFF_AUTO_ON:
+        if (tcg_enabled()) {
+            if (cpu_isar_feature(aa64_mte, cpu)) {
+                if (!arm_machine_has_tag_memory()) {
+                    error_setg(errp, "mte=on requires tag memory");
+                    return;
+                }
+            } else {
+                error_setg(errp, "mte not supported by this CPU type");
+                return;
+            }
+        }
+        if (kvm_enabled() && !kvm_arm_mte_supported()) {
+            error_setg(errp, "mte not supported by kvm");
+            return;
+        }
+        enable_mte = true;
+        break;
+    default: /* AUTO */
+        if (tcg_enabled()) {
+            if (cpu_isar_feature(aa64_mte, cpu)) {
+                /*
+                 * Tie mte enablement to presence of tag memory, in order to
+                 * preserve pre-existing behaviour.
+                 */
+                enable_mte = arm_machine_has_tag_memory();
+            } else {
+                enable_mte = false;
+            }
+            break;
+        } else {
+            /*
+             * This cannot yet be
+             * enable_mte = kvm_arm_mte_supported();
+             * as we don't support migration yet.
+             */
+            enable_mte = false;
+        }
+    }
+
+    if (!enable_mte) {
+        /* Disable MTE feature bits. */
+        cpu->isar.id_aa64pfr1 =
+            FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0);
+        return;
+    }
+
+    /* accelerator-specific enablement */
+    if (kvm_enabled()) {
+        kvm_arm_enable_mte(errp);
+    }
+}
+
 static void aarch64_host_initfn(Object *obj)
 {
 #if defined(CONFIG_KVM)
@@ -1104,6 +1216,7 @@  static void aarch64_host_initfn(Object *obj)
     if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
         aarch64_add_sve_properties(obj);
         aarch64_add_pauth_properties(obj);
+        aarch64_add_mte_properties(obj);
     }
 #elif defined(CONFIG_HVF)
     ARMCPU *cpu = ARM_CPU(obj);
@@ -1300,6 +1413,7 @@  static void aarch64_max_initfn(Object *obj)
     object_property_add(obj, "sve-max-vq", "uint32", cpu_max_get_sve_max_vq,
                         cpu_max_set_sve_max_vq, NULL, NULL);
     qdev_property_add_static(DEVICE(obj), &arm_cpu_lpa2_property);
+    aarch64_add_mte_properties(obj);
 }
 
 static const ARMCPUInfo aarch64_cpus[] = {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index d9555309df0f..4dc6d19be42b 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1348,6 +1348,7 @@  void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_sme_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp);
 void arm_cpu_lpa2_finalize(ARMCPU *cpu, Error **errp);
+void arm_cpu_mte_finalize(ARMCPU *cpu, Error **errp);
 #endif
 
 #ifdef CONFIG_USER_ONLY
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index f022c644d2ff..e6f2cb807bde 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -31,6 +31,7 @@ 
 #include "hw/boards.h"
 #include "hw/irq.h"
 #include "qemu/log.h"
+#include "migration/blocker.h"
 
 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
     KVM_CAP_LAST_INFO
@@ -1062,3 +1063,31 @@  bool kvm_arch_cpu_check_are_resettable(void)
 void kvm_arch_accel_class_init(ObjectClass *oc)
 {
 }
+
+void kvm_arm_enable_mte(Error **errp)
+{
+    static bool tried_to_enable = false;
+    Error *mte_migration_blocker = NULL;
+    int ret;
+
+    if (tried_to_enable) {
+        /*
+         * MTE on KVM is enabled on a per-VM basis (and retrying doesn't make
+         * sense), and we only want a single migration blocker as well.
+         */
+        return;
+    }
+    tried_to_enable = true;
+
+    if ((ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_MTE, 0))) {
+        error_setg_errno(errp, -ret, "Failed to enable KVM_CAP_ARM_MTE");
+        return;
+    }
+
+    /* TODO: add proper migration support with MTE enabled */
+    error_setg(&mte_migration_blocker,
+               "Live migration disabled due to MTE enabled");
+    if (migrate_add_blocker(mte_migration_blocker, errp)) {
+        error_free(mte_migration_blocker);
+    }
+}
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 1197253d12f7..b777bd0a11d2 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -764,6 +764,11 @@  bool kvm_arm_steal_time_supported(void)
     return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
 }
 
+bool kvm_arm_mte_supported(void)
+{
+    return kvm_check_extension(kvm_state, KVM_CAP_ARM_MTE);
+}
+
 QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
 
 uint32_t kvm_arm_sve_get_vls(CPUState *cs)
diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h
index 99017b635ce4..9f88b0722293 100644
--- a/target/arm/kvm_arm.h
+++ b/target/arm/kvm_arm.h
@@ -305,6 +305,13 @@  bool kvm_arm_pmu_supported(void);
  */
 bool kvm_arm_sve_supported(void);
 
+/**
+ * kvm_arm_mte_supported:
+ *
+ * Returns: true if KVM can enable MTE, and false otherwise.
+ */
+bool kvm_arm_mte_supported(void);
+
 /**
  * kvm_arm_get_max_vm_ipa_size:
  * @ms: Machine state handle
@@ -369,6 +376,8 @@  void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
 
 int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
 
+void kvm_arm_enable_mte(Error **errp);
+
 #else
 
 /*
@@ -395,6 +404,11 @@  static inline bool kvm_arm_steal_time_supported(void)
     return false;
 }
 
+static inline bool kvm_arm_mte_supported(void)
+{
+    return false;
+}
+
 /*
  * These functions should never actually be called without KVM support.
  */
@@ -443,6 +457,11 @@  static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs)
     g_assert_not_reached();
 }
 
+static inline void kvm_arm_enable_mte(Error **errp)
+{
+    g_assert_not_reached();
+}
+
 #endif
 
 static inline const char *gic_class_name(void)
diff --git a/target/arm/monitor.c b/target/arm/monitor.c
index ecdd5ee81742..c419c81612ed 100644
--- a/target/arm/monitor.c
+++ b/target/arm/monitor.c
@@ -96,6 +96,7 @@  static const char *cpu_model_advertised_features[] = {
     "sve1408", "sve1536", "sve1664", "sve1792", "sve1920", "sve2048",
     "kvm-no-adjvtime", "kvm-steal-time",
     "pauth", "pauth-impdef",
+    "mte",
     NULL
 };