[6/6] xen/x86: Add topology generator

Message ID 20240109153834.4192-7-alejandro.vallejo@cloud.com (mailing list archive)
State New, archived
Series x86: Expose consistent topology to guests

Commit Message

Alejandro Vallejo Jan. 9, 2024, 3:38 p.m. UTC
This allows the toolstack to synthesise sensible topologies for guests. In
particular, this patch causes x2APIC IDs to be packed according to the
topology now exposed to the guests on leaf 0xb.

Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
---
 tools/include/xenguest.h        |  15 ++++
 tools/libs/guest/xg_cpuid_x86.c | 144 ++++++++++++++++++++------------
 xen/arch/x86/cpu-policy.c       |   6 +-
 3 files changed, 107 insertions(+), 58 deletions(-)

Comments

Alejandro Vallejo Jan. 10, 2024, 2:16 p.m. UTC | #1
Review-to-self after running in GitLab:

On 09/01/2024 15:38, Alejandro Vallejo wrote:
> +    p->basic.lppp = 0xff;
> +    if ( threads_per_pkg < 0xff )
> +        p->basic.lppp = threads_per_pkg;
> +
> +    switch ( p->x86_vendor )
> +    {
> +        case X86_VENDOR_INTEL:
> +            struct cpuid_cache_leaf *sl = p->cache.subleaf;
> +            for ( size_t i = 0; sl->type &&
> +                                i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
> +            {
> +                sl->cores_per_package = cores_per_pkg - 1;
> +                sl->threads_per_cache = threads_per_core - 1;
> +                if ( sl->type == 3 /* unified cache */ )
> +                    sl->threads_per_cache = threads_per_pkg - 1;
> +            }
> +            break;
> +
> +        case X86_VENDOR_AMD:
> +        case X86_VENDOR_HYGON:

Missing braces around the INTEL block due to the variable declared
there. I'll include that in v2 after the rest of the review comments
come through.
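
For reference, the braced shape I have in mind (a sketch of the likely
v2, not the actual v2 code):

    switch ( p->x86_vendor )
    {
    case X86_VENDOR_INTEL:
    {
        /* Braces open a scope for the declaration below. */
        struct cpuid_cache_leaf *sl = p->cache.subleaf;

        for ( size_t i = 0; sl->type &&
                            i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
        {
            sl->cores_per_package = cores_per_pkg - 1;
            sl->threads_per_cache = threads_per_core - 1;
            if ( sl->type == 3 /* unified cache */ )
                sl->threads_per_cache = threads_per_pkg - 1;
        }
        break;
    }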

Cheers,
Alejandro
Jan Beulich Jan. 10, 2024, 2:28 p.m. UTC | #2
On 10.01.2024 15:16, Alejandro Vallejo wrote:
> Review-to-self after running in GitLab:
> 
> On 09/01/2024 15:38, Alejandro Vallejo wrote:
>> +    p->basic.lppp = 0xff;
>> +    if ( threads_per_pkg < 0xff )
>> +        p->basic.lppp = threads_per_pkg;
>> +
>> +    switch ( p->x86_vendor )
>> +    {
>> +        case X86_VENDOR_INTEL:
>> +            struct cpuid_cache_leaf *sl = p->cache.subleaf;
>> +            for ( size_t i = 0; sl->type &&
>> +                                i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
>> +            {
>> +                sl->cores_per_package = cores_per_pkg - 1;
>> +                sl->threads_per_cache = threads_per_core - 1;
>> +                if ( sl->type == 3 /* unified cache */ )
>> +                    sl->threads_per_cache = threads_per_pkg - 1;
>> +            }
>> +            break;
>> +
>> +        case X86_VENDOR_AMD:
>> +        case X86_VENDOR_HYGON:
> 
> Missing braces around the INTEL block due to the variable declared
> there. I'll include that in v2 after the rest of the review comments
> come through.

And (just looking at the fragment above) too deep indentation as well.

Jan
Roger Pau Monné March 20, 2024, 10:29 a.m. UTC | #3
On Tue, Jan 09, 2024 at 03:38:34PM +0000, Alejandro Vallejo wrote:
> This allows the toolstack to synthesise sensible topologies for guests. In
> particular, this patch causes x2APIC IDs to be packed according to the
> topology now exposed to the guests on leaf 0xb.
> 
> Signed-off-by: Alejandro Vallejo <alejandro.vallejo@cloud.com>
> ---
>  tools/include/xenguest.h        |  15 ++++
>  tools/libs/guest/xg_cpuid_x86.c | 144 ++++++++++++++++++++------------
>  xen/arch/x86/cpu-policy.c       |   6 +-
>  3 files changed, 107 insertions(+), 58 deletions(-)
> 
> diff --git a/tools/include/xenguest.h b/tools/include/xenguest.h
> index 4e9078fdee..f0043c559b 100644
> --- a/tools/include/xenguest.h
> +++ b/tools/include/xenguest.h
> @@ -843,5 +843,20 @@ enum xc_static_cpu_featuremask {
>      XC_FEATUREMASK_HVM_HAP_DEF,
>  };
>  const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
> +
> +/**
> + * Synthesise topology information in `p` given high-level constraints
> + *
> + * Topology is given in various fields across several leaves, some of
> + * which are vendor-specific. This function uses the policy itself to
> + * derive such leaves from threads/core and cores/package.
> + *
> + * @param p                   CPU policy of the domain.
> + * @param threads_per_core    threads/core. Doesn't need to be a power of 2.
> + * @param cores_per_pkg       cores/package. Doesn't need to be a power of 2.
> + */
> +void xc_topo_from_parts(struct cpu_policy *p,
> +                        uint32_t threads_per_core, uint32_t cores_per_pkg);

I think you can use plain unsigned int here.

> +
>  #endif /* __i386__ || __x86_64__ */
>  #endif /* XENGUEST_H */
> diff --git a/tools/libs/guest/xg_cpuid_x86.c b/tools/libs/guest/xg_cpuid_x86.c
> index 4453178100..7a622721be 100644
> --- a/tools/libs/guest/xg_cpuid_x86.c
> +++ b/tools/libs/guest/xg_cpuid_x86.c
> @@ -584,7 +584,7 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
>      bool hvm;
>      xc_domaininfo_t di;
>      struct xc_cpu_policy *p = xc_cpu_policy_init();
> -    unsigned int i, nr_leaves = ARRAY_SIZE(p->leaves), nr_msrs = 0;
> +    unsigned int nr_leaves = ARRAY_SIZE(p->leaves), nr_msrs = 0;
>      uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
>      uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
>      uint32_t len = ARRAY_SIZE(host_featureset);
> @@ -727,60 +727,8 @@ int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
>      }
>      else
>      {
> -        /*
> -         * Topology for HVM guests is entirely controlled by Xen.  For now, we
> -         * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
> -         */
> -        p->policy.basic.htt = true;
> -        p->policy.extd.cmp_legacy = false;
> -
> -        /*
> -         * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
> -         * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
> -         * overflow.
> -         */
> -        if ( !p->policy.basic.lppp )
> -            p->policy.basic.lppp = 2;
> -        else if ( !(p->policy.basic.lppp & 0x80) )
> -            p->policy.basic.lppp *= 2;
> -
> -        switch ( p->policy.x86_vendor )
> -        {
> -        case X86_VENDOR_INTEL:
> -            for ( i = 0; (p->policy.cache.subleaf[i].type &&
> -                          i < ARRAY_SIZE(p->policy.cache.raw)); ++i )
> -            {
> -                p->policy.cache.subleaf[i].cores_per_package =
> -                    (p->policy.cache.subleaf[i].cores_per_package << 1) | 1;
> -                p->policy.cache.subleaf[i].threads_per_cache = 0;
> -            }
> -            break;
> -
> -        case X86_VENDOR_AMD:
> -        case X86_VENDOR_HYGON:
> -            /*
> -             * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
> -             * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
> -             * Update to reflect vLAPIC_ID = vCPU_ID * 2.  But avoid
> -             * - overflow,
> -             * - going out of sync with leaf 1 EBX[23:16],
> -             * - incrementing ApicIdCoreSize when it's zero (which changes the
> -             *   meaning of bits 7:0).
> -             *
> -             * UPDATE: I addition to avoiding overflow, some
> -             * proprietary operating systems have trouble with
> -             * apic_id_size values greater than 7.  Limit the value to
> -             * 7 for now.
> -             */
> -            if ( p->policy.extd.nc < 0x7f )
> -            {
> -                if ( p->policy.extd.apic_id_size != 0 && p->policy.extd.apic_id_size < 0x7 )
> -                    p->policy.extd.apic_id_size++;
> -
> -                p->policy.extd.nc = (p->policy.extd.nc << 1) | 1;
> -            }
> -            break;
> -        }
> +        /* TODO: Expose the ability to choose a custom topology for HVM/PVH */
> +        xc_topo_from_parts(&p->policy, 1, di.max_vcpu_id + 1);

It would be better if this was split into two commits.  The first
commit would move the code as-is into xc_topo_from_parts() without any
changes, and the second would make the functional changes.  That way
it's far easier to spot which changes are relevant vs pure code
movement.

>      }
>  
>      nr_leaves = ARRAY_SIZE(p->leaves);
> @@ -1028,3 +976,89 @@ bool xc_cpu_policy_is_compatible(xc_interface *xch, xc_cpu_policy_t *host,
>  
>      return false;
>  }
> +
> +static uint32_t order(uint32_t n)
> +{
> +    return 32 - __builtin_clz(n);
> +}
> +
> +void xc_topo_from_parts(struct cpu_policy *p,
> +                        uint32_t threads_per_core, uint32_t cores_per_pkg)
> +{
> +    uint32_t threads_per_pkg = threads_per_core * cores_per_pkg;
> +    uint32_t apic_id_size;
> +
> +    if ( p->basic.max_leaf < 0xb )
> +        p->basic.max_leaf = 0xb;
> +
> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));
> +
> +    /* thread level */
> +    p->topo.subleaf[0].nr_logical = threads_per_core;
> +    p->topo.subleaf[0].id_shift = 0;
> +    p->topo.subleaf[0].level = 0;
> +    p->topo.subleaf[0].type = 1;
> +    if ( threads_per_core > 1 )
> +        p->topo.subleaf[0].id_shift = order(threads_per_core - 1);
> +
> +    /* core level */
> +    p->topo.subleaf[1].nr_logical = cores_per_pkg;
> +    if ( p->x86_vendor == X86_VENDOR_INTEL )
> +        p->topo.subleaf[1].nr_logical = threads_per_pkg;
> +    p->topo.subleaf[1].id_shift = p->topo.subleaf[0].id_shift;
> +    p->topo.subleaf[1].level = 1;
> +    p->topo.subleaf[1].type = 2;
> +    if ( cores_per_pkg > 1 )
> +        p->topo.subleaf[1].id_shift += order(cores_per_pkg - 1);

I was kind of expecting this to be part of cpu-policy rather than xc,
as we could then also test this like you do test
x86_x2apic_id_from_vcpu_id().

It could also be used to populate the topologies in the tests
themselves.

Thanks, Roger.
Jan Beulich March 26, 2024, 5:02 p.m. UTC | #4
On 09.01.2024 16:38, Alejandro Vallejo wrote:
> --- a/tools/include/xenguest.h
> +++ b/tools/include/xenguest.h
> @@ -843,5 +843,20 @@ enum xc_static_cpu_featuremask {
>      XC_FEATUREMASK_HVM_HAP_DEF,
>  };
>  const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
> +
> +/**
> + * Synthesise topology information in `p` given high-level constraints
> + *
>> + * Topology is given in various fields across several leaves, some of
> + * which are vendor-specific. This function uses the policy itself to
> + * derive such leaves from threads/core and cores/package.
> + *
> + * @param p                   CPU policy of the domain.
> + * @param threads_per_core    threads/core. Doesn't need to be a power of 2.
>> + * @param cores_per_pkg       cores/package. Doesn't need to be a power of 2.
> + */
> +void xc_topo_from_parts(struct cpu_policy *p,
> +                        uint32_t threads_per_core, uint32_t cores_per_pkg);

Do we really want to constrain things to just two (or in fact any fixed number
of) levels? Iirc on AMD there already can be up to 4.

> @@ -1028,3 +976,89 @@ bool xc_cpu_policy_is_compatible(xc_interface *xch, xc_cpu_policy_t *host,
>  
>      return false;
>  }
> +
> +static uint32_t order(uint32_t n)
> +{
> +    return 32 - __builtin_clz(n);
> +}

This isn't really portable. __builtin_clz() takes an unsigned int, which may
in principle be wider than 32 bits. I also can't see a reason for the
function to have a fixed-width return type. Perhaps

static unsigned int order(unsigned int n)
{
    return sizeof(n) * CHAR_BIT - __builtin_clz(n);
}

?

> +void xc_topo_from_parts(struct cpu_policy *p,
> +                        uint32_t threads_per_core, uint32_t cores_per_pkg)
> +{
> +    uint32_t threads_per_pkg = threads_per_core * cores_per_pkg;
> +    uint32_t apic_id_size;
> +
> +    if ( p->basic.max_leaf < 0xb )
> +        p->basic.max_leaf = 0xb;
> +
> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));
> +
> +    /* thread level */
> +    p->topo.subleaf[0].nr_logical = threads_per_core;
> +    p->topo.subleaf[0].id_shift = 0;
> +    p->topo.subleaf[0].level = 0;
> +    p->topo.subleaf[0].type = 1;
> +    if ( threads_per_core > 1 )
> +        p->topo.subleaf[0].id_shift = order(threads_per_core - 1);
> +
> +    /* core level */
> +    p->topo.subleaf[1].nr_logical = cores_per_pkg;
> +    if ( p->x86_vendor == X86_VENDOR_INTEL )
> +        p->topo.subleaf[1].nr_logical = threads_per_pkg;

Same concern as in the other patch regarding "== Intel".

> +    p->topo.subleaf[1].id_shift = p->topo.subleaf[0].id_shift;
> +    p->topo.subleaf[1].level = 1;
> +    p->topo.subleaf[1].type = 2;
> +    if ( cores_per_pkg > 1 )
> +        p->topo.subleaf[1].id_shift += order(cores_per_pkg - 1);

Don't you want to return an error when any of the X_per_Y values is 0?

> +    apic_id_size = p->topo.subleaf[1].id_shift;
> +
> +    /*
> +     * Contrary to what the name might seem to imply, HTT is an enabler for
> +     * SMP and there's no harm in setting it even with a single vCPU.
> +     */
> +    p->basic.htt = true;
> +
> +    p->basic.lppp = 0xff;
> +    if ( threads_per_pkg < 0xff )
> +        p->basic.lppp = threads_per_pkg;
> +
> +    switch ( p->x86_vendor )
> +    {
> +        case X86_VENDOR_INTEL:
> +            struct cpuid_cache_leaf *sl = p->cache.subleaf;
> +            for ( size_t i = 0; sl->type &&
> +                                i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
> +            {
> +                sl->cores_per_package = cores_per_pkg - 1;
> +                sl->threads_per_cache = threads_per_core - 1;

IOW the names in struct cpuid_cache_leaf aren't quite correct.

> +                if ( sl->type == 3 /* unified cache */ )
> +                    sl->threads_per_cache = threads_per_pkg - 1;
> +            }
> +            break;
> +
> +        case X86_VENDOR_AMD:
> +        case X86_VENDOR_HYGON:
> +            /* Expose p->basic.lppp */
> +            p->extd.cmp_legacy = true;
> +
> +            /* Clip NC to the maximum value it can hold */
> +            p->extd.nc = 0xff;
> +            if ( threads_per_pkg <= 0xff )
> +                p->extd.nc = threads_per_pkg - 1;
> +
> +            /* TODO: Expose leaf e1E */
> +            p->extd.topoext = false;
> +
> +            /*
> +             * Clip APIC ID to 8, as that's what high core-count machines do

Nit: "... to 8 bits, ..."

> +             *
> +             * That's what AMD EPYC 9654 does with >256 CPUs
> +             */
> +            p->extd.apic_id_size = 8;
> +            if ( apic_id_size < 8 )
> +                p->extd.apic_id_size = apic_id_size;

Use min(), with apic_id_size's type suitably adjusted?
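i.e. something like the following (a sketch, with apic_id_size changed
to unsigned int so the types match; the tools side may need to spell
the helper differently):

    p->extd.apic_id_size = min(apic_id_size, 8u);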

> --- a/xen/arch/x86/cpu-policy.c
> +++ b/xen/arch/x86/cpu-policy.c
> @@ -278,9 +278,6 @@ static void recalculate_misc(struct cpu_policy *p)
>  
>      p->basic.raw[0x8] = EMPTY_LEAF;
>  
> -    /* TODO: Rework topology logic. */
> -    memset(p->topo.raw, 0, sizeof(p->topo.raw));
> -
>      p->basic.raw[0xc] = EMPTY_LEAF;
>  
>      p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
> @@ -387,6 +384,9 @@ static void __init calculate_host_policy(void)
>      recalculate_xstate(p);
>      recalculate_misc(p);
>  
> +    /* Wipe host topology. Toolstack is expected to synthesise a sensible one */
> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));

I don't think this should be zapped from the host policy. It wants zapping
from the guest ones instead, imo. The host policy may (will) want using in
Xen itself, and hence it should reflect reality.

Jan
Alejandro Vallejo May 1, 2024, 5:06 p.m. UTC | #5
On 26/03/2024 17:02, Jan Beulich wrote:
> On 09.01.2024 16:38, Alejandro Vallejo wrote:
>> --- a/tools/include/xenguest.h
>> +++ b/tools/include/xenguest.h
>> @@ -843,5 +843,20 @@ enum xc_static_cpu_featuremask {
>>      XC_FEATUREMASK_HVM_HAP_DEF,
>>  };
>>  const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
>> +
>> +/**
>> + * Synthesise topology information in `p` given high-level constraints
>> + *
>> + * Topology is given in various fields across several leaves, some of
>> + * which are vendor-specific. This function uses the policy itself to
>> + * derive such leaves from threads/core and cores/package.
>> + *
>> + * @param p                   CPU policy of the domain.
>> + * @param threads_per_core    threads/core. Doesn't need to be a power of 2.
>> + * @param cores_per_pkg       cores/package. Doesn't need to be a power of 2.
>> + */
>> +void xc_topo_from_parts(struct cpu_policy *p,
>> +                        uint32_t threads_per_core, uint32_t cores_per_pkg);
> 
> Do we really want to constrain things to just two (or in fact any fixed number
> of) levels? Iirc on AMD there already can be up to 4.

For the time being, I think we should keep it simple(ish).

Leaf 0xb is always 2 levels, and it implicitly defines the offset into
the x2APIC ID for the 3rd level (the package, or the die). This approach
can be used for a long time before we need to expose anything else. It
can already be used for exposing multi-socket configurations, but it's
not properly wired to xl.

I suspect we won't have a need to expose more complicated topologies
until hetero systems are more common, and by that time the generator
will need tweaking anyway.
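
As an illustration, recovering the topology parts from an x2APIC ID
only needs the two id_shift values the generator sets (hypothetical
helper, not part of this series):

    #include <stdint.h>
    #include <stdio.h>

    /* shift0 = topo.subleaf[0].id_shift, shift1 = topo.subleaf[1].id_shift */
    static void decompose_x2apic_id(uint32_t id, unsigned int shift0,
                                    unsigned int shift1)
    {
        uint32_t thread = id & ((1u << shift0) - 1);
        uint32_t core = (id >> shift0) & ((1u << (shift1 - shift0)) - 1);
        uint32_t pkg = id >> shift1; /* the implicit 3rd level */

        printf("id=%u => pkg=%u core=%u thread=%u\n", id, pkg, core, thread);
    }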

> 
>> @@ -1028,3 +976,89 @@ bool xc_cpu_policy_is_compatible(xc_interface *xch, xc_cpu_policy_t *host,
>>  
>>      return false;
>>  }
>> +
>> +static uint32_t order(uint32_t n)
>> +{
>> +    return 32 - __builtin_clz(n);
>> +}
> 
> This isn't really portable. __builtin_clz() takes an unsigned int, which may
> in principle be wider than 32 bits. I also can't see a reason for the
> function to have a fixed-width return type. Perhaps

Sure, unsigned int it is. I'll s/CHAR_BIT/8/ as CHAR_BIT == 8 is
mandated by POSIX already.
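
E.g. (a sketch of the resulting helper, with a couple of example
values for illustration):

    static unsigned int order(unsigned int n)
    {
        /* Note: __builtin_clz(0) is undefined; callers guard with "> 1". */
        return sizeof(n) * 8 - __builtin_clz(n);
    }

    /* order(1) == 1, order(3) == 2, order(7) == 3; e.g. 6 cores/pkg
     * yields id_shift += order(6 - 1) == 3 bits. */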

> 
> static unsigned int order(unsigned int n)
> {
>     return sizeof(n) * CHAR_BIT - __builtin_clz(n);
> }
> 
> ?
> 
>> +void xc_topo_from_parts(struct cpu_policy *p,
>> +                        uint32_t threads_per_core, uint32_t cores_per_pkg)
>> +{
>> +    uint32_t threads_per_pkg = threads_per_core * cores_per_pkg;
>> +    uint32_t apic_id_size;
>> +
>> +    if ( p->basic.max_leaf < 0xb )
>> +        p->basic.max_leaf = 0xb;
>> +
>> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));
>> +
>> +    /* thread level */
>> +    p->topo.subleaf[0].nr_logical = threads_per_core;
>> +    p->topo.subleaf[0].id_shift = 0;
>> +    p->topo.subleaf[0].level = 0;
>> +    p->topo.subleaf[0].type = 1;
>> +    if ( threads_per_core > 1 )
>> +        p->topo.subleaf[0].id_shift = order(threads_per_core - 1);
>> +
>> +    /* core level */
>> +    p->topo.subleaf[1].nr_logical = cores_per_pkg;
>> +    if ( p->x86_vendor == X86_VENDOR_INTEL )
>> +        p->topo.subleaf[1].nr_logical = threads_per_pkg;
> 
> Same concern as in the other patch regarding "== Intel".
> 

Can you please articulate the concern?

>> +    p->topo.subleaf[1].id_shift = p->topo.subleaf[0].id_shift;
>> +    p->topo.subleaf[1].level = 1;
>> +    p->topo.subleaf[1].type = 2;
>> +    if ( cores_per_pkg > 1 )
>> +        p->topo.subleaf[1].id_shift += order(cores_per_pkg - 1);
> 
> Don't you want to return an error when any of the X_per_Y values is 0?

I'd rather not.

The checks on input parameters should be done wherever the inputs are
taken from. Currently the call site passes threads_per_core=1 and
cores_per_pkg=max_vcpu_id+1, so it's all guaranteed to work out.

Once it comes from xl, libxl should be in charge of the validations.
Furthermore there's validations the function simply cannot do (nor
should it) in its current form, like checking that...

    max_vcpus == threads_per_core * cores_per_pkg * n_pkgs.
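
A hypothetical libxl-side check (a sketch only; names invented for
illustration, assuming libxl's usual ERROR_INVAL) could then be as
simple as:

    /* Reject inconsistent topology requests before they reach
     * xc_topo_from_parts(). */
    static int validate_topology(unsigned int max_vcpus,
                                 unsigned int threads_per_core,
                                 unsigned int cores_per_pkg,
                                 unsigned int n_pkgs)
    {
        if ( !threads_per_core || !cores_per_pkg || !n_pkgs )
            return ERROR_INVAL;
        if ( max_vcpus != threads_per_core * cores_per_pkg * n_pkgs )
            return ERROR_INVAL;
        return 0;
    }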

> 
>> +    apic_id_size = p->topo.subleaf[1].id_shift;
>> +
>> +    /*
>> +     * Contrary to what the name might seem to imply, HTT is an enabler for
>> +     * SMP and there's no harm in setting it even with a single vCPU.
>> +     */
>> +    p->basic.htt = true;
>> +
>> +    p->basic.lppp = 0xff;
>> +    if ( threads_per_pkg < 0xff )
>> +        p->basic.lppp = threads_per_pkg;
>> +
>> +    switch ( p->x86_vendor )
>> +    {
>> +        case X86_VENDOR_INTEL:
>> +            struct cpuid_cache_leaf *sl = p->cache.subleaf;
>> +            for ( size_t i = 0; sl->type &&
>> +                                i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
>> +            {
>> +                sl->cores_per_package = cores_per_pkg - 1;
>> +                sl->threads_per_cache = threads_per_core - 1;
> 
> IOW the names in struct cpuid_cache_leaf aren't quite correct.

Because of the - 1, you mean? If anything, our name is marginally clearer
than the SDM description. It goes on to say "Add one to the return value
to get the result" in a [**] note, so it's not something we made up.

Xen: threads_per_cache => SDM: Maximum number of addressable IDs for
logical processors sharing this cache

Xen: cores_per_package => SDM: Maximum number of addressable IDs for
processor cores in the physical package
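
Concretely, with 2 threads sharing each cache and 8 cores per package,
the generator stores the SDM's "maximum addressable ID" values and the
guest adds one when reading them back (example values, not from the
patch):

    sl->threads_per_cache = 2 - 1; /* guest reads 1, adds 1 => 2 */
    sl->cores_per_package = 8 - 1; /* guest reads 7, adds 1 => 8 */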

> 
>> +                if ( sl->type == 3 /* unified cache */ )
>> +                    sl->threads_per_cache = threads_per_pkg - 1;
>> +            }
>> +            break;
>> +
>> +        case X86_VENDOR_AMD:
>> +        case X86_VENDOR_HYGON:
>> +            /* Expose p->basic.lppp */
>> +            p->extd.cmp_legacy = true;
>> +
>> +            /* Clip NC to the maximum value it can hold */
>> +            p->extd.nc = 0xff;
>> +            if ( threads_per_pkg <= 0xff )
>> +                p->extd.nc = threads_per_pkg - 1;
>> +
>> +            /* TODO: Expose leaf e1E */
>> +            p->extd.topoext = false;
>> +
>> +            /*
>> +             * Clip APIC ID to 8, as that's what high core-count machines do
> 
> Nit: "... to 8 bits, ..."

ack

> 
>> +             *
>> +             * That's what AMD EPYC 9654 does with >256 CPUs
>> +             */
>> +            p->extd.apic_id_size = 8;
>> +            if ( apic_id_size < 8 )
>> +                p->extd.apic_id_size = apic_id_size;
> 
> Use min(), with apic_id_size's type suitably adjusted?

ack

> 
>> --- a/xen/arch/x86/cpu-policy.c
>> +++ b/xen/arch/x86/cpu-policy.c
>> @@ -278,9 +278,6 @@ static void recalculate_misc(struct cpu_policy *p)
>>  
>>      p->basic.raw[0x8] = EMPTY_LEAF;
>>  
>> -    /* TODO: Rework topology logic. */
>> -    memset(p->topo.raw, 0, sizeof(p->topo.raw));
>> -
>>      p->basic.raw[0xc] = EMPTY_LEAF;
>>  
>>      p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
>> @@ -387,6 +384,9 @@ static void __init calculate_host_policy(void)
>>      recalculate_xstate(p);
>>      recalculate_misc(p);
>>  
>> +    /* Wipe host topology. Toolstack is expected to synthesise a sensible one */
>> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));
> 
> I don't think this should be zapped from the host policy. It wants zapping
> from the guest ones instead, imo. The host policy may (will) want using in
> Xen itself, and hence it should reflect reality.
> 
> Jan

Shouldn't Xen be checking its own cached state from the raw policy
instead? My understanding was that to a first approximation the host
policy is a template for guest creation. It already has a bunch of
overrides that don't match the real hardware configuration.

Cheers,
Alejandro
Jan Beulich May 2, 2024, 7:13 a.m. UTC | #6
On 01.05.2024 19:06, Alejandro Vallejo wrote:
> On 26/03/2024 17:02, Jan Beulich wrote:
>> On 09.01.2024 16:38, Alejandro Vallejo wrote:
>>> --- a/tools/include/xenguest.h
>>> +++ b/tools/include/xenguest.h
>>> @@ -843,5 +843,20 @@ enum xc_static_cpu_featuremask {
>>>      XC_FEATUREMASK_HVM_HAP_DEF,
>>>  };
>>>  const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
>>> +
>>> +/**
>>> + * Synthesise topology information in `p` given high-level constraints
>>> + *
>>> + * Topology is given in various fields across several leaves, some of
>>> + * which are vendor-specific. This function uses the policy itself to
>>> + * derive such leaves from threads/core and cores/package.
>>> + *
>>> + * @param p                   CPU policy of the domain.
>>> + * @param threads_per_core    threads/core. Doesn't need to be a power of 2.
>>> + * @param cores_per_pkg       cores/package. Doesn't need to be a power of 2.
>>> + */
>>> +void xc_topo_from_parts(struct cpu_policy *p,
>>> +                        uint32_t threads_per_core, uint32_t cores_per_pkg);
>>
>> Do we really want to constrain things to just two (or in fact any fixed number
>> of) levels? Iirc on AMD there already can be up to 4.
> 
> For the time being, I think we should keep it simple(ish).

Perhaps, with (briefly) stating the reason(s) for doing so.

>>> +void xc_topo_from_parts(struct cpu_policy *p,
>>> +                        uint32_t threads_per_core, uint32_t cores_per_pkg)
>>> +{
>>> +    uint32_t threads_per_pkg = threads_per_core * cores_per_pkg;
>>> +    uint32_t apic_id_size;
>>> +
>>> +    if ( p->basic.max_leaf < 0xb )
>>> +        p->basic.max_leaf = 0xb;
>>> +
>>> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));
>>> +
>>> +    /* thread level */
>>> +    p->topo.subleaf[0].nr_logical = threads_per_core;
>>> +    p->topo.subleaf[0].id_shift = 0;
>>> +    p->topo.subleaf[0].level = 0;
>>> +    p->topo.subleaf[0].type = 1;
>>> +    if ( threads_per_core > 1 )
>>> +        p->topo.subleaf[0].id_shift = order(threads_per_core - 1);
>>> +
>>> +    /* core level */
>>> +    p->topo.subleaf[1].nr_logical = cores_per_pkg;
>>> +    if ( p->x86_vendor == X86_VENDOR_INTEL )
>>> +        p->topo.subleaf[1].nr_logical = threads_per_pkg;
>>
>> Same concern as in the other patch regarding "== Intel".
> 
> Can you please articulate the concern?

See my replies to patch 5. Exactly the same applies here.

>>> +    p->topo.subleaf[1].id_shift = p->topo.subleaf[0].id_shift;
>>> +    p->topo.subleaf[1].level = 1;
>>> +    p->topo.subleaf[1].type = 2;
>>> +    if ( cores_per_pkg > 1 )
>>> +        p->topo.subleaf[1].id_shift += order(cores_per_pkg - 1);
>>
>> Don't you want to return an error when any of the X_per_Y values is 0?
> 
> I'd rather not.
> 
> The checks on input parameters should be done wherever the inputs are
> taken from. Currently the call site passes threads_per_core=1 and
> cores_per_pkg=1+max_vcpus, so it's all guaranteed to work out.

Hmm, and then have this function produce potentially bogus results?

> Once it comes from xl, libxl should be in charge of the validations.
> Furthermore there's validations the function simply cannot do (nor
> should it) in its current form, like checking that...
> 
>     max_vcpus == threads_per_core * cores_per_pkg * n_pkgs.

But that isn't an equality that needs to hold, is it? It would put
constraints on exposing e.g. HT to a guest with an odd number of
vCPU-s. Imo especially with core scheduling HT-ness wants properly
reflecting from underlying hardware, so core-sched-like behavior in
the guest itself can actually achieve its goals.

>>> +    apic_id_size = p->topo.subleaf[1].id_shift;
>>> +
>>> +    /*
>>> +     * Contrary to what the name might seem to imply, HTT is an enabler for
>>> +     * SMP and there's no harm in setting it even with a single vCPU.
>>> +     */
>>> +    p->basic.htt = true;
>>> +
>>> +    p->basic.lppp = 0xff;
>>> +    if ( threads_per_pkg < 0xff )
>>> +        p->basic.lppp = threads_per_pkg;
>>> +
>>> +    switch ( p->x86_vendor )
>>> +    {
>>> +        case X86_VENDOR_INTEL:
>>> +            struct cpuid_cache_leaf *sl = p->cache.subleaf;
>>> +            for ( size_t i = 0; sl->type &&
>>> +                                i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
>>> +            {
>>> +                sl->cores_per_package = cores_per_pkg - 1;
>>> +                sl->threads_per_cache = threads_per_core - 1;
>>
>> IOW the names in struct cpuid_cache_leaf aren't quite correct.
> 
> Because of the - 1, you mean?

Yes. The expressions above simply read as if there was an (obvious)
off-by-1.

> If anything our name is marginally clearer
> than the SDM description. It goes on to say "Add one to the return value
> to get the result" in a [**] note, so it's not something we made up.
> 
> Xen: threads_per_cache => SDM: Maximum number of addressable IDs for
> logical processors sharing this cache
> 
> Xen: cores_per_package => SDM: Maximum number of addressable IDs for
> processor cores in the physical package

I'm afraid I don't follow what you're trying to justify here. Following
SDM naming is generally advisable, yes, but only as far as no confusion
results. Otherwise imo a better name wants picking, with the struct
field declaration then accompanied by a comment clarifying the
difference wrt SDM.

>>> --- a/xen/arch/x86/cpu-policy.c
>>> +++ b/xen/arch/x86/cpu-policy.c
>>> @@ -278,9 +278,6 @@ static void recalculate_misc(struct cpu_policy *p)
>>>  
>>>      p->basic.raw[0x8] = EMPTY_LEAF;
>>>  
>>> -    /* TODO: Rework topology logic. */
>>> -    memset(p->topo.raw, 0, sizeof(p->topo.raw));
>>> -
>>>      p->basic.raw[0xc] = EMPTY_LEAF;
>>>  
>>>      p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
>>> @@ -387,6 +384,9 @@ static void __init calculate_host_policy(void)
>>>      recalculate_xstate(p);
>>>      recalculate_misc(p);
>>>  
>>> +    /* Wipe host topology. Toolstack is expected to synthesise a sensible one */
>>> +    memset(p->topo.raw, 0, sizeof(p->topo.raw));
>>
>> I don't think this should be zapped from the host policy. It wants zapping
>> from the guest ones instead, imo. The host policy may (will) want using in
>> Xen itself, and hence it should reflect reality.
> 
> Shouldn't Xen be checking its own cached state from the raw policy
> instead? My understanding was that to a first approximation the host
> policy is a template for guest creation. It already has a bunch of
> overrides that don't match the real hardware configuration.

No, raw policy is what comes from hardware, entirely unadjusted for
e.g. command line options of Xen internal restrictions. Any decisions
Xen takes for itself should be based on the host policy (whereas guest
related decisions are to use the respective guest policy).

Jan

Patch

diff --git a/tools/include/xenguest.h b/tools/include/xenguest.h
index 4e9078fdee..f0043c559b 100644
--- a/tools/include/xenguest.h
+++ b/tools/include/xenguest.h
@@ -843,5 +843,20 @@  enum xc_static_cpu_featuremask {
     XC_FEATUREMASK_HVM_HAP_DEF,
 };
 const uint32_t *xc_get_static_cpu_featuremask(enum xc_static_cpu_featuremask);
+
+/**
+ * Synthesise topology information in `p` given high-level constraints
+ *
+ * Topology is given in various fields across several leaves, some of
+ * which are vendor-specific. This function uses the policy itself to
+ * derive such leaves from threads/core and cores/package.
+ *
+ * @param p                   CPU policy of the domain.
+ * @param threads_per_core    threads/core. Doesn't need to be a power of 2.
+ * @param cores_per_pkg       cores/package. Doesn't need to be a power of 2.
+ */
+void xc_topo_from_parts(struct cpu_policy *p,
+                        uint32_t threads_per_core, uint32_t cores_per_pkg);
+
 #endif /* __i386__ || __x86_64__ */
 #endif /* XENGUEST_H */
diff --git a/tools/libs/guest/xg_cpuid_x86.c b/tools/libs/guest/xg_cpuid_x86.c
index 4453178100..7a622721be 100644
--- a/tools/libs/guest/xg_cpuid_x86.c
+++ b/tools/libs/guest/xg_cpuid_x86.c
@@ -584,7 +584,7 @@  int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
     bool hvm;
     xc_domaininfo_t di;
     struct xc_cpu_policy *p = xc_cpu_policy_init();
-    unsigned int i, nr_leaves = ARRAY_SIZE(p->leaves), nr_msrs = 0;
+    unsigned int nr_leaves = ARRAY_SIZE(p->leaves), nr_msrs = 0;
     uint32_t err_leaf = -1, err_subleaf = -1, err_msr = -1;
     uint32_t host_featureset[FEATURESET_NR_ENTRIES] = {};
     uint32_t len = ARRAY_SIZE(host_featureset);
@@ -727,60 +727,8 @@  int xc_cpuid_apply_policy(xc_interface *xch, uint32_t domid, bool restore,
     }
     else
     {
-        /*
-         * Topology for HVM guests is entirely controlled by Xen.  For now, we
-         * hardcode APIC_ID = vcpu_id * 2 to give the illusion of no SMT.
-         */
-        p->policy.basic.htt = true;
-        p->policy.extd.cmp_legacy = false;
-
-        /*
-         * Leaf 1 EBX[23:16] is Maximum Logical Processors Per Package.
-         * Update to reflect vLAPIC_ID = vCPU_ID * 2, but make sure to avoid
-         * overflow.
-         */
-        if ( !p->policy.basic.lppp )
-            p->policy.basic.lppp = 2;
-        else if ( !(p->policy.basic.lppp & 0x80) )
-            p->policy.basic.lppp *= 2;
-
-        switch ( p->policy.x86_vendor )
-        {
-        case X86_VENDOR_INTEL:
-            for ( i = 0; (p->policy.cache.subleaf[i].type &&
-                          i < ARRAY_SIZE(p->policy.cache.raw)); ++i )
-            {
-                p->policy.cache.subleaf[i].cores_per_package =
-                    (p->policy.cache.subleaf[i].cores_per_package << 1) | 1;
-                p->policy.cache.subleaf[i].threads_per_cache = 0;
-            }
-            break;
-
-        case X86_VENDOR_AMD:
-        case X86_VENDOR_HYGON:
-            /*
-             * Leaf 0x80000008 ECX[15:12] is ApicIdCoreSize.
-             * Leaf 0x80000008 ECX[7:0] is NumberOfCores (minus one).
-             * Update to reflect vLAPIC_ID = vCPU_ID * 2.  But avoid
-             * - overflow,
-             * - going out of sync with leaf 1 EBX[23:16],
-             * - incrementing ApicIdCoreSize when it's zero (which changes the
-             *   meaning of bits 7:0).
-             *
-             * UPDATE: I addition to avoiding overflow, some
-             * proprietary operating systems have trouble with
-             * apic_id_size values greater than 7.  Limit the value to
-             * 7 for now.
-             */
-            if ( p->policy.extd.nc < 0x7f )
-            {
-                if ( p->policy.extd.apic_id_size != 0 && p->policy.extd.apic_id_size < 0x7 )
-                    p->policy.extd.apic_id_size++;
-
-                p->policy.extd.nc = (p->policy.extd.nc << 1) | 1;
-            }
-            break;
-        }
+        /* TODO: Expose the ability to choose a custom topology for HVM/PVH */
+        xc_topo_from_parts(&p->policy, 1, di.max_vcpu_id + 1);
     }
 
     nr_leaves = ARRAY_SIZE(p->leaves);
@@ -1028,3 +976,89 @@  bool xc_cpu_policy_is_compatible(xc_interface *xch, xc_cpu_policy_t *host,
 
     return false;
 }
+
+static uint32_t order(uint32_t n)
+{
+    return 32 - __builtin_clz(n);
+}
+
+void xc_topo_from_parts(struct cpu_policy *p,
+                        uint32_t threads_per_core, uint32_t cores_per_pkg)
+{
+    uint32_t threads_per_pkg = threads_per_core * cores_per_pkg;
+    uint32_t apic_id_size;
+
+    if ( p->basic.max_leaf < 0xb )
+        p->basic.max_leaf = 0xb;
+
+    memset(p->topo.raw, 0, sizeof(p->topo.raw));
+
+    /* thread level */
+    p->topo.subleaf[0].nr_logical = threads_per_core;
+    p->topo.subleaf[0].id_shift = 0;
+    p->topo.subleaf[0].level = 0;
+    p->topo.subleaf[0].type = 1;
+    if ( threads_per_core > 1 )
+        p->topo.subleaf[0].id_shift = order(threads_per_core - 1);
+
+    /* core level */
+    p->topo.subleaf[1].nr_logical = cores_per_pkg;
+    if ( p->x86_vendor == X86_VENDOR_INTEL )
+        p->topo.subleaf[1].nr_logical = threads_per_pkg;
+    p->topo.subleaf[1].id_shift = p->topo.subleaf[0].id_shift;
+    p->topo.subleaf[1].level = 1;
+    p->topo.subleaf[1].type = 2;
+    if ( cores_per_pkg > 1 )
+        p->topo.subleaf[1].id_shift += order(cores_per_pkg - 1);
+
+    apic_id_size = p->topo.subleaf[1].id_shift;
+
+    /*
+     * Contrary to what the name might seem to imply. HTT is an enabler for
+     * SMP and there's no harm in setting it even with a single vCPU.
+     */
+    p->basic.htt = true;
+
+    p->basic.lppp = 0xff;
+    if ( threads_per_pkg < 0xff )
+        p->basic.lppp = threads_per_pkg;
+
+    switch ( p->x86_vendor )
+    {
+        case X86_VENDOR_INTEL:
+            struct cpuid_cache_leaf *sl = p->cache.subleaf;
+            for ( size_t i = 0; sl->type &&
+                                i < ARRAY_SIZE(p->cache.raw); i++, sl++ )
+            {
+                sl->cores_per_package = cores_per_pkg - 1;
+                sl->threads_per_cache = threads_per_core - 1;
+                if ( sl->type == 3 /* unified cache */ )
+                    sl->threads_per_cache = threads_per_pkg - 1;
+            }
+            break;
+
+        case X86_VENDOR_AMD:
+        case X86_VENDOR_HYGON:
+            /* Expose p->basic.lppp */
+            p->extd.cmp_legacy = true;
+
+            /* Clip NC to the maximum value it can hold */
+            p->extd.nc = 0xff;
+            if ( threads_per_pkg <= 0xff )
+                p->extd.nc = threads_per_pkg - 1;
+
+            /* TODO: Expose leaf e1E */
+            p->extd.topoext = false;
+
+            /*
+             * Clip APIC ID to 8, as that's what high core-count machines do
+             *
+             * That's what AMD EPYC 9654 does with >256 CPUs
+             */
+            p->extd.apic_id_size = 8;
+            if ( apic_id_size < 8 )
+                p->extd.apic_id_size = apic_id_size;
+
+            break;
+    }
+}
diff --git a/xen/arch/x86/cpu-policy.c b/xen/arch/x86/cpu-policy.c
index 76efb050ed..679d1fe4fa 100644
--- a/xen/arch/x86/cpu-policy.c
+++ b/xen/arch/x86/cpu-policy.c
@@ -278,9 +278,6 @@  static void recalculate_misc(struct cpu_policy *p)
 
     p->basic.raw[0x8] = EMPTY_LEAF;
 
-    /* TODO: Rework topology logic. */
-    memset(p->topo.raw, 0, sizeof(p->topo.raw));
-
     p->basic.raw[0xc] = EMPTY_LEAF;
 
     p->extd.e1d &= ~CPUID_COMMON_1D_FEATURES;
@@ -387,6 +384,9 @@  static void __init calculate_host_policy(void)
     recalculate_xstate(p);
     recalculate_misc(p);
 
+    /* Wipe host topology. Toolstack is expected to synthesise a sensible one */
+    memset(p->topo.raw, 0, sizeof(p->topo.raw));
+
     /* When vPMU is disabled, drop it from the host policy. */
     if ( vpmu_mode == XENPMU_MODE_OFF )
         p->basic.raw[0xa] = EMPTY_LEAF;