
[v2,2/3] hw/acpi/aml-build: Use existing CPU topology to build PPTT table

Message ID 20220303031152.145960-3-gshan@redhat.com (mailing list archive)
State New, archived
Series hw/arm/virt: Fix CPU's default NUMA node ID

Commit Message

Gavin Shan March 3, 2022, 3:11 a.m. UTC
When the PPTT table is built, the CPU topology is re-calculated, but
it's unnecessary because the CPU topology, except for the cluster IDs,
has been populated in virt_possible_cpu_arch_ids() on the arm/virt machine.

This avoids re-calculating the CPU topology by reusing the existing
one in ms->possible_cpus. However, the cluster ID for the CPU instance
has to be calculated dynamically because there is no corresponding
field in struct CpuInstanceProperties. Currently, the only user of
build_pptt() is the arm/virt machine.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 hw/acpi/aml-build.c | 106 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 82 insertions(+), 24 deletions(-)
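
For reference, the dynamic derivation in the patch boils down to an integer
division of the linear thread_id by the number of CPUs per cluster. A minimal
sketch of that arithmetic (the helper name is made up for illustration, and it
assumes thread_id is the linear CPU index across the machine, as this series
populates it):

    /*
     * Illustration only: with thread_id numbered 0..N-1 across the machine,
     * each cluster spans cores * threads CPUs, so the cluster ID is simply
     * the quotient of the two.
     */
    static inline int cluster_id_from_thread_id(int thread_id,
                                                int cores, int threads)
    {
        return thread_id / (cores * threads);
    }

    /* Example: cores=2, threads=2 -> 4 CPUs per cluster; thread_id 5 -> cluster 1. */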

Comments

wangyanan (Y) March 18, 2022, 6:34 a.m. UTC | #1
Hi Gavin,

On 2022/3/3 11:11, Gavin Shan wrote:
> When the PPTT table is built, the CPU topology is re-calculated, but
> it's unnecessary because the CPU topology, except for the cluster IDs,
> has been populated in virt_possible_cpu_arch_ids() on the arm/virt machine.
>
> This avoids re-calculating the CPU topology by reusing the existing
> one in ms->possible_cpus. However, the cluster ID for the CPU instance
> has to be calculated dynamically because there is no corresponding
> field in struct CpuInstanceProperties. Currently, the only user of
> build_pptt() is the arm/virt machine.
>
> Signed-off-by: Gavin Shan <gshan@redhat.com>
> ---
>   hw/acpi/aml-build.c | 106 ++++++++++++++++++++++++++++++++++----------
>   1 file changed, 82 insertions(+), 24 deletions(-)
>
> diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
> index 8966e16320..572cf5fc00 100644
> --- a/hw/acpi/aml-build.c
> +++ b/hw/acpi/aml-build.c
> @@ -2002,18 +2002,27 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
>                   const char *oem_id, const char *oem_table_id)
>   {
>       MachineClass *mc = MACHINE_GET_CLASS(ms);
> +    CPUArchIdList *cpus = ms->possible_cpus;
> +    GQueue *socket_list = g_queue_new();
> +    GQueue *cluster_list = g_queue_new();
> +    GQueue *core_list = g_queue_new();
>       GQueue *list = g_queue_new();
>       guint pptt_start = table_data->len;
>       guint parent_offset;
>       guint length, i;
> -    int uid = 0;
> -    int socket;
> +    int n, id, socket_id, cluster_id, core_id, thread_id;
>       AcpiTable table = { .sig = "PPTT", .rev = 2,
>                           .oem_id = oem_id, .oem_table_id = oem_table_id };
>   
>       acpi_table_begin(&table, table_data);
>   
> -    for (socket = 0; socket < ms->smp.sockets; socket++) {
> +    for (n = 0; n < cpus->len; n++) {
> +        socket_id = cpus->cpus[n].props.socket_id;
> +        if (g_queue_find(socket_list, GUINT_TO_POINTER(socket_id))) {
> +            continue;
> +        }
> +
> +        g_queue_push_tail(socket_list, GUINT_TO_POINTER(socket_id));
>           g_queue_push_tail(list,
>               GUINT_TO_POINTER(table_data->len - pptt_start));
>           build_processor_hierarchy_node(
> @@ -2023,65 +2032,114 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
>                * of a physical package
>                */
>               (1 << 0),
> -            0, socket, NULL, 0);
> +            0, socket_id, NULL, 0);
>       }
>   
>       if (mc->smp_props.clusters_supported) {
>           length = g_queue_get_length(list);
>           for (i = 0; i < length; i++) {
> -            int cluster;
> -
>               parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
> -            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
> +            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
> +
> +            for (n = 0; n < cpus->len; n++) {
> +                if (cpus->cpus[n].props.socket_id != socket_id) {
> +                    continue;
> +                }
> +
> +                /*
> +                 * We have to calculate the cluster ID because it isn't
> +                 * available in the CPU instance properties.
> +                 */
Since we need the cluster ID now, maybe we can simply make it
available in the CPU instance properties.

Thanks,
Yanan
> +                cluster_id = cpus->cpus[n].props.thread_id /
> +                             (ms->smp.cores * ms->smp.threads);
> +                if (g_queue_find(cluster_list, GUINT_TO_POINTER(cluster_id))) {
> +                    continue;
> +                }
> +
> +                g_queue_push_tail(cluster_list, GUINT_TO_POINTER(cluster_id));
>                   g_queue_push_tail(list,
>                       GUINT_TO_POINTER(table_data->len - pptt_start));
>                   build_processor_hierarchy_node(
>                       table_data,
>                       (0 << 0), /* not a physical package */
> -                    parent_offset, cluster, NULL, 0);
> +                    parent_offset, cluster_id, NULL, 0);
>               }
>           }
>       }
>   
>       length = g_queue_get_length(list);
>       for (i = 0; i < length; i++) {
> -        int core;
> -
>           parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
> -        for (core = 0; core < ms->smp.cores; core++) {
> -            if (ms->smp.threads > 1) {
> -                g_queue_push_tail(list,
> -                    GUINT_TO_POINTER(table_data->len - pptt_start));
> -                build_processor_hierarchy_node(
> -                    table_data,
> -                    (0 << 0), /* not a physical package */
> -                    parent_offset, core, NULL, 0);
> -            } else {
> +        if (!mc->smp_props.clusters_supported) {
> +            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
> +        } else {
> +            cluster_id = GPOINTER_TO_UINT(g_queue_pop_head(cluster_list));
> +        }
> +
> +        for (n = 0; n < cpus->len; n++) {
> +            if (!mc->smp_props.clusters_supported &&
> +                cpus->cpus[n].props.socket_id != socket_id) {
> +                continue;
> +            }
> +
> +            /*
> +             * We have to calculate the cluster ID because it isn't
> +             * available in the CPU instance properties.
> +             */
> +            id = cpus->cpus[n].props.thread_id /
> +                (ms->smp.cores * ms->smp.threads);
> +            if (mc->smp_props.clusters_supported && id != cluster_id) {
> +                continue;
> +            }
> +
> +            core_id = cpus->cpus[n].props.core_id;
> +            if (ms->smp.threads <= 1) {
>                   build_processor_hierarchy_node(
>                       table_data,
>                       (1 << 1) | /* ACPI Processor ID valid */
>                       (1 << 3),  /* Node is a Leaf */
> -                    parent_offset, uid++, NULL, 0);
> +                    parent_offset, core_id, NULL, 0);
> +                continue;
>               }
> +
> +            if (g_queue_find(core_list, GUINT_TO_POINTER(core_id))) {
> +                continue;
> +            }
> +
> +            g_queue_push_tail(core_list, GUINT_TO_POINTER(core_id));
> +            g_queue_push_tail(list,
> +                GUINT_TO_POINTER(table_data->len - pptt_start));
> +            build_processor_hierarchy_node(
> +                table_data,
> +                (0 << 0), /* not a physical package */
> +                parent_offset, core_id, NULL, 0);
>           }
>       }
>   
>       length = g_queue_get_length(list);
>       for (i = 0; i < length; i++) {
> -        int thread;
> -
>           parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
> -        for (thread = 0; thread < ms->smp.threads; thread++) {
> +        core_id = GPOINTER_TO_UINT(g_queue_pop_head(core_list));
> +
> +        for (n = 0; n < cpus->len; n++) {
> +            if (cpus->cpus[n].props.core_id != core_id) {
> +                continue;
> +            }
> +
> +            thread_id = cpus->cpus[n].props.thread_id;
>               build_processor_hierarchy_node(
>                   table_data,
>                   (1 << 1) | /* ACPI Processor ID valid */
>                   (1 << 2) | /* Processor is a Thread */
>                   (1 << 3),  /* Node is a Leaf */
> -                parent_offset, uid++, NULL, 0);
> +                parent_offset, thread_id, NULL, 0);
>           }
>       }
>   
>       g_queue_free(list);
> +    g_queue_free(core_list);
> +    g_queue_free(cluster_list);
> +    g_queue_free(socket_list);
>       acpi_table_end(linker, &table);
>   }
>
Igor Mammedov March 18, 2022, 1:28 p.m. UTC | #2
On Fri, 18 Mar 2022 14:34:12 +0800
"wangyanan (Y)" <wangyanan55@huawei.com> wrote:

> Hi Gavin,
> 
> On 2022/3/3 11:11, Gavin Shan wrote:
> > When the PPTT table is built, the CPU topology is re-calculated, but
> > it's unnecessary because the CPU topology, except for the cluster IDs,
> > has been populated in virt_possible_cpu_arch_ids() on the arm/virt machine.
> >
> > This avoids re-calculating the CPU topology by reusing the existing
> > one in ms->possible_cpus. However, the cluster ID for the CPU instance
> > has to be calculated dynamically because there is no corresponding
> > field in struct CpuInstanceProperties. Currently, the only user of
> > build_pptt() is the arm/virt machine.
> >
> > Signed-off-by: Gavin Shan <gshan@redhat.com>
> > ---
> >   hw/acpi/aml-build.c | 106 ++++++++++++++++++++++++++++++++++----------
> >   1 file changed, 82 insertions(+), 24 deletions(-)
> >
> > diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
> > index 8966e16320..572cf5fc00 100644
> > --- a/hw/acpi/aml-build.c
> > +++ b/hw/acpi/aml-build.c
> > @@ -2002,18 +2002,27 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> >                   const char *oem_id, const char *oem_table_id)
> >   {
> >       MachineClass *mc = MACHINE_GET_CLASS(ms);
> > +    CPUArchIdList *cpus = ms->possible_cpus;
> > +    GQueue *socket_list = g_queue_new();
> > +    GQueue *cluster_list = g_queue_new();
> > +    GQueue *core_list = g_queue_new();
> >       GQueue *list = g_queue_new();
> >       guint pptt_start = table_data->len;
> >       guint parent_offset;
> >       guint length, i;
> > -    int uid = 0;
> > -    int socket;
> > +    int n, id, socket_id, cluster_id, core_id, thread_id;
> >       AcpiTable table = { .sig = "PPTT", .rev = 2,
> >                           .oem_id = oem_id, .oem_table_id = oem_table_id };
> >   
> >       acpi_table_begin(&table, table_data);
> >   
> > -    for (socket = 0; socket < ms->smp.sockets; socket++) {
> > +    for (n = 0; n < cpus->len; n++) {
> > +        socket_id = cpus->cpus[n].props.socket_id;
> > +        if (g_queue_find(socket_list, GUINT_TO_POINTER(socket_id))) {
> > +            continue;
> > +        }
> > +
> > +        g_queue_push_tail(socket_list, GUINT_TO_POINTER(socket_id));
> >           g_queue_push_tail(list,
> >               GUINT_TO_POINTER(table_data->len - pptt_start));
> >           build_processor_hierarchy_node(
> > @@ -2023,65 +2032,114 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
> >                * of a physical package
> >                */
> >               (1 << 0),
> > -            0, socket, NULL, 0);
> > +            0, socket_id, NULL, 0);
> >       }
> >   
> >       if (mc->smp_props.clusters_supported) {
> >           length = g_queue_get_length(list);
> >           for (i = 0; i < length; i++) {
> > -            int cluster;
> > -
> >               parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
> > -            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
> > +            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
> > +
> > +            for (n = 0; n < cpus->len; n++) {
> > +                if (cpus->cpus[n].props.socket_id != socket_id) {
> > +                    continue;
> > +                }
> > +
> > +                /*
> > +                 * We have to calculate the cluster ID because it isn't
> > +                 * available in the CPU instance properties.
> > +                 */  
> Since we need the cluster ID now, maybe we can simply make it
> available in the CPU instance properties.

agreed

> 
> Thanks,
> Yanan
> > +                cluster_id = cpus->cpus[n].props.thread_id /
> > +                             (ms->smp.cores * ms->smp.threads);
> > +                if (g_queue_find(cluster_list, GUINT_TO_POINTER(cluster_id))) {
> > +                    continue;
> > +                }
> > +
> > +                g_queue_push_tail(cluster_list, GUINT_TO_POINTER(cluster_id));
> >                   g_queue_push_tail(list,
> >                       GUINT_TO_POINTER(table_data->len - pptt_start));
> >                   build_processor_hierarchy_node(
> >                       table_data,
> >                       (0 << 0), /* not a physical package */
> > -                    parent_offset, cluster, NULL, 0);
> > +                    parent_offset, cluster_id, NULL, 0);
> >               }
> >           }
> >       }
> >   
> >       length = g_queue_get_length(list);
> >       for (i = 0; i < length; i++) {
> > -        int core;
> > -
> >           parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
> > -        for (core = 0; core < ms->smp.cores; core++) {
> > -            if (ms->smp.threads > 1) {
> > -                g_queue_push_tail(list,
> > -                    GUINT_TO_POINTER(table_data->len - pptt_start));
> > -                build_processor_hierarchy_node(
> > -                    table_data,
> > -                    (0 << 0), /* not a physical package */
> > -                    parent_offset, core, NULL, 0);
> > -            } else {
> > +        if (!mc->smp_props.clusters_supported) {
> > +            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
> > +        } else {
> > +            cluster_id = GPOINTER_TO_UINT(g_queue_pop_head(cluster_list));
> > +        }
> > +
> > +        for (n = 0; n < cpus->len; n++) {
> > +            if (!mc->smp_props.clusters_supported &&
> > +                cpus->cpus[n].props.socket_id != socket_id) {
> > +                continue;
> > +            }
> > +
> > +            /*
> > +             * We have to calculate the cluster ID because it isn't
> > +             * available in the CPU instance properties.
> > +             */
> > +            id = cpus->cpus[n].props.thread_id /
> > +                (ms->smp.cores * ms->smp.threads);
> > +            if (mc->smp_props.clusters_supported && id != cluster_id) {
> > +                continue;
> > +            }
> > +
> > +            core_id = cpus->cpus[n].props.core_id;
> > +            if (ms->smp.threads <= 1) {
> >                   build_processor_hierarchy_node(
> >                       table_data,
> >                       (1 << 1) | /* ACPI Processor ID valid */
> >                       (1 << 3),  /* Node is a Leaf */
> > -                    parent_offset, uid++, NULL, 0);
> > +                    parent_offset, core_id, NULL, 0);
> > +                continue;
> >               }
> > +
> > +            if (g_queue_find(core_list, GUINT_TO_POINTER(core_id))) {
> > +                continue;
> > +            }
> > +
> > +            g_queue_push_tail(core_list, GUINT_TO_POINTER(core_id));
> > +            g_queue_push_tail(list,
> > +                GUINT_TO_POINTER(table_data->len - pptt_start));
> > +            build_processor_hierarchy_node(
> > +                table_data,
> > +                (0 << 0), /* not a physical package */
> > +                parent_offset, core_id, NULL, 0);
> >           }
> >       }
> >   
> >       length = g_queue_get_length(list);
> >       for (i = 0; i < length; i++) {
> > -        int thread;
> > -
> >           parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
> > -        for (thread = 0; thread < ms->smp.threads; thread++) {
> > +        core_id = GPOINTER_TO_UINT(g_queue_pop_head(core_list));
> > +
> > +        for (n = 0; n < cpus->len; n++) {
> > +            if (cpus->cpus[n].props.core_id != core_id) {
> > +                continue;
> > +            }
> > +
> > +            thread_id = cpus->cpus[n].props.thread_id;
> >               build_processor_hierarchy_node(
> >                   table_data,
> >                   (1 << 1) | /* ACPI Processor ID valid */
> >                   (1 << 2) | /* Processor is a Thread */
> >                   (1 << 3),  /* Node is a Leaf */
> > -                parent_offset, uid++, NULL, 0);
> > +                parent_offset, thread_id, NULL, 0);
> >           }
> >       }
> >   
> >       g_queue_free(list);
> > +    g_queue_free(core_list);
> > +    g_queue_free(cluster_list);
> > +    g_queue_free(socket_list);
> >       acpi_table_end(linker, &table);
> >   }
> >     
>
Gavin Shan March 23, 2022, 3:31 a.m. UTC | #3
Hi Igor and Yanan,

On 3/18/22 9:28 PM, Igor Mammedov wrote:
> On Fri, 18 Mar 2022 14:34:12 +0800
> "wangyanan (Y)" <wangyanan55@huawei.com> wrote:
>> On 2022/3/3 11:11, Gavin Shan wrote:
>>> When the PPTT table is built, the CPU topology is re-calculated, but
>>> it's unnecessary because the CPU topology, except for the cluster IDs,
>>> has been populated in virt_possible_cpu_arch_ids() on the arm/virt machine.
>>>
>>> This avoids re-calculating the CPU topology by reusing the existing
>>> one in ms->possible_cpus. However, the cluster ID for the CPU instance
>>> has to be calculated dynamically because there is no corresponding
>>> field in struct CpuInstanceProperties. Currently, the only user of
>>> build_pptt() is the arm/virt machine.
>>>
>>> Signed-off-by: Gavin Shan <gshan@redhat.com>
>>> ---
>>>    hw/acpi/aml-build.c | 106 ++++++++++++++++++++++++++++++++++----------
>>>    1 file changed, 82 insertions(+), 24 deletions(-)
>>>
>>> diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
>>> index 8966e16320..572cf5fc00 100644
>>> --- a/hw/acpi/aml-build.c
>>> +++ b/hw/acpi/aml-build.c
>>> @@ -2002,18 +2002,27 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
>>>                    const char *oem_id, const char *oem_table_id)
>>>    {
>>>        MachineClass *mc = MACHINE_GET_CLASS(ms);
>>> +    CPUArchIdList *cpus = ms->possible_cpus;
>>> +    GQueue *socket_list = g_queue_new();
>>> +    GQueue *cluster_list = g_queue_new();
>>> +    GQueue *core_list = g_queue_new();
>>>        GQueue *list = g_queue_new();
>>>        guint pptt_start = table_data->len;
>>>        guint parent_offset;
>>>        guint length, i;
>>> -    int uid = 0;
>>> -    int socket;
>>> +    int n, id, socket_id, cluster_id, core_id, thread_id;
>>>        AcpiTable table = { .sig = "PPTT", .rev = 2,
>>>                            .oem_id = oem_id, .oem_table_id = oem_table_id };
>>>    
>>>        acpi_table_begin(&table, table_data);
>>>    
>>> -    for (socket = 0; socket < ms->smp.sockets; socket++) {
>>> +    for (n = 0; n < cpus->len; n++) {
>>> +        socket_id = cpus->cpus[n].props.socket_id;
>>> +        if (g_queue_find(socket_list, GUINT_TO_POINTER(socket_id))) {
>>> +            continue;
>>> +        }
>>> +
>>> +        g_queue_push_tail(socket_list, GUINT_TO_POINTER(socket_id));
>>>            g_queue_push_tail(list,
>>>                GUINT_TO_POINTER(table_data->len - pptt_start));
>>>            build_processor_hierarchy_node(
>>> @@ -2023,65 +2032,114 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
>>>                 * of a physical package
>>>                 */
>>>                (1 << 0),
>>> -            0, socket, NULL, 0);
>>> +            0, socket_id, NULL, 0);
>>>        }
>>>    
>>>        if (mc->smp_props.clusters_supported) {
>>>            length = g_queue_get_length(list);
>>>            for (i = 0; i < length; i++) {
>>> -            int cluster;
>>> -
>>>                parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
>>> -            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
>>> +            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
>>> +
>>> +            for (n = 0; n < cpus->len; n++) {
>>> +                if (cpus->cpus[n].props.socket_id != socket_id) {
>>> +                    continue;
>>> +                }
>>> +
>>> +                /*
>>> +                 * We have to calculate the cluster ID because it isn't
>>> +                 * available in the CPU instance properties.
>>> +                 */
>> Since we need the cluster ID now, maybe we can simply make it
>> available in the CPU instance properties.
> 
> agreed
> 

Thanks for your review. I will add it in v3. FYI, the addition
needs to be done in the PATCH[v3 01/04] where the CPU topology
is populated :)
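
Roughly, the idea is to carry the cluster ID alongside the other topology IDs
so that build_pptt() can read it directly instead of re-deriving it from
thread_id. The following is a sketch only, not the actual v3 change
(CpuInstanceProperties is QAPI-generated, so the real addition would go
through qapi/machine.json; the struct below is just an illustrative stand-in):

    #include <stdint.h>

    /* Illustrative stand-in for the QAPI-generated CpuInstanceProperties. */
    typedef struct IllustrativeCpuProps {
        int64_t socket_id;
        int64_t cluster_id;   /* new: filled where the topology is populated */
        int64_t core_id;
        int64_t thread_id;
    } IllustrativeCpuProps;

    /*
     * build_pptt() could then do a direct read per possible CPU, e.g.
     * cluster_id = cpus->cpus[n].props.cluster_id, instead of dividing
     * thread_id by (ms->smp.cores * ms->smp.threads).
     */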

Thanks,
Gavin

>>> +                cluster_id = cpus->cpus[n].props.thread_id /
>>> +                             (ms->smp.cores * ms->smp.threads);
>>> +                if (g_queue_find(cluster_list, GUINT_TO_POINTER(cluster_id))) {
>>> +                    continue;
>>> +                }
>>> +
>>> +                g_queue_push_tail(cluster_list, GUINT_TO_POINTER(cluster_id));
>>>                    g_queue_push_tail(list,
>>>                        GUINT_TO_POINTER(table_data->len - pptt_start));
>>>                    build_processor_hierarchy_node(
>>>                        table_data,
>>>                        (0 << 0), /* not a physical package */
>>> -                    parent_offset, cluster, NULL, 0);
>>> +                    parent_offset, cluster_id, NULL, 0);
>>>                }
>>>            }
>>>        }
>>>    
>>>        length = g_queue_get_length(list);
>>>        for (i = 0; i < length; i++) {
>>> -        int core;
>>> -
>>>            parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
>>> -        for (core = 0; core < ms->smp.cores; core++) {
>>> -            if (ms->smp.threads > 1) {
>>> -                g_queue_push_tail(list,
>>> -                    GUINT_TO_POINTER(table_data->len - pptt_start));
>>> -                build_processor_hierarchy_node(
>>> -                    table_data,
>>> -                    (0 << 0), /* not a physical package */
>>> -                    parent_offset, core, NULL, 0);
>>> -            } else {
>>> +        if (!mc->smp_props.clusters_supported) {
>>> +            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
>>> +        } else {
>>> +            cluster_id = GPOINTER_TO_UINT(g_queue_pop_head(cluster_list));
>>> +        }
>>> +
>>> +        for (n = 0; n < cpus->len; n++) {
>>> +            if (!mc->smp_props.clusters_supported &&
>>> +                cpus->cpus[n].props.socket_id != socket_id) {
>>> +                continue;
>>> +            }
>>> +
>>> +            /*
>>> +             * We have to calculate the cluster ID because it isn't
>>> +             * available in the CPU instance properties.
>>> +             */
>>> +            id = cpus->cpus[n].props.thread_id /
>>> +                (ms->smp.cores * ms->smp.threads);
>>> +            if (mc->smp_props.clusters_supported && id != cluster_id) {
>>> +                continue;
>>> +            }
>>> +
>>> +            core_id = cpus->cpus[n].props.core_id;
>>> +            if (ms->smp.threads <= 1) {
>>>                    build_processor_hierarchy_node(
>>>                        table_data,
>>>                        (1 << 1) | /* ACPI Processor ID valid */
>>>                        (1 << 3),  /* Node is a Leaf */
>>> -                    parent_offset, uid++, NULL, 0);
>>> +                    parent_offset, core_id, NULL, 0);
>>> +                continue;
>>>                }
>>> +
>>> +            if (g_queue_find(core_list, GUINT_TO_POINTER(core_id))) {
>>> +                continue;
>>> +            }
>>> +
>>> +            g_queue_push_tail(core_list, GUINT_TO_POINTER(core_id));
>>> +            g_queue_push_tail(list,
>>> +                GUINT_TO_POINTER(table_data->len - pptt_start));
>>> +            build_processor_hierarchy_node(
>>> +                table_data,
>>> +                (0 << 0), /* not a physical package */
>>> +                parent_offset, core_id, NULL, 0);
>>>            }
>>>        }
>>>    
>>>        length = g_queue_get_length(list);
>>>        for (i = 0; i < length; i++) {
>>> -        int thread;
>>> -
>>>            parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
>>> -        for (thread = 0; thread < ms->smp.threads; thread++) {
>>> +        core_id = GPOINTER_TO_UINT(g_queue_pop_head(core_list));
>>> +
>>> +        for (n = 0; n < cpus->len; n++) {
>>> +            if (cpus->cpus[n].props.core_id != core_id) {
>>> +                continue;
>>> +            }
>>> +
>>> +            thread_id = cpus->cpus[n].props.thread_id;
>>>                build_processor_hierarchy_node(
>>>                    table_data,
>>>                    (1 << 1) | /* ACPI Processor ID valid */
>>>                    (1 << 2) | /* Processor is a Thread */
>>>                    (1 << 3),  /* Node is a Leaf */
>>> -                parent_offset, uid++, NULL, 0);
>>> +                parent_offset, thread_id, NULL, 0);
>>>            }
>>>        }
>>>    
>>>        g_queue_free(list);
>>> +    g_queue_free(core_list);
>>> +    g_queue_free(cluster_list);
>>> +    g_queue_free(socket_list);
>>>        acpi_table_end(linker, &table);
>>>    }
>>>      
>>
>

Patch

diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c
index 8966e16320..572cf5fc00 100644
--- a/hw/acpi/aml-build.c
+++ b/hw/acpi/aml-build.c
@@ -2002,18 +2002,27 @@  void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
                 const char *oem_id, const char *oem_table_id)
 {
     MachineClass *mc = MACHINE_GET_CLASS(ms);
+    CPUArchIdList *cpus = ms->possible_cpus;
+    GQueue *socket_list = g_queue_new();
+    GQueue *cluster_list = g_queue_new();
+    GQueue *core_list = g_queue_new();
     GQueue *list = g_queue_new();
     guint pptt_start = table_data->len;
     guint parent_offset;
     guint length, i;
-    int uid = 0;
-    int socket;
+    int n, id, socket_id, cluster_id, core_id, thread_id;
     AcpiTable table = { .sig = "PPTT", .rev = 2,
                         .oem_id = oem_id, .oem_table_id = oem_table_id };
 
     acpi_table_begin(&table, table_data);
 
-    for (socket = 0; socket < ms->smp.sockets; socket++) {
+    for (n = 0; n < cpus->len; n++) {
+        socket_id = cpus->cpus[n].props.socket_id;
+        if (g_queue_find(socket_list, GUINT_TO_POINTER(socket_id))) {
+            continue;
+        }
+
+        g_queue_push_tail(socket_list, GUINT_TO_POINTER(socket_id));
         g_queue_push_tail(list,
             GUINT_TO_POINTER(table_data->len - pptt_start));
         build_processor_hierarchy_node(
@@ -2023,65 +2032,114 @@  void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms,
              * of a physical package
              */
             (1 << 0),
-            0, socket, NULL, 0);
+            0, socket_id, NULL, 0);
     }
 
     if (mc->smp_props.clusters_supported) {
         length = g_queue_get_length(list);
         for (i = 0; i < length; i++) {
-            int cluster;
-
             parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-            for (cluster = 0; cluster < ms->smp.clusters; cluster++) {
+            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
+
+            for (n = 0; n < cpus->len; n++) {
+                if (cpus->cpus[n].props.socket_id != socket_id) {
+                    continue;
+                }
+
+                /*
+                 * We have to calculate the cluster ID because it isn't
+                 * available in the CPU instance properties.
+                 */
+                cluster_id = cpus->cpus[n].props.thread_id /
+                             (ms->smp.cores * ms->smp.threads);
+                if (g_queue_find(cluster_list, GUINT_TO_POINTER(cluster_id))) {
+                    continue;
+                }
+
+                g_queue_push_tail(cluster_list, GUINT_TO_POINTER(cluster_id));
                 g_queue_push_tail(list,
                     GUINT_TO_POINTER(table_data->len - pptt_start));
                 build_processor_hierarchy_node(
                     table_data,
                     (0 << 0), /* not a physical package */
-                    parent_offset, cluster, NULL, 0);
+                    parent_offset, cluster_id, NULL, 0);
             }
         }
     }
 
     length = g_queue_get_length(list);
     for (i = 0; i < length; i++) {
-        int core;
-
         parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (core = 0; core < ms->smp.cores; core++) {
-            if (ms->smp.threads > 1) {
-                g_queue_push_tail(list,
-                    GUINT_TO_POINTER(table_data->len - pptt_start));
-                build_processor_hierarchy_node(
-                    table_data,
-                    (0 << 0), /* not a physical package */
-                    parent_offset, core, NULL, 0);
-            } else {
+        if (!mc->smp_props.clusters_supported) {
+            socket_id = GPOINTER_TO_UINT(g_queue_pop_head(socket_list));
+        } else {
+            cluster_id = GPOINTER_TO_UINT(g_queue_pop_head(cluster_list));
+        }
+
+        for (n = 0; n < cpus->len; n++) {
+            if (!mc->smp_props.clusters_supported &&
+                cpus->cpus[n].props.socket_id != socket_id) {
+                continue;
+            }
+
+            /*
+             * We have to calculate the cluster ID because it isn't
+             * available in the CPU instance properties.
+             */
+            id = cpus->cpus[n].props.thread_id /
+                (ms->smp.cores * ms->smp.threads);
+            if (mc->smp_props.clusters_supported && id != cluster_id) {
+                continue;
+            }
+
+            core_id = cpus->cpus[n].props.core_id;
+            if (ms->smp.threads <= 1) {
                 build_processor_hierarchy_node(
                     table_data,
                     (1 << 1) | /* ACPI Processor ID valid */
                     (1 << 3),  /* Node is a Leaf */
-                    parent_offset, uid++, NULL, 0);
+                    parent_offset, core_id, NULL, 0);
+                continue;
             }
+
+            if (g_queue_find(core_list, GUINT_TO_POINTER(core_id))) {
+                continue;
+            }
+
+            g_queue_push_tail(core_list, GUINT_TO_POINTER(core_id));
+            g_queue_push_tail(list,
+                GUINT_TO_POINTER(table_data->len - pptt_start));
+            build_processor_hierarchy_node(
+                table_data,
+                (0 << 0), /* not a physical package */
+                parent_offset, core_id, NULL, 0);
         }
     }
 
     length = g_queue_get_length(list);
     for (i = 0; i < length; i++) {
-        int thread;
-
         parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list));
-        for (thread = 0; thread < ms->smp.threads; thread++) {
+        core_id = GPOINTER_TO_UINT(g_queue_pop_head(core_list));
+
+        for (n = 0; n < cpus->len; n++) {
+            if (cpus->cpus[n].props.core_id != core_id) {
+                continue;
+            }
+
+            thread_id = cpus->cpus[n].props.thread_id;
             build_processor_hierarchy_node(
                 table_data,
                 (1 << 1) | /* ACPI Processor ID valid */
                 (1 << 2) | /* Processor is a Thread */
                 (1 << 3),  /* Node is a Leaf */
-                parent_offset, uid++, NULL, 0);
+                parent_offset, thread_id, NULL, 0);
         }
     }
 
     g_queue_free(list);
+    g_queue_free(core_list);
+    g_queue_free(cluster_list);
+    g_queue_free(socket_list);
     acpi_table_end(linker, &table);
 }