
[v4,3/7] arm/virt.c: add cache hierarchy to device tree

Message ID 20241216175414.1953-4-alireza.sanaee@huawei.com (mailing list archive)
State New
Series Specifying cache topology on ARM

Commit Message

Alireza Sanaee Dec. 16, 2024, 5:54 p.m. UTC
Specify at which layer (core/cluster/socket) caches are found in the CPU
topology, and update the cache topology in the device tree (spec v0.4).
Example:

Here, 2 sockets (packages), 2 clusters, 4 cores and 2 threads are
created, 2*2*4*2 logical cores in aggregate. In the smp-cache object,
cores have l1d and l1i (extending this is not difficult). The clusters
share a unified l2 cache, and finally the sockets share l3. In this
patch, threads share the l1 caches by default, but this can be adjusted
if required.

Currently only three levels of cache are supported. The patch does not
allow partial declaration of caches; in other words, either all caches
must be defined or caches must be skipped entirely.

./qemu-system-aarch64 \
    -machine virt,\
         smp-cache.0.cache=l1i,smp-cache.0.topology=core,\
         smp-cache.1.cache=l1d,smp-cache.1.topology=core,\
         smp-cache.2.cache=l2,smp-cache.2.topology=cluster,\
         smp-cache.3.cache=l3,smp-cache.3.topology=socket\
    -cpu max \
    -m 2048 \
    -smp sockets=2,clusters=2,cores=4,threads=1 \
    -kernel ./Image.gz \
    -append "console=ttyAMA0 root=/dev/ram rdinit=/init acpi=force" \
    -initrd rootfs.cpio.gz \
    -bios ./edk2-aarch64-code.fd \
    -nographic

For instance, the device tree generated for a scenario with 2 sockets,
2 clusters, 2 cores and 2 threads (16 PEs in total) describes, as an
example, L1i and L1d as private to each thread, while l2 and l3 are
shared at the socket level.

Limitation: SMT cores cannot share L1 cache for now. This
problem does not exist in PPTT tables.

Signed-off-by: Alireza Sanaee <alireza.sanaee@huawei.com>
Co-developed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Signed-off-by: Jonathan Cameron <jonathan.cameron@huawei.com>
---
 hw/arm/virt.c         | 390 ++++++++++++++++++++++++++++++++++++++++++
 hw/cpu/core.c         |  97 +++++++++++
 include/hw/arm/virt.h |   5 +
 include/hw/cpu/core.h |  26 +++
 4 files changed, 518 insertions(+)

Comments

Jonathan Cameron Dec. 23, 2024, 6:09 p.m. UTC | #1
On Mon, 16 Dec 2024 17:54:10 +0000
Alireza Sanaee <alireza.sanaee@huawei.com> wrote:

> Specify at which layer (core/cluster/socket) caches are found in the CPU
> topology, and update the cache topology in the device tree (spec v0.4).
> Example:
> 
> Here, 2 sockets (packages), 2 clusters, 4 cores and 2 threads are
> created, 2*2*4*2 logical cores in aggregate. In the smp-cache object,
> cores have l1d and l1i (extending this is not difficult). The clusters
> share a unified l2 cache, and finally the sockets share l3. In this
> patch, threads share the l1 caches by default, but this can be adjusted
> if required.
> 
> Currently only three levels of cache are supported. The patch does not
> allow partial declaration of caches; in other words, either all caches
> must be defined or caches must be skipped entirely.
> 
> ./qemu-system-aarch64 \
>     -machine virt,\
>          smp-cache.0.cache=l1i,smp-cache.0.topology=core,\
>          smp-cache.1.cache=l1d,smp-cache.1.topology=core,\
>          smp-cache.2.cache=l2,smp-cache.2.topology=cluster,\
>          smp-cache.3.cache=l3,smp-cache.3.topology=socket\
>     -cpu max \
>     -m 2048 \
>     -smp sockets=2,clusters=2,cores=4,threads=1 \
>     -kernel ./Image.gz \
>     -append "console=ttyAMA0 root=/dev/ram rdinit=/init acpi=force" \
>     -initrd rootfs.cpio.gz \
>     -bios ./edk2-aarch64-code.fd \
>     -nographic
> 
> For instance, the device tree generated for a scenario with 2 sockets,
> 2 clusters, 2 cores and 2 threads (16 PEs in total) describes, as an
> example, L1i and L1d as private to each thread, while l2 and l3 are
> shared at the socket level.
> 
> Limitation: SMT cores cannot share L1 cache for now. This
> problem does not exist in PPTT tables.
> 
> Signed-off-by: Alireza Sanaee <alireza.sanaee@huawei.com>
> Co-developed-by: Jonathan Cameron <jonathan.cameron@huawei.com>
> Signed-off-by: Jonathan Cameron <jonathan.cameron@huawei.com>
Hi Ali,


Hmm. So the bit where the Co-dev makes sense is the utility functions pulled
forwards from the ACPI code. That is quite a bit of the code so fair enough.

A few comments and questions follow that I missed during internal reviews.
Sorry they are late!


What happens if we see registers that indicate an L4 or higher?  Does it
just ignore it for now?  I've no problem with it doing so as those are
rare systems at best and describing the lower caches is still better
than nothing. I'm not sure what Linux does though if the CPU registers
say there are more caches than in DT / ACPI?

> ---
>  hw/arm/virt.c         | 390 ++++++++++++++++++++++++++++++++++++++++++
>  hw/cpu/core.c         |  97 +++++++++++
>  include/hw/arm/virt.h |   5 +
>  include/hw/cpu/core.h |  26 +++
>  4 files changed, 518 insertions(+)


>  static void fdt_add_cpu_nodes(const VirtMachineState *vms)
>  {
>      int cpu;
> @@ -418,6 +608,24 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
>      const MachineState *ms = MACHINE(vms);
>      const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
>      int smp_cpus = ms->smp.cpus;
> +    int socket_id, cluster_id, core_id, thread_id;
> +    uint32_t next_level = 0;
> +    uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
> +    uint32_t thread_offset = 0;
> +    int last_socket = -1, last_cluster = -1, last_core = -1, last_thread = -1;
> +    int top_node = 3, top_cluster = 3, top_core = 3, top_thread = 3;

I wonder if a few comments here would make sense, or something that makes it
clear that 3 is the number of cache levels currently describable.
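
Something like the below maybe (a sketch only; the macro name is made up):

/* Deepest cache level that the DT code can currently describe. */
#define FDT_MAX_CACHE_LEVEL 3
...
int top_node = FDT_MAX_CACHE_LEVEL, top_cluster = FDT_MAX_CACHE_LEVEL,
    top_core = FDT_MAX_CACHE_LEVEL, top_thread = FDT_MAX_CACHE_LEVEL;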

> +    int bottom_node = 3, bottom_cluster = 3, bottom_core = 3, bottom_thread = 3;
> +    unsigned int num_cache;
> +    CPUCaches caches[16];
> +    bool cache_created = false;
> +
> +    num_cache = virt_get_caches(vms, caches);
> +
> +    if (ms->smp_cache.IsDefined &&
> +        partial_cache_description(ms, caches, num_cache)) {
> +            error_setg(&error_fatal, "Missing cache description");
> +            return;
> +    }
>  
>      /*
>       * See Linux Documentation/devicetree/bindings/arm/cpus.yaml
> @@ -446,9 +654,15 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
>      qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
>  
>      for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
> +        socket_id = cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads);
> +        cluster_id = cpu / (ms->smp.cores * ms->smp.threads) % ms->smp.clusters;
> +        core_id = cpu / (ms->smp.threads) % ms->smp.cores;
> +        thread_id = cpu % ms->smp.cores;
> +
>          char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
>          ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
>          CPUState *cs = CPU(armcpu);
> +        const char *prefix = NULL;
>  
>          qemu_fdt_add_subnode(ms->fdt, nodename);
>          qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu");
> @@ -478,6 +692,177 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms)
>                                    qemu_fdt_alloc_phandle(ms->fdt));
>          }
>  
> +        if (!vmc->no_cpu_topology && num_cache) {
> +            for (uint8_t i = 0; i < num_cache; i++) {
> +                /* only level 1 in the CPU entry */
> +                if (caches[i].level > 1) {
> +                    continue;
> +                }
> +

One blank line only.

> +
> +                if (caches[i].type == INSTRUCTION) {
> +                    prefix = "i-cache";
> +                } else if (caches[i].type == DATA) {
> +                    prefix = "d-cache";
> +                } else if (caches[i].type == UNIFIED) {
> +                    error_setg(&error_fatal,
> +                               "Unified type is not implemented at level %d",
> +                               caches[i].level);
> +                    return;
> +                } else {
> +                    error_setg(&error_fatal, "Undefined cache type");
> +                    return;
> +                }
> +
> +                set_cache_properties(ms->fdt, nodename, prefix, caches[i]);
> +            }
> +        }
> +
> +        if (socket_id != last_socket) {
> +            bottom_node = top_node;
> +            /* this assumes socket as the highest topological level */

Is there any way we can check that remains true?  Maybe a question for other
reviewers.

> +            socket_offset = 0;
> +            cluster_offset = 0;
> +            if (cache_described_at(ms, CPU_TOPOLOGY_LEVEL_SOCKET) &&
> +                find_the_lowest_level_cache_defined_at_level(ms,
> +                    &bottom_node,
> +                    CPU_TOPOLOGY_LEVEL_SOCKET)) {
> +
> +                if (bottom_node == 1) {
> +                    error_report("Cannot share L1 at socket_id %d.", socket_id);

Maybe add a comment on why.  (DT limitation rather than a theoretical one).
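
Something along these lines perhaps (wording only a suggestion, based on the
fact that this series emits L1 as per-cpu-node properties):

/*
 * In this DT layout, L1 is described via i-cache-* and d-cache-*
 * properties on each cpu node rather than as a separate cache node,
 * so there is no phandle that several PEs could share for L1.
 */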

> +                }
> +
> +                cache_created = add_cpu_cache_hierarchy(ms->fdt, caches,
> +                                                        num_cache,
> +                                                        top_node,
> +                                                        bottom_node, cpu,
> +                                                        &socket_offset);
> +
> +                if (!cache_created) {
> +                    error_setg(&error_fatal,
> +                               "Socket: No caches at levels %d-%d",
> +                               top_node, bottom_node);
> +                    return;
> +                }
> +
> +                top_cluster = bottom_node - 1;
> +            }
> +
> +            last_socket = socket_id;
> +        }
> +
...
> diff --git a/hw/cpu/core.c b/hw/cpu/core.c
> index 495a5c30ff..186bb367e7 100644
> --- a/hw/cpu/core.c
> +++ b/hw/cpu/core.c
> @@ -102,4 +102,101 @@ static void cpu_core_register_types(void)
>      type_register_static(&cpu_core_type_info);
>  }
>  
> +bool cache_described_at(const MachineState *ms, CpuTopologyLevel level)
> +{
> +    if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3) == level ||
> +        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2) == level ||
> +        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I) == level ||
> +        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D) == level) {
> +        return true;
> +    }
> +    return false;
> +}
> +
> +int partial_cache_description(const MachineState *ms,
> +                              CPUCaches *caches,
> +                              int num_caches)

Can be wrapped onto fewer sub 80 char lines.
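
i.e. something like:

int partial_cache_description(const MachineState *ms, CPUCaches *caches,
                              int num_caches)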

> +{
> +    int level, c;
> +
> +    for (level = 1; level < num_caches; level++) {
> +        for (c = 0; c < num_caches; c++) {
> +            if (caches[c].level != level) {
> +                continue;
> +            }
> +
> +            switch (level) {
> +            case 1:
> +                /*
> +                 * L1 cache is assumed to have both L1I and L1D available.
> +                 * Technically both need to be checked.
> +                 */
> +                if (machine_get_cache_topo_level(ms, 
> +                                                 CACHE_LEVEL_AND_TYPE_L1I) ==
> +                    CPU_TOPOLOGY_LEVEL_DEFAULT)
> +                {
> +                    assert(machine_get_cache_topo_level(ms,
> +                                CACHE_LEVEL_AND_TYPE_L1D) ==
> +                           CPU_TOPOLOGY_LEVEL_DEFAULT);

assert() is not really an appropriate way to trigger an exit here.
Use error_setg() or similar, or just trigger an error_fatal.
Not sure what is common in related code.
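
Perhaps something along these lines (untested sketch; the message wording is
just an example):

if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I) ==
    CPU_TOPOLOGY_LEVEL_DEFAULT) {
    if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D) !=
        CPU_TOPOLOGY_LEVEL_DEFAULT) {
        error_setg(&error_fatal,
                   "L1I and L1D caches must be described at the same level");
    }
    return level;
}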

> +                    return level;
> +                }
> +                break;
> +            case 2:
> +                if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2) ==
> +                    CPU_TOPOLOGY_LEVEL_DEFAULT) {
> +                    return level;
> +                }
> +                break;
> +            case 3:
> +                if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3) ==
> +                    CPU_TOPOLOGY_LEVEL_DEFAULT) {
> +                    return level;
> +                }
> +                break;
> +            }
> +        }
> +    }
> +
> +    return 0;
> +}
> +
> +/*
> + * This function assumes l3 and l2 have unified cache and l1 is split l1d
> + * and l1i, and further prepares the lowest cache level for a topology
> + * level.  The info will be fed to build_caches to create caches at the
> + * right level.
> + */
> +int find_the_lowest_level_cache_defined_at_level(const MachineState *ms,
> +                                                 int *level_found,
> +                                                 CpuTopologyLevel topo_level) {
> +
> +    CpuTopologyLevel level;
> +
> +    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I);
> +    if (level == topo_level) {
> +        *level_found = 1;
> +        return 1;

This code evolved, but now it makes little sense to return the level found
and then also put it in level_found.  Perhaps just return a bool?
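
i.e. maybe something like (sketch, keeping the out-parameter):

bool find_the_lowest_level_cache_defined_at_level(const MachineState *ms,
                                                  int *level_found,
                                                  CpuTopologyLevel topo_level)
{
    if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I) ==
            topo_level ||
        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D) ==
            topo_level) {
        *level_found = 1;
        return true;
    }
    if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2) ==
            topo_level) {
        *level_found = 2;
        return true;
    }
    if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3) ==
            topo_level) {
        *level_found = 3;
        return true;
    }
    return false;
}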

> +    }
> +
> +    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D);
> +    if (level == topo_level) {
> +        *level_found = 1;
> +        return 1;
> +    }
> +
> +    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2);
> +    if (level == topo_level) {
> +        *level_found = 2;
> +        return 2;
> +    }
> +
> +    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3);
> +    if (level == topo_level) {
> +        *level_found = 3;
> +        return 3;
> +    }
> +
> +    return 0;
> +}
> +
>  type_init(cpu_core_register_types)
> diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
> index a4d937ed45..7b0311ce6e 100644
> --- a/include/hw/arm/virt.h
> +++ b/include/hw/arm/virt.h
> @@ -39,6 +39,7 @@
>  #include "sysemu/kvm.h"
>  #include "hw/intc/arm_gicv3_common.h"
>  #include "qom/object.h"
> +#include "hw/cpu/core.h"
>  
>  #define NUM_GICV2M_SPIS       64
>  #define NUM_VIRTIO_TRANSPORTS 32
> @@ -50,6 +51,8 @@
>  /* GPIO pins */
>  #define GPIO_PIN_POWER_BUTTON  3
>  
> +#define CPU_MAX_CACHES 16
> +
>  enum {
>      VIRT_FLASH,
>      VIRT_MEM,
> @@ -188,6 +191,8 @@ OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
>  
>  void virt_acpi_setup(VirtMachineState *vms);
>  bool virt_is_acpi_enabled(VirtMachineState *vms);

> +unsigned int virt_get_caches(const VirtMachineState *vms,
> +                             CPUCaches *caches);
>  
Fits on one line under 80 chars, so undo that wrap.
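
i.e.:

unsigned int virt_get_caches(const VirtMachineState *vms, CPUCaches *caches);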

Thanks,

Jonathan

Patch

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 687fe0bb8b..ae35539fb9 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -228,6 +228,132 @@  static const int a15irqmap[] = {
     [VIRT_PLATFORM_BUS] = 112, /* ...to 112 + PLATFORM_BUS_NUM_IRQS -1 */
 };
 
+unsigned int virt_get_caches(const VirtMachineState *vms,
+                             CPUCaches *caches)
+{
+    ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(0)); /* assume homogeneous CPUs */
+    bool ccidx = cpu_isar_feature(any_ccidx, armcpu);
+    unsigned int num_cache, i;
+    int level_instr = 1, level_data = 1;
+
+    for (i = 0, num_cache = 0; i < CPU_MAX_CACHES; i++, num_cache++) {
+        int type = (armcpu->clidr >> (3 * i)) & 7;
+        int bank_index;
+        int level;
+        CPUCacheType cache_type;
+
+        if (type == 0) {
+            break;
+        }
+
+        switch (type) {
+        case 1:
+            cache_type = INSTRUCTION;
+            level = level_instr;
+            break;
+        case 2:
+            cache_type = DATA;
+            level = level_data;
+            break;
+        case 4:
+            cache_type = UNIFIED;
+            level = level_instr > level_data ? level_instr : level_data;
+            break;
+        case 3: /* Split - Do data first */
+            cache_type = DATA;
+            level = level_data;
+            break;
+        default:
+            error_setg(&error_abort, "Unrecognized cache type");
+            return 0;
+        }
+        /*
+         * ccsidr is indexed using both the level and whether it is
+         * an instruction cache. Unified caches use the same storage
+         * as data caches.
+         */
+        bank_index = (i * 2) | ((type == 1) ? 1 : 0);
+        if (ccidx) {
+            caches[num_cache] = (CPUCaches) {
+                .type =  cache_type,
+                .level = level,
+                .linesize = 1 << (FIELD_EX64(armcpu->ccsidr[bank_index],
+                                             CCSIDR_EL1,
+                                             CCIDX_LINESIZE) + 4),
+                .associativity = FIELD_EX64(armcpu->ccsidr[bank_index],
+                                            CCSIDR_EL1,
+                                            CCIDX_ASSOCIATIVITY) + 1,
+                .sets = FIELD_EX64(armcpu->ccsidr[bank_index], CCSIDR_EL1,
+                                   CCIDX_NUMSETS) + 1,
+            };
+        } else {
+            caches[num_cache] = (CPUCaches) {
+                .type =  cache_type,
+                .level = level,
+                .linesize = 1 << (FIELD_EX64(armcpu->ccsidr[bank_index],
+                                             CCSIDR_EL1, LINESIZE) + 4),
+                .associativity = FIELD_EX64(armcpu->ccsidr[bank_index],
+                                            CCSIDR_EL1,
+                                            ASSOCIATIVITY) + 1,
+                .sets = FIELD_EX64(armcpu->ccsidr[bank_index], CCSIDR_EL1,
+                                   NUMSETS) + 1,
+            };
+        }
+        caches[num_cache].size = caches[num_cache].associativity *
+            caches[num_cache].sets * caches[num_cache].linesize;
+
+        /* Break one 'split' entry up into two records */
+        if (type == 3) {
+            num_cache++;
+            bank_index = (i * 2) | 1;
+            if (ccidx) {
+                /* Instruction cache: bottom bit set when reading banked reg */
+                caches[num_cache] = (CPUCaches) {
+                    .type = INSTRUCTION,
+                    .level = level_instr,
+                    .linesize = 1 << (FIELD_EX64(armcpu->ccsidr[bank_index],
+                                                 CCSIDR_EL1,
+                                                 CCIDX_LINESIZE) + 4),
+                    .associativity = FIELD_EX64(armcpu->ccsidr[bank_index],
+                                                CCSIDR_EL1,
+                                                CCIDX_ASSOCIATIVITY) + 1,
+                    .sets = FIELD_EX64(armcpu->ccsidr[bank_index], CCSIDR_EL1,
+                                       CCIDX_NUMSETS) + 1,
+                };
+            } else {
+                caches[num_cache] = (CPUCaches) {
+                    .type = INSTRUCTION,
+                    .level = level_instr,
+                    .linesize = 1 << (FIELD_EX64(armcpu->ccsidr[bank_index],
+                                                 CCSIDR_EL1, LINESIZE) + 4),
+                    .associativity = FIELD_EX64(armcpu->ccsidr[bank_index],
+                                                CCSIDR_EL1,
+                                                ASSOCIATIVITY) + 1,
+                    .sets = FIELD_EX64(armcpu->ccsidr[bank_index], CCSIDR_EL1,
+                                       NUMSETS) + 1,
+                };
+            }
+            caches[num_cache].size = caches[num_cache].associativity *
+                caches[num_cache].sets * caches[num_cache].linesize;
+        }
+        switch (type) {
+        case 1:
+            level_instr++;
+            break;
+        case 2:
+            level_data++;
+            break;
+        case 3:
+        case 4:
+            level_instr++;
+            level_data++;
+            break;
+        }
+    }
+
+    return num_cache;
+}
+
 static void create_randomness(MachineState *ms, const char *node)
 {
     struct {
@@ -411,6 +537,70 @@  static void fdt_add_timer_nodes(const VirtMachineState *vms)
     }
 }
 
+static void add_cache_node(void *fdt, char * nodepath, CPUCaches cache,
+                           uint32_t *next_level) {
+    /* Assume L2/3 are unified caches. */
+
+    uint32_t phandle;
+
+    qemu_fdt_add_path(fdt, nodepath);
+    phandle = qemu_fdt_alloc_phandle(fdt);
+    qemu_fdt_setprop_cell(fdt, nodepath, "phandle", phandle);
+    qemu_fdt_setprop_cell(fdt, nodepath, "cache-level", cache.level);
+    qemu_fdt_setprop_cell(fdt, nodepath, "cache-size", cache.size);
+    qemu_fdt_setprop_cell(fdt, nodepath, "cache-block-size", cache.linesize);
+    qemu_fdt_setprop_cell(fdt, nodepath, "cache-sets", cache.sets);
+    qemu_fdt_setprop(fdt, nodepath, "cache-unified", NULL, 0);
+    if (cache.level != 3) {
+        /* top level cache doesn't have next-level-cache property */
+        qemu_fdt_setprop_cell(fdt, nodepath, "next-level-cache", *next_level);
+    }
+
+    *next_level = phandle;
+}
+
+static bool add_cpu_cache_hierarchy(void *fdt, CPUCaches* cache,
+                                    uint32_t cache_cnt,
+                                    uint32_t top_level,
+                                    uint32_t bottom_level,
+                                    uint32_t cpu_id,
+                                    uint32_t *next_level) {
+    bool found_cache = false;
+    char *nodepath;
+
+    for (int level = top_level; level >= bottom_level; level--) {
+        for (int i = 0; i < cache_cnt; i++) {
+            if (i != level) {
+                continue;
+            }
+
+            nodepath = g_strdup_printf("/cpus/cpu@%d/l%d-cache",
+                                       cpu_id, level);
+            add_cache_node(fdt, nodepath, cache[i], next_level);
+            found_cache = true;
+            g_free(nodepath);
+
+        }
+    }
+
+    return found_cache;
+}
+
+static void set_cache_properties(void *fdt, const char *nodename,
+                                 const char *prefix, CPUCaches cache)
+{
+    char prop_name[64];
+
+    snprintf(prop_name, sizeof(prop_name), "%s-block-size", prefix);
+    qemu_fdt_setprop_cell(fdt, nodename, prop_name, cache.linesize);
+
+    snprintf(prop_name, sizeof(prop_name), "%s-size", prefix);
+    qemu_fdt_setprop_cell(fdt, nodename, prop_name, cache.size);
+
+    snprintf(prop_name, sizeof(prop_name), "%s-sets", prefix);
+    qemu_fdt_setprop_cell(fdt, nodename, prop_name, cache.sets);
+}
+
 static void fdt_add_cpu_nodes(const VirtMachineState *vms)
 {
     int cpu;
@@ -418,6 +608,24 @@  static void fdt_add_cpu_nodes(const VirtMachineState *vms)
     const MachineState *ms = MACHINE(vms);
     const VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms);
     int smp_cpus = ms->smp.cpus;
+    int socket_id, cluster_id, core_id, thread_id;
+    uint32_t next_level = 0;
+    uint32_t socket_offset = 0, cluster_offset = 0, core_offset = 0;
+    uint32_t thread_offset = 0;
+    int last_socket = -1, last_cluster = -1, last_core = -1, last_thread = -1;
+    int top_node = 3, top_cluster = 3, top_core = 3, top_thread = 3;
+    int bottom_node = 3, bottom_cluster = 3, bottom_core = 3, bottom_thread = 3;
+    unsigned int num_cache;
+    CPUCaches caches[16];
+    bool cache_created = false;
+
+    num_cache = virt_get_caches(vms, caches);
+
+    if (ms->smp_cache.IsDefined &&
+        partial_cache_description(ms, caches, num_cache)) {
+            error_setg(&error_fatal, "Missing cache description");
+            return;
+    }
 
     /*
      * See Linux Documentation/devicetree/bindings/arm/cpus.yaml
@@ -446,9 +654,15 @@  static void fdt_add_cpu_nodes(const VirtMachineState *vms)
     qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0);
 
     for (cpu = smp_cpus - 1; cpu >= 0; cpu--) {
+        socket_id = cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads);
+        cluster_id = cpu / (ms->smp.cores * ms->smp.threads) % ms->smp.clusters;
+        core_id = cpu / (ms->smp.threads) % ms->smp.cores;
+        thread_id = cpu % ms->smp.cores;
+
         char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
         ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
         CPUState *cs = CPU(armcpu);
+        const char *prefix = NULL;
 
         qemu_fdt_add_subnode(ms->fdt, nodename);
         qemu_fdt_setprop_string(ms->fdt, nodename, "device_type", "cpu");
@@ -478,6 +692,177 @@  static void fdt_add_cpu_nodes(const VirtMachineState *vms)
                                   qemu_fdt_alloc_phandle(ms->fdt));
         }
 
+        if (!vmc->no_cpu_topology && num_cache) {
+            for (uint8_t i = 0; i < num_cache; i++) {
+                /* only level 1 in the CPU entry */
+                if (caches[i].level > 1) {
+                    continue;
+                }
+
+
+                if (caches[i].type == INSTRUCTION) {
+                    prefix = "i-cache";
+                } else if (caches[i].type == DATA) {
+                    prefix = "d-cache";
+                } else if (caches[i].type == UNIFIED) {
+                    error_setg(&error_fatal,
+                               "Unified type is not implemented at level %d",
+                               caches[i].level);
+                    return;
+                } else {
+                    error_setg(&error_fatal, "Undefined cache type");
+                    return;
+                }
+
+                set_cache_properties(ms->fdt, nodename, prefix, caches[i]);
+            }
+        }
+
+        if (socket_id != last_socket) {
+            bottom_node = top_node;
+            /* this assumes socket as the highest topological level */
+            socket_offset = 0;
+            cluster_offset = 0;
+            if (cache_described_at(ms, CPU_TOPOLOGY_LEVEL_SOCKET) &&
+                find_the_lowest_level_cache_defined_at_level(ms,
+                    &bottom_node,
+                    CPU_TOPOLOGY_LEVEL_SOCKET)) {
+
+                if (bottom_node == 1) {
+                    error_report("Cannot share L1 at socket_id %d.", socket_id);
+                }
+
+                cache_created = add_cpu_cache_hierarchy(ms->fdt, caches,
+                                                        num_cache,
+                                                        top_node,
+                                                        bottom_node, cpu,
+                                                        &socket_offset);
+
+                if (!cache_created) {
+                    error_setg(&error_fatal,
+                               "Socket: No caches at levels %d-%d",
+                               top_node, bottom_node);
+                    return;
+                }
+
+                top_cluster = bottom_node - 1;
+            }
+
+            last_socket = socket_id;
+        }
+
+        if (cluster_id != last_cluster) {
+            bottom_cluster = top_cluster;
+            cluster_offset = socket_offset;
+            core_offset = 0;
+            if (cache_described_at(ms, CPU_TOPOLOGY_LEVEL_CLUSTER) &&
+                find_the_lowest_level_cache_defined_at_level(ms,
+                    &bottom_cluster,
+                    CPU_TOPOLOGY_LEVEL_CLUSTER)) {
+
+                cache_created = add_cpu_cache_hierarchy(ms->fdt, caches,
+                                                        num_cache,
+                                                        top_cluster,
+                                                        bottom_cluster, cpu,
+                                                        &cluster_offset);
+                if (bottom_cluster == 1) {
+                    error_report(
+                        "Cannot share L1 at socket_id %d, cluster_id %d.",
+                        socket_id, cluster_id);
+                }
+
+                if (!cache_created) {
+                    error_setg(&error_fatal,
+                               "Cluster: No caches at levels %d-%d",
+                               top_cluster, bottom_cluster);
+                    return;
+                }
+
+                top_core = bottom_cluster - 1;
+                top_thread = top_core;
+            } else if (top_cluster == bottom_node - 1) {
+                top_core = bottom_node - 1;
+                top_thread = top_core;
+            }
+
+            last_cluster = cluster_id;
+        }
+
+        if (core_id != last_core) {
+            bottom_core = top_core;
+            core_offset = cluster_offset;
+            if (cache_described_at(ms, CPU_TOPOLOGY_LEVEL_CORE) &&
+                find_the_lowest_level_cache_defined_at_level(ms,
+                    &bottom_core,
+                    CPU_TOPOLOGY_LEVEL_CORE)) {
+
+                if (bottom_core == 1) {
+                    bottom_core++;
+                } else {
+                    cache_created = add_cpu_cache_hierarchy(ms->fdt,
+                                                            caches,
+                                                            num_cache,
+                                                            top_core,
+                                                            bottom_core, cpu,
+                                                            &core_offset);
+
+                    if (!cache_created) {
+                        error_setg(&error_fatal,
+                                   "Core: No caches at levels %d-%d",
+                                   top_core, bottom_core);
+                        return;
+                    }
+                }
+
+                top_thread = bottom_core - 1;
+            } else if (top_cluster == bottom_node - 1) {
+                /* socket cache but no cluster cache and no core cache */
+                top_thread = top_cluster;
+            } else if (top_core == bottom_cluster - 1) {
+                /* cluster cache but no socket and no core cache */
+                top_thread = top_core;
+            } 
+
+            last_core = core_id;
+        }
+
+        if (ms->smp.threads > 1) {
+            thread_offset = core_offset;
+            if (thread_id != last_thread) {
+                bottom_thread = top_thread;
+                if (cache_described_at(ms, CPU_TOPOLOGY_LEVEL_THREAD) &&
+                    find_the_lowest_level_cache_defined_at_level(ms,
+                        &bottom_thread,
+                        CPU_TOPOLOGY_LEVEL_THREAD)) {
+
+                    if (bottom_thread == 1) {
+                        bottom_thread++;
+                    } else {
+                        cache_created = add_cpu_cache_hierarchy(ms->fdt,
+                                                                caches,
+                                                                num_cache,
+                                                                top_thread,
+                                                                bottom_thread,
+                                                                cpu,
+                                                                &thread_offset);
+
+                        if (!cache_created) {
+                            error_setg(&error_fatal,
+                                       "No caches at levels %d-%d",
+                                       top_thread, bottom_thread);
+                            return;
+                        }
+                    }
+                }
+
+                last_thread = thread_id;
+            }
+        }
+
+        next_level = (ms->smp.threads > 1) ? thread_offset : core_offset;
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "next-level-cache",
+                              next_level);
+
         g_free(nodename);
     }
 
@@ -3094,6 +3479,11 @@  static void virt_machine_class_init(ObjectClass *oc, void *data)
     hc->unplug = virt_machine_device_unplug_cb;
     mc->nvdimm_supported = true;
     mc->smp_props.clusters_supported = true;
+    /* Supported caches */
+    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1D] = true;
+    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L1I] = true;
+    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L2] = true;
+    mc->smp_props.cache_supported[CACHE_LEVEL_AND_TYPE_L3] = true;
     mc->auto_enable_numa_with_memhp = true;
     mc->auto_enable_numa_with_memdev = true;
     /* platform instead of architectural choice */
diff --git a/hw/cpu/core.c b/hw/cpu/core.c
index 495a5c30ff..186bb367e7 100644
--- a/hw/cpu/core.c
+++ b/hw/cpu/core.c
@@ -102,4 +102,101 @@  static void cpu_core_register_types(void)
     type_register_static(&cpu_core_type_info);
 }
 
+bool cache_described_at(const MachineState *ms, CpuTopologyLevel level)
+{
+    if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3) == level ||
+        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2) == level ||
+        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I) == level ||
+        machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D) == level) {
+        return true;
+    }
+    return false;
+}
+
+int partial_cache_description(const MachineState *ms,
+                              CPUCaches *caches,
+                              int num_caches)
+{
+    int level, c;
+
+    for (level = 1; level < num_caches; level++) {
+        for (c = 0; c < num_caches; c++) {
+            if (caches[c].level != level) {
+                continue;
+            }
+
+            switch (level) {
+            case 1:
+                /*
+                 * L1 cache is assumed to have both L1I and L1D available.
+                 * Technically both need to be checked.
+                 */
+                if (machine_get_cache_topo_level(ms, 
+                                                 CACHE_LEVEL_AND_TYPE_L1I) ==
+                    CPU_TOPOLOGY_LEVEL_DEFAULT)
+                {
+                    assert(machine_get_cache_topo_level(ms,
+                                CACHE_LEVEL_AND_TYPE_L1D) ==
+                           CPU_TOPOLOGY_LEVEL_DEFAULT);
+                    return level;
+                }
+                break;
+            case 2:
+                if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2) ==
+                    CPU_TOPOLOGY_LEVEL_DEFAULT) {
+                    return level;
+                }
+                break;
+            case 3:
+                if (machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3) ==
+                    CPU_TOPOLOGY_LEVEL_DEFAULT) {
+                    return level;
+                }
+                break;
+            }
+        }
+    }
+
+    return 0;
+}
+
+/*
+ * This function assumes l3 and l2 have unified cache and l1 is split l1d
+ * and l1i, and further prepares the lowest cache level for a topology
+ * level.  The info will be fed to build_caches to create caches at the
+ * right level.
+ */
+int find_the_lowest_level_cache_defined_at_level(const MachineState *ms,
+                                                 int *level_found,
+                                                 CpuTopologyLevel topo_level) {
+
+    CpuTopologyLevel level;
+
+    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1I);
+    if (level == topo_level) {
+        *level_found = 1;
+        return 1;
+    }
+
+    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L1D);
+    if (level == topo_level) {
+        *level_found = 1;
+        return 1;
+    }
+
+    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L2);
+    if (level == topo_level) {
+        *level_found = 2;
+        return 2;
+    }
+
+    level = machine_get_cache_topo_level(ms, CACHE_LEVEL_AND_TYPE_L3);
+    if (level == topo_level) {
+        *level_found = 3;
+        return 3;
+    }
+
+    return 0;
+}
+
 type_init(cpu_core_register_types)
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index a4d937ed45..7b0311ce6e 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -39,6 +39,7 @@ 
 #include "sysemu/kvm.h"
 #include "hw/intc/arm_gicv3_common.h"
 #include "qom/object.h"
+#include "hw/cpu/core.h"
 
 #define NUM_GICV2M_SPIS       64
 #define NUM_VIRTIO_TRANSPORTS 32
@@ -50,6 +51,8 @@ 
 /* GPIO pins */
 #define GPIO_PIN_POWER_BUTTON  3
 
+#define CPU_MAX_CACHES 16
+
 enum {
     VIRT_FLASH,
     VIRT_MEM,
@@ -188,6 +191,8 @@  OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE)
 
 void virt_acpi_setup(VirtMachineState *vms);
 bool virt_is_acpi_enabled(VirtMachineState *vms);
+unsigned int virt_get_caches(const VirtMachineState *vms,
+                             CPUCaches *caches);
 
 /* Return number of redistributors that fit in the specified region */
 static uint32_t virt_redist_capacity(VirtMachineState *vms, int region)
diff --git a/include/hw/cpu/core.h b/include/hw/cpu/core.h
index 98ab91647e..bcb9f7bb3e 100644
--- a/include/hw/cpu/core.h
+++ b/include/hw/cpu/core.h
@@ -25,6 +25,32 @@  struct CPUCore {
     int nr_threads;
 };
 
+typedef enum CPUCacheType {
+    DATA,
+    INSTRUCTION,
+    UNIFIED,
+} CPUCacheType;
+
+typedef struct CPUCaches {
+    CPUCacheType type;
+    uint32_t pptt_id;
+    uint32_t sets;
+    uint32_t size;
+    uint32_t level;
+    uint16_t linesize;
+    uint8_t attributes; /* write policy: 0x0 write back, 0x1 write through */
+    uint8_t associativity;
+} CPUCaches;
+
+int partial_cache_description(const MachineState *ms, CPUCaches *caches,
+                              int num_caches);
+
+bool cache_described_at(const MachineState *ms, CpuTopologyLevel level);
+
+int find_the_lowest_level_cache_defined_at_level(const MachineState *ms,
+                                                int *level_found,
+                                                CpuTopologyLevel topo_level);
+
 /* Note: topology field names need to be kept in sync with
  * 'CpuInstanceProperties' */