@@ -235,13 +235,15 @@ typedef struct DeviceMemoryState {
/**
* CpuTopology:
* @cpus: the number of present logical processors on the machine
- * @cores: the number of cores in one package
+ * @clusters: the number of clusters in one package
+ * @cores: the number of cores in one cluster
* @threads: the number of threads in one core
* @sockets: the number of sockets on the machine
* @max_cpus: the maximum number of logical processors on the machine
*/
typedef struct CpuTopology {
unsigned int cpus;
+ unsigned int clusters;
unsigned int cores;
unsigned int threads;
unsigned int sockets;
@@ -184,25 +184,30 @@ SRST
ERST
DEF("smp", HAS_ARG, QEMU_OPTION_smp,
- "-smp [cpus=]n[,maxcpus=cpus][,cores=cores][,threads=threads][,dies=dies][,sockets=sockets]\n"
+ "-smp [cpus=]n[,maxcpus=cpus][,clusters=clusters][,cores=cores][,threads=threads][,dies=dies][,sockets=sockets]\n"
" set the number of CPUs to 'n' [default=1]\n"
" maxcpus= maximum number of total cpus, including\n"
" offline CPUs for hotplug, etc\n"
- " cores= number of CPU cores on one socket (for PC, it's on one die)\n"
+ " clusters= number of CPU clusters on one socket\n"
+ " (for PC, it's on one die)\n"
+ " cores= number of CPU cores on one cluster\n"
" threads= number of threads on one CPU core\n"
" dies= number of CPU dies on one socket (for PC only)\n"
" sockets= number of discrete sockets in the system\n",
QEMU_ARCH_ALL)
SRST
-``-smp [cpus=]n[,cores=cores][,threads=threads][,dies=dies][,sockets=sockets][,maxcpus=maxcpus]``
- Simulate an SMP system with n CPUs. On the PC target, up to 255 CPUs
- are supported. On Sparc32 target, Linux limits the number of usable
- CPUs to 4. For the PC target, the number of cores per die, the
- number of threads per cores, the number of dies per packages and the
- total number of sockets can be specified. Missing values will be
- computed. If any on the three values is given, the total number of
- CPUs n can be omitted. maxcpus specifies the maximum number of
- hotpluggable CPUs.
+``-smp [cpus=]n[,maxcpus=cpus][,clusters=clusters][,cores=cores][,threads=threads][,dies=dies][,sockets=sockets]``
+ Simulate an SMP system with n CPUs. On the PC target, up to 255
+ CPUs are supported. On the Sparc32 target, Linux limits the number
+ of usable CPUs to 4. For the PC target, the number of threads per
+ core, the number of cores per cluster, the number of clusters per
+ die, the number of dies per package and the total number of sockets
+ can be specified. For the ARM target, the number of threads per core,
+ the number of cores per cluster, the number of clusters per socket
+ and the total number of sockets can be specified. Missing values
+ will be computed. If any of these values is given, the total
+ number of CPUs n can be omitted. maxcpus specifies the maximum
+ number of hotpluggable CPUs.
ERST
DEF("numa", HAS_ARG, QEMU_OPTION_numa,
@@ -711,6 +711,9 @@ static QemuOptsList qemu_smp_opts = {
}, {
.name = "dies",
.type = QEMU_OPT_NUMBER,
+ }, {
+ .name = "clusters",
+ .type = QEMU_OPT_NUMBER,
}, {
.name = "cores",
.type = QEMU_OPT_NUMBER,
A cluster means a group of cores that share some resources (e.g. a
cache) below the LLC. For example, the ARM64 server chip Kunpeng 920
has 6 or 8 clusters in each NUMA node, and each cluster has 4 cores.
All clusters share the L3 cache, while cores within each cluster
share the L2 cache. There are also x86 CPU implementations (e.g.
Jacobsville) where the L2 cache is shared among a cluster of cores
instead of being exclusive to a single core: on Jacobsville there are
6 clusters of 4 Atom cores, each cluster sharing a separate L2, and
all 24 cores sharing the L3.

The cache affinity of clusters has been shown to improve Linux kernel
scheduling performance, and a patchset has been posted which adds a
generic sched_domain for clusters and adds a cluster level to the
arch-neutral CPU topology struct:

    struct cpu_topology {
        int thread_id;
        int core_id;
        int cluster_id;
        int package_id;
        int llc_id;
        cpumask_t thread_sibling;
        cpumask_t core_sibling;
        cpumask_t cluster_sibling;
        cpumask_t llc_sibling;
    };

The kernel documentation
Documentation/devicetree/bindings/cpu/cpu-topology.txt also defines a
four-level CPU topology hierarchy of socket/cluster/core/thread.
According to it, a socket node's child nodes must be one or more
cluster nodes, and a cluster node's child nodes must be one or more
cluster nodes or one or more core nodes.

So let's add arch-neutral "-smp clusters=*" command line support, so
that a future guest OS can make use of the cluster CPU topology for
better scheduling performance. Any architecture that has groups of
CPUs sharing some separate resources (e.g. an L2 cache) internally
below the LLC can use this parameter to define a VM with a
cluster-level CPU topology. For ARM machines, a four-level CPU
hierarchy can be defined: sockets/clusters/cores/threads. For PC
machines, a five-level CPU hierarchy can be defined:
sockets/dies/clusters/cores/threads.

Signed-off-by: Yanan Wang <wangyanan55@huawei.com>
---
 include/hw/boards.h |  4 +++-
 qemu-options.hx     | 27 ++++++++++++++++-----------
 softmmu/vl.c        |  3 +++
 3 files changed, 22 insertions(+), 12 deletions(-)
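For illustration, a minimal sketch in C (not QEMU code; it only
mirrors the CpuTopology field names added above, and it assumes the
usual rule that the maximum logical CPU count equals the product of
all topology levels, which is what "missing values will be computed"
relies on):

    #include <stdio.h>

    /* Minimal sketch, not QEMU code: compute the total logical CPU
     * count as the product of all topology levels. For an ARM-style
     * machine dies stays 1, leaving the four configurable levels
     * sockets/clusters/cores/threads. */
    int main(void)
    {
        unsigned int sockets = 2, dies = 1, clusters = 2;
        unsigned int cores = 4, threads = 2;
        unsigned int max_cpus = sockets * dies * clusters * cores * threads;

        printf("-smp %u,sockets=%u,clusters=%u,cores=%u,threads=%u\n",
               max_cpus, sockets, clusters, cores, threads);
        return 0;
    }

With these example values the sketch prints
"-smp 32,sockets=2,clusters=2,cores=4,threads=2", i.e. a four-level
ARM-style hierarchy of 2 sockets x 2 clusters x 4 cores x 2 threads.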