@@ -248,7 +248,7 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
CPUX86State *env = &cpu->env;
MachineState *ms = MACHINE(hotplug_dev);
X86MachineState *x86ms = X86_MACHINE(hotplug_dev);
- X86CPUTopoInfo topo_info;
+ X86CPUTopoInfo *topo_info = &env->topo_info;
if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
@@ -267,15 +267,13 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
}
}
- init_topo_info(&topo_info, x86ms);
+ init_topo_info(topo_info, x86ms);
if (ms->smp.modules > 1) {
- env->nr_modules = ms->smp.modules;
set_bit(CPU_TOPOLOGY_LEVEL_MODULE, env->avail_cpu_topo);
}
if (ms->smp.dies > 1) {
- env->nr_dies = ms->smp.dies;
set_bit(CPU_TOPOLOGY_LEVEL_DIE, env->avail_cpu_topo);
}
@@ -346,12 +344,12 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
topo_ids.module_id = cpu->module_id;
topo_ids.core_id = cpu->core_id;
topo_ids.smt_id = cpu->thread_id;
- cpu->apic_id = x86_apicid_from_topo_ids(&topo_info, &topo_ids);
+ cpu->apic_id = x86_apicid_from_topo_ids(topo_info, &topo_ids);
}
cpu_slot = x86_find_cpu_slot(MACHINE(x86ms), cpu->apic_id, &idx);
if (!cpu_slot) {
- x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
error_setg(errp,
"Invalid CPU [socket: %u, die: %u, module: %u, core: %u, thread: %u]"
@@ -374,7 +372,7 @@ void x86_cpu_pre_plug(HotplugHandler *hotplug_dev,
/* TODO: move socket_id/core_id/thread_id checks into x86_cpu_realizefn()
* once -smp refactoring is complete and there will be CPU private
* CPUState::nr_cores and CPUState::nr_threads fields instead of globals */
- x86_topo_ids_from_apicid(cpu->apic_id, &topo_info, &topo_ids);
+ x86_topo_ids_from_apicid(cpu->apic_id, topo_info, &topo_ids);
if (cpu->socket_id != -1 && cpu->socket_id != topo_ids.pkg_id) {
error_setg(errp, "property socket-id: %u doesn't match set apic-id:"
" 0x%x (socket-id: %u)", cpu->socket_id, cpu->apic_id,
@@ -312,11 +312,11 @@ void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
uint64_t cpu_x86_get_msr_core_thread_count(X86CPU *cpu)
{
- CPUState *cs = CPU(cpu);
+ CPUX86State *env = &cpu->env;
uint64_t val;
- val = cs->nr_threads * cs->nr_cores; /* thread count, bits 15..0 */
- val |= ((uint32_t)cs->nr_cores << 16); /* core count, bits 31..16 */
+ val = x86_threads_per_pkg(&env->topo_info); /* thread count, bits 15..0 */
+ val |= x86_cores_per_pkg(&env->topo_info) << 16; /* core count, bits 31..16 */
return val;
}
@@ -6495,15 +6495,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
CPUState *cs = env_cpu(env);
uint32_t limit;
uint32_t signature[3];
- X86CPUTopoInfo topo_info;
+ X86CPUTopoInfo *topo_info = &env->topo_info;
uint32_t threads_per_pkg;
- topo_info.dies_per_pkg = env->nr_dies;
- topo_info.modules_per_die = env->nr_modules;
- topo_info.cores_per_module = cs->nr_cores / env->nr_dies / env->nr_modules;
- topo_info.threads_per_core = cs->nr_threads;
-
- threads_per_pkg = x86_threads_per_pkg(&topo_info);
+ threads_per_pkg = x86_threads_per_pkg(topo_info);
/* Calculate & apply limits for different index ranges */
if (index >= 0xC0000000) {
@@ -6580,12 +6575,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
*eax &= ~0xFC000000;
- *eax |= max_core_ids_in_package(&topo_info) << 26;
+ *eax |= max_core_ids_in_package(topo_info) << 26;
if (host_vcpus_per_cache > threads_per_pkg) {
*eax &= ~0x3FFC000;
/* Share the cache at package level. */
- *eax |= max_thread_ids_for_cache(&topo_info,
+ *eax |= max_thread_ids_for_cache(topo_info,
CPU_TOPOLOGY_LEVEL_SOCKET) << 14;
}
}
@@ -6597,7 +6592,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0: /* L1 dcache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
@@ -6605,7 +6600,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 1: /* L1 icache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
if (!cpu->l1_cache_per_core) {
*eax &= ~MAKE_64BIT_MASK(14, 12);
@@ -6613,13 +6608,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 2: /* L2 cache info */
encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
if (cpu->enable_l3_cache) {
encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- &topo_info,
+ topo_info,
eax, ebx, ecx, edx);
break;
}
@@ -6702,12 +6697,12 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0:
- *eax = apicid_core_offset(&topo_info);
- *ebx = topo_info.threads_per_core;
+ *eax = apicid_core_offset(topo_info);
+ *ebx = topo_info->threads_per_core;
*ecx |= CPUID_B_ECX_TOPO_LEVEL_SMT << 8;
break;
case 1:
- *eax = apicid_pkg_offset(&topo_info);
+ *eax = apicid_pkg_offset(topo_info);
*ebx = threads_per_pkg;
*ecx |= CPUID_B_ECX_TOPO_LEVEL_CORE << 8;
break;
@@ -6733,7 +6728,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
}
- encode_topo_cpuid1f(env, count, &topo_info, eax, ebx, ecx, edx);
+ encode_topo_cpuid1f(env, count, topo_info, eax, ebx, ecx, edx);
break;
case 0xD: {
/* Processor Extended State */
@@ -7036,7 +7031,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
* thread ID within a package".
* Bits 7:0 is "The number of threads in the package is NC+1"
*/
- *ecx = (apicid_pkg_offset(&topo_info) << 12) |
+ *ecx = (apicid_pkg_offset(topo_info) << 12) |
(threads_per_pkg - 1);
} else {
*ecx = 0;
@@ -7065,19 +7060,19 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
switch (count) {
case 0: /* L1 dcache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l1d_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l1i_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l2_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
encode_cache_cpuid8000001d(env->cache_info_amd.l3_cache,
- &topo_info, eax, ebx, ecx, edx);
+ topo_info, eax, ebx, ecx, edx);
break;
default: /* end of info */
*eax = *ebx = *ecx = *edx = 0;
@@ -7089,7 +7084,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0x8000001E:
if (cpu->core_id <= 255) {
- encode_topo_cpuid8000001e(cpu, &topo_info, eax, ebx, ecx, edx);
+ encode_topo_cpuid8000001e(cpu, topo_info, eax, ebx, ecx, edx);
} else {
*eax = 0;
*ebx = 0;
@@ -7994,17 +7989,14 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
* fixes this issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
* based on inputs (sockets,cores,threads), it is still better to give
* users a warning.
- *
- * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
- * cs->nr_threads hasn't be populated yet and the checking is incorrect.
*/
if (IS_AMD_CPU(env) &&
!(env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_TOPOEXT) &&
- cs->nr_threads > 1) {
+ env->topo_info.threads_per_core > 1) {
warn_report_once("This family of AMD CPU doesn't support "
"hyperthreading(%d). Please configure -smp "
"options properly or try enabling topoext "
- "feature.", cs->nr_threads);
+ "feature.", env->topo_info.threads_per_core);
}
#ifndef CONFIG_USER_ONLY
@@ -8165,9 +8157,6 @@ static void x86_cpu_init_default_topo(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- env->nr_modules = 1;
- env->nr_dies = 1;
-
/* thread, core and socket levels are set by default. */
set_bit(CPU_TOPOLOGY_LEVEL_THREAD, env->avail_cpu_topo);
set_bit(CPU_TOPOLOGY_LEVEL_CORE, env->avail_cpu_topo);
@@ -2045,11 +2045,7 @@ typedef struct CPUArchState {
TPRAccess tpr_access_type;
- /* Number of dies within this CPU package. */
- unsigned nr_dies;
-
- /* Number of modules within one die. */
- unsigned nr_modules;
+ X86CPUTopoInfo topo_info;
/* Bitmap of available CPU topology levels for this CPU. */
DECLARE_BITMAP(avail_cpu_topo, CPU_TOPOLOGY_LEVEL__MAX);
The names of nr_modules/nr_dies are ambiguous and tend to mislead people.
Their purpose is to record and form the topology information, so just
maintain an X86CPUTopoInfo member in CPUX86State instead; nr_modules and
nr_dies can then be dropped.

As a benefit, x86 can switch to using the information in
CPUX86State::topo_info and get rid of nr_cores and nr_threads in CPUState.
This removes the dependency on qemu_init_vcpu(), so x86 can get and use
the topology info earlier in x86_cpu_realizefn().

Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
---
 hw/i386/x86-common.c     | 12 ++++------
 target/i386/cpu-sysemu.c |  6 ++---
 target/i386/cpu.c        | 51 ++++++++++++++++------------------
 target/i386/cpu.h        |  6 +----
 4 files changed, 29 insertions(+), 46 deletions(-)
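
For context, here is a minimal, illustrative sketch (not copied from QEMU's
headers) of how the per-package helpers used above, x86_cores_per_pkg() and
x86_threads_per_pkg(), can be derived from the X86CPUTopoInfo fields this
patch consolidates into CPUX86State. The helper bodies and the example
numbers are assumptions for illustration; they simply multiply out the
topology levels the same way the removed cpu_x86_cpuid() code did.

/*
 * Illustrative sketch only: per-package counts derived from the
 * X86CPUTopoInfo fields referenced in this patch.
 */
typedef struct X86CPUTopoInfo {
    unsigned dies_per_pkg;      /* dies per package */
    unsigned modules_per_die;   /* modules per die */
    unsigned cores_per_module;  /* cores per module */
    unsigned threads_per_core;  /* SMT threads per core */
} X86CPUTopoInfo;

static inline unsigned x86_cores_per_pkg(X86CPUTopoInfo *topo_info)
{
    return topo_info->cores_per_module *
           topo_info->modules_per_die *
           topo_info->dies_per_pkg;
}

static inline unsigned x86_threads_per_pkg(X86CPUTopoInfo *topo_info)
{
    return x86_cores_per_pkg(topo_info) * topo_info->threads_per_core;
}

/*
 * Worked example with a hypothetical
 * -smp sockets=1,dies=2,modules=2,cores=4,threads=2:
 * cpu_x86_get_msr_core_thread_count() would then build
 *   threads_per_pkg = 2 * 2 * 4 * 2 = 32   (bits 15..0)
 *   cores_per_pkg   = 2 * 2 * 4     = 16   (bits 31..16)
 *   val             = (16 << 16) | 32 = 0x00100020
 */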