@@ -6014,7 +6014,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
{
X86CPU *cpu = env_archcpu(env);
CPUState *cs = env_cpu(env);
- uint32_t die_offset;
uint32_t limit;
uint32_t signature[3];
X86CPUTopoInfo topo_info;
@@ -6086,7 +6085,18 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
(cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
(cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
break;
- case 4:
+ case 4: {
+ /*
+ * CPUID.04H:EAX[bits 25:14]: Maximum number of addressable IDs for
+ * logical processors sharing this cache.
+ */
+ int addressable_threads_width;
+ /*
+ * CPUID.04H:EAX[bits 31:26]: Maximum number of addressable IDs for
+ * processor cores in the physical package.
+ */
+ int addressable_cores_width;
+
/* cache info: needed for Core compatibility */
if (cpu->cache_info_passthrough) {
x86_cpu_get_cache_cpuid(index, count, eax, ebx, ecx, edx);
@@ -6098,39 +6108,55 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
int host_vcpus_per_cache = 1 + ((*eax & 0x3FFC000) >> 14);
int vcpus_per_socket = cs->nr_cores * cs->nr_threads;
if (cs->nr_cores > 1) {
+ addressable_cores_width = apicid_pkg_offset(&topo_info) -
+ apicid_core_offset(&topo_info);
+
*eax &= ~0xFC000000;
- *eax |= (pow2ceil(cs->nr_cores) - 1) << 26;
+ *eax |= ((1 << addressable_cores_width) - 1) << 26;
}
if (host_vcpus_per_cache > vcpus_per_socket) {
+ /* Share the cache at package level. */
+ addressable_threads_width = apicid_pkg_offset(&topo_info);
+
*eax &= ~0x3FFC000;
- *eax |= (pow2ceil(vcpus_per_socket) - 1) << 14;
+ *eax |= ((1 << addressable_threads_width) - 1) << 14;
}
}
} else if (cpu->vendor_cpuid_only && IS_AMD_CPU(env)) {
*eax = *ebx = *ecx = *edx = 0;
} else {
*eax = 0;
+ addressable_cores_width = apicid_pkg_offset(&topo_info) -
+ apicid_core_offset(&topo_info);
+
switch (count) {
case 0: /* L1 dcache info */
+ addressable_threads_width = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
- cs->nr_threads, cs->nr_cores,
+ (1 << addressable_threads_width),
+ (1 << addressable_cores_width),
eax, ebx, ecx, edx);
break;
case 1: /* L1 icache info */
+ addressable_threads_width = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
- cs->nr_threads, cs->nr_cores,
+ (1 << addressable_threads_width),
+ (1 << addressable_cores_width),
eax, ebx, ecx, edx);
break;
case 2: /* L2 cache info */
+ addressable_threads_width = apicid_core_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
- cs->nr_threads, cs->nr_cores,
+ (1 << addressable_threads_width),
+ (1 << addressable_cores_width),
eax, ebx, ecx, edx);
break;
case 3: /* L3 cache info */
- die_offset = apicid_die_offset(&topo_info);
if (cpu->enable_l3_cache) {
+ addressable_threads_width = apicid_die_offset(&topo_info);
encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
- (1 << die_offset), cs->nr_cores,
+ (1 << addressable_threads_width),
+ (1 << addressable_cores_width),
eax, ebx, ecx, edx);
break;
}
@@ -6141,6 +6167,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
}
}
break;
+ }
case 5:
/* MONITOR/MWAIT Leaf */
*eax = cpu->mwait.eax; /* Smallest monitor-line size in bytes */
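
For readers less familiar with the APIC ID layout this hunk relies on: the patch stops deriving the CPUID.04H:EAX "maximum number of addressable IDs" fields from raw core/thread counts and instead derives them from spans of APIC ID bits, which can be wider than the plain counts once a die level sits between core and package. The standalone sketch below (not part of the patch) works through one hypothetical topology of 1 package, 2 dies, 3 cores per die and 2 threads per core; apicid_bitwidth_for_count() is re-implemented here purely for illustration of what QEMU's topology helpers (include/hw/i386/topology.h) compute, and all numeric comments assume that example topology.

/*
 * Standalone illustration: how the CPUID.04H:EAX "addressable IDs" fields
 * fall out of APIC ID bit widths under the width-based encoding used above.
 */
#include <stdio.h>

/* Bits needed to encode 'count' distinct IDs: ceil(log2(count)), 0 if count <= 1. */
static unsigned apicid_bitwidth_for_count(unsigned count)
{
    unsigned width = 0;

    while ((1u << width) < count) {
        width++;
    }
    return width;
}

int main(void)
{
    /* Hypothetical topology: 2 dies x 3 cores x 2 threads per package. */
    unsigned nr_threads = 2, nr_cores = 3, nr_dies = 2;

    /* APIC ID sub-field offsets, mirroring what apicid_*_offset() return. */
    unsigned core_offset = apicid_bitwidth_for_count(nr_threads);            /* 1 */
    unsigned die_offset = core_offset + apicid_bitwidth_for_count(nr_cores); /* 3 */
    unsigned pkg_offset = die_offset + apicid_bitwidth_for_count(nr_dies);   /* 4 */

    /* EAX[31:26]: addressable core IDs in the package; spans core + die bits. */
    unsigned addressable_cores_width = pkg_offset - core_offset;             /* 3 */
    /* EAX[25:14]: addressable thread IDs sharing L1/L2; one core's SMT bits. */
    unsigned l1_l2_threads_width = core_offset;                              /* 1 */
    /* L3 is shared at die scope, so the whole die offset is used instead. */
    unsigned l3_threads_width = die_offset;                                  /* 3 */

    printf("EAX[31:26]        = %u\n", (1u << addressable_cores_width) - 1); /* 7 */
    printf("EAX[25:14] (L1/L2) = %u\n", (1u << l1_l2_threads_width) - 1);    /* 1 */
    printf("EAX[25:14] (L3)    = %u\n", (1u << l3_threads_width) - 1);       /* 7 */
    return 0;
}

Note how EAX[31:26] comes out as 7 rather than a count-derived value: with 3 cores per die and 2 dies, the core-ID portion of the APIC ID occupies 3 bits, so 8 addressable core IDs exist even though only 6 cores are populated, which is exactly the distinction the width-based encoding in the hunk captures.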