Message ID | 1510622154-17224-6-git-send-email-zhuyijun@huawei.com (mailing list archive) |
---|---|
State | New, archived |
On Tue, Nov 14, 2017 at 09:15:54AM +0800, zhuyijun@huawei.com wrote:
> From: Zhu Yijun <zhuyijun@huawei.com>
>
> In case of accessing memory holes, we build srat table by
> traversing mem_list.
>
> Signed-off-by: Zhu Yijun <zhuyijun@huawei.com>
> ---
>  hw/arm/virt-acpi-build.c | 40 +++++++++++++++++++++++++++++++++++-----
>  1 file changed, 35 insertions(+), 5 deletions(-)
>
> diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
> index 3d78ff6..e8dd7c9 100644
> --- a/hw/arm/virt-acpi-build.c
> +++ b/hw/arm/virt-acpi-build.c
> @@ -487,7 +487,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
>      AcpiSratProcessorGiccAffinity *core;
>      AcpiSratMemoryAffinity *numamem;
>      int i, srat_start;
> -    uint64_t mem_base;
> +    hwaddr region_offset = 0; /* region base addr offset */
> +    hwaddr region_eat_size = 0; /* region consumed size */
> +    hwaddr node_mem_size = 0;
> +    RAMRegion *begin_search_region = QLIST_FIRST(&vms->bootinfo.mem_list);
> +    RAMRegion *reg;
> +
>      MachineClass *mc = MACHINE_GET_CLASS(vms);
>      const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));
>
> @@ -504,12 +509,37 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
>          core->flags = cpu_to_le32(1);
>      }
>
> -    mem_base = vms->memmap[VIRT_MEM].base;
>      for (i = 0; i < nb_numa_nodes; ++i) {
>          numamem = acpi_data_push(table_data, sizeof(*numamem));
> -        build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
> -                          MEM_AFFINITY_ENABLED);
> -        mem_base += numa_info[i].node_mem;
> +        node_mem_size = numa_info[i].node_mem;
> +        QLIST_FOREACH(reg, &vms->bootinfo.mem_list, next) {
> +            if (reg->base != begin_search_region->base) {
> +                continue;
> +            }
> +
> +            if (node_mem_size >= (reg->size - region_offset)) {
> +                region_eat_size = reg->size - region_offset;
> +            } else {
> +                region_eat_size = node_mem_size;
> +            }
> +
> +            build_srat_memory(numamem, reg->base + region_offset,
> +                              region_eat_size, i, MEM_AFFINITY_ENABLED);
> +
> +            node_mem_size -= region_eat_size;
> +            region_offset += region_eat_size;
> +            begin_search_region = reg;
> +
> +            /* The region is depleted */
> +            if (reg->size == region_offset) {
> +                region_offset = 0;
> +                begin_search_region = QLIST_NEXT(reg, next);
> +            }
> +
> +            if (node_mem_size == 0) {
> +                break;
> +            }
> +        }
>      }
>
>      build_header(linker, table_data, (void *)srat, "SRAT",
> --
> 1.8.3.1
>
>
>

Same pc-dimm assigning to NUMA node comment from last patch applies to this one.

Thanks,
drew
On 2017/11/14 22:51, Andrew Jones wrote:
> On Tue, Nov 14, 2017 at 09:15:54AM +0800, zhuyijun@huawei.com wrote:
>> From: Zhu Yijun <zhuyijun@huawei.com>
>>
>> In case of accessing memory holes, we build srat table by
>> traversing mem_list.
>>
>> Signed-off-by: Zhu Yijun <zhuyijun@huawei.com>
>> ---
>>  hw/arm/virt-acpi-build.c | 40 +++++++++++++++++++++++++++++++++++-----
>>  1 file changed, 35 insertions(+), 5 deletions(-)
>>
>> diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
>> index 3d78ff6..e8dd7c9 100644
>> --- a/hw/arm/virt-acpi-build.c
>> +++ b/hw/arm/virt-acpi-build.c
>> @@ -487,7 +487,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
>>      AcpiSratProcessorGiccAffinity *core;
>>      AcpiSratMemoryAffinity *numamem;
>>      int i, srat_start;
>> -    uint64_t mem_base;
>> +    hwaddr region_offset = 0; /* region base addr offset */
>> +    hwaddr region_eat_size = 0; /* region consumed size */
>> +    hwaddr node_mem_size = 0;
>> +    RAMRegion *begin_search_region = QLIST_FIRST(&vms->bootinfo.mem_list);
>> +    RAMRegion *reg;
>> +
>>      MachineClass *mc = MACHINE_GET_CLASS(vms);
>>      const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));
>>
>> @@ -504,12 +509,37 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
>>          core->flags = cpu_to_le32(1);
>>      }
>>
>> -    mem_base = vms->memmap[VIRT_MEM].base;
>>      for (i = 0; i < nb_numa_nodes; ++i) {
>>          numamem = acpi_data_push(table_data, sizeof(*numamem));
>> -        build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
>> -                          MEM_AFFINITY_ENABLED);
>> -        mem_base += numa_info[i].node_mem;
>> +        node_mem_size = numa_info[i].node_mem;
>> +        QLIST_FOREACH(reg, &vms->bootinfo.mem_list, next) {
>> +            if (reg->base != begin_search_region->base) {
>> +                continue;
>> +            }
>> +
>> +            if (node_mem_size >= (reg->size - region_offset)) {
>> +                region_eat_size = reg->size - region_offset;
>> +            } else {
>> +                region_eat_size = node_mem_size;
>> +            }
>> +
>> +            build_srat_memory(numamem, reg->base + region_offset,
>> +                              region_eat_size, i, MEM_AFFINITY_ENABLED);
>> +
>> +            node_mem_size -= region_eat_size;
>> +            region_offset += region_eat_size;
>> +            begin_search_region = reg;
>> +
>> +            /* The region is depleted */
>> +            if (reg->size == region_offset) {
>> +                region_offset = 0;
>> +                begin_search_region = QLIST_NEXT(reg, next);
>> +            }
>> +
>> +            if (node_mem_size == 0) {
>> +                break;
>> +            }
>> +        }
>>      }
>>
>>      build_header(linker, table_data, (void *)srat, "SRAT",
>> --
>> 1.8.3.1
>>
>>
>>
> Same pc-dimm assigning to NUMA node comment from last patch applies to this
> one.

ok, thanks!

> Thanks,
> drew
>
> .
>
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 3d78ff6..e8dd7c9 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -487,7 +487,12 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
     AcpiSratProcessorGiccAffinity *core;
     AcpiSratMemoryAffinity *numamem;
     int i, srat_start;
-    uint64_t mem_base;
+    hwaddr region_offset = 0; /* region base addr offset */
+    hwaddr region_eat_size = 0; /* region consumed size */
+    hwaddr node_mem_size = 0;
+    RAMRegion *begin_search_region = QLIST_FIRST(&vms->bootinfo.mem_list);
+    RAMRegion *reg;
+
     MachineClass *mc = MACHINE_GET_CLASS(vms);
     const CPUArchIdList *cpu_list = mc->possible_cpu_arch_ids(MACHINE(vms));
 
@@ -504,12 +509,37 @@ build_srat(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms)
         core->flags = cpu_to_le32(1);
     }
 
-    mem_base = vms->memmap[VIRT_MEM].base;
     for (i = 0; i < nb_numa_nodes; ++i) {
         numamem = acpi_data_push(table_data, sizeof(*numamem));
-        build_srat_memory(numamem, mem_base, numa_info[i].node_mem, i,
-                          MEM_AFFINITY_ENABLED);
-        mem_base += numa_info[i].node_mem;
+        node_mem_size = numa_info[i].node_mem;
+        QLIST_FOREACH(reg, &vms->bootinfo.mem_list, next) {
+            if (reg->base != begin_search_region->base) {
+                continue;
+            }
+
+            if (node_mem_size >= (reg->size - region_offset)) {
+                region_eat_size = reg->size - region_offset;
+            } else {
+                region_eat_size = node_mem_size;
+            }
+
+            build_srat_memory(numamem, reg->base + region_offset,
+                              region_eat_size, i, MEM_AFFINITY_ENABLED);
+
+            node_mem_size -= region_eat_size;
+            region_offset += region_eat_size;
+            begin_search_region = reg;
+
+            /* The region is depleted */
+            if (reg->size == region_offset) {
+                region_offset = 0;
+                begin_search_region = QLIST_NEXT(reg, next);
+            }
+
+            if (node_mem_size == 0) {
+                break;
+            }
+        }
     }
 
     build_header(linker, table_data, (void *)srat, "SRAT",
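
For readers who want to try the carving idea outside of QEMU, below is a minimal, self-contained C sketch of the same logic: each NUMA node's memory size is satisfied by consuming chunks from an ordered list of RAM regions, so no node entry ever covers a memory hole. The `Region` struct, the `emit_srat_mem()` helper, and the example region layout and node sizes are illustrative assumptions for this sketch only; the patch itself operates on `RAMRegion`, `vms->bootinfo.mem_list`, and `build_srat_memory()`.

```c
/*
 * Standalone sketch (not QEMU code) of the carving logic in the patch above:
 * each NUMA node's memory is satisfied by consuming chunks from an ordered
 * list of RAM regions, so no node ever spans a memory hole.
 * Region, emit_srat_mem() and the example sizes are assumptions made for
 * illustration only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t base;  /* region start address */
    uint64_t size;  /* region length in bytes */
} Region;

/* Stand-in for build_srat_memory(): just print the would-be SRAT entry. */
static void emit_srat_mem(int node, uint64_t base, uint64_t size)
{
    printf("node %d: base 0x%" PRIx64 ", size 0x%" PRIx64 "\n",
           node, base, size);
}

int main(void)
{
    /* Two RAM regions separated by a hole: 1 GiB @ 1 GiB, 2 GiB @ 3 GiB. */
    Region regions[] = {
        { 0x40000000ULL, 0x40000000ULL },
        { 0xC0000000ULL, 0x80000000ULL },
    };
    /* Per-node memory sizes: 1.5 GiB each. */
    uint64_t node_mem[] = { 0x60000000ULL, 0x60000000ULL };

    size_t nregions = sizeof(regions) / sizeof(regions[0]);
    size_t nnodes = sizeof(node_mem) / sizeof(node_mem[0]);
    size_t r = 0;         /* region currently being consumed */
    uint64_t offset = 0;  /* bytes already consumed from regions[r] */

    for (size_t node = 0; node < nnodes; node++) {
        uint64_t left = node_mem[node];

        while (left > 0 && r < nregions) {
            uint64_t avail = regions[r].size - offset;
            uint64_t chunk = left < avail ? left : avail;

            emit_srat_mem((int)node, regions[r].base + offset, chunk);

            left -= chunk;
            offset += chunk;
            if (offset == regions[r].size) {  /* region depleted */
                offset = 0;
                r++;
            }
        }
    }
    return 0;
}
```

Run as-is, node 0 is carved from all of the first region plus the start of the second, and node 1 takes the remainder of the second region; each printed chunk would correspond to one SRAT memory affinity entry.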