@@ -473,44 +473,57 @@ acpi_build_srat_memory(struct srat_memory_affinity *numamem,
static void *
build_srat(void)
{
- int nb_numa_nodes = qemu_cfg_get_numa_nodes();
+ int nr_nodes = qemu_cfg_get_numa_nodes();
+ int *node_cpu = malloc_tmp(sizeof(int));
- if (nb_numa_nodes == 0)
+ if (nr_nodes == 0)
return NULL;
- u64 *numadata = malloc_tmphigh(sizeof(u64) * (MaxCountCPUs + nb_numa_nodes));
- if (!numadata) {
+ if (!node_cpu)
+ return NULL;
+
+ struct srat_data *sd = malloc_tmp(sizeof(struct srat_data)*nr_nodes);
+ if (!sd) {
warn_noalloc();
return NULL;
}
- qemu_cfg_get_numa_data(numadata, MaxCountCPUs + nb_numa_nodes);
+ qemu_cfg_get_numa_data((u64 *)sd, nr_nodes);
struct system_resource_affinity_table *srat;
int srat_size = sizeof(*srat) +
sizeof(struct srat_processor_affinity) * MaxCountCPUs +
- sizeof(struct srat_memory_affinity) * (nb_numa_nodes + 2);
+ sizeof(struct srat_memory_affinity) * (nr_nodes + 2);
srat = malloc_high(srat_size);
if (!srat) {
warn_noalloc();
- free(numadata);
+ free(srat);
return NULL;
}
memset(srat, 0, srat_size);
- srat->reserved1=1;
+ srat->reserved1 = 1;
struct srat_processor_affinity *core = (void*)(srat + 1);
- int i;
+ int i, j;
u64 curnode;
+ for (i = 0; i < nr_nodes; i++) {
+ if (sd[i].apic_map == 0)
+ continue;
+ for (j = 0; j < MaxCountCPUs; j++) {
+ if (sd[i].apic_map & 1 << j)
+ node_cpu[j] = i;
+ }
+ }
+
for (i = 0; i < MaxCountCPUs; ++i) {
core->type = SRAT_PROCESSOR;
core->length = sizeof(*core);
core->local_apic_id = i;
- curnode = *numadata++;
+ curnode = i;
core->proximity_lo = curnode;
- memset(core->proximity_hi, 0, 3);
+ memset(core->proximity_hi, 0, sizeof(core->proximity_hi));
core->local_sapic_eid = 0;
if (apic_id_is_present(i))
core->flags = cpu_to_le32(1);
@@ -527,15 +540,19 @@ build_srat(void)
int slots = 0;
u64 mem_len, mem_base, next_base = 0;
- acpi_build_srat_memory(numamem, 0, 640*1024, 0, 1);
- next_base = 1024 * 1024;
+#define MEM_1K (1UL << 10)
+#define MEM_1M (1UL << 20)
+#define MEM_1G (1ULL << 30)
+
+ acpi_build_srat_memory(numamem, 0, 640*MEM_1K, 0, 1);
+ next_base = MEM_1M;
numamem++;
slots++;
- for (i = 1; i < nb_numa_nodes + 1; ++i) {
+ for (i = 1; i < nr_nodes + 1; ++i) {
mem_base = next_base;
- mem_len = *numadata++;
+ mem_len = sd[i].memory_size;
if (i == 1)
- mem_len -= 1024 * 1024;
+ mem_len -= MEM_1M;
next_base = mem_base + mem_len;
/* Cut out the PCI hole */
@@ -546,22 +563,22 @@ build_srat(void)
numamem++;
slots++;
}
- mem_base = 1ULL << 32;
+ mem_base = MEM_1G;
mem_len = next_base - RamSize;
- next_base += (1ULL << 32) - RamSize;
+ next_base += MEM_1G - RamSize;
}
acpi_build_srat_memory(numamem, mem_base, mem_len, i-1, 1);
numamem++;
slots++;
}
- for (; slots < nb_numa_nodes + 2; slots++) {
+ for (; slots < nr_nodes + 2; slots++) {
acpi_build_srat_memory(numamem, 0, 0, 0, 0);
numamem++;
}
build_header((void*)srat, SRAT_SIGNATURE, srat_size, 1);
- free(numadata);
+ free(sd);
return srat;
}
@@ -321,6 +321,11 @@ struct srat_memory_affinity
u32 reserved3[2];
} PACKED;
+struct srat_data {
+ u64 apic_map;
+ u64 memory_size;
+};
+
#include "acpi-dsdt.hex"
@@ -291,7 +291,7 @@ void qemu_cfg_get_numa_data(u64 *data, int n)
int i;
for (i = 0; i < n; i++)
- qemu_cfg_read((u8*)(data + i), sizeof(u64));
+ qemu_cfg_read((u8*)(data + i), sizeof(struct srat_data));
}
u16 qemu_cfg_get_max_cpus(void)
@@ -3,6 +3,7 @@
#include "config.h" // CONFIG_COREBOOT
#include "util.h"
+#include "acpi.h"
/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx. It
* should be used to determine that a VM is running under KVM.
The old NUMA format obtained from fw_cfg is: number of nodes; node id of each cpu (array); node memory size (array). Now, reformat it as an array of {apic_map, memory_size} pairs, which has the advantage of being simple and clear. Signed-off-by: liguang <lig.fnst@cn.fujitsu.com> --- src/acpi.c | 57 ++++++++++++++++++++++++++++++++++++------------------- src/acpi.h | 5 ++++ src/paravirt.c | 2 +- src/paravirt.h | 1 + 4 files changed, 44 insertions(+), 21 deletions(-)