Patchwork [2/6] pc/numa: refactor bios_init function

Submitter liguang
Date Feb. 4, 2013, 2:27 a.m.
Message ID <1359944880-6039-3-git-send-email-lig.fnst@cn.fujitsu.com>
Permalink /patch/217817/
State New

Comments

liguang - Feb. 4, 2013, 2:27 a.m.
Originally, NUMA data was packed into a flat array,
which was implicit and hard to maintain.  Define
a struct for this data so the layout is clearer.
Also, only pass the cpumask of the corresponding
nodes to SeaBIOS, and leave the CPU-to-node
pairing work to it.

Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
---
 hw/pc.c |   40 ++++++++++++++++++----------------------
 1 files changed, 18 insertions(+), 22 deletions(-)
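
A minimal sketch (illustrative only, not part of the patch) of the two guest-visible layouts being compared. The old code exposed one flat array of little-endian 64-bit words as the FW_CFG_NUMA blob; the patch groups the per-node data into a struct instead:

    #include <stdint.h>

    /*
     * Old layout: a single flat array of little-endian uint64_t words,
     * allocated with g_new0() and exposed as the FW_CFG_NUMA blob:
     *   word [0]                      number of NUMA nodes
     *   words [1 .. apic_id_limit]    node index for each APIC ID
     *   words [apic_id_limit+1 .. ]   memory size of each node
     */

    /* New layout proposed by this patch: one record per node. */
    struct srat_data {
        uint64_t apic_map;      /* bitmap of the CPUs belonging to the node */
        uint64_t memory_size;   /* memory assigned to the node */
    };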
Blue Swirl - Feb. 4, 2013, 6:24 p.m.
On Mon, Feb 4, 2013 at 2:27 AM, liguang <lig.fnst@cn.fujitsu.com> wrote:
> Originally, NUMA data was packed into a flat array,
> which was implicit and hard to maintain.  Define
> a struct for this data so the layout is clearer.
> Also, only pass the cpumask of the corresponding
> nodes to SeaBIOS, and leave the CPU-to-node
> pairing work to it.
>
> Signed-off-by: liguang <lig.fnst@cn.fujitsu.com>
> ---
>  hw/pc.c |   40 ++++++++++++++++++----------------------
>  1 files changed, 18 insertions(+), 22 deletions(-)
>
> diff --git a/hw/pc.c b/hw/pc.c
> index d010c75..893c930 100644
> --- a/hw/pc.c
> +++ b/hw/pc.c
> @@ -562,13 +562,21 @@ static unsigned int pc_apic_id_limit(unsigned int max_cpus)
>      return x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
>  }
>
> +struct srat_data {

SRATData and typedef.

> +    uint64_t apic_map; /* size is MAX_NODES */
> +    uint64_t memory_size;
> +};
> +
>  static void *bios_init(void)
>  {
>      void *fw_cfg;
>      uint8_t *smbios_table;
>      size_t smbios_len;
> -    uint64_t *numa_fw_cfg;
> -    int i, j;
> +    struct fw_numa_cfg {

FwNUMACfg

> +        uint32_t nr_node;
> +        struct srat_data *srat_data;
> +    } fw_cfg_numa;
> +    int i;
>      unsigned int apic_id_limit = pc_apic_id_limit(max_cpus);
>
>      fw_cfg = fw_cfg_init(FW_CFG_CTL_IOPORT, FW_CFG_DATA_IOPORT, 0, 0);
> @@ -601,28 +609,16 @@ static void *bios_init(void)
>                       &e820_table, sizeof(e820_table));
>
>      fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg));
> -    /* allocate memory for the NUMA channel: one (64bit) word for the number
> -     * of nodes, one word for each VCPU->node and one word for each node to
> -     * hold the amount of memory.
> -     */
> -    numa_fw_cfg = g_new0(uint64_t, 1 + apic_id_limit + nb_numa_nodes);
> -    numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes);
> -    for (i = 0; i < max_cpus; i++) {
> -        unsigned int apic_id = x86_cpu_apic_id_from_index(i);
> -        assert(apic_id < apic_id_limit);
> -        for (j = 0; j < nb_numa_nodes; j++) {
> -            if (test_bit(i, node_cpumask[j])) {
> -                numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
> -                break;
> -            }
> -        }
> -    }
> +
> +    fw_cfg_numa.srat_data = g_new0(struct srat_data, nb_numa_nodes);
> +    fw_cfg_numa.nr_node = cpu_to_le64(nb_numa_nodes);
> +
>      for (i = 0; i < nb_numa_nodes; i++) {
> -        numa_fw_cfg[apic_id_limit + 1 + i] = cpu_to_le64(node_mem[i]);
> +        fw_cfg_numa.srat_data[i].apic_map = *node_cpumask[i];
> +        fw_cfg_numa.srat_data[i].memory_size = cpu_to_le64(node_mem[i]);
>      }
> -    fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg,
> -                     (1 + apic_id_limit + nb_numa_nodes) *
> -                     sizeof(*numa_fw_cfg));
> +
> +    fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, &fw_cfg_numa, sizeof(fw_cfg_numa));
>
>      return fw_cfg;
>  }
> --
> 1.7.2.5
>
>
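The two naming comments above refer to QEMU's coding style, which prefers a typedef with a CamelCase name for structured types. A minimal sketch (illustrative only, not a revised version of the patch) of the two definitions after that renaming:

    #include <stdint.h>

    typedef struct SRATData {
        uint64_t apic_map;      /* bitmap of the CPUs belonging to the node */
        uint64_t memory_size;   /* memory assigned to the node */
    } SRATData;

    typedef struct FwNUMACfg {
        uint32_t nr_node;       /* number of NUMA nodes */
        SRATData *srat_data;    /* per-node data, nb_numa_nodes entries */
    } FwNUMACfg;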

Patch

diff --git a/hw/pc.c b/hw/pc.c
index d010c75..893c930 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -562,13 +562,21 @@  static unsigned int pc_apic_id_limit(unsigned int max_cpus)
     return x86_cpu_apic_id_from_index(max_cpus - 1) + 1;
 }
 
+struct srat_data {
+    uint64_t apic_map; /* size is MAX_NODES */
+    uint64_t memory_size;
+};
+
 static void *bios_init(void)
 {
     void *fw_cfg;
     uint8_t *smbios_table;
     size_t smbios_len;
-    uint64_t *numa_fw_cfg;
-    int i, j;
+    struct fw_numa_cfg {
+        uint32_t nr_node;
+        struct srat_data *srat_data;
+    } fw_cfg_numa;
+    int i;
     unsigned int apic_id_limit = pc_apic_id_limit(max_cpus);
 
     fw_cfg = fw_cfg_init(FW_CFG_CTL_IOPORT, FW_CFG_DATA_IOPORT, 0, 0);
@@ -601,28 +609,16 @@  static void *bios_init(void)
                      &e820_table, sizeof(e820_table));
 
     fw_cfg_add_bytes(fw_cfg, FW_CFG_HPET, &hpet_cfg, sizeof(hpet_cfg));
-    /* allocate memory for the NUMA channel: one (64bit) word for the number
-     * of nodes, one word for each VCPU->node and one word for each node to
-     * hold the amount of memory.
-     */
-    numa_fw_cfg = g_new0(uint64_t, 1 + apic_id_limit + nb_numa_nodes);
-    numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes);
-    for (i = 0; i < max_cpus; i++) {
-        unsigned int apic_id = x86_cpu_apic_id_from_index(i);
-        assert(apic_id < apic_id_limit);
-        for (j = 0; j < nb_numa_nodes; j++) {
-            if (test_bit(i, node_cpumask[j])) {
-                numa_fw_cfg[apic_id + 1] = cpu_to_le64(j);
-                break;
-            }
-        }
-    }
+
+    fw_cfg_numa.srat_data = g_new0(struct srat_data, nb_numa_nodes);
+    fw_cfg_numa.nr_node = cpu_to_le64(nb_numa_nodes);
+
     for (i = 0; i < nb_numa_nodes; i++) {
-        numa_fw_cfg[apic_id_limit + 1 + i] = cpu_to_le64(node_mem[i]);
+        fw_cfg_numa.srat_data[i].apic_map = *node_cpumask[i];
+        fw_cfg_numa.srat_data[i].memory_size = cpu_to_le64(node_mem[i]);
     }
-    fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg,
-                     (1 + apic_id_limit + nb_numa_nodes) *
-                     sizeof(*numa_fw_cfg));
+
+    fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, &fw_cfg_numa, sizeof(fw_cfg_numa));
 
     return fw_cfg;
 }
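
The pairing work the commit message leaves to the firmware amounts to scanning each node's CPU bitmap for a given CPU index. Purely as an illustration (this is not SeaBIOS code; the function name and the flat per-node bitmap input are assumptions), that step could look like:

    #include <stdint.h>

    /*
     * Hypothetical firmware-side pairing step: given one CPU bitmap per
     * node, return the node owning the given CPU index, or -1 if the
     * CPU is not listed in any node's map.
     */
    static int node_of_cpu(const uint64_t *cpu_map, int nr_node,
                           unsigned int cpu_index)
    {
        int node;

        for (node = 0; node < nr_node; node++) {
            if (cpu_index < 64 && (cpu_map[node] & (1ULL << cpu_index))) {
                return node;
            }
        }
        return -1;
    }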