diff mbox series

[2/4] hw/riscv: spike: Allow creating multiple sockets

Message ID 20200516063746.18296-3-anup.patel@wdc.com
State New
Headers show
Series RISC-V multi-socket support | expand

Commit Message

Anup Patel May 16, 2020, 6:37 a.m. UTC
We extend RISC-V spike machine to allow creating a multi-socket machine.
Each RISC-V spike machine socket is a set of HARTs and a CLINT instance.
Other peripherals are shared between all RISC-V spike machine sockets.
We also update RISC-V spike machine device tree to treat each socket as
a NUMA node.

The number of sockets in RISC-V spike machine can be specified using
the "sockets=" sub-option of QEMU "-smp" command-line option. By
default, only one socket RISC-V spike machine will be created.

Currently, we only allow creating up to a maximum of 4 sockets with a
minimum of 2 HARTs per socket. In the future, these limits can be changed.

Signed-off-by: Anup Patel <anup.patel@wdc.com>
---
 hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
 include/hw/riscv/spike.h |   8 +-
 2 files changed, 133 insertions(+), 81 deletions(-)

Comments

Palmer Dabbelt May 21, 2020, 8:16 p.m. UTC | #1
On Fri, 15 May 2020 23:37:44 PDT (-0700), Anup Patel wrote:
> We extend RISC-V spike machine to allow creating a multi-socket machine.
> Each RISC-V spike machine socket is a set of HARTs and a CLINT instance.
> Other peripherals are shared between all RISC-V spike machine sockets.
> We also update RISC-V spike machine device tree to treat each socket as
> a NUMA node.
>
> The number of sockets in RISC-V spike machine can be specified using
> the "sockets=" sub-option of QEMU "-smp" command-line option. By
> default, only one socket RISC-V spike machine will be created.
>
> Currently, we only allow creating upto maximum 4 sockets with minimum
> 2 HARTs per socket. In future, this limits can be changed.
>
> Signed-off-by: Anup Patel <anup.patel@wdc.com>
> ---
>  hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
>  include/hw/riscv/spike.h |   8 +-
>  2 files changed, 133 insertions(+), 81 deletions(-)
>
> diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
> index d5e0103d89..f63c57a87c 100644
> --- a/hw/riscv/spike.c
> +++ b/hw/riscv/spike.c
> @@ -64,9 +64,11 @@ static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
>      uint64_t mem_size, const char *cmdline)
>  {
>      void *fdt;
> -    int cpu;
> -    uint32_t *cells;
> -    char *nodename;
> +    int cpu, socket;
> +    uint32_t *clint_cells;
> +    unsigned long clint_addr;
> +    uint32_t cpu_phandle, intc_phandle, phandle = 1;
> +    char *name, *clint_name, *clust_name, *core_name, *cpu_name, *intc_name;
>
>      fdt = s->fdt = create_device_tree(&s->fdt_size);
>      if (!fdt) {
> @@ -88,68 +90,85 @@ static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
>      qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
>      qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
>
> -    nodename = g_strdup_printf("/memory@%lx",
> -        (long)memmap[SPIKE_DRAM].base);
> -    qemu_fdt_add_subnode(fdt, nodename);
> -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> +    name = g_strdup_printf("/memory@%lx", (long)memmap[SPIKE_DRAM].base);
> +    qemu_fdt_add_subnode(fdt, name);
> +    qemu_fdt_setprop_cells(fdt, name, "reg",
>          memmap[SPIKE_DRAM].base >> 32, memmap[SPIKE_DRAM].base,
>          mem_size >> 32, mem_size);
> -    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
> -    g_free(nodename);
> +    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
> +    g_free(name);
>
>      qemu_fdt_add_subnode(fdt, "/cpus");
>      qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
>          SIFIVE_CLINT_TIMEBASE_FREQ);
>      qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
>      qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
> +    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
>
> -    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
> -        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> -        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> -        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
> -        qemu_fdt_add_subnode(fdt, nodename);
> +    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
> +        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d", socket);
> +        qemu_fdt_add_subnode(fdt, clust_name);
> +
> +        clint_cells =  g_new0(uint32_t, s->soc[socket].num_harts * 4);
> +
> +        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> +            cpu_phandle = phandle++;
> +
> +            cpu_name = g_strdup_printf("/cpus/cpu@%d",
> +                s->soc[socket].hartid_base + cpu);
> +            qemu_fdt_add_subnode(fdt, cpu_name);
>  #if defined(TARGET_RISCV32)
> -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32");
> +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv32");
>  #else
> -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
> +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48");
>  #endif
> -        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> -        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> -        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
> -        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
> -        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
> -        qemu_fdt_add_subnode(fdt, intc);
> -        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
> -        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
> -        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
> -        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
> -        g_free(isa);
> -        g_free(intc);
> -        g_free(nodename);
> -    }
> +            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> +            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> +            g_free(name);
> +            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
> +            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> +            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> +                s->soc[socket].hartid_base + cpu);
> +            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
> +            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle", cpu_phandle);
> +
> +            intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name);
> +            qemu_fdt_add_subnode(fdt, intc_name);
> +            intc_phandle = phandle++;
> +            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
> +            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
> +                "riscv,cpu-intc");
> +            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0);
> +            qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1);
> +
> +            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> +            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> +            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> +            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> +
> +            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
> +            qemu_fdt_add_subnode(fdt, core_name);
> +            qemu_fdt_setprop_cell(fdt, core_name, "cpu", cpu_phandle);
> +
> +            g_free(core_name);
> +            g_free(intc_name);
> +            g_free(cpu_name);
> +        }
>
> -    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
> -    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
> -        nodename =
> -            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> -        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
> -        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> -        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> -        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> -        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> -        g_free(nodename);
> +        clint_addr = memmap[SPIKE_CLINT].base +
> +            (memmap[SPIKE_CLINT].size * socket);
> +        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
> +        qemu_fdt_add_subnode(fdt, clint_name);
> +        qemu_fdt_setprop_string(fdt, clint_name, "compatible", "riscv,clint0");
> +        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
> +            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
> +        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
> +            clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
> +
> +        g_free(clint_name);
> +        g_free(clint_cells);
> +        g_free(clust_name);
>      }
> -    nodename = g_strdup_printf("/soc/clint@%lx",
> -        (long)memmap[SPIKE_CLINT].base);
> -    qemu_fdt_add_subnode(fdt, nodename);
> -    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
> -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> -        0x0, memmap[SPIKE_CLINT].base,
> -        0x0, memmap[SPIKE_CLINT].size);
> -    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
> -        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
> -    g_free(cells);
> -    g_free(nodename);
>
>      if (cmdline) {
>          qemu_fdt_add_subnode(fdt, "/chosen");
> @@ -160,23 +179,51 @@ static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
>  static void spike_board_init(MachineState *machine)
>  {
>      const struct MemmapEntry *memmap = spike_memmap;
> -
>      SpikeState *s = g_new0(SpikeState, 1);
>      MemoryRegion *system_memory = get_system_memory();
>      MemoryRegion *main_mem = g_new(MemoryRegion, 1);
>      MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
>      int i;
> +    char *soc_name;
>      unsigned int smp_cpus = machine->smp.cpus;
> -
> -    /* Initialize SOC */
> -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> -                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> -    object_property_set_str(OBJECT(&s->soc), machine->cpu_type, "cpu-type",
> -                            &error_abort);
> -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> -                            &error_abort);
> -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> -                            &error_abort);
> +    unsigned int base_hartid, cpus_per_socket;
> +
> +    s->num_socs = machine->smp.sockets;
> +
> +    /* Ensure minumum required CPUs per socket */
> +    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
> +        s->num_socs = 1;

Why?  It seems like creating single-hart sockets would be a good test case, and
I'm pretty sure it's a configuration that we had in embedded systems.

> +    /* Limit the number of sockets */
> +    if (SPIKE_SOCKETS_MAX < s->num_socs)
> +        s->num_socs = SPIKE_SOCKETS_MAX;
> +
> +    /* Initialize socket */
> +    for (i = 0; i < s->num_socs; i++) {
> +        base_hartid = i * (smp_cpus / s->num_socs);
> +        if (i == (s->num_socs - 1))
> +            cpus_per_socket = smp_cpus - base_hartid;
> +        else
> +            cpus_per_socket = smp_cpus / s->num_socs;
> +        soc_name = g_strdup_printf("soc%d", i);
> +        object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
> +            sizeof(s->soc[i]), TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> +        g_free(soc_name);
> +        object_property_set_str(OBJECT(&s->soc[i]),
> +            machine->cpu_type, "cpu-type", &error_abort);
> +        object_property_set_int(OBJECT(&s->soc[i]),
> +            base_hartid, "hartid-base", &error_abort);
> +        object_property_set_int(OBJECT(&s->soc[i]),
> +            cpus_per_socket, "num-harts", &error_abort);
> +        object_property_set_bool(OBJECT(&s->soc[i]),
> +            true, "realized", &error_abort);
> +
> +        /* Core Local Interruptor (timer and IPI) for each socket */
> +        sifive_clint_create(
> +            memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size,
> +            memmap[SPIKE_CLINT].size, base_hartid, cpus_per_socket,
> +            SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, false);
> +    }
>
>      /* register system main memory (actual RAM) */
>      memory_region_init_ram(main_mem, NULL, "riscv.spike.ram",
> @@ -249,12 +296,7 @@ static void spike_board_init(MachineState *machine)
>                            &address_space_memory);
>
>      /* initialize HTIF using symbols found in load_kernel */
> -    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env, serial_hd(0));
> -
> -    /* Core Local Interruptor (timer and IPI) */
> -    sifive_clint_create(memmap[SPIKE_CLINT].base, memmap[SPIKE_CLINT].size,
> -        0, smp_cpus, SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE,
> -        false);
> +    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env, serial_hd(0));
>  }
>
>  static void spike_v1_10_0_board_init(MachineState *machine)
> @@ -268,6 +310,8 @@ static void spike_v1_10_0_board_init(MachineState *machine)
>      int i;
>      unsigned int smp_cpus = machine->smp.cpus;
>
> +    s->num_socs = 1;
> +
>      if (!qtest_enabled()) {
>          info_report("The Spike v1.10.0 machine has been deprecated. "
>                      "Please use the generic spike machine and specify the ISA "
> @@ -275,13 +319,13 @@ static void spike_v1_10_0_board_init(MachineState *machine)
>      }
>
>      /* Initialize SOC */
> -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> +    object_initialize_child(OBJECT(machine), "soc", &s->soc[0], sizeof(s->soc[0]),
>                              TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> -    object_property_set_str(OBJECT(&s->soc), SPIKE_V1_10_0_CPU, "cpu-type",
> +    object_property_set_str(OBJECT(&s->soc[0]), SPIKE_V1_10_0_CPU, "cpu-type",
>                              &error_abort);
> -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> +    object_property_set_int(OBJECT(&s->soc[0]), smp_cpus, "num-harts",
>                              &error_abort);
> -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> +    object_property_set_bool(OBJECT(&s->soc[0]), true, "realized",
>                              &error_abort);
>
>      /* register system main memory (actual RAM) */
> @@ -339,7 +383,7 @@ static void spike_v1_10_0_board_init(MachineState *machine)
>                            &address_space_memory);
>
>      /* initialize HTIF using symbols found in load_kernel */
> -    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env, serial_hd(0));
> +    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env, serial_hd(0));
>
>      /* Core Local Interruptor (timer and IPI) */
>      sifive_clint_create(memmap[SPIKE_CLINT].base, memmap[SPIKE_CLINT].size,
> @@ -358,6 +402,8 @@ static void spike_v1_09_1_board_init(MachineState *machine)
>      int i;
>      unsigned int smp_cpus = machine->smp.cpus;
>
> +    s->num_socs = 1;
> +
>      if (!qtest_enabled()) {
>          info_report("The Spike v1.09.1 machine has been deprecated. "
>                      "Please use the generic spike machine and specify the ISA "
> @@ -365,13 +411,13 @@ static void spike_v1_09_1_board_init(MachineState *machine)
>      }
>
>      /* Initialize SOC */
> -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> +    object_initialize_child(OBJECT(machine), "soc", &s->soc[0], sizeof(s->soc[0]),
>                              TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> -    object_property_set_str(OBJECT(&s->soc), SPIKE_V1_09_1_CPU, "cpu-type",
> +    object_property_set_str(OBJECT(&s->soc[0]), SPIKE_V1_09_1_CPU, "cpu-type",
>                              &error_abort);
> -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> +    object_property_set_int(OBJECT(&s->soc[0]), smp_cpus, "num-harts",
>                              &error_abort);
> -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> +    object_property_set_bool(OBJECT(&s->soc[0]), true, "realized",
>                              &error_abort);
>
>      /* register system main memory (actual RAM) */
> @@ -425,7 +471,7 @@ static void spike_v1_09_1_board_init(MachineState *machine)
>          "};\n";
>
>      /* build config string with supplied memory size */
> -    char *isa = riscv_isa_string(&s->soc.harts[0]);
> +    char *isa = riscv_isa_string(&s->soc[0].harts[0]);
>      char *config_string = g_strdup_printf(config_string_tmpl,
>          (uint64_t)memmap[SPIKE_CLINT].base + SIFIVE_TIME_BASE,
>          (uint64_t)memmap[SPIKE_DRAM].base,
> @@ -448,7 +494,7 @@ static void spike_v1_09_1_board_init(MachineState *machine)
>                            &address_space_memory);
>
>      /* initialize HTIF using symbols found in load_kernel */
> -    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env, serial_hd(0));
> +    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env, serial_hd(0));
>
>      /* Core Local Interruptor (timer and IPI) */
>      sifive_clint_create(memmap[SPIKE_CLINT].base, memmap[SPIKE_CLINT].size,
> @@ -476,7 +522,7 @@ static void spike_machine_init(MachineClass *mc)
>  {
>      mc->desc = "RISC-V Spike Board";
>      mc->init = spike_board_init;
> -    mc->max_cpus = 8;
> +    mc->max_cpus = SPIKE_CPUS_MAX;
>      mc->is_default = true;
>      mc->default_cpu_type = SPIKE_V1_10_0_CPU;
>  }
> diff --git a/include/hw/riscv/spike.h b/include/hw/riscv/spike.h
> index dc770421bc..04a9f593b5 100644
> --- a/include/hw/riscv/spike.h
> +++ b/include/hw/riscv/spike.h
> @@ -22,12 +22,18 @@
>  #include "hw/riscv/riscv_hart.h"
>  #include "hw/sysbus.h"
>
> +#define SPIKE_SOCKETS_MAX 4
> +#define SPIKE_CPUS_PER_SOCKET_MIN 2
> +#define SPIKE_CPUS_PER_SOCKET_MAX 4
> +#define SPIKE_CPUS_MAX (SPIKE_SOCKETS_MAX * SPIKE_CPUS_PER_SOCKET_MAX)
> +
>  typedef struct {
>      /*< private >*/
>      SysBusDevice parent_obj;
>
>      /*< public >*/
> -    RISCVHartArrayState soc;
> +    unsigned int num_socs;
> +    RISCVHartArrayState soc[SPIKE_SOCKETS_MAX];
>      void *fdt;
>      int fdt_size;
>  } SpikeState;
Anup Patel May 22, 2020, 10:09 a.m. UTC | #2
> -----Original Message-----
> From: Palmer Dabbelt <palmer@dabbelt.com>
> Sent: 22 May 2020 01:46
> To: Anup Patel <Anup.Patel@wdc.com>
> Cc: Peter Maydell <peter.maydell@linaro.org>; Alistair Francis
> <Alistair.Francis@wdc.com>; sagark@eecs.berkeley.edu; Atish Patra
> <Atish.Patra@wdc.com>; anup@brainfault.org; qemu-riscv@nongnu.org;
> qemu-devel@nongnu.org; Anup Patel <Anup.Patel@wdc.com>
> Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple sockets
> 
> On Fri, 15 May 2020 23:37:44 PDT (-0700), Anup Patel wrote:
> > We extend RISC-V spike machine to allow creating a multi-socket machine.
> > Each RISC-V spike machine socket is a set of HARTs and a CLINT instance.
> > Other peripherals are shared between all RISC-V spike machine sockets.
> > We also update RISC-V spike machine device tree to treat each socket
> > as a NUMA node.
> >
> > The number of sockets in RISC-V spike machine can be specified using
> > the "sockets=" sub-option of QEMU "-smp" command-line option. By
> > default, only one socket RISC-V spike machine will be created.
> >
> > Currently, we only allow creating upto maximum 4 sockets with minimum
> > 2 HARTs per socket. In future, this limits can be changed.
> >
> > Signed-off-by: Anup Patel <anup.patel@wdc.com>
> > ---
> >  hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
> >  include/hw/riscv/spike.h |   8 +-
> >  2 files changed, 133 insertions(+), 81 deletions(-)
> >
> > diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index
> > d5e0103d89..f63c57a87c 100644
> > --- a/hw/riscv/spike.c
> > +++ b/hw/riscv/spike.c
> > @@ -64,9 +64,11 @@ static void create_fdt(SpikeState *s, const struct
> MemmapEntry *memmap,
> >      uint64_t mem_size, const char *cmdline)  {
> >      void *fdt;
> > -    int cpu;
> > -    uint32_t *cells;
> > -    char *nodename;
> > +    int cpu, socket;
> > +    uint32_t *clint_cells;
> > +    unsigned long clint_addr;
> > +    uint32_t cpu_phandle, intc_phandle, phandle = 1;
> > +    char *name, *clint_name, *clust_name, *core_name, *cpu_name,
> > + *intc_name;
> >
> >      fdt = s->fdt = create_device_tree(&s->fdt_size);
> >      if (!fdt) {
> > @@ -88,68 +90,85 @@ static void create_fdt(SpikeState *s, const struct
> MemmapEntry *memmap,
> >      qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
> >      qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
> >
> > -    nodename = g_strdup_printf("/memory@%lx",
> > -        (long)memmap[SPIKE_DRAM].base);
> > -    qemu_fdt_add_subnode(fdt, nodename);
> > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > +    name = g_strdup_printf("/memory@%lx",
> (long)memmap[SPIKE_DRAM].base);
> > +    qemu_fdt_add_subnode(fdt, name);
> > +    qemu_fdt_setprop_cells(fdt, name, "reg",
> >          memmap[SPIKE_DRAM].base >> 32, memmap[SPIKE_DRAM].base,
> >          mem_size >> 32, mem_size);
> > -    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
> > -    g_free(nodename);
> > +    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
> > +    g_free(name);
> >
> >      qemu_fdt_add_subnode(fdt, "/cpus");
> >      qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
> >          SIFIVE_CLINT_TIMEBASE_FREQ);
> >      qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
> >      qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
> > +    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
> >
> > -    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
> > -        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> > -        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > -        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
> > -        qemu_fdt_add_subnode(fdt, nodename);
> > +    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
> > +        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d", socket);
> > +        qemu_fdt_add_subnode(fdt, clust_name);
> > +
> > +        clint_cells =  g_new0(uint32_t, s->soc[socket].num_harts *
> > + 4);
> > +
> > +        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> > +            cpu_phandle = phandle++;
> > +
> > +            cpu_name = g_strdup_printf("/cpus/cpu@%d",
> > +                s->soc[socket].hartid_base + cpu);
> > +            qemu_fdt_add_subnode(fdt, cpu_name);
> >  #if defined(TARGET_RISCV32)
> > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32");
> > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > + "riscv,sv32");
> >  #else
> > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
> > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > + "riscv,sv48");
> >  #endif
> > -        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> > -        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> > -        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
> > -        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
> > -        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
> > -        qemu_fdt_add_subnode(fdt, intc);
> > -        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
> > -        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
> > -        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
> > -        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
> > -        g_free(isa);
> > -        g_free(intc);
> > -        g_free(nodename);
> > -    }
> > +            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> > +            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> > +            g_free(name);
> > +            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
> > +            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> > +            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> > +                s->soc[socket].hartid_base + cpu);
> > +            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
> > +            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle",
> > + cpu_phandle);
> > +
> > +            intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name);
> > +            qemu_fdt_add_subnode(fdt, intc_name);
> > +            intc_phandle = phandle++;
> > +            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
> > +            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
> > +                "riscv,cpu-intc");
> > +            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0);
> > +            qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells",
> > + 1);
> > +
> > +            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > +            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > +            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > +            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > +
> > +            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
> > +            qemu_fdt_add_subnode(fdt, core_name);
> > +            qemu_fdt_setprop_cell(fdt, core_name, "cpu",
> > + cpu_phandle);
> > +
> > +            g_free(core_name);
> > +            g_free(intc_name);
> > +            g_free(cpu_name);
> > +        }
> >
> > -    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
> > -    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
> > -        nodename =
> > -            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > -        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
> > -        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > -        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > -        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > -        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > -        g_free(nodename);
> > +        clint_addr = memmap[SPIKE_CLINT].base +
> > +            (memmap[SPIKE_CLINT].size * socket);
> > +        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
> > +        qemu_fdt_add_subnode(fdt, clint_name);
> > +        qemu_fdt_setprop_string(fdt, clint_name, "compatible", "riscv,clint0");
> > +        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
> > +            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
> > +        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
> > +            clint_cells, s->soc[socket].num_harts * sizeof(uint32_t)
> > + * 4);
> > +
> > +        g_free(clint_name);
> > +        g_free(clint_cells);
> > +        g_free(clust_name);
> >      }
> > -    nodename = g_strdup_printf("/soc/clint@%lx",
> > -        (long)memmap[SPIKE_CLINT].base);
> > -    qemu_fdt_add_subnode(fdt, nodename);
> > -    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
> > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > -        0x0, memmap[SPIKE_CLINT].base,
> > -        0x0, memmap[SPIKE_CLINT].size);
> > -    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
> > -        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
> > -    g_free(cells);
> > -    g_free(nodename);
> >
> >      if (cmdline) {
> >          qemu_fdt_add_subnode(fdt, "/chosen"); @@ -160,23 +179,51 @@
> > static void create_fdt(SpikeState *s, const struct MemmapEntry
> > *memmap,  static void spike_board_init(MachineState *machine)  {
> >      const struct MemmapEntry *memmap = spike_memmap;
> > -
> >      SpikeState *s = g_new0(SpikeState, 1);
> >      MemoryRegion *system_memory = get_system_memory();
> >      MemoryRegion *main_mem = g_new(MemoryRegion, 1);
> >      MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
> >      int i;
> > +    char *soc_name;
> >      unsigned int smp_cpus = machine->smp.cpus;
> > -
> > -    /* Initialize SOC */
> > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> > -                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > -    object_property_set_str(OBJECT(&s->soc), machine->cpu_type, "cpu-
> type",
> > -                            &error_abort);
> > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > -                            &error_abort);
> > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > -                            &error_abort);
> > +    unsigned int base_hartid, cpus_per_socket;
> > +
> > +    s->num_socs = machine->smp.sockets;
> > +
> > +    /* Ensure minumum required CPUs per socket */
> > +    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
> > +        s->num_socs = 1;
> 
> Why?  It seems like creating single-hart sockets would be a good test case, and
> I'm pretty sure it's a configuration that we had in embedded systems.

Yes, single-hart sockets are sensible for testing software.

When the "sockets=" sub-option is not provided in the "-smp" command-line
options, machine->smp.sockets is set to the same value as machine->smp.cpus
by the smp_parse() function in hw/core/machine.c. This means that by default
we will always get a single hart per socket. In other words, "-smp 4" will
be 4 cpus and 4 sockets. This is counter-intuitive for users because when
"sockets=" is not provided we should default to a single socket irrespective
of the number of cpus.

I had added SPIKE_CPUS_PER_SOCKET_MIN to handle the default case
when no "sockets=" sub-option is provided.

An alternative approach would be: 
1. Add more members to struct CpuTopology in include/hw/boards.h
    to help us know whether the "sockets=" option was passed or not
2. Update smp_parse() for new members in struct CpuTopology
3. Assume single-socket machine in QEMU RISC-V virt and QEMU
    RISC-V spike machines when "sockets=" option was not passed

Suggestions ??

> 
> > +    /* Limit the number of sockets */
> > +    if (SPIKE_SOCKETS_MAX < s->num_socs)
> > +        s->num_socs = SPIKE_SOCKETS_MAX;
> > +
> > +    /* Initialize socket */
> > +    for (i = 0; i < s->num_socs; i++) {
> > +        base_hartid = i * (smp_cpus / s->num_socs);
> > +        if (i == (s->num_socs - 1))
> > +            cpus_per_socket = smp_cpus - base_hartid;
> > +        else
> > +            cpus_per_socket = smp_cpus / s->num_socs;
> > +        soc_name = g_strdup_printf("soc%d", i);
> > +        object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
> > +            sizeof(s->soc[i]), TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > +        g_free(soc_name);
> > +        object_property_set_str(OBJECT(&s->soc[i]),
> > +            machine->cpu_type, "cpu-type", &error_abort);
> > +        object_property_set_int(OBJECT(&s->soc[i]),
> > +            base_hartid, "hartid-base", &error_abort);
> > +        object_property_set_int(OBJECT(&s->soc[i]),
> > +            cpus_per_socket, "num-harts", &error_abort);
> > +        object_property_set_bool(OBJECT(&s->soc[i]),
> > +            true, "realized", &error_abort);
> > +
> > +        /* Core Local Interruptor (timer and IPI) for each socket */
> > +        sifive_clint_create(
> > +            memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size,
> > +            memmap[SPIKE_CLINT].size, base_hartid, cpus_per_socket,
> > +            SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, false);
> > +    }
> >
> >      /* register system main memory (actual RAM) */
> >      memory_region_init_ram(main_mem, NULL, "riscv.spike.ram", @@
> > -249,12 +296,7 @@ static void spike_board_init(MachineState *machine)
> >                            &address_space_memory);
> >
> >      /* initialize HTIF using symbols found in load_kernel */
> > -    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env,
> serial_hd(0));
> > -
> > -    /* Core Local Interruptor (timer and IPI) */
> > -    sifive_clint_create(memmap[SPIKE_CLINT].base,
> memmap[SPIKE_CLINT].size,
> > -        0, smp_cpus, SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE,
> SIFIVE_TIME_BASE,
> > -        false);
> > +    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env,
> > + serial_hd(0));
> >  }
> >
> >  static void spike_v1_10_0_board_init(MachineState *machine) @@ -268,6
> > +310,8 @@ static void spike_v1_10_0_board_init(MachineState *machine)
> >      int i;
> >      unsigned int smp_cpus = machine->smp.cpus;
> >
> > +    s->num_socs = 1;
> > +
> >      if (!qtest_enabled()) {
> >          info_report("The Spike v1.10.0 machine has been deprecated. "
> >                      "Please use the generic spike machine and specify the ISA "
> > @@ -275,13 +319,13 @@ static void
> spike_v1_10_0_board_init(MachineState *machine)
> >      }
> >
> >      /* Initialize SOC */
> > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> > +    object_initialize_child(OBJECT(machine), "soc", &s->soc[0],
> > + sizeof(s->soc[0]),
> >                              TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > -    object_property_set_str(OBJECT(&s->soc), SPIKE_V1_10_0_CPU, "cpu-
> type",
> > +    object_property_set_str(OBJECT(&s->soc[0]), SPIKE_V1_10_0_CPU,
> > + "cpu-type",
> >                              &error_abort);
> > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > +    object_property_set_int(OBJECT(&s->soc[0]), smp_cpus,
> > + "num-harts",
> >                              &error_abort);
> > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > +    object_property_set_bool(OBJECT(&s->soc[0]), true, "realized",
> >                              &error_abort);
> >
> >      /* register system main memory (actual RAM) */ @@ -339,7 +383,7
> > @@ static void spike_v1_10_0_board_init(MachineState *machine)
> >                            &address_space_memory);
> >
> >      /* initialize HTIF using symbols found in load_kernel */
> > -    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env,
> serial_hd(0));
> > +    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env,
> > + serial_hd(0));
> >
> >      /* Core Local Interruptor (timer and IPI) */
> >      sifive_clint_create(memmap[SPIKE_CLINT].base,
> > memmap[SPIKE_CLINT].size, @@ -358,6 +402,8 @@ static void
> spike_v1_09_1_board_init(MachineState *machine)
> >      int i;
> >      unsigned int smp_cpus = machine->smp.cpus;
> >
> > +    s->num_socs = 1;
> > +
> >      if (!qtest_enabled()) {
> >          info_report("The Spike v1.09.1 machine has been deprecated. "
> >                      "Please use the generic spike machine and specify the ISA "
> > @@ -365,13 +411,13 @@ static void
> spike_v1_09_1_board_init(MachineState *machine)
> >      }
> >
> >      /* Initialize SOC */
> > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> > +    object_initialize_child(OBJECT(machine), "soc", &s->soc[0],
> > + sizeof(s->soc[0]),
> >                              TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > -    object_property_set_str(OBJECT(&s->soc), SPIKE_V1_09_1_CPU, "cpu-
> type",
> > +    object_property_set_str(OBJECT(&s->soc[0]), SPIKE_V1_09_1_CPU,
> > + "cpu-type",
> >                              &error_abort);
> > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > +    object_property_set_int(OBJECT(&s->soc[0]), smp_cpus,
> > + "num-harts",
> >                              &error_abort);
> > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > +    object_property_set_bool(OBJECT(&s->soc[0]), true, "realized",
> >                              &error_abort);
> >
> >      /* register system main memory (actual RAM) */ @@ -425,7 +471,7
> > @@ static void spike_v1_09_1_board_init(MachineState *machine)
> >          "};\n";
> >
> >      /* build config string with supplied memory size */
> > -    char *isa = riscv_isa_string(&s->soc.harts[0]);
> > +    char *isa = riscv_isa_string(&s->soc[0].harts[0]);
> >      char *config_string = g_strdup_printf(config_string_tmpl,
> >          (uint64_t)memmap[SPIKE_CLINT].base + SIFIVE_TIME_BASE,
> >          (uint64_t)memmap[SPIKE_DRAM].base,
> > @@ -448,7 +494,7 @@ static void spike_v1_09_1_board_init(MachineState
> *machine)
> >                            &address_space_memory);
> >
> >      /* initialize HTIF using symbols found in load_kernel */
> > -    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env,
> serial_hd(0));
> > +    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env,
> > + serial_hd(0));
> >
> >      /* Core Local Interruptor (timer and IPI) */
> >      sifive_clint_create(memmap[SPIKE_CLINT].base,
> > memmap[SPIKE_CLINT].size, @@ -476,7 +522,7 @@ static void
> > spike_machine_init(MachineClass *mc)  {
> >      mc->desc = "RISC-V Spike Board";
> >      mc->init = spike_board_init;
> > -    mc->max_cpus = 8;
> > +    mc->max_cpus = SPIKE_CPUS_MAX;
> >      mc->is_default = true;
> >      mc->default_cpu_type = SPIKE_V1_10_0_CPU;  } diff --git
> > a/include/hw/riscv/spike.h b/include/hw/riscv/spike.h index
> > dc770421bc..04a9f593b5 100644
> > --- a/include/hw/riscv/spike.h
> > +++ b/include/hw/riscv/spike.h
> > @@ -22,12 +22,18 @@
> >  #include "hw/riscv/riscv_hart.h"
> >  #include "hw/sysbus.h"
> >
> > +#define SPIKE_SOCKETS_MAX 4
> > +#define SPIKE_CPUS_PER_SOCKET_MIN 2
> > +#define SPIKE_CPUS_PER_SOCKET_MAX 4
> > +#define SPIKE_CPUS_MAX (SPIKE_SOCKETS_MAX *
> > +SPIKE_CPUS_PER_SOCKET_MAX)
> > +
> >  typedef struct {
> >      /*< private >*/
> >      SysBusDevice parent_obj;
> >
> >      /*< public >*/
> > -    RISCVHartArrayState soc;
> > +    unsigned int num_socs;
> > +    RISCVHartArrayState soc[SPIKE_SOCKETS_MAX];
> >      void *fdt;
> >      int fdt_size;
> >  } SpikeState;

Regards,
Anup
Alistair Francis May 27, 2020, 12:38 a.m. UTC | #3
On Fri, May 22, 2020 at 3:10 AM Anup Patel <Anup.Patel@wdc.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Palmer Dabbelt <palmer@dabbelt.com>
> > Sent: 22 May 2020 01:46
> > To: Anup Patel <Anup.Patel@wdc.com>
> > Cc: Peter Maydell <peter.maydell@linaro.org>; Alistair Francis
> > <Alistair.Francis@wdc.com>; sagark@eecs.berkeley.edu; Atish Patra
> > <Atish.Patra@wdc.com>; anup@brainfault.org; qemu-riscv@nongnu.org;
> > qemu-devel@nongnu.org; Anup Patel <Anup.Patel@wdc.com>
> > Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple sockets
> >
> > On Fri, 15 May 2020 23:37:44 PDT (-0700), Anup Patel wrote:
> > > We extend RISC-V spike machine to allow creating a multi-socket machine.
> > > Each RISC-V spike machine socket is a set of HARTs and a CLINT instance.
> > > Other peripherals are shared between all RISC-V spike machine sockets.
> > > We also update RISC-V spike machine device tree to treat each socket
> > > as a NUMA node.
> > >
> > > The number of sockets in RISC-V spike machine can be specified using
> > > the "sockets=" sub-option of QEMU "-smp" command-line option. By
> > > default, only one socket RISC-V spike machine will be created.
> > >
> > > Currently, we only allow creating upto maximum 4 sockets with minimum
> > > 2 HARTs per socket. In future, this limits can be changed.
> > >
> > > Signed-off-by: Anup Patel <anup.patel@wdc.com>
> > > ---
> > >  hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
> > >  include/hw/riscv/spike.h |   8 +-
> > >  2 files changed, 133 insertions(+), 81 deletions(-)
> > >
> > > diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index
> > > d5e0103d89..f63c57a87c 100644
> > > --- a/hw/riscv/spike.c
> > > +++ b/hw/riscv/spike.c
> > > @@ -64,9 +64,11 @@ static void create_fdt(SpikeState *s, const struct
> > MemmapEntry *memmap,
> > >      uint64_t mem_size, const char *cmdline)  {
> > >      void *fdt;
> > > -    int cpu;
> > > -    uint32_t *cells;
> > > -    char *nodename;
> > > +    int cpu, socket;
> > > +    uint32_t *clint_cells;
> > > +    unsigned long clint_addr;
> > > +    uint32_t cpu_phandle, intc_phandle, phandle = 1;
> > > +    char *name, *clint_name, *clust_name, *core_name, *cpu_name,
> > > + *intc_name;
> > >
> > >      fdt = s->fdt = create_device_tree(&s->fdt_size);
> > >      if (!fdt) {
> > > @@ -88,68 +90,85 @@ static void create_fdt(SpikeState *s, const struct
> > MemmapEntry *memmap,
> > >      qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
> > >      qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
> > >
> > > -    nodename = g_strdup_printf("/memory@%lx",
> > > -        (long)memmap[SPIKE_DRAM].base);
> > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > +    name = g_strdup_printf("/memory@%lx",
> > (long)memmap[SPIKE_DRAM].base);
> > > +    qemu_fdt_add_subnode(fdt, name);
> > > +    qemu_fdt_setprop_cells(fdt, name, "reg",
> > >          memmap[SPIKE_DRAM].base >> 32, memmap[SPIKE_DRAM].base,
> > >          mem_size >> 32, mem_size);
> > > -    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
> > > -    g_free(nodename);
> > > +    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
> > > +    g_free(name);
> > >
> > >      qemu_fdt_add_subnode(fdt, "/cpus");
> > >      qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
> > >          SIFIVE_CLINT_TIMEBASE_FREQ);
> > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
> > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
> > > +    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
> > >
> > > -    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
> > > -        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> > > -        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > > -        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
> > > -        qemu_fdt_add_subnode(fdt, nodename);
> > > +    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
> > > +        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d", socket);
> > > +        qemu_fdt_add_subnode(fdt, clust_name);
> > > +
> > > +        clint_cells =  g_new0(uint32_t, s->soc[socket].num_harts *
> > > + 4);
> > > +
> > > +        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> > > +            cpu_phandle = phandle++;
> > > +
> > > +            cpu_name = g_strdup_printf("/cpus/cpu@%d",
> > > +                s->soc[socket].hartid_base + cpu);
> > > +            qemu_fdt_add_subnode(fdt, cpu_name);
> > >  #if defined(TARGET_RISCV32)
> > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32");
> > > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > > + "riscv,sv32");
> > >  #else
> > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
> > > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > > + "riscv,sv48");
> > >  #endif
> > > -        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> > > -        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> > > -        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
> > > -        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
> > > -        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
> > > -        qemu_fdt_add_subnode(fdt, intc);
> > > -        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
> > > -        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
> > > -        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
> > > -        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
> > > -        g_free(isa);
> > > -        g_free(intc);
> > > -        g_free(nodename);
> > > -    }
> > > +            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> > > +            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> > > +            g_free(name);
> > > +            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
> > > +            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> > > +                s->soc[socket].hartid_base + cpu);
> > > +            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
> > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle",
> > > + cpu_phandle);
> > > +
> > > +            intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name);
> > > +            qemu_fdt_add_subnode(fdt, intc_name);
> > > +            intc_phandle = phandle++;
> > > +            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
> > > +            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
> > > +                "riscv,cpu-intc");
> > > +            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0);
> > > +            qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells",
> > > + 1);
> > > +
> > > +            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > +            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > +            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > +            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > +
> > > +            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
> > > +            qemu_fdt_add_subnode(fdt, core_name);
> > > +            qemu_fdt_setprop_cell(fdt, core_name, "cpu",
> > > + cpu_phandle);
> > > +
> > > +            g_free(core_name);
> > > +            g_free(intc_name);
> > > +            g_free(cpu_name);
> > > +        }
> > >
> > > -    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
> > > -    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
> > > -        nodename =
> > > -            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > > -        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
> > > -        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > -        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > -        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > -        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > -        g_free(nodename);
> > > +        clint_addr = memmap[SPIKE_CLINT].base +
> > > +            (memmap[SPIKE_CLINT].size * socket);
> > > +        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
> > > +        qemu_fdt_add_subnode(fdt, clint_name);
> > > +        qemu_fdt_setprop_string(fdt, clint_name, "compatible", "riscv,clint0");
> > > +        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
> > > +            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
> > > +        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
> > > +            clint_cells, s->soc[socket].num_harts * sizeof(uint32_t)
> > > + * 4);
> > > +
> > > +        g_free(clint_name);
> > > +        g_free(clint_cells);
> > > +        g_free(clust_name);
> > >      }
> > > -    nodename = g_strdup_printf("/soc/clint@%lx",
> > > -        (long)memmap[SPIKE_CLINT].base);
> > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > -    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
> > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > -        0x0, memmap[SPIKE_CLINT].base,
> > > -        0x0, memmap[SPIKE_CLINT].size);
> > > -    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
> > > -        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
> > > -    g_free(cells);
> > > -    g_free(nodename);
> > >
> > >      if (cmdline) {
> > >          qemu_fdt_add_subnode(fdt, "/chosen"); @@ -160,23 +179,51 @@
> > > static void create_fdt(SpikeState *s, const struct MemmapEntry
> > > *memmap,  static void spike_board_init(MachineState *machine)  {
> > >      const struct MemmapEntry *memmap = spike_memmap;
> > > -
> > >      SpikeState *s = g_new0(SpikeState, 1);
> > >      MemoryRegion *system_memory = get_system_memory();
> > >      MemoryRegion *main_mem = g_new(MemoryRegion, 1);
> > >      MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
> > >      int i;
> > > +    char *soc_name;
> > >      unsigned int smp_cpus = machine->smp.cpus;
> > > -
> > > -    /* Initialize SOC */
> > > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> > > -                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > > -    object_property_set_str(OBJECT(&s->soc), machine->cpu_type, "cpu-
> > type",
> > > -                            &error_abort);
> > > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > > -                            &error_abort);
> > > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > > -                            &error_abort);
> > > +    unsigned int base_hartid, cpus_per_socket;
> > > +
> > > +    s->num_socs = machine->smp.sockets;
> > > +
> > > +    /* Ensure minumum required CPUs per socket */
> > > +    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
> > > +        s->num_socs = 1;
> >
> > Why?  It seems like creating single-hart sockets would be a good test case, and
> > I'm pretty sure it's a configuration that we had in embedded systems.
>
> Yes, single-hart sockets are sensible for testing software.
>
> When "sockets=" sub-option is not provided in "-smp " command line
> options, the machine->smp.sockets is set same as machine->smp.cpus
> by smp_parse() function in hw/core/machine.c. This means by default
> we will always get single-hart per socket. In other words, "-smp 4" will
> be 4 cpus and 4 sockets. This is counter intuitive for users because when
> "sockets=" is not provided we should default to single socket irrespective
> to number of cpus.
>
> I had added SPIKE_CPUS_PER_SOCKET_MIN to handle the default case
> when no "sockets=" sub-option is provided.
>
> Alternate approach will be:
> 1. Add more members in struct CpuTopology of include/hw/boards.h
>     to help us know whether "sockets=" option was passed or not
> 2. Update smp_parse() for new members in struct CpuTopology
> 3. Assume single-socket machine in QEMU RISC-V virt and QEMU
>     RISC-V spike machines when "sockets=" option was not passed
>
> Suggestions ??
>

I think it makes sense to just stick to what smp_parse() does. That's
what QEMU users are used to so we should follow that.

I agree it is strange that specifying `-smp x' will give you
max_cpus number of sockets and split the CPUs across them, but that's
what every other board (besides x86) does.

Alistair
Anup Patel May 27, 2020, 2:54 a.m. UTC | #4
> -----Original Message-----
> From: Alistair Francis <alistair23@gmail.com>
> Sent: 27 May 2020 06:08
> To: Anup Patel <Anup.Patel@wdc.com>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>; Peter Maydell
> <peter.maydell@linaro.org>; qemu-riscv@nongnu.org;
> sagark@eecs.berkeley.edu; anup@brainfault.org; qemu-devel@nongnu.org;
> Atish Patra <Atish.Patra@wdc.com>; Alistair Francis
> <Alistair.Francis@wdc.com>
> Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple sockets
> 
> On Fri, May 22, 2020 at 3:10 AM Anup Patel <Anup.Patel@wdc.com> wrote:
> >
> >
> >
> > > -----Original Message-----
> > > From: Palmer Dabbelt <palmer@dabbelt.com>
> > > Sent: 22 May 2020 01:46
> > > To: Anup Patel <Anup.Patel@wdc.com>
> > > Cc: Peter Maydell <peter.maydell@linaro.org>; Alistair Francis
> > > <Alistair.Francis@wdc.com>; sagark@eecs.berkeley.edu; Atish Patra
> > > <Atish.Patra@wdc.com>; anup@brainfault.org; qemu-riscv@nongnu.org;
> > > qemu-devel@nongnu.org; Anup Patel <Anup.Patel@wdc.com>
> > > Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple
> > > sockets
> > >
> > > On Fri, 15 May 2020 23:37:44 PDT (-0700), Anup Patel wrote:
> > > > We extend RISC-V spike machine to allow creating a multi-socket
> machine.
> > > > Each RISC-V spike machine socket is a set of HARTs and a CLINT instance.
> > > > Other peripherals are shared between all RISC-V spike machine sockets.
> > > > We also update RISC-V spike machine device tree to treat each
> > > > socket as a NUMA node.
> > > >
> > > > The number of sockets in RISC-V spike machine can be specified
> > > > using the "sockets=" sub-option of QEMU "-smp" command-line
> > > > option. By default, only one socket RISC-V spike machine will be created.
> > > >
> > > > Currently, we only allow creating upto maximum 4 sockets with
> > > > minimum
> > > > 2 HARTs per socket. In future, this limits can be changed.
> > > >
> > > > Signed-off-by: Anup Patel <anup.patel@wdc.com>
> > > > ---
> > > >  hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
> > > >  include/hw/riscv/spike.h |   8 +-
> > > >  2 files changed, 133 insertions(+), 81 deletions(-)
> > > >
> > > > diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index
> > > > d5e0103d89..f63c57a87c 100644
> > > > --- a/hw/riscv/spike.c
> > > > +++ b/hw/riscv/spike.c
> > > > @@ -64,9 +64,11 @@ static void create_fdt(SpikeState *s, const
> > > > struct
> > > MemmapEntry *memmap,
> > > >      uint64_t mem_size, const char *cmdline)  {
> > > >      void *fdt;
> > > > -    int cpu;
> > > > -    uint32_t *cells;
> > > > -    char *nodename;
> > > > +    int cpu, socket;
> > > > +    uint32_t *clint_cells;
> > > > +    unsigned long clint_addr;
> > > > +    uint32_t cpu_phandle, intc_phandle, phandle = 1;
> > > > +    char *name, *clint_name, *clust_name, *core_name, *cpu_name,
> > > > + *intc_name;
> > > >
> > > >      fdt = s->fdt = create_device_tree(&s->fdt_size);
> > > >      if (!fdt) {
> > > > @@ -88,68 +90,85 @@ static void create_fdt(SpikeState *s, const
> > > > struct
> > > MemmapEntry *memmap,
> > > >      qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
> > > >      qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
> > > >
> > > > -    nodename = g_strdup_printf("/memory@%lx",
> > > > -        (long)memmap[SPIKE_DRAM].base);
> > > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > > +    name = g_strdup_printf("/memory@%lx",
> > > (long)memmap[SPIKE_DRAM].base);
> > > > +    qemu_fdt_add_subnode(fdt, name);
> > > > +    qemu_fdt_setprop_cells(fdt, name, "reg",
> > > >          memmap[SPIKE_DRAM].base >> 32, memmap[SPIKE_DRAM].base,
> > > >          mem_size >> 32, mem_size);
> > > > -    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
> > > > -    g_free(nodename);
> > > > +    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
> > > > +    g_free(name);
> > > >
> > > >      qemu_fdt_add_subnode(fdt, "/cpus");
> > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
> > > >          SIFIVE_CLINT_TIMEBASE_FREQ);
> > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
> > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
> > > > +    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
> > > >
> > > > -    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
> > > > -        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> > > > -        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller",
> cpu);
> > > > -        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
> > > > -        qemu_fdt_add_subnode(fdt, nodename);
> > > > +    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
> > > > +        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d", socket);
> > > > +        qemu_fdt_add_subnode(fdt, clust_name);
> > > > +
> > > > +        clint_cells =  g_new0(uint32_t, s->soc[socket].num_harts
> > > > + * 4);
> > > > +
> > > > +        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> > > > +            cpu_phandle = phandle++;
> > > > +
> > > > +            cpu_name = g_strdup_printf("/cpus/cpu@%d",
> > > > +                s->soc[socket].hartid_base + cpu);
> > > > +            qemu_fdt_add_subnode(fdt, cpu_name);
> > > >  #if defined(TARGET_RISCV32)
> > > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32");
> > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > > > + "riscv,sv32");
> > > >  #else
> > > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
> > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > > > + "riscv,sv48");
> > > >  #endif
> > > > -        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> > > > -        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> > > > -        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
> > > > -        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
> > > > -        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
> > > > -        qemu_fdt_add_subnode(fdt, intc);
> > > > -        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
> > > > -        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
> > > > -        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
> > > > -        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
> > > > -        g_free(isa);
> > > > -        g_free(intc);
> > > > -        g_free(nodename);
> > > > -    }
> > > > +            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> > > > +            g_free(name);
> > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
> > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> > > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> > > > +                s->soc[socket].hartid_base + cpu);
> > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
> > > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle",
> > > > + cpu_phandle);
> > > > +
> > > > +            intc_name = g_strdup_printf("%s/interrupt-controller",
> cpu_name);
> > > > +            qemu_fdt_add_subnode(fdt, intc_name);
> > > > +            intc_phandle = phandle++;
> > > > +            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
> > > > +            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
> > > > +                "riscv,cpu-intc");
> > > > +            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL,
> 0);
> > > > +            qemu_fdt_setprop_cell(fdt, intc_name,
> > > > + "#interrupt-cells", 1);
> > > > +
> > > > +            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > > +            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > > +            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > > +            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > > +
> > > > +            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
> > > > +            qemu_fdt_add_subnode(fdt, core_name);
> > > > +            qemu_fdt_setprop_cell(fdt, core_name, "cpu",
> > > > + cpu_phandle);
> > > > +
> > > > +            g_free(core_name);
> > > > +            g_free(intc_name);
> > > > +            g_free(cpu_name);
> > > > +        }
> > > >
> > > > -    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
> > > > -    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
> > > > -        nodename =
> > > > -            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > > > -        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
> > > > -        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > > -        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > > -        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > > -        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > > -        g_free(nodename);
> > > > +        clint_addr = memmap[SPIKE_CLINT].base +
> > > > +            (memmap[SPIKE_CLINT].size * socket);
> > > > +        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
> > > > +        qemu_fdt_add_subnode(fdt, clint_name);
> > > > +        qemu_fdt_setprop_string(fdt, clint_name, "compatible",
> "riscv,clint0");
> > > > +        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
> > > > +            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
> > > > +        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
> > > > +            clint_cells, s->soc[socket].num_harts *
> > > > + sizeof(uint32_t)
> > > > + * 4);
> > > > +
> > > > +        g_free(clint_name);
> > > > +        g_free(clint_cells);
> > > > +        g_free(clust_name);
> > > >      }
> > > > -    nodename = g_strdup_printf("/soc/clint@%lx",
> > > > -        (long)memmap[SPIKE_CLINT].base);
> > > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > > -    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
> > > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > > -        0x0, memmap[SPIKE_CLINT].base,
> > > > -        0x0, memmap[SPIKE_CLINT].size);
> > > > -    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
> > > > -        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
> > > > -    g_free(cells);
> > > > -    g_free(nodename);
> > > >
> > > >      if (cmdline) {
> > > >          qemu_fdt_add_subnode(fdt, "/chosen"); @@ -160,23 +179,51
> > > > @@ static void create_fdt(SpikeState *s, const struct MemmapEntry
> > > > *memmap,  static void spike_board_init(MachineState *machine)  {
> > > >      const struct MemmapEntry *memmap = spike_memmap;
> > > > -
> > > >      SpikeState *s = g_new0(SpikeState, 1);
> > > >      MemoryRegion *system_memory = get_system_memory();
> > > >      MemoryRegion *main_mem = g_new(MemoryRegion, 1);
> > > >      MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
> > > >      int i;
> > > > +    char *soc_name;
> > > >      unsigned int smp_cpus = machine->smp.cpus;
> > > > -
> > > > -    /* Initialize SOC */
> > > > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> > > > -                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > > > -    object_property_set_str(OBJECT(&s->soc), machine->cpu_type, "cpu-
> > > type",
> > > > -                            &error_abort);
> > > > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > > > -                            &error_abort);
> > > > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > > > -                            &error_abort);
> > > > +    unsigned int base_hartid, cpus_per_socket;
> > > > +
> > > > +    s->num_socs = machine->smp.sockets;
> > > > +
> > > > +    /* Ensure minumum required CPUs per socket */
> > > > +    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
> > > > +        s->num_socs = 1;
> > >
> > > Why?  It seems like creating single-hart sockets would be a good
> > > test case, and I'm pretty sure it's a configuration that we had in embedded
> systems.
> >
> > Yes, single-hart sockets are sensible for testing software.
> >
> > When "sockets=" sub-option is not provided in "-smp " command line
> > options, the machine->smp.sockets is set same as machine->smp.cpus by
> > smp_parse() function in hw/core/machine.c. This means by default we
> > will always get single-hart per socket. In other words, "-smp 4" will
> > be 4 cpus and 4 sockets. This is counter intuitive for users because
> > when "sockets=" is not provided we should default to single socket
> > irrespective to number of cpus.
> >
> > I had added SPIKE_CPUS_PER_SOCKET_MIN to handle the default case when
> > no "sockets=" sub-option is provided.
> >
> > Alternate approach will be:
> > 1. Add more members in struct CpuTopology of include/hw/boards.h
> >     to help us know whether "sockets=" option was passed or not 2.
> > Update smp_parse() for new members in struct CpuTopology 3. Assume
> > single-socket machine in QEMU RISC-V virt and QEMU
> >     RISC-V spike machines when "sockets=" option was not passed
> >
> > Suggestions ??
> >
> 
> I think it makes sense to just stick to what smp_parse() does. That's what QEMU
> users are used to so we should follow that.
> 
> I agree it is strange that is specifying `-smp x' you will get max_cpus number of
> sockets and split the CPUs via them, but that's what every other board (besides
> x86) does.

So we are fine with SPIKE_CPUS_PER_SOCKET_MIN=2 for now, right ??

Regards,
Anup
Alistair Francis May 27, 2020, 3:30 a.m. UTC | #5
Why do we need SPIKE_CPUS_PER_SOCKET_MIN at all?

Alistair

On Tue, May 26, 2020 at 7:55 PM Anup Patel <Anup.Patel@wdc.com> wrote:
>
>
>
> > -----Original Message-----
> > From: Alistair Francis <alistair23@gmail.com>
> > Sent: 27 May 2020 06:08
> > To: Anup Patel <Anup.Patel@wdc.com>
> > Cc: Palmer Dabbelt <palmer@dabbelt.com>; Peter Maydell
> > <peter.maydell@linaro.org>; qemu-riscv@nongnu.org;
> > sagark@eecs.berkeley.edu; anup@brainfault.org; qemu-devel@nongnu.org;
> > Atish Patra <Atish.Patra@wdc.com>; Alistair Francis
> > <Alistair.Francis@wdc.com>
> > Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple sockets
> >
> > On Fri, May 22, 2020 at 3:10 AM Anup Patel <Anup.Patel@wdc.com> wrote:
> > >
> > >
> > >
> > > > -----Original Message-----
> > > > From: Palmer Dabbelt <palmer@dabbelt.com>
> > > > Sent: 22 May 2020 01:46
> > > > To: Anup Patel <Anup.Patel@wdc.com>
> > > > Cc: Peter Maydell <peter.maydell@linaro.org>; Alistair Francis
> > > > <Alistair.Francis@wdc.com>; sagark@eecs.berkeley.edu; Atish Patra
> > > > <Atish.Patra@wdc.com>; anup@brainfault.org; qemu-riscv@nongnu.org;
> > > > qemu-devel@nongnu.org; Anup Patel <Anup.Patel@wdc.com>
> > > > Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple
> > > > sockets
> > > >
> > > > On Fri, 15 May 2020 23:37:44 PDT (-0700), Anup Patel wrote:
> > > > > We extend RISC-V spike machine to allow creating a multi-socket
> > machine.
> > > > > Each RISC-V spike machine socket is a set of HARTs and a CLINT instance.
> > > > > Other peripherals are shared between all RISC-V spike machine sockets.
> > > > > We also update RISC-V spike machine device tree to treat each
> > > > > socket as a NUMA node.
> > > > >
> > > > > The number of sockets in RISC-V spike machine can be specified
> > > > > using the "sockets=" sub-option of QEMU "-smp" command-line
> > > > > option. By default, only one socket RISC-V spike machine will be created.
> > > > >
> > > > > Currently, we only allow creating upto maximum 4 sockets with
> > > > > minimum
> > > > > 2 HARTs per socket. In future, this limits can be changed.
> > > > >
> > > > > Signed-off-by: Anup Patel <anup.patel@wdc.com>
> > > > > ---
> > > > >  hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
> > > > >  include/hw/riscv/spike.h |   8 +-
> > > > >  2 files changed, 133 insertions(+), 81 deletions(-)
> > > > >
> > > > > diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index
> > > > > d5e0103d89..f63c57a87c 100644
> > > > > --- a/hw/riscv/spike.c
> > > > > +++ b/hw/riscv/spike.c
> > > > > @@ -64,9 +64,11 @@ static void create_fdt(SpikeState *s, const
> > > > > struct
> > > > MemmapEntry *memmap,
> > > > >      uint64_t mem_size, const char *cmdline)  {
> > > > >      void *fdt;
> > > > > -    int cpu;
> > > > > -    uint32_t *cells;
> > > > > -    char *nodename;
> > > > > +    int cpu, socket;
> > > > > +    uint32_t *clint_cells;
> > > > > +    unsigned long clint_addr;
> > > > > +    uint32_t cpu_phandle, intc_phandle, phandle = 1;
> > > > > +    char *name, *clint_name, *clust_name, *core_name, *cpu_name,
> > > > > + *intc_name;
> > > > >
> > > > >      fdt = s->fdt = create_device_tree(&s->fdt_size);
> > > > >      if (!fdt) {
> > > > > @@ -88,68 +90,85 @@ static void create_fdt(SpikeState *s, const
> > > > > struct
> > > > MemmapEntry *memmap,
> > > > >      qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
> > > > >      qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
> > > > >
> > > > > -    nodename = g_strdup_printf("/memory@%lx",
> > > > > -        (long)memmap[SPIKE_DRAM].base);
> > > > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > > > +    name = g_strdup_printf("/memory@%lx",
> > > > (long)memmap[SPIKE_DRAM].base);
> > > > > +    qemu_fdt_add_subnode(fdt, name);
> > > > > +    qemu_fdt_setprop_cells(fdt, name, "reg",
> > > > >          memmap[SPIKE_DRAM].base >> 32, memmap[SPIKE_DRAM].base,
> > > > >          mem_size >> 32, mem_size);
> > > > > -    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
> > > > > -    g_free(nodename);
> > > > > +    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
> > > > > +    g_free(name);
> > > > >
> > > > >      qemu_fdt_add_subnode(fdt, "/cpus");
> > > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
> > > > >          SIFIVE_CLINT_TIMEBASE_FREQ);
> > > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
> > > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
> > > > > +    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
> > > > >
> > > > > -    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
> > > > > -        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> > > > > -        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller",
> > cpu);
> > > > > -        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
> > > > > -        qemu_fdt_add_subnode(fdt, nodename);
> > > > > +    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
> > > > > +        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d", socket);
> > > > > +        qemu_fdt_add_subnode(fdt, clust_name);
> > > > > +
> > > > > +        clint_cells =  g_new0(uint32_t, s->soc[socket].num_harts
> > > > > + * 4);
> > > > > +
> > > > > +        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> > > > > +            cpu_phandle = phandle++;
> > > > > +
> > > > > +            cpu_name = g_strdup_printf("/cpus/cpu@%d",
> > > > > +                s->soc[socket].hartid_base + cpu);
> > > > > +            qemu_fdt_add_subnode(fdt, cpu_name);
> > > > >  #if defined(TARGET_RISCV32)
> > > > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32");
> > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > > > > + "riscv,sv32");
> > > > >  #else
> > > > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
> > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type",
> > > > > + "riscv,sv48");
> > > > >  #endif
> > > > > -        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> > > > > -        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> > > > > -        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
> > > > > -        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
> > > > > -        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
> > > > > -        qemu_fdt_add_subnode(fdt, intc);
> > > > > -        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
> > > > > -        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
> > > > > -        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
> > > > > -        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
> > > > > -        g_free(isa);
> > > > > -        g_free(intc);
> > > > > -        g_free(nodename);
> > > > > -    }
> > > > > +            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> > > > > +            g_free(name);
> > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
> > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> > > > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> > > > > +                s->soc[socket].hartid_base + cpu);
> > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
> > > > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle",
> > > > > + cpu_phandle);
> > > > > +
> > > > > +            intc_name = g_strdup_printf("%s/interrupt-controller",
> > cpu_name);
> > > > > +            qemu_fdt_add_subnode(fdt, intc_name);
> > > > > +            intc_phandle = phandle++;
> > > > > +            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
> > > > > +            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
> > > > > +                "riscv,cpu-intc");
> > > > > +            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL,
> > 0);
> > > > > +            qemu_fdt_setprop_cell(fdt, intc_name,
> > > > > + "#interrupt-cells", 1);
> > > > > +
> > > > > +            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > > > +            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > > > +            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > > > +            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > > > +
> > > > > +            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
> > > > > +            qemu_fdt_add_subnode(fdt, core_name);
> > > > > +            qemu_fdt_setprop_cell(fdt, core_name, "cpu",
> > > > > + cpu_phandle);
> > > > > +
> > > > > +            g_free(core_name);
> > > > > +            g_free(intc_name);
> > > > > +            g_free(cpu_name);
> > > > > +        }
> > > > >
> > > > > -    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
> > > > > -    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
> > > > > -        nodename =
> > > > > -            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > > > > -        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
> > > > > -        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > > > -        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > > > -        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > > > -        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > > > -        g_free(nodename);
> > > > > +        clint_addr = memmap[SPIKE_CLINT].base +
> > > > > +            (memmap[SPIKE_CLINT].size * socket);
> > > > > +        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
> > > > > +        qemu_fdt_add_subnode(fdt, clint_name);
> > > > > +        qemu_fdt_setprop_string(fdt, clint_name, "compatible",
> > "riscv,clint0");
> > > > > +        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
> > > > > +            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
> > > > > +        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
> > > > > +            clint_cells, s->soc[socket].num_harts *
> > > > > + sizeof(uint32_t)
> > > > > + * 4);
> > > > > +
> > > > > +        g_free(clint_name);
> > > > > +        g_free(clint_cells);
> > > > > +        g_free(clust_name);
> > > > >      }
> > > > > -    nodename = g_strdup_printf("/soc/clint@%lx",
> > > > > -        (long)memmap[SPIKE_CLINT].base);
> > > > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > > > -    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
> > > > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > > > -        0x0, memmap[SPIKE_CLINT].base,
> > > > > -        0x0, memmap[SPIKE_CLINT].size);
> > > > > -    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
> > > > > -        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
> > > > > -    g_free(cells);
> > > > > -    g_free(nodename);
> > > > >
> > > > >      if (cmdline) {
> > > > >          qemu_fdt_add_subnode(fdt, "/chosen"); @@ -160,23 +179,51
> > > > > @@ static void create_fdt(SpikeState *s, const struct MemmapEntry
> > > > > *memmap,  static void spike_board_init(MachineState *machine)  {
> > > > >      const struct MemmapEntry *memmap = spike_memmap;
> > > > > -
> > > > >      SpikeState *s = g_new0(SpikeState, 1);
> > > > >      MemoryRegion *system_memory = get_system_memory();
> > > > >      MemoryRegion *main_mem = g_new(MemoryRegion, 1);
> > > > >      MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
> > > > >      int i;
> > > > > +    char *soc_name;
> > > > >      unsigned int smp_cpus = machine->smp.cpus;
> > > > > -
> > > > > -    /* Initialize SOC */
> > > > > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
> > > > > -                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > > > > -    object_property_set_str(OBJECT(&s->soc), machine->cpu_type, "cpu-
> > > > type",
> > > > > -                            &error_abort);
> > > > > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > > > > -                            &error_abort);
> > > > > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > > > > -                            &error_abort);
> > > > > +    unsigned int base_hartid, cpus_per_socket;
> > > > > +
> > > > > +    s->num_socs = machine->smp.sockets;
> > > > > +
> > > > > +    /* Ensure minumum required CPUs per socket */
> > > > > +    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
> > > > > +        s->num_socs = 1;
> > > >
> > > > Why?  It seems like creating single-hart sockets would be a good
> > > > test case, and I'm pretty sure it's a configuration that we had in embedded
> > systems.
> > >
> > > Yes, single-hart sockets are sensible for testing software.
> > >
> > > When "sockets=" sub-option is not provided in "-smp " command line
> > > options, the machine->smp.sockets is set same as machine->smp.cpus by
> > > smp_parse() function in hw/core/machine.c. This means by default we
> > > will always get single-hart per socket. In other words, "-smp 4" will
> > > be 4 cpus and 4 sockets. This is counter intuitive for users because
> > > when "sockets=" is not provided we should default to single socket
> > > irrespective to number of cpus.
> > >
> > > I had added SPIKE_CPUS_PER_SOCKET_MIN to handle the default case when
> > > no "sockets=" sub-option is provided.
> > >
> > > Alternate approach will be:
> > > 1. Add more members in struct CpuTopology of include/hw/boards.h
> > >     to help us know whether "sockets=" option was passed or not 2.
> > > Update smp_parse() for new members in struct CpuTopology 3. Assume
> > > single-socket machine in QEMU RISC-V virt and QEMU
> > >     RISC-V spike machines when "sockets=" option was not passed
> > >
> > > Suggestions ??
> > >
> >
> > I think it makes sense to just stick to what smp_parse() does. That's what QEMU
> > users are used to so we should follow that.
> >
> > I agree it is strange that is specifying `-smp x' you will get max_cpus number of
> > sockets and split the CPUs via them, but that's what every other board (besides
> > x86) does.
>
> So we are fine with SPIKE_CPUS_PER_SOCKET_MIN=2 for now, right ??

Why do we need SPIKE_CPUS_PER_SOCKET_MIN at all?

Alistair

>
> Regards,
> Anup
Anup Patel May 27, 2020, 3:59 a.m. UTC | #6
> -----Original Message-----
> From: Alistair Francis <alistair23@gmail.com>
> Sent: 27 May 2020 09:00
> To: Anup Patel <Anup.Patel@wdc.com>
> Cc: Palmer Dabbelt <palmer@dabbelt.com>; Peter Maydell
> <peter.maydell@linaro.org>; qemu-riscv@nongnu.org;
> sagark@eecs.berkeley.edu; anup@brainfault.org; qemu-devel@nongnu.org;
> Atish Patra <Atish.Patra@wdc.com>; Alistair Francis
> <Alistair.Francis@wdc.com>
> Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple sockets
> 
>  at all?
> 
> AlistairOn Tue, May 26, 2020 at 7:55 PM Anup Patel <Anup.Patel@wdc.com>
> wrote:
> >
> >
> >
> > > -----Original Message-----
> > > From: Alistair Francis <alistair23@gmail.com>
> > > Sent: 27 May 2020 06:08
> > > To: Anup Patel <Anup.Patel@wdc.com>
> > > Cc: Palmer Dabbelt <palmer@dabbelt.com>; Peter Maydell
> > > <peter.maydell@linaro.org>; qemu-riscv@nongnu.org;
> > > sagark@eecs.berkeley.edu; anup@brainfault.org;
> > > qemu-devel@nongnu.org; Atish Patra <Atish.Patra@wdc.com>; Alistair
> > > Francis <Alistair.Francis@wdc.com>
> > > Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating multiple
> > > sockets
> > >
> > > On Fri, May 22, 2020 at 3:10 AM Anup Patel <Anup.Patel@wdc.com> wrote:
> > > >
> > > >
> > > >
> > > > > -----Original Message-----
> > > > > From: Palmer Dabbelt <palmer@dabbelt.com>
> > > > > Sent: 22 May 2020 01:46
> > > > > To: Anup Patel <Anup.Patel@wdc.com>
> > > > > Cc: Peter Maydell <peter.maydell@linaro.org>; Alistair Francis
> > > > > <Alistair.Francis@wdc.com>; sagark@eecs.berkeley.edu; Atish
> > > > > Patra <Atish.Patra@wdc.com>; anup@brainfault.org;
> > > > > qemu-riscv@nongnu.org; qemu-devel@nongnu.org; Anup Patel
> > > > > <Anup.Patel@wdc.com>
> > > > > Subject: Re: [PATCH 2/4] hw/riscv: spike: Allow creating
> > > > > multiple sockets
> > > > >
> > > > > On Fri, 15 May 2020 23:37:44 PDT (-0700), Anup Patel wrote:
> > > > > > We extend RISC-V spike machine to allow creating a
> > > > > > multi-socket
> > > machine.
> > > > > > Each RISC-V spike machine socket is a set of HARTs and a CLINT
> instance.
> > > > > > Other peripherals are shared between all RISC-V spike machine
> sockets.
> > > > > > We also update RISC-V spike machine device tree to treat each
> > > > > > socket as a NUMA node.
> > > > > >
> > > > > > The number of sockets in RISC-V spike machine can be specified
> > > > > > using the "sockets=" sub-option of QEMU "-smp" command-line
> > > > > > option. By default, only one socket RISC-V spike machine will be
> created.
> > > > > >
> > > > > > Currently, we only allow creating upto maximum 4 sockets with
> > > > > > minimum
> > > > > > 2 HARTs per socket. In future, this limits can be changed.
> > > > > >
> > > > > > Signed-off-by: Anup Patel <anup.patel@wdc.com>
> > > > > > ---
> > > > > >  hw/riscv/spike.c         | 206 ++++++++++++++++++++++++---------------
> > > > > >  include/hw/riscv/spike.h |   8 +-
> > > > > >  2 files changed, 133 insertions(+), 81 deletions(-)
> > > > > >
> > > > > > diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c index
> > > > > > d5e0103d89..f63c57a87c 100644
> > > > > > --- a/hw/riscv/spike.c
> > > > > > +++ b/hw/riscv/spike.c
> > > > > > @@ -64,9 +64,11 @@ static void create_fdt(SpikeState *s, const
> > > > > > struct
> > > > > MemmapEntry *memmap,
> > > > > >      uint64_t mem_size, const char *cmdline)  {
> > > > > >      void *fdt;
> > > > > > -    int cpu;
> > > > > > -    uint32_t *cells;
> > > > > > -    char *nodename;
> > > > > > +    int cpu, socket;
> > > > > > +    uint32_t *clint_cells;
> > > > > > +    unsigned long clint_addr;
> > > > > > +    uint32_t cpu_phandle, intc_phandle, phandle = 1;
> > > > > > +    char *name, *clint_name, *clust_name, *core_name,
> > > > > > + *cpu_name, *intc_name;
> > > > > >
> > > > > >      fdt = s->fdt = create_device_tree(&s->fdt_size);
> > > > > >      if (!fdt) {
> > > > > > @@ -88,68 +90,85 @@ static void create_fdt(SpikeState *s,
> > > > > > const struct
> > > > > MemmapEntry *memmap,
> > > > > >      qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
> > > > > >      qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells",
> > > > > > 0x2);
> > > > > >
> > > > > > -    nodename = g_strdup_printf("/memory@%lx",
> > > > > > -        (long)memmap[SPIKE_DRAM].base);
> > > > > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > > > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > > > > +    name = g_strdup_printf("/memory@%lx",
> > > > > (long)memmap[SPIKE_DRAM].base);
> > > > > > +    qemu_fdt_add_subnode(fdt, name);
> > > > > > +    qemu_fdt_setprop_cells(fdt, name, "reg",
> > > > > >          memmap[SPIKE_DRAM].base >> 32,
> memmap[SPIKE_DRAM].base,
> > > > > >          mem_size >> 32, mem_size);
> > > > > > -    qemu_fdt_setprop_string(fdt, nodename, "device_type",
> "memory");
> > > > > > -    g_free(nodename);
> > > > > > +    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
> > > > > > +    g_free(name);
> > > > > >
> > > > > >      qemu_fdt_add_subnode(fdt, "/cpus");
> > > > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
> > > > > >          SIFIVE_CLINT_TIMEBASE_FREQ);
> > > > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
> > > > > >      qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells",
> > > > > > 0x1);
> > > > > > +    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
> > > > > >
> > > > > > -    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
> > > > > > -        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
> > > > > > -        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-
> controller",
> > > cpu);
> > > > > > -        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
> > > > > > -        qemu_fdt_add_subnode(fdt, nodename);
> > > > > > +    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
> > > > > > +        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d",
> socket);
> > > > > > +        qemu_fdt_add_subnode(fdt, clust_name);
> > > > > > +
> > > > > > +        clint_cells =  g_new0(uint32_t,
> > > > > > + s->soc[socket].num_harts
> > > > > > + * 4);
> > > > > > +
> > > > > > +        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
> > > > > > +            cpu_phandle = phandle++;
> > > > > > +
> > > > > > +            cpu_name = g_strdup_printf("/cpus/cpu@%d",
> > > > > > +                s->soc[socket].hartid_base + cpu);
> > > > > > +            qemu_fdt_add_subnode(fdt, cpu_name);
> > > > > >  #if defined(TARGET_RISCV32)
> > > > > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type",
> "riscv,sv32");
> > > > > > +            qemu_fdt_setprop_string(fdt, cpu_name,
> > > > > > + "mmu-type", "riscv,sv32");
> > > > > >  #else
> > > > > > -        qemu_fdt_setprop_string(fdt, nodename, "mmu-type",
> "riscv,sv48");
> > > > > > +            qemu_fdt_setprop_string(fdt, cpu_name,
> > > > > > + "mmu-type", "riscv,sv48");
> > > > > >  #endif
> > > > > > -        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
> > > > > > -        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
> > > > > > -        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
> > > > > > -        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
> > > > > > -        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
> > > > > > -        qemu_fdt_add_subnode(fdt, intc);
> > > > > > -        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
> > > > > > -        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-
> intc");
> > > > > > -        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
> > > > > > -        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
> > > > > > -        g_free(isa);
> > > > > > -        g_free(intc);
> > > > > > -        g_free(nodename);
> > > > > > -    }
> > > > > > +            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
> > > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
> > > > > > +            g_free(name);
> > > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "compatible",
> "riscv");
> > > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
> > > > > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
> > > > > > +                s->soc[socket].hartid_base + cpu);
> > > > > > +            qemu_fdt_setprop_string(fdt, cpu_name, "device_type",
> "cpu");
> > > > > > +            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle",
> > > > > > + cpu_phandle);
> > > > > > +
> > > > > > +            intc_name =
> > > > > > + g_strdup_printf("%s/interrupt-controller",
> > > cpu_name);
> > > > > > +            qemu_fdt_add_subnode(fdt, intc_name);
> > > > > > +            intc_phandle = phandle++;
> > > > > > +            qemu_fdt_setprop_cell(fdt, intc_name, "phandle",
> intc_phandle);
> > > > > > +            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
> > > > > > +                "riscv,cpu-intc");
> > > > > > +            qemu_fdt_setprop(fdt, intc_name,
> > > > > > + "interrupt-controller", NULL,
> > > 0);
> > > > > > +            qemu_fdt_setprop_cell(fdt, intc_name,
> > > > > > + "#interrupt-cells", 1);
> > > > > > +
> > > > > > +            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > > > > +            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > > > > +            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > > > > +            clint_cells[cpu * 4 + 3] =
> > > > > > + cpu_to_be32(IRQ_M_TIMER);
> > > > > > +
> > > > > > +            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
> > > > > > +            qemu_fdt_add_subnode(fdt, core_name);
> > > > > > +            qemu_fdt_setprop_cell(fdt, core_name, "cpu",
> > > > > > + cpu_phandle);
> > > > > > +
> > > > > > +            g_free(core_name);
> > > > > > +            g_free(intc_name);
> > > > > > +            g_free(cpu_name);
> > > > > > +        }
> > > > > >
> > > > > > -    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
> > > > > > -    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
> > > > > > -        nodename =
> > > > > > -            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
> > > > > > -        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
> > > > > > -        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
> > > > > > -        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
> > > > > > -        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
> > > > > > -        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
> > > > > > -        g_free(nodename);
> > > > > > +        clint_addr = memmap[SPIKE_CLINT].base +
> > > > > > +            (memmap[SPIKE_CLINT].size * socket);
> > > > > > +        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
> > > > > > +        qemu_fdt_add_subnode(fdt, clint_name);
> > > > > > +        qemu_fdt_setprop_string(fdt, clint_name,
> > > > > > + "compatible",
> > > "riscv,clint0");
> > > > > > +        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
> > > > > > +            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
> > > > > > +        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
> > > > > > +            clint_cells, s->soc[socket].num_harts *
> > > > > > + sizeof(uint32_t)
> > > > > > + * 4);
> > > > > > +
> > > > > > +        g_free(clint_name);
> > > > > > +        g_free(clint_cells);
> > > > > > +        g_free(clust_name);
> > > > > >      }
> > > > > > -    nodename = g_strdup_printf("/soc/clint@%lx",
> > > > > > -        (long)memmap[SPIKE_CLINT].base);
> > > > > > -    qemu_fdt_add_subnode(fdt, nodename);
> > > > > > -    qemu_fdt_setprop_string(fdt, nodename, "compatible",
> "riscv,clint0");
> > > > > > -    qemu_fdt_setprop_cells(fdt, nodename, "reg",
> > > > > > -        0x0, memmap[SPIKE_CLINT].base,
> > > > > > -        0x0, memmap[SPIKE_CLINT].size);
> > > > > > -    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
> > > > > > -        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
> > > > > > -    g_free(cells);
> > > > > > -    g_free(nodename);
> > > > > >
> > > > > >      if (cmdline) {
> > > > > >          qemu_fdt_add_subnode(fdt, "/chosen"); @@ -160,23
> > > > > > +179,51 @@ static void create_fdt(SpikeState *s, const struct
> > > > > > MemmapEntry *memmap,  static void spike_board_init(MachineState
> *machine)  {
> > > > > >      const struct MemmapEntry *memmap = spike_memmap;
> > > > > > -
> > > > > >      SpikeState *s = g_new0(SpikeState, 1);
> > > > > >      MemoryRegion *system_memory = get_system_memory();
> > > > > >      MemoryRegion *main_mem = g_new(MemoryRegion, 1);
> > > > > >      MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
> > > > > >      int i;
> > > > > > +    char *soc_name;
> > > > > >      unsigned int smp_cpus = machine->smp.cpus;
> > > > > > -
> > > > > > -    /* Initialize SOC */
> > > > > > -    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s-
> >soc),
> > > > > > -                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
> > > > > > -    object_property_set_str(OBJECT(&s->soc), machine->cpu_type,
> "cpu-
> > > > > type",
> > > > > > -                            &error_abort);
> > > > > > -    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
> > > > > > -                            &error_abort);
> > > > > > -    object_property_set_bool(OBJECT(&s->soc), true, "realized",
> > > > > > -                            &error_abort);
> > > > > > +    unsigned int base_hartid, cpus_per_socket;
> > > > > > +
> > > > > > +    s->num_socs = machine->smp.sockets;
> > > > > > +
> > > > > > +    /* Ensure minumum required CPUs per socket */
> > > > > > +    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
> > > > > > +        s->num_socs = 1;
> > > > >
> > > > > Why?  It seems like creating single-hart sockets would be a good
> > > > > test case, and I'm pretty sure it's a configuration that we had
> > > > > in embedded
> > > systems.
> > > >
> > > > Yes, single-hart sockets are sensible for testing software.
> > > >
> > > > When "sockets=" sub-option is not provided in "-smp " command line
> > > > options, the machine->smp.sockets is set same as machine->smp.cpus
> > > > by
> > > > smp_parse() function in hw/core/machine.c. This means by default
> > > > we will always get single-hart per socket. In other words, "-smp
> > > > 4" will be 4 cpus and 4 sockets. This is counter intuitive for
> > > > users because when "sockets=" is not provided we should default to
> > > > single socket irrespective to number of cpus.
> > > >
> > > > I had added SPIKE_CPUS_PER_SOCKET_MIN to handle the default case
> > > > when no "sockets=" sub-option is provided.
> > > >
> > > > Alternate approach will be:
> > > > 1. Add more members in struct CpuTopology of include/hw/boards.h
> > > >     to help us know whether "sockets=" option was passed or not 2.
> > > > Update smp_parse() for new members in struct CpuTopology 3. Assume
> > > > single-socket machine in QEMU RISC-V virt and QEMU
> > > >     RISC-V spike machines when "sockets=" option was not passed
> > > >
> > > > Suggestions ??
> > > >
> > >
> > > I think it makes sense to just stick to what smp_parse() does.
> > > That's what QEMU users are used to so we should follow that.
> > >
> > > I agree it is strange that is specifying `-smp x' you will get
> > > max_cpus number of sockets and split the CPUs via them, but that's
> > > what every other board (besides
> > > x86) does.
> >
> > So we are fine with SPIKE_CPUS_PER_SOCKET_MIN=2 for now, right ??
> 
> Why do we need SPIKE_CPUS_PER_SOCKET_MIN at all?

We are creating a separate CLINT and PLIC for each socket but the
VirtIO and other devices can only connect to one PLIC instance.

Now if we have one CPU/HART per socket (by default) then only
one CPU can take all VirtIO interrupts. We can't even change the
irq affinity.

The SPIKE_CPUS_PER_SOCKET_MIN=2 check helps us ensure that
when smp_parse() gives us one CPU/HART per socket (by default),
we ignore the "sockets" value and force it to just one socket.
That's why SPIKE_CPUS_PER_SOCKET_MIN is a work-around for
the non-intuitive behavior of smp_parse().

My previous suggestion was to add a "sockets_available" field in
"struct CpuTopology". The other fields of "struct CpuTopology"
will remain as-is (no change in semantics) so it will work fine
for other architectures. The "sockets_available" field will tell
us whether the "sockets" sub-option was specified on the command
line (or not). Using the "sockets_available" field we can remove
the SPIKE_CPUS_PER_SOCKET_MIN=2 work-around. In fact, we can
totally remove SPIKE_CPUS_PER_SOCKET_MIN.

Regards,
Anup
diff mbox series

Patch

diff --git a/hw/riscv/spike.c b/hw/riscv/spike.c
index d5e0103d89..f63c57a87c 100644
--- a/hw/riscv/spike.c
+++ b/hw/riscv/spike.c
@@ -64,9 +64,11 @@  static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
     uint64_t mem_size, const char *cmdline)
 {
     void *fdt;
-    int cpu;
-    uint32_t *cells;
-    char *nodename;
+    int cpu, socket;
+    uint32_t *clint_cells;
+    unsigned long clint_addr;
+    uint32_t cpu_phandle, intc_phandle, phandle = 1;
+    char *name, *clint_name, *clust_name, *core_name, *cpu_name, *intc_name;
 
     fdt = s->fdt = create_device_tree(&s->fdt_size);
     if (!fdt) {
@@ -88,68 +90,85 @@  static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
     qemu_fdt_setprop_cell(fdt, "/soc", "#size-cells", 0x2);
     qemu_fdt_setprop_cell(fdt, "/soc", "#address-cells", 0x2);
 
-    nodename = g_strdup_printf("/memory@%lx",
-        (long)memmap[SPIKE_DRAM].base);
-    qemu_fdt_add_subnode(fdt, nodename);
-    qemu_fdt_setprop_cells(fdt, nodename, "reg",
+    name = g_strdup_printf("/memory@%lx", (long)memmap[SPIKE_DRAM].base);
+    qemu_fdt_add_subnode(fdt, name);
+    qemu_fdt_setprop_cells(fdt, name, "reg",
         memmap[SPIKE_DRAM].base >> 32, memmap[SPIKE_DRAM].base,
         mem_size >> 32, mem_size);
-    qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory");
-    g_free(nodename);
+    qemu_fdt_setprop_string(fdt, name, "device_type", "memory");
+    g_free(name);
 
     qemu_fdt_add_subnode(fdt, "/cpus");
     qemu_fdt_setprop_cell(fdt, "/cpus", "timebase-frequency",
         SIFIVE_CLINT_TIMEBASE_FREQ);
     qemu_fdt_setprop_cell(fdt, "/cpus", "#size-cells", 0x0);
     qemu_fdt_setprop_cell(fdt, "/cpus", "#address-cells", 0x1);
+    qemu_fdt_add_subnode(fdt, "/cpus/cpu-map");
 
-    for (cpu = s->soc.num_harts - 1; cpu >= 0; cpu--) {
-        nodename = g_strdup_printf("/cpus/cpu@%d", cpu);
-        char *intc = g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
-        char *isa = riscv_isa_string(&s->soc.harts[cpu]);
-        qemu_fdt_add_subnode(fdt, nodename);
+    for (socket = (s->num_socs - 1); socket >= 0; socket--) {
+        clust_name = g_strdup_printf("/cpus/cpu-map/cluster0%d", socket);
+        qemu_fdt_add_subnode(fdt, clust_name);
+
+        clint_cells =  g_new0(uint32_t, s->soc[socket].num_harts * 4);
+
+        for (cpu = s->soc[socket].num_harts - 1; cpu >= 0; cpu--) {
+            cpu_phandle = phandle++;
+
+            cpu_name = g_strdup_printf("/cpus/cpu@%d",
+                s->soc[socket].hartid_base + cpu);
+            qemu_fdt_add_subnode(fdt, cpu_name);
 #if defined(TARGET_RISCV32)
-        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv32");
+            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv32");
 #else
-        qemu_fdt_setprop_string(fdt, nodename, "mmu-type", "riscv,sv48");
+            qemu_fdt_setprop_string(fdt, cpu_name, "mmu-type", "riscv,sv48");
 #endif
-        qemu_fdt_setprop_string(fdt, nodename, "riscv,isa", isa);
-        qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv");
-        qemu_fdt_setprop_string(fdt, nodename, "status", "okay");
-        qemu_fdt_setprop_cell(fdt, nodename, "reg", cpu);
-        qemu_fdt_setprop_string(fdt, nodename, "device_type", "cpu");
-        qemu_fdt_add_subnode(fdt, intc);
-        qemu_fdt_setprop_cell(fdt, intc, "phandle", 1);
-        qemu_fdt_setprop_string(fdt, intc, "compatible", "riscv,cpu-intc");
-        qemu_fdt_setprop(fdt, intc, "interrupt-controller", NULL, 0);
-        qemu_fdt_setprop_cell(fdt, intc, "#interrupt-cells", 1);
-        g_free(isa);
-        g_free(intc);
-        g_free(nodename);
-    }
+            name = riscv_isa_string(&s->soc[socket].harts[cpu]);
+            qemu_fdt_setprop_string(fdt, cpu_name, "riscv,isa", name);
+            g_free(name);
+            qemu_fdt_setprop_string(fdt, cpu_name, "compatible", "riscv");
+            qemu_fdt_setprop_string(fdt, cpu_name, "status", "okay");
+            qemu_fdt_setprop_cell(fdt, cpu_name, "reg",
+                s->soc[socket].hartid_base + cpu);
+            qemu_fdt_setprop_string(fdt, cpu_name, "device_type", "cpu");
+            qemu_fdt_setprop_cell(fdt, cpu_name, "phandle", cpu_phandle);
+
+            intc_name = g_strdup_printf("%s/interrupt-controller", cpu_name);
+            qemu_fdt_add_subnode(fdt, intc_name);
+            intc_phandle = phandle++;
+            qemu_fdt_setprop_cell(fdt, intc_name, "phandle", intc_phandle);
+            qemu_fdt_setprop_string(fdt, intc_name, "compatible",
+                "riscv,cpu-intc");
+            qemu_fdt_setprop(fdt, intc_name, "interrupt-controller", NULL, 0);
+            qemu_fdt_setprop_cell(fdt, intc_name, "#interrupt-cells", 1);
+
+            clint_cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
+            clint_cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
+            clint_cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
+            clint_cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
+
+            core_name = g_strdup_printf("%s/core%d", clust_name, cpu);
+            qemu_fdt_add_subnode(fdt, core_name);
+            qemu_fdt_setprop_cell(fdt, core_name, "cpu", cpu_phandle);
+
+            g_free(core_name);
+            g_free(intc_name);
+            g_free(cpu_name);
+        }
 
-    cells =  g_new0(uint32_t, s->soc.num_harts * 4);
-    for (cpu = 0; cpu < s->soc.num_harts; cpu++) {
-        nodename =
-            g_strdup_printf("/cpus/cpu@%d/interrupt-controller", cpu);
-        uint32_t intc_phandle = qemu_fdt_get_phandle(fdt, nodename);
-        cells[cpu * 4 + 0] = cpu_to_be32(intc_phandle);
-        cells[cpu * 4 + 1] = cpu_to_be32(IRQ_M_SOFT);
-        cells[cpu * 4 + 2] = cpu_to_be32(intc_phandle);
-        cells[cpu * 4 + 3] = cpu_to_be32(IRQ_M_TIMER);
-        g_free(nodename);
+        clint_addr = memmap[SPIKE_CLINT].base +
+            (memmap[SPIKE_CLINT].size * socket);
+        clint_name = g_strdup_printf("/soc/clint@%lx", clint_addr);
+        qemu_fdt_add_subnode(fdt, clint_name);
+        qemu_fdt_setprop_string(fdt, clint_name, "compatible", "riscv,clint0");
+        qemu_fdt_setprop_cells(fdt, clint_name, "reg",
+            0x0, clint_addr, 0x0, memmap[SPIKE_CLINT].size);
+        qemu_fdt_setprop(fdt, clint_name, "interrupts-extended",
+            clint_cells, s->soc[socket].num_harts * sizeof(uint32_t) * 4);
+
+        g_free(clint_name);
+        g_free(clint_cells);
+        g_free(clust_name);
     }
-    nodename = g_strdup_printf("/soc/clint@%lx",
-        (long)memmap[SPIKE_CLINT].base);
-    qemu_fdt_add_subnode(fdt, nodename);
-    qemu_fdt_setprop_string(fdt, nodename, "compatible", "riscv,clint0");
-    qemu_fdt_setprop_cells(fdt, nodename, "reg",
-        0x0, memmap[SPIKE_CLINT].base,
-        0x0, memmap[SPIKE_CLINT].size);
-    qemu_fdt_setprop(fdt, nodename, "interrupts-extended",
-        cells, s->soc.num_harts * sizeof(uint32_t) * 4);
-    g_free(cells);
-    g_free(nodename);
 
     if (cmdline) {
         qemu_fdt_add_subnode(fdt, "/chosen");
@@ -160,23 +179,51 @@  static void create_fdt(SpikeState *s, const struct MemmapEntry *memmap,
 static void spike_board_init(MachineState *machine)
 {
     const struct MemmapEntry *memmap = spike_memmap;
-
     SpikeState *s = g_new0(SpikeState, 1);
     MemoryRegion *system_memory = get_system_memory();
     MemoryRegion *main_mem = g_new(MemoryRegion, 1);
     MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
     int i;
+    char *soc_name;
     unsigned int smp_cpus = machine->smp.cpus;
-
-    /* Initialize SOC */
-    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
-                            TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
-    object_property_set_str(OBJECT(&s->soc), machine->cpu_type, "cpu-type",
-                            &error_abort);
-    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
-                            &error_abort);
-    object_property_set_bool(OBJECT(&s->soc), true, "realized",
-                            &error_abort);
+    unsigned int base_hartid, cpus_per_socket;
+
+    s->num_socs = machine->smp.sockets;
+
+    /* Ensure minumum required CPUs per socket */
+    if ((smp_cpus / s->num_socs) < SPIKE_CPUS_PER_SOCKET_MIN)
+        s->num_socs = 1;
+
+    /* Limit the number of sockets */
+    if (SPIKE_SOCKETS_MAX < s->num_socs)
+        s->num_socs = SPIKE_SOCKETS_MAX;
+
+    /* Initialize socket */
+    for (i = 0; i < s->num_socs; i++) {
+        base_hartid = i * (smp_cpus / s->num_socs);
+        if (i == (s->num_socs - 1))
+            cpus_per_socket = smp_cpus - base_hartid;
+        else
+            cpus_per_socket = smp_cpus / s->num_socs;
+        soc_name = g_strdup_printf("soc%d", i);
+        object_initialize_child(OBJECT(machine), soc_name, &s->soc[i],
+            sizeof(s->soc[i]), TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
+        g_free(soc_name);
+        object_property_set_str(OBJECT(&s->soc[i]),
+            machine->cpu_type, "cpu-type", &error_abort);
+        object_property_set_int(OBJECT(&s->soc[i]),
+            base_hartid, "hartid-base", &error_abort);
+        object_property_set_int(OBJECT(&s->soc[i]),
+            cpus_per_socket, "num-harts", &error_abort);
+        object_property_set_bool(OBJECT(&s->soc[i]),
+            true, "realized", &error_abort);
+
+        /* Core Local Interruptor (timer and IPI) for each socket */
+        sifive_clint_create(
+            memmap[SPIKE_CLINT].base + i * memmap[SPIKE_CLINT].size,
+            memmap[SPIKE_CLINT].size, base_hartid, cpus_per_socket,
+            SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE, false);
+    }
 
     /* register system main memory (actual RAM) */
     memory_region_init_ram(main_mem, NULL, "riscv.spike.ram",
@@ -249,12 +296,7 @@  static void spike_board_init(MachineState *machine)
                           &address_space_memory);
 
     /* initialize HTIF using symbols found in load_kernel */
-    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env, serial_hd(0));
-
-    /* Core Local Interruptor (timer and IPI) */
-    sifive_clint_create(memmap[SPIKE_CLINT].base, memmap[SPIKE_CLINT].size,
-        0, smp_cpus, SIFIVE_SIP_BASE, SIFIVE_TIMECMP_BASE, SIFIVE_TIME_BASE,
-        false);
+    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env, serial_hd(0));
 }
 
 static void spike_v1_10_0_board_init(MachineState *machine)
@@ -268,6 +310,8 @@  static void spike_v1_10_0_board_init(MachineState *machine)
     int i;
     unsigned int smp_cpus = machine->smp.cpus;
 
+    s->num_socs = 1;
+
     if (!qtest_enabled()) {
         info_report("The Spike v1.10.0 machine has been deprecated. "
                     "Please use the generic spike machine and specify the ISA "
@@ -275,13 +319,13 @@  static void spike_v1_10_0_board_init(MachineState *machine)
     }
 
     /* Initialize SOC */
-    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
+    object_initialize_child(OBJECT(machine), "soc", &s->soc[0], sizeof(s->soc[0]),
                             TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
-    object_property_set_str(OBJECT(&s->soc), SPIKE_V1_10_0_CPU, "cpu-type",
+    object_property_set_str(OBJECT(&s->soc[0]), SPIKE_V1_10_0_CPU, "cpu-type",
                             &error_abort);
-    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
+    object_property_set_int(OBJECT(&s->soc[0]), smp_cpus, "num-harts",
                             &error_abort);
-    object_property_set_bool(OBJECT(&s->soc), true, "realized",
+    object_property_set_bool(OBJECT(&s->soc[0]), true, "realized",
                             &error_abort);
 
     /* register system main memory (actual RAM) */
@@ -339,7 +383,7 @@  static void spike_v1_10_0_board_init(MachineState *machine)
                           &address_space_memory);
 
     /* initialize HTIF using symbols found in load_kernel */
-    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env, serial_hd(0));
+    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env, serial_hd(0));
 
     /* Core Local Interruptor (timer and IPI) */
     sifive_clint_create(memmap[SPIKE_CLINT].base, memmap[SPIKE_CLINT].size,
@@ -358,6 +402,8 @@  static void spike_v1_09_1_board_init(MachineState *machine)
     int i;
     unsigned int smp_cpus = machine->smp.cpus;
 
+    s->num_socs = 1;
+
     if (!qtest_enabled()) {
         info_report("The Spike v1.09.1 machine has been deprecated. "
                     "Please use the generic spike machine and specify the ISA "
@@ -365,13 +411,13 @@  static void spike_v1_09_1_board_init(MachineState *machine)
     }
 
     /* Initialize SOC */
-    object_initialize_child(OBJECT(machine), "soc", &s->soc, sizeof(s->soc),
+    object_initialize_child(OBJECT(machine), "soc", &s->soc[0], sizeof(s->soc[0]),
                             TYPE_RISCV_HART_ARRAY, &error_abort, NULL);
-    object_property_set_str(OBJECT(&s->soc), SPIKE_V1_09_1_CPU, "cpu-type",
+    object_property_set_str(OBJECT(&s->soc[0]), SPIKE_V1_09_1_CPU, "cpu-type",
                             &error_abort);
-    object_property_set_int(OBJECT(&s->soc), smp_cpus, "num-harts",
+    object_property_set_int(OBJECT(&s->soc[0]), smp_cpus, "num-harts",
                             &error_abort);
-    object_property_set_bool(OBJECT(&s->soc), true, "realized",
+    object_property_set_bool(OBJECT(&s->soc[0]), true, "realized",
                             &error_abort);
 
     /* register system main memory (actual RAM) */
@@ -425,7 +471,7 @@  static void spike_v1_09_1_board_init(MachineState *machine)
         "};\n";
 
     /* build config string with supplied memory size */
-    char *isa = riscv_isa_string(&s->soc.harts[0]);
+    char *isa = riscv_isa_string(&s->soc[0].harts[0]);
     char *config_string = g_strdup_printf(config_string_tmpl,
         (uint64_t)memmap[SPIKE_CLINT].base + SIFIVE_TIME_BASE,
         (uint64_t)memmap[SPIKE_DRAM].base,
@@ -448,7 +494,7 @@  static void spike_v1_09_1_board_init(MachineState *machine)
                           &address_space_memory);
 
     /* initialize HTIF using symbols found in load_kernel */
-    htif_mm_init(system_memory, mask_rom, &s->soc.harts[0].env, serial_hd(0));
+    htif_mm_init(system_memory, mask_rom, &s->soc[0].harts[0].env, serial_hd(0));
 
     /* Core Local Interruptor (timer and IPI) */
     sifive_clint_create(memmap[SPIKE_CLINT].base, memmap[SPIKE_CLINT].size,
@@ -476,7 +522,7 @@  static void spike_machine_init(MachineClass *mc)
 {
     mc->desc = "RISC-V Spike Board";
     mc->init = spike_board_init;
-    mc->max_cpus = 8;
+    mc->max_cpus = SPIKE_CPUS_MAX;
     mc->is_default = true;
     mc->default_cpu_type = SPIKE_V1_10_0_CPU;
 }
diff --git a/include/hw/riscv/spike.h b/include/hw/riscv/spike.h
index dc770421bc..04a9f593b5 100644
--- a/include/hw/riscv/spike.h
+++ b/include/hw/riscv/spike.h
@@ -22,12 +22,18 @@ 
 #include "hw/riscv/riscv_hart.h"
 #include "hw/sysbus.h"
 
+#define SPIKE_SOCKETS_MAX 4
+#define SPIKE_CPUS_PER_SOCKET_MIN 2
+#define SPIKE_CPUS_PER_SOCKET_MAX 4
+#define SPIKE_CPUS_MAX (SPIKE_SOCKETS_MAX * SPIKE_CPUS_PER_SOCKET_MAX)
+
 typedef struct {
     /*< private >*/
     SysBusDevice parent_obj;
 
     /*< public >*/
-    RISCVHartArrayState soc;
+    unsigned int num_socs;
+    RISCVHartArrayState soc[SPIKE_SOCKETS_MAX];
     void *fdt;
     int fdt_size;
 } SpikeState;