diff mbox series

[2/2] spapr_cpu_core: migrate VPA related state

Message ID 152932480918.500483.5347446234353746966.stgit@bahia.lan
State New
Headers show
Series [1/2] spapr_cpu_core: migrate per-CPU data | expand

Commit Message

Greg Kurz June 18, 2018, 12:26 p.m. UTC
QEMU implements the "Shared Processor LPAR" (SPLPAR) option, which allows
the hypervisor to time-slice a physical processor into multiple virtual
processors. The intent is to allow more guests to run, and to optimize
processor utilization.

The guest OS can cede idle VCPUs, so that their processing capacity may
be used by other VCPUs, with the H_CEDE hcall. The guest OS can also
optimize spinlocks, by conferring the time-slice of a spinning VCPU to the
spinlock holder if it's currently not running, with the H_CONFER hcall.

Both hcalls depend on a "Virtual Processor Area" (VPA) to be registered
by the guest OS, generally during early boot. Other per-VCPU areas can
be registered: the "SLB Shadow Buffer" which allows a more efficient
dispatching of VCPUs, and the "Dispatch Trace Log Buffer" (DTL) which
is used to compute time stolen by the hypervisor. Both DTL and SLB Shadow
areas depend on the VPA to be registered.

The VPA/SLB Shadow/DTL are state that QEMU should migrate, but this doesn't
happen, for no apparent reason other than it was just never coded. This
causes the features listed above to stop working after migration, and it
breaks the logic of the H_REGISTER_VPA hcall in the destination.

The VPA is set at the guest request, ie, we don't have to migrate
it before the guest has actually set it. This patch hence adds an
"spapr_cpu/vpa" subsection to the recently introduced per-CPU machine
data migration stream.

Since DTL and SLB Shadow are optional and both depend on VPA, they get
their own subsections "spapr_cpu/vpa/slb_shadow" and "spapr_cpu/vpa/dtl"
hanging from the "spapr_cpu/vpa" subsection.

Note that this won't break migration to older QEMUs. It is already handled
by only registering the vmstate handler for per-CPU data with newer machine
types.

Signed-off-by: Greg Kurz <groug@kaod.org>
---
 hw/ppc/spapr_cpu_core.c |   65 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)

Comments

David Gibson June 19, 2018, 12:30 a.m. UTC | #1
On Mon, Jun 18, 2018 at 02:26:49PM +0200, Greg Kurz wrote:
> QEMU implements the "Shared Processor LPAR" (SPLPAR) option, which allows
> the hypervisor to time-slice a physical processor into multiple virtual
> processors. The intent is to allow more guests to run, and to optimize
> processor utilization.
> 
> The guest OS can cede idle VCPUs, so that their processing capacity may
> be used by other VCPUs, with the H_CEDE hcall. The guest OS can also
> optimize spinlocks, by conferring the time-slice of a spinning VCPU to the
> spinlock holder if it's currently not running, with the H_CONFER hcall.
> 
> Both hcalls depend on a "Virtual Processor Area" (VPA) to be registered
> by the guest OS, generally during early boot. Other per-VCPU areas can
> be registered: the "SLB Shadow Buffer" which allows a more efficient
> dispatching of VCPUs, and the "Dispatch Trace Log Buffer" (DTL) which
> is used to compute time stolen by the hypervisor. Both DTL and SLB Shadow
> areas depend on the VPA to be registered.
> 
> The VPA/SLB Shadow/DTL are state that QEMU should migrate, but this doesn't
> happen, for no apparent reason other than it was just never coded. This
> causes the features listed above to stop working after migration, and it
> breaks the logic of the H_REGISTER_VPA hcall in the destination.
> 
> The VPA is set at the guest request, ie, we don't have to migrate
> it before the guest has actually set it. This patch hence adds an
> "spapr_cpu/vpa" subsection to the recently introduced per-CPU machine
> data migration stream.
> 
> Since DTL and SLB Shadow are optional and both depend on VPA, they get
> their own subsections "spapr_cpu/vpa/slb_shadow" and "spapr_cpu/vpa/dtl"
> hanging from the "spapr_cpu/vpa" subsection.
> 
> Note that this won't break migration to older QEMUs. It is already handled
> by only registering the vmstate handler for per-CPU data with newer machine
> types.
> 
> Signed-off-by: Greg Kurz <groug@kaod.org>

Applied to ppc-for-3.0, thanks.

> ---
>  hw/ppc/spapr_cpu_core.c |   65 +++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 65 insertions(+)
> 
> diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
> index 96d1dfad00e1..f7e7b739ae49 100644
> --- a/hw/ppc/spapr_cpu_core.c
> +++ b/hw/ppc/spapr_cpu_core.c
> @@ -129,6 +129,67 @@ static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp)
>      g_free(sc->threads);
>  }
>  
> +static bool slb_shadow_needed(void *opaque)
> +{
> +    sPAPRCPUState *spapr_cpu = opaque;
> +
> +    return spapr_cpu->slb_shadow_addr != 0;
> +}
> +
> +static const VMStateDescription vmstate_spapr_cpu_slb_shadow = {
> +    .name = "spapr_cpu/vpa/slb_shadow",
> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .needed = slb_shadow_needed,
> +    .fields = (VMStateField[]) {
> +        VMSTATE_UINT64(slb_shadow_addr, sPAPRCPUState),
> +        VMSTATE_UINT64(slb_shadow_size, sPAPRCPUState),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
> +static bool dtl_needed(void *opaque)
> +{
> +    sPAPRCPUState *spapr_cpu = opaque;
> +
> +    return spapr_cpu->dtl_addr != 0;
> +}
> +
> +static const VMStateDescription vmstate_spapr_cpu_dtl = {
> +    .name = "spapr_cpu/vpa/dtl",
> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .needed = dtl_needed,
> +    .fields = (VMStateField[]) {
> +        VMSTATE_UINT64(dtl_addr, sPAPRCPUState),
> +        VMSTATE_UINT64(dtl_size, sPAPRCPUState),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
> +static bool vpa_needed(void *opaque)
> +{
> +    sPAPRCPUState *spapr_cpu = opaque;
> +
> +    return spapr_cpu->vpa_addr != 0;
> +}
> +
> +static const VMStateDescription vmstate_spapr_cpu_vpa = {
> +    .name = "spapr_cpu/vpa",
> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .needed = vpa_needed,
> +    .fields = (VMStateField[]) {
> +        VMSTATE_UINT64(vpa_addr, sPAPRCPUState),
> +        VMSTATE_END_OF_LIST()
> +    },
> +    .subsections = (const VMStateDescription * []) {
> +        &vmstate_spapr_cpu_slb_shadow,
> +        &vmstate_spapr_cpu_dtl,
> +        NULL
> +    }
> +};
> +
>  static const VMStateDescription vmstate_spapr_cpu_state = {
>      .name = "spapr_cpu",
>      .version_id = 1,
> @@ -136,6 +197,10 @@ static const VMStateDescription vmstate_spapr_cpu_state = {
>      .fields = (VMStateField[]) {
>          VMSTATE_END_OF_LIST()
>      },
> +    .subsections = (const VMStateDescription * []) {
> +        &vmstate_spapr_cpu_vpa,
> +        NULL
> +    }
>  };
>  
>  static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,
>
diff mbox series

Patch

diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 96d1dfad00e1..f7e7b739ae49 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -129,6 +129,67 @@  static void spapr_cpu_core_unrealize(DeviceState *dev, Error **errp)
     g_free(sc->threads);
 }
 
+static bool slb_shadow_needed(void *opaque)
+{
+    sPAPRCPUState *spapr_cpu = opaque;
+
+    return spapr_cpu->slb_shadow_addr != 0;
+}
+
+static const VMStateDescription vmstate_spapr_cpu_slb_shadow = {
+    .name = "spapr_cpu/vpa/slb_shadow",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = slb_shadow_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(slb_shadow_addr, sPAPRCPUState),
+        VMSTATE_UINT64(slb_shadow_size, sPAPRCPUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool dtl_needed(void *opaque)
+{
+    sPAPRCPUState *spapr_cpu = opaque;
+
+    return spapr_cpu->dtl_addr != 0;
+}
+
+static const VMStateDescription vmstate_spapr_cpu_dtl = {
+    .name = "spapr_cpu/vpa/dtl",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = dtl_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(dtl_addr, sPAPRCPUState),
+        VMSTATE_UINT64(dtl_size, sPAPRCPUState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool vpa_needed(void *opaque)
+{
+    sPAPRCPUState *spapr_cpu = opaque;
+
+    return spapr_cpu->vpa_addr != 0;
+}
+
+static const VMStateDescription vmstate_spapr_cpu_vpa = {
+    .name = "spapr_cpu/vpa",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = vpa_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(vpa_addr, sPAPRCPUState),
+        VMSTATE_END_OF_LIST()
+    },
+    .subsections = (const VMStateDescription * []) {
+        &vmstate_spapr_cpu_slb_shadow,
+        &vmstate_spapr_cpu_dtl,
+        NULL
+    }
+};
+
 static const VMStateDescription vmstate_spapr_cpu_state = {
     .name = "spapr_cpu",
     .version_id = 1,
@@ -136,6 +197,10 @@  static const VMStateDescription vmstate_spapr_cpu_state = {
     .fields = (VMStateField[]) {
         VMSTATE_END_OF_LIST()
     },
+    .subsections = (const VMStateDescription * []) {
+        &vmstate_spapr_cpu_vpa,
+        NULL
+    }
 };
 
 static void spapr_realize_vcpu(PowerPCCPU *cpu, sPAPRMachineState *spapr,