
[v2,9/9] spapr: implement nested-hv capability for the virtual hypervisor

Message ID 20220216102545.1808018-10-npiggin@gmail.com
State New
Series ppc: nested KVM HV for spapr virtual hypervisor

Commit Message

Nicholas Piggin Feb. 16, 2022, 10:25 a.m. UTC
This implements the Nested KVM HV hcall API for spapr under TCG.

The L2 is switched in when the H_ENTER_NESTED hcall is made, and the
L1 is switched back in, returning from the hcall, when an HV exception
is sent to the vhyp. Register state is copied in and out according to
the nested KVM HV hcall API specification.

The hdecr timer is started when the L2 is switched in, and it provides
the HDEC / 0x980 return to L1.

The MMU re-uses the bare metal radix 2-level page table walker by
using the get_pate method to point the MMU to the nested partition
table entry. MMU faults due to partition scope errors raise HV
exceptions and accordingly are routed back to the L1.

The MMU does not tag translations for the L1 (direct) vs L2 (nested)
guests, so the TLB is flushed on any L1<->L2 transition (hcall entry
and exit).
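
For orientation, the hand-off back to the L1 relies on the new vhyp
deliver_hv_excp hook (wired to spapr_exit_nested below). A simplified
sketch of that dispatch (paraphrased, not the exact code from the
earlier patches in this series):

    /* Sketch: HV interrupt delivery when a vhyp handles HV exceptions */
    static void deliver_hv_excp_sketch(PowerPCCPU *cpu, int excp,
                                       target_ulong new_msr)
    {
        if (cpu->vhyp && (new_msr & MSR_HVB)) {
            PPCVirtualHypervisorClass *vhc =
                PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);

            if (vhc->deliver_hv_excp) {
                /* Exit the L2: restore L1 state, return from H_ENTER_NESTED */
                vhc->deliver_hv_excp(cpu, excp);
                return;
            }
        }
        /* otherwise the interrupt is delivered to the guest as usual */
    }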

Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 hw/ppc/spapr.c                  |  37 +++-
 hw/ppc/spapr_caps.c             |  14 +-
 hw/ppc/spapr_hcall.c            | 333 ++++++++++++++++++++++++++++++++
 include/hw/ppc/spapr.h          |  74 ++++++-
 include/hw/ppc/spapr_cpu_core.h |   5 +
 5 files changed, 452 insertions(+), 11 deletions(-)

Comments

Cédric Le Goater Feb. 16, 2022, 10:52 a.m. UTC | #1
On 2/16/22 11:25, Nicholas Piggin wrote:
> This implements the Nested KVM HV hcall API for spapr under TCG.
> 
> The L2 is switched in when the H_ENTER_NESTED hcall is made, and the
> L1 is switched back in, returning from the hcall, when an HV exception
> is sent to the vhyp. Register state is copied in and out according to
> the nested KVM HV hcall API specification.
> 
> The hdecr timer is started when the L2 is switched in, and it provides
> the HDEC / 0x980 return to L1.
> 
> The MMU re-uses the bare metal radix 2-level page table walker by
> using the get_pate method to point the MMU to the nested partition
> table entry. MMU faults due to partition scope errors raise HV
> exceptions and accordingly are routed back to the L1.
> 
> The MMU does not tag translations for the L1 (direct) vs L2 (nested)
> guests, so the TLB is flushed on any L1<->L2 transition (hcall entry
> and exit).
>
> Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>

Reviewed-by: Cédric Le Goater <clg@kaod.org>

Some last comments below,


> ---
>   hw/ppc/spapr.c                  |  37 +++-
>   hw/ppc/spapr_caps.c             |  14 +-
>   hw/ppc/spapr_hcall.c            | 333 ++++++++++++++++++++++++++++++++
>   include/hw/ppc/spapr.h          |  74 ++++++-
>   include/hw/ppc/spapr_cpu_core.h |   5 +
>   5 files changed, 452 insertions(+), 11 deletions(-)
> 
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 6fab70767f..87e68da77f 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -1270,6 +1270,8 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
>       /* The TCG path should also be holding the BQL at this point */
>       g_assert(qemu_mutex_iothread_locked());
>   
> +    g_assert(!vhyp_cpu_in_nested(cpu));
> +
>       if (msr_pr) {
>           hcall_dprintf("Hypercall made with MSR[PR]=1\n");
>           env->gpr[3] = H_PRIVILEGE;
> @@ -1313,12 +1315,34 @@ static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
>                              target_ulong lpid, ppc_v3_pate_t *entry)
>   {
>       SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
> +    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
>   
> -    assert(lpid == 0);
> +    if (!spapr_cpu->in_nested) {
> +        assert(lpid == 0);
>   
> -    /* Copy PATE1:GR into PATE0:HR */
> -    entry->dw0 = spapr->patb_entry & PATE0_HR;
> -    entry->dw1 = spapr->patb_entry;
> +        /* Copy PATE1:GR into PATE0:HR */
> +        entry->dw0 = spapr->patb_entry & PATE0_HR;
> +        entry->dw1 = spapr->patb_entry;
> +
> +    } else {
> +        uint64_t patb, pats;
> +
> +        assert(lpid != 0);
> +
> +        patb = spapr->nested_ptcr & PTCR_PATB;
> +        pats = spapr->nested_ptcr & PTCR_PATS;
> +
> +        /* Calculate number of entries */
> +        pats = 1ull << (pats + 12 - 4);
> +        if (pats <= lpid) {
> +            return false;
> +        }
> +
> +        /* Grab entry */
> +        patb += 16 * lpid;
> +        entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
> +        entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
> +    }
>   
>       return true;
>   }
> @@ -4472,7 +4496,9 @@ PowerPCCPU *spapr_find_cpu(int vcpu_id)
>   
>   static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
>   {
> -    return false;
> +    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
> +
> +    return spapr_cpu->in_nested;
>   }
>   
>   static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
> @@ -4584,6 +4610,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
>       nc->nmi_monitor_handler = spapr_nmi;
>       smc->phb_placement = spapr_phb_placement;
>       vhc->cpu_in_nested = spapr_cpu_in_nested;
> +    vhc->deliver_hv_excp = spapr_exit_nested;
>       vhc->hypercall = emulate_spapr_hypercall;
>       vhc->hpt_mask = spapr_hpt_mask;
>       vhc->map_hptes = spapr_map_hptes;
> diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
> index e2412aaa57..6d74345930 100644
> --- a/hw/ppc/spapr_caps.c
> +++ b/hw/ppc/spapr_caps.c
> @@ -444,19 +444,23 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
>   {
>       ERRP_GUARD();
>       PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
> +    CPUPPCState *env = &cpu->env;
>   
>       if (!val) {
>           /* capability disabled by default */
>           return;
>       }
>   
> -    if (tcg_enabled()) {
> -        error_setg(errp, "No Nested KVM-HV support in TCG");
> +    if (!(env->insns_flags2 & PPC2_ISA300)) {
> +        error_setg(errp, "Nested-HV only supported on POWER9 and later");
>           error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n");
> -    } else if (kvm_enabled()) {
> +        return;
> +    }
> +
> +    if (kvm_enabled()) {
>           if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
>                                 spapr->max_compat_pvr)) {
> -            error_setg(errp, "Nested KVM-HV only supported on POWER9");
> +            error_setg(errp, "Nested-HV only supported on POWER9 and later");
>               error_append_hint(errp,
>                                 "Try appending -machine max-cpu-compat=power9\n");
>               return;
> @@ -464,7 +468,7 @@ static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
>   
>           if (!kvmppc_has_cap_nested_kvm_hv()) {
>               error_setg(errp,
> -                       "KVM implementation does not support Nested KVM-HV");
> +                       "KVM implementation does not support Nested-HV");
>               error_append_hint(errp,
>                                 "Try appending -machine cap-nested-hv=off\n");
>           } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) {
> diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
> index fa60505315..e183892287 100644
> --- a/hw/ppc/spapr_hcall.c
> +++ b/hw/ppc/spapr_hcall.c
> @@ -9,6 +9,7 @@
>   #include "qemu/error-report.h"
>   #include "exec/exec-all.h"
>   #include "helper_regs.h"
> +#include "hw/ppc/ppc.h"
>   #include "hw/ppc/spapr.h"
>   #include "hw/ppc/spapr_cpu_core.h"
>   #include "mmu-hash64.h"
> @@ -1499,6 +1500,333 @@ static void hypercall_register_softmmu(void)
>   }
>   #endif
>   
> +/* TCG only */
> +#define PRTS_MASK      0x1f
> +
> +static target_ulong h_set_ptbl(PowerPCCPU *cpu,
> +                               SpaprMachineState *spapr,
> +                               target_ulong opcode,
> +                               target_ulong *args)
> +{
> +    target_ulong ptcr = args[0];
> +
> +    if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
> +        return H_FUNCTION;
> +    }
> +
> +    if ((ptcr & PRTS_MASK) + 12 - 4 > 12) {
> +        return H_PARAMETER;
> +    }
> +
> +    spapr->nested_ptcr = ptcr; /* Save new partition table */
> +
> +    return H_SUCCESS;
> +}
> +
> +static target_ulong h_tlb_invalidate(PowerPCCPU *cpu,
> +                                     SpaprMachineState *spapr,
> +                                     target_ulong opcode,
> +                                     target_ulong *args)
> +{
> +    /*
> +     * The spapr virtual hypervisor nested HV implementation retains no L2
> +     * translation state except for TLB. And the TLB is always invalidated
> +     * across L1<->L2 transitions, so nothing is required here.
> +     */
> +
> +    return H_SUCCESS;
> +}
> +
> +static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
> +                                        SpaprMachineState *spapr,
> +                                        target_ulong opcode,
> +                                        target_ulong *args)
> +{
> +    /*
> +     * This HCALL is not required, L1 KVM will take a slow path and walk the
> +     * page tables manually to do the data copy.
> +     */
> +    return H_FUNCTION;
> +}
> +
> +/*
> + * When this handler returns, the environment is switched to the L2 guest
> + * and TCG begins running that. spapr_exit_nested() performs the switch from
> + * L2 back to L1 and returns from the H_ENTER_NESTED hcall.
> + */
> +static target_ulong h_enter_nested(PowerPCCPU *cpu,
> +                                   SpaprMachineState *spapr,
> +                                   target_ulong opcode,
> +                                   target_ulong *args)
> +{
> +    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
> +    CPUState *cs = CPU(cpu);
> +    CPUPPCState *env = &cpu->env;
> +    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
> +    target_ulong hv_ptr = args[0];
> +    target_ulong regs_ptr = args[1];
> +    target_ulong hdec, now = cpu_ppc_load_tbl(env);
> +    target_ulong lpcr, lpcr_mask;
> +    struct kvmppc_hv_guest_state *hvstate;
> +    struct kvmppc_hv_guest_state hv_state;
> +    struct kvmppc_pt_regs *regs;
> +    hwaddr len;
> +    uint64_t cr;
> +    int i;
> +
> +    if (spapr->nested_ptcr == 0) {
> +        return H_NOT_AVAILABLE;
> +    }
> +
> +    len = sizeof(*hvstate);
> +    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
> +                                MEMTXATTRS_UNSPECIFIED);
> +    if (len != sizeof(*hvstate)) {
> +        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
> +        return H_PARAMETER;
> +    }
> +
> +    memcpy(&hv_state, hvstate, len);
> +
> +    address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);
> +
> +    /*
> +     * We accept versions 1 and 2. Version 2 fields are unused because TCG
> +     * does not implement DAWR*.
> +     */
> +    if (hv_state.version > HV_GUEST_STATE_VERSION) {
> +        return H_PARAMETER;
> +    }
> +
> +    spapr_cpu->nested_host_state = g_try_malloc(sizeof(CPUPPCState));
> +    if (!spapr_cpu->nested_host_state) {
> +        return H_NO_MEM;
> +    }
> +
> +    memcpy(spapr_cpu->nested_host_state, env, sizeof(CPUPPCState));
> +
> +    len = sizeof(*regs);
> +    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
> +                                MEMTXATTRS_UNSPECIFIED);
> +    if (!regs || len != sizeof(*regs)) {
> +        address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
> +        g_free(spapr_cpu->nested_host_state);
> +        return H_P2;
> +    }
> +
> +    len = sizeof(env->gpr);
> +    assert(len == sizeof(regs->gpr));
> +    memcpy(env->gpr, regs->gpr, len);
> +
> +    env->lr = regs->link;
> +    env->ctr = regs->ctr;
> +    cpu_write_xer(env, regs->xer);
> +
> +    cr = regs->ccr;
> +    for (i = 7; i >= 0; i--) {
> +        env->crf[i] = cr & 15;
> +        cr >>= 4;
> +    }
> +
> +    env->msr = regs->msr;
> +    env->nip = regs->nip;
> +
> +    address_space_unmap(CPU(cpu)->as, regs, len, len, false);
> +
> +    env->cfar = hv_state.cfar;
> +
> +    assert(env->spr[SPR_LPIDR] == 0);
> +    env->spr[SPR_LPIDR] = hv_state.lpid;
> +
> +    lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
> +    lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
> +    lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
> +    lpcr &= ~LPCR_LPES0;
> +    env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask;
> +
> +    env->spr[SPR_PCR] = hv_state.pcr;
> +    /* hv_state.amor is not used */
> +    env->spr[SPR_DPDES] = hv_state.dpdes;
> +    env->spr[SPR_HFSCR] = hv_state.hfscr;
> +    hdec = hv_state.hdec_expiry - now;
> +    spapr_cpu->nested_tb_offset = hv_state.tb_offset;
> +    /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/
> +    env->spr[SPR_SRR0] = hv_state.srr0;
> +    env->spr[SPR_SRR1] = hv_state.srr1;
> +    env->spr[SPR_SPRG0] = hv_state.sprg[0];
> +    env->spr[SPR_SPRG1] = hv_state.sprg[1];
> +    env->spr[SPR_SPRG2] = hv_state.sprg[2];
> +    env->spr[SPR_SPRG3] = hv_state.sprg[3];
> +    env->spr[SPR_BOOKS_PID] = hv_state.pidr;
> +    env->spr[SPR_PPR] = hv_state.ppr;
> +
> +    cpu_ppc_hdecr_init(env);
> +    cpu_ppc_store_hdecr(env, hdec);
> +
> +    /*
> +     * The hv_state.vcpu_token is not needed. It is used by the KVM
> +     * implementation to remember which L2 vCPU last ran on which physical
> +     * CPU so as to invalidate process scope translations if it is moved
> +     * between physical CPUs. For now TLBs are always flushed on L1<->L2
> +     * transitions so this is not a problem.
> +     *
> +     * Could validate that the same vcpu_token does not attempt to run on
> +     * different L1 vCPUs at the same time, but that would be a L1 KVM bug
> +     * and it's not obviously worth a new data structure to do it.
> +     */
> +
> +    env->tb_env->tb_offset += spapr_cpu->nested_tb_offset;
> +    spapr_cpu->in_nested = true;
> +
> +    hreg_compute_hflags(env);
> +    tlb_flush(cs);
> +    env->reserve_addr = -1; /* Reset the reservation */
> +
> +    /*
> +     * The spapr hcall helper sets env->gpr[3] to the return value, but at
> +     * this point the L1 is not returning from the hcall but rather we
> +     * start running the L2, so r3 must not be clobbered, so return env->gpr[3]
> +     * to leave it unchanged.
> +     */
> +    return env->gpr[3];
> +}
> +
> +void spapr_exit_nested(PowerPCCPU *cpu, int excp)
> +{
> +    CPUState *cs = CPU(cpu);
> +    CPUPPCState *env = &cpu->env;
> +    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
> +    target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
> +    target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
> +    target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
> +    struct kvmppc_hv_guest_state *hvstate;
> +    struct kvmppc_pt_regs *regs;
> +    hwaddr len;
> +    uint64_t cr;
> +    int i;
> +
> +    assert(spapr_cpu->in_nested);
> +
> +    cpu_ppc_hdecr_exit(env);
> +
> +    len = sizeof(*hvstate);
> +    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
> +                                MEMTXATTRS_UNSPECIFIED);
> +    if (len != sizeof(*hvstate)) {
> +        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
> +        r3_return = H_PARAMETER;
> +        goto out_restore_l1;
> +    }
> +
> +    hvstate->cfar = env->cfar;
> +    hvstate->lpcr = env->spr[SPR_LPCR];
> +    hvstate->pcr = env->spr[SPR_PCR];
> +    hvstate->dpdes = env->spr[SPR_DPDES];
> +    hvstate->hfscr = env->spr[SPR_HFSCR];
> +
> +    if (excp == POWERPC_EXCP_HDSI) {
> +        hvstate->hdar = env->spr[SPR_HDAR];
> +        hvstate->hdsisr = env->spr[SPR_HDSISR];
> +        hvstate->asdr = env->spr[SPR_ASDR];
> +    } else if (excp == POWERPC_EXCP_HISI) {
> +        hvstate->asdr = env->spr[SPR_ASDR];
> +    }
> +
> +    /* HEIR should be implemented for HV mode and saved here. */
> +    hvstate->srr0 = env->spr[SPR_SRR0];
> +    hvstate->srr1 = env->spr[SPR_SRR1];
> +    hvstate->sprg[0] = env->spr[SPR_SPRG0];
> +    hvstate->sprg[1] = env->spr[SPR_SPRG1];
> +    hvstate->sprg[2] = env->spr[SPR_SPRG2];
> +    hvstate->sprg[3] = env->spr[SPR_SPRG3];
> +    hvstate->pidr = env->spr[SPR_BOOKS_PID];
> +    hvstate->ppr = env->spr[SPR_PPR];
> +
> +    /* Is it okay to specify write length larger than actual data written? */
> +    address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);
> +
> +    len = sizeof(*regs);
> +    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
> +                                MEMTXATTRS_UNSPECIFIED);
> +    if (!regs || len != sizeof(*regs)) {
> +        address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
> +        r3_return = H_P2;
> +        goto out_restore_l1;
> +    }
> +
> +    len = sizeof(env->gpr);
> +    assert(len == sizeof(regs->gpr));
> +    memcpy(regs->gpr, env->gpr, len);
> +
> +    regs->link = env->lr;
> +    regs->ctr = env->ctr;
> +    regs->xer = cpu_read_xer(env);
> +
> +    cr = 0;
> +    for (i = 0; i < 8; i++) {
> +        cr |= (env->crf[i] & 15) << (4 * (7 - i));
> +    }
> +    regs->ccr = cr;
> +
> +    if (excp == POWERPC_EXCP_MCHECK ||
> +        excp == POWERPC_EXCP_RESET ||
> +        excp == POWERPC_EXCP_SYSCALL) {
> +        regs->nip = env->spr[SPR_SRR0];
> +        regs->msr = env->spr[SPR_SRR1] & env->msr_mask;
> +    } else {
> +        regs->nip = env->spr[SPR_HSRR0];
> +        regs->msr = env->spr[SPR_HSRR1] & env->msr_mask;
> +    }
> +
> +    /* Is it okay to specify write length larger than actual data written? */
> +    address_space_unmap(CPU(cpu)->as, regs, len, len, true);
> +
> +out_restore_l1:
> +    memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr));
> +    env->lr = spapr_cpu->nested_host_state->lr;
> +    env->ctr = spapr_cpu->nested_host_state->ctr;
> +    memcpy(env->crf, spapr_cpu->nested_host_state->crf, sizeof(env->crf));
> +    env->cfar = spapr_cpu->nested_host_state->cfar;
> +    env->xer = spapr_cpu->nested_host_state->xer;
> +    env->so = spapr_cpu->nested_host_state->so;
> +    env->ov = spapr_cpu->nested_host_state->ov;
> +    env->ov32 = spapr_cpu->nested_host_state->ov32;
> +    env->ca32 = spapr_cpu->nested_host_state->ca32;
> +    env->msr = spapr_cpu->nested_host_state->msr;
> +    env->nip = spapr_cpu->nested_host_state->nip;
> +
> +    assert(env->spr[SPR_LPIDR] != 0);
> +    env->spr[SPR_LPCR] = spapr_cpu->nested_host_state->spr[SPR_LPCR];
> +    env->spr[SPR_LPIDR] = spapr_cpu->nested_host_state->spr[SPR_LPIDR];
> +    env->spr[SPR_PCR] = spapr_cpu->nested_host_state->spr[SPR_PCR];
> +    env->spr[SPR_DPDES] = 0;
> +    env->spr[SPR_HFSCR] = spapr_cpu->nested_host_state->spr[SPR_HFSCR];
> +    env->spr[SPR_SRR0] = spapr_cpu->nested_host_state->spr[SPR_SRR0];
> +    env->spr[SPR_SRR1] = spapr_cpu->nested_host_state->spr[SPR_SRR1];
> +    env->spr[SPR_SPRG0] = spapr_cpu->nested_host_state->spr[SPR_SPRG0];
> +    env->spr[SPR_SPRG1] = spapr_cpu->nested_host_state->spr[SPR_SPRG1];
> +    env->spr[SPR_SPRG2] = spapr_cpu->nested_host_state->spr[SPR_SPRG2];
> +    env->spr[SPR_SPRG3] = spapr_cpu->nested_host_state->spr[SPR_SPRG3];
> +    env->spr[SPR_BOOKS_PID] = spapr_cpu->nested_host_state->spr[SPR_BOOKS_PID];
> +    env->spr[SPR_PPR] = spapr_cpu->nested_host_state->spr[SPR_PPR];
> +
> +    /*
> +     * Return the interrupt vector address from H_ENTER_NESTED to the L1
> +     * (or error code).
> +     */
> +    env->gpr[3] = r3_return;
> +
> +    env->tb_env->tb_offset -= spapr_cpu->nested_tb_offset;
> +    spapr_cpu->in_nested = false;
> +
> +    hreg_compute_hflags(env);
> +    tlb_flush(cs);
> +    env->reserve_addr = -1; /* Reset the reservation */
> +
> +    g_free(spapr_cpu->nested_host_state);
> +    spapr_cpu->nested_host_state = NULL;
> +}
> +
>   static void hypercall_register_types(void)
>   {
>       hypercall_register_softmmu();
> @@ -1554,6 +1882,11 @@ static void hypercall_register_types(void)
>       spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
>   
>       spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
> +
> +    spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
> +    spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested);
> +    spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate);
> +    spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest);
>   }
>   
>   type_init(hypercall_register_types)
> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
> index edbf3eeed0..852fe61b36 100644
> --- a/include/hw/ppc/spapr.h
> +++ b/include/hw/ppc/spapr.h
> @@ -199,6 +199,9 @@ struct SpaprMachineState {
>       bool has_graphics;
>       uint32_t vsmt;       /* Virtual SMT mode (KVM's "core stride") */
>   
> +    /* Nested HV support (TCG only) */
> +    uint64_t nested_ptcr;
> +

this is new state to migrate.
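Something like this vmstate subsection would cover it (a sketch only,
names illustrative; it would still need wiring into vmstate_spapr):

    static bool nested_needed(void *opaque)
    {
        SpaprMachineState *spapr = opaque;

        return spapr->nested_ptcr != 0;
    }

    static const VMStateDescription vmstate_spapr_nested = {
        .name = "spapr/nested",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = nested_needed,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(nested_ptcr, SpaprMachineState),
            VMSTATE_END_OF_LIST()
        },
    };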

>       Notifier epow_notifier;
>       QTAILQ_HEAD(, SpaprEventLogEntry) pending_events;
>       bool use_hotplug_event_source;
> @@ -579,7 +582,14 @@ struct SpaprMachineState {
>   #define KVMPPC_H_UPDATE_DT      (KVMPPC_HCALL_BASE + 0x3)
>   /* 0x4 was used for KVMPPC_H_UPDATE_PHANDLE in SLOF */
>   #define KVMPPC_H_VOF_CLIENT     (KVMPPC_HCALL_BASE + 0x5)
> -#define KVMPPC_HCALL_MAX        KVMPPC_H_VOF_CLIENT
> +
> +/* Platform-specific hcalls used for nested HV KVM */
> +#define KVMPPC_H_SET_PARTITION_TABLE   (KVMPPC_HCALL_BASE + 0x800)
> +#define KVMPPC_H_ENTER_NESTED          (KVMPPC_HCALL_BASE + 0x804)
> +#define KVMPPC_H_TLB_INVALIDATE        (KVMPPC_HCALL_BASE + 0x808)
> +#define KVMPPC_H_COPY_TOFROM_GUEST     (KVMPPC_HCALL_BASE + 0x80C)
> +
> +#define KVMPPC_HCALL_MAX        KVMPPC_H_COPY_TOFROM_GUEST
>   
>   /*
>    * The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating
> @@ -589,6 +599,65 @@ struct SpaprMachineState {
>   #define SVM_H_TPM_COMM              0xEF10
>   #define SVM_HCALL_MAX               SVM_H_TPM_COMM
>   
> +/*
> + * Register state for entering a nested guest with H_ENTER_NESTED.
> + * New member must be added at the end.
> + */
> +struct kvmppc_hv_guest_state {
> +    uint64_t version;        /* version of this structure layout, must be first */
> +    uint32_t lpid;
> +    uint32_t vcpu_token;
> +    /* These registers are hypervisor privileged (at least for writing) */
> +    uint64_t lpcr;
> +    uint64_t pcr;
> +    uint64_t amor;
> +    uint64_t dpdes;
> +    uint64_t hfscr;
> +    int64_t tb_offset;
> +    uint64_t dawr0;
> +    uint64_t dawrx0;
> +    uint64_t ciabr;
> +    uint64_t hdec_expiry;
> +    uint64_t purr;
> +    uint64_t spurr;
> +    uint64_t ic;
> +    uint64_t vtb;
> +    uint64_t hdar;
> +    uint64_t hdsisr;
> +    uint64_t heir;
> +    uint64_t asdr;
> +    /* These are OS privileged but need to be set late in guest entry */
> +    uint64_t srr0;
> +    uint64_t srr1;
> +    uint64_t sprg[4];
> +    uint64_t pidr;
> +    uint64_t cfar;
> +    uint64_t ppr;
> +    /* Version 1 ends here */
> +    uint64_t dawr1;
> +    uint64_t dawrx1;
> +    /* Version 2 ends here */
> +};
> +
> +/* Latest version of hv_guest_state structure */
> +#define HV_GUEST_STATE_VERSION  2
> +
> +/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
> +struct kvmppc_pt_regs {
> +    uint64_t gpr[32];
> +    uint64_t nip;
> +    uint64_t msr;
> +    uint64_t orig_gpr3;    /* Used for restarting system calls */
> +    uint64_t ctr;
> +    uint64_t link;
> +    uint64_t xer;
> +    uint64_t ccr;
> +    uint64_t softe;        /* Soft enabled/disabled */
> +    uint64_t trap;         /* Reason for being here */
> +    uint64_t dar;          /* Fault registers */
> +    uint64_t dsisr;        /* on 4xx/Book-E used for ESR */
> +    uint64_t result;       /* Result of a system call */
> +};

I think we need to start moving all the spapr hcall definitions under
spapr_hcall.h. It can come later.
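Concretely, that split could start as small as this (a sketch only; the
file name and contents are illustrative, and KVMPPC_HCALL_BASE would
still come from spapr.h for now):

    /* include/hw/ppc/spapr_hcall.h */
    #ifndef HW_SPAPR_HCALL_H
    #define HW_SPAPR_HCALL_H

    /* Platform-specific hcalls used for nested HV KVM */
    #define KVMPPC_H_SET_PARTITION_TABLE   (KVMPPC_HCALL_BASE + 0x800)
    #define KVMPPC_H_ENTER_NESTED          (KVMPPC_HCALL_BASE + 0x804)
    #define KVMPPC_H_TLB_INVALIDATE        (KVMPPC_HCALL_BASE + 0x808)
    #define KVMPPC_H_COPY_TOFROM_GUEST     (KVMPPC_HCALL_BASE + 0x80C)

    #endif /* HW_SPAPR_HCALL_H */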

>   typedef struct SpaprDeviceTreeUpdateHeader {
>       uint32_t version_id;
> @@ -606,6 +675,9 @@ typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
>   void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
>   target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
>                                target_ulong *args);
> +
> +void spapr_exit_nested(PowerPCCPU *cpu, int excp);
> +
>   target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr,
>                                            target_ulong shift);
>   target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr,
> diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
> index dab3dfc76c..b560514560 100644
> --- a/include/hw/ppc/spapr_cpu_core.h
> +++ b/include/hw/ppc/spapr_cpu_core.h
> @@ -48,6 +48,11 @@ typedef struct SpaprCpuState {
>       bool prod; /* not migrated, only used to improve dispatch latencies */
>       struct ICPState *icp;
>       struct XiveTCTX *tctx;
> +
> +    /* Fields for nested-HV support */
> +    bool in_nested; /* true while the L2 is executing */
> +    CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
> +    int64_t nested_tb_offset; /* L1->L2 TB offset */

This needs a new vmstate.
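The easy fields would look like the sketch below (names illustrative);
the hard part is nested_host_state, which holds a full CPUPPCState:

    static bool nested_cpu_needed(void *opaque)
    {
        SpaprCpuState *spapr_cpu = opaque;

        return spapr_cpu->in_nested;
    }

    static const VMStateDescription vmstate_spapr_cpu_nested = {
        .name = "spapr_cpu/nested",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = nested_cpu_needed,
        .fields = (VMStateField[]) {
            VMSTATE_BOOL(in_nested, SpaprCpuState),
            VMSTATE_INT64(nested_tb_offset, SpaprCpuState),
            /* nested_host_state (a whole CPUPPCState) is the hard part */
            VMSTATE_END_OF_LIST()
        },
    };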

Thanks,

C.

>   } SpaprCpuState;
>   
>   static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)
Nicholas Piggin Feb. 16, 2022, 11:38 a.m. UTC | #2
Excerpts from Cédric Le Goater's message of February 16, 2022 8:52 pm:
> On 2/16/22 11:25, Nicholas Piggin wrote:
>> This implements the Nested KVM HV hcall API for spapr under TCG.
>> 
>> The L2 is switched in when the H_ENTER_NESTED hcall is made, and the
>> L1 is switched back in, returning from the hcall, when an HV exception
>> is sent to the vhyp. Register state is copied in and out according to
>> the nested KVM HV hcall API specification.
>> 
>> The hdecr timer is started when the L2 is switched in, and it provides
>> the HDEC / 0x980 return to L1.
>> 
>> The MMU re-uses the bare metal radix 2-level page table walker by
>> using the get_pate method to point the MMU to the nested partition
>> table entry. MMU faults due to partition scope errors raise HV
>> exceptions and accordingly are routed back to the L1.
>> 
>> The MMU does not tag translations for the L1 (direct) vs L2 (nested)
>> guests, so the TLB is flushed on any L1<->L2 transition (hcall entry
>> and exit).
>>
>> Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> 
> Reviewed-by: Cédric Le Goater <clg@kaod.org>
> 
> Some last comments below,

[...]

>> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
>> index edbf3eeed0..852fe61b36 100644
>> --- a/include/hw/ppc/spapr.h
>> +++ b/include/hw/ppc/spapr.h
>> @@ -199,6 +199,9 @@ struct SpaprMachineState {
>>       bool has_graphics;
>>       uint32_t vsmt;       /* Virtual SMT mode (KVM's "core stride") */
>>   
>> +    /* Nested HV support (TCG only) */
>> +    uint64_t nested_ptcr;
>> +
> 
> this is new state to migrate.
> 

[...]

>> +/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
>> +struct kvmppc_pt_regs {
>> +    uint64_t gpr[32];
>> +    uint64_t nip;
>> +    uint64_t msr;
>> +    uint64_t orig_gpr3;    /* Used for restarting system calls */
>> +    uint64_t ctr;
>> +    uint64_t link;
>> +    uint64_t xer;
>> +    uint64_t ccr;
>> +    uint64_t softe;        /* Soft enabled/disabled */
>> +    uint64_t trap;         /* Reason for being here */
>> +    uint64_t dar;          /* Fault registers */
>> +    uint64_t dsisr;        /* on 4xx/Book-E used for ESR */
>> +    uint64_t result;       /* Result of a system call */
>> +};
> 
> I think we need to start moving all the spapr hcall definitions under
> spapr_hcall.h. It can come later.

Sure.

[...]

>> diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
>> index dab3dfc76c..b560514560 100644
>> --- a/include/hw/ppc/spapr_cpu_core.h
>> +++ b/include/hw/ppc/spapr_cpu_core.h
>> @@ -48,6 +48,11 @@ typedef struct SpaprCpuState {
>>       bool prod; /* not migrated, only used to improve dispatch latencies */
>>       struct ICPState *icp;
>>       struct XiveTCTX *tctx;
>> +
>> +    /* Fields for nested-HV support */
>> +    bool in_nested; /* true while the L2 is executing */
>> +    CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
>> +    int64_t nested_tb_offset; /* L1->L2 TB offset */
> 
> This needs a new vmstate.

How about, instead of the vmstate (we would need all the L1 state in
nested_host_state as well), we just add a migration blocker in the
L2 entry path? We could limit the max hdecr to, say, 1 second to
ensure it unblocks before long.

I know migration blockers are not preferred but in this case it gives
us some iterations to debug and optimise first, which might change
the data to migrate.

Thanks,
Nick
Nicholas Piggin Feb. 16, 2022, 12:30 p.m. UTC | #3
Excerpts from Nicholas Piggin's message of February 16, 2022 9:38 pm:
> Excerpts from Cédric Le Goater's message of February 16, 2022 8:52 pm:
>> On 2/16/22 11:25, Nicholas Piggin wrote:
>>> This implements the Nested KVM HV hcall API for spapr under TCG.
>>> 
>>> The L2 is switched in when the H_ENTER_NESTED hcall is made, and the
>>> L1 is switched back in, returning from the hcall, when an HV exception
>>> is sent to the vhyp. Register state is copied in and out according to
>>> the nested KVM HV hcall API specification.
>>> 
>>> The hdecr timer is started when the L2 is switched in, and it provides
>>> the HDEC / 0x980 return to L1.
>>> 
>>> The MMU re-uses the bare metal radix 2-level page table walker by
>>> using the get_pate method to point the MMU to the nested partition
>>> table entry. MMU faults due to partition scope errors raise HV
>>> exceptions and accordingly are routed back to the L1.
>>> 
>>> The MMU does not tag translations for the L1 (direct) vs L2 (nested)
>>> guests, so the TLB is flushed on any L1<->L2 transition (hcall entry
>>> and exit).
>>>
>>> Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
>>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>> 
>> Reviewed-by: Cédric Le Goater <clg@kaod.org>
>> 
>> Some last comments below,
> 
> [...]
> 
>>> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
>>> index edbf3eeed0..852fe61b36 100644
>>> --- a/include/hw/ppc/spapr.h
>>> +++ b/include/hw/ppc/spapr.h
>>> @@ -199,6 +199,9 @@ struct SpaprMachineState {
>>>       bool has_graphics;
>>>       uint32_t vsmt;       /* Virtual SMT mode (KVM's "core stride") */
>>>   
>>> +    /* Nested HV support (TCG only) */
>>> +    uint64_t nested_ptcr;
>>> +
>> 
>> this is new state to migrate.
>> 
> 
> [...]
> 
>>> +/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
>>> +struct kvmppc_pt_regs {
>>> +    uint64_t gpr[32];
>>> +    uint64_t nip;
>>> +    uint64_t msr;
>>> +    uint64_t orig_gpr3;    /* Used for restarting system calls */
>>> +    uint64_t ctr;
>>> +    uint64_t link;
>>> +    uint64_t xer;
>>> +    uint64_t ccr;
>>> +    uint64_t softe;        /* Soft enabled/disabled */
>>> +    uint64_t trap;         /* Reason for being here */
>>> +    uint64_t dar;          /* Fault registers */
>>> +    uint64_t dsisr;        /* on 4xx/Book-E used for ESR */
>>> +    uint64_t result;       /* Result of a system call */
>>> +};
>> 
>> I think we need to start moving all the spapr hcall definitions under
>> spapr_hcall.h. It can come later.
> 
> Sure.
> 
> [...]
> 
>>> diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
>>> index dab3dfc76c..b560514560 100644
>>> --- a/include/hw/ppc/spapr_cpu_core.h
>>> +++ b/include/hw/ppc/spapr_cpu_core.h
>>> @@ -48,6 +48,11 @@ typedef struct SpaprCpuState {
>>>       bool prod; /* not migrated, only used to improve dispatch latencies */
>>>       struct ICPState *icp;
>>>       struct XiveTCTX *tctx;
>>> +
>>> +    /* Fields for nested-HV support */
>>> +    bool in_nested; /* true while the L2 is executing */
>>> +    CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
>>> +    int64_t nested_tb_offset; /* L1->L2 TB offset */
>> 
>> This needs a new vmstate.
> 
> How about, instead of the vmstate (we would need all the L1 state in
> nested_host_state as well), we just add a migration blocker in the
> L2 entry path? We could limit the max hdecr to, say, 1 second to
> ensure it unblocks before long.
> 
> I know migration blockers are not preferred but in this case it gives
> us some iterations to debug and optimise first, which might change
> the data to migrate.

This should be roughly the incremental patch to do this.

Thanks,
Nick

--
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 87e68da77f..14e41b7d31 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -2882,6 +2882,13 @@ static void spapr_machine_init(MachineState *machine)
             "may run and log hardware error on the destination");
     }
 
+    if (spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV) == SPAPR_CAP_ON) {
+        /* Create the error string for live migration blocker */
+        error_setg(&spapr->nested_hv_migration_blocker,
+            "A nested-hv L2 guest is running. Migration is blocked until it "
+            "exits to the L1.");
+    }
+
     if (mc->nvdimm_supported) {
         spapr_create_nvdimm_dr_connectors(spapr);
     }
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index e183892287..89295bc723 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -21,6 +21,7 @@
 #include "hw/ppc/spapr_numa.h"
 #include "mmu-book3s-v3.h"
 #include "hw/mem/memory-device.h"
+#include "migration/blocker.h"
 
 bool is_ram_address(SpaprMachineState *spapr, hwaddr addr)
 {
@@ -1565,7 +1566,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
     target_ulong hv_ptr = args[0];
     target_ulong regs_ptr = args[1];
-    target_ulong hdec, now = cpu_ppc_load_tbl(env);
+    target_ulong hdec, now;
     target_ulong lpcr, lpcr_mask;
     struct kvmppc_hv_guest_state *hvstate;
     struct kvmppc_hv_guest_state hv_state;
@@ -1578,11 +1579,16 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
         return H_NOT_AVAILABLE;
     }
 
+    if (migrate_add_blocker(spapr->nested_hv_migration_blocker, NULL)) {
+        return 0; /* This returns nothing to the L1, essentially an EAGAIN */
+    }
+
     len = sizeof(*hvstate);
     hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
                                 MEMTXATTRS_UNSPECIFIED);
     if (len != sizeof(*hvstate)) {
         address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
+        migrate_del_blocker(spapr->nested_hv_migration_blocker);
         return H_PARAMETER;
     }
 
@@ -1590,16 +1596,36 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
 
     address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);
 
+    spapr_cpu->nested_tb_offset = hv_state.tb_offset;
+    spapr_cpu->nested_hdec_expiry = hv_state.hdec_expiry;
+
+    now = cpu_ppc_load_tbl(env);
+    if (now >= hv_state.hdec_expiry) {
+        migrate_del_blocker(spapr->nested_hv_migration_blocker);
+        return env->excp_vectors[POWERPC_EXCP_HDECR];
+    }
+
+    hdec = hv_state.hdec_expiry - now;
+    if (hdec > env->tb_env->tb_freq) {
+        /*
+         * Limit hdecr to 1 second to prevent the L1 blocking migration for
+         * too long with a large hdecr value.
+         */
+        hdec = env->tb_env->tb_freq;
+    }
+
     /*
      * We accept versions 1 and 2. Version 2 fields are unused because TCG
      * does not implement DAWR*.
      */
     if (hv_state.version > HV_GUEST_STATE_VERSION) {
+        migrate_del_blocker(spapr->nested_hv_migration_blocker);
         return H_PARAMETER;
     }
 
     spapr_cpu->nested_host_state = g_try_malloc(sizeof(CPUPPCState));
     if (!spapr_cpu->nested_host_state) {
+        migrate_del_blocker(spapr->nested_hv_migration_blocker);
         return H_NO_MEM;
     }
 
@@ -1611,6 +1637,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
     if (!regs || len != sizeof(*regs)) {
         address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
         g_free(spapr_cpu->nested_host_state);
+        migrate_del_blocker(spapr->nested_hv_migration_blocker);
         return H_P2;
     }
 
@@ -1648,8 +1675,6 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
     /* hv_state.amor is not used */
     env->spr[SPR_DPDES] = hv_state.dpdes;
     env->spr[SPR_HFSCR] = hv_state.hfscr;
-    hdec = hv_state.hdec_expiry - now;
-    spapr_cpu->nested_tb_offset = hv_state.tb_offset;
     /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/
     env->spr[SPR_SRR0] = hv_state.srr0;
     env->spr[SPR_SRR1] = hv_state.srr1;
@@ -1693,6 +1718,7 @@ static target_ulong h_enter_nested(PowerPCCPU *cpu,
 
 void spapr_exit_nested(PowerPCCPU *cpu, int excp)
 {
+    SpaprMachineState *spapr = SPAPR_MACHINE(qdev_get_machine());
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
     SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
@@ -1781,6 +1807,19 @@ void spapr_exit_nested(PowerPCCPU *cpu, int excp)
     /* Is it okay to specify write length larger than actual data written? */
     address_space_unmap(CPU(cpu)->as, regs, len, len, true);
 
+    /*
+     * hdecr is capped at entry, so we may exit here with a HDECR exception
+     * without having exceeded the guest's limit. Clear the HDECR interrupt
+     * return in this case.
+     */
+    if (excp == POWERPC_EXCP_HDECR) {
+        target_ulong now;
+        now = cpu_ppc_load_tbl(env) - spapr_cpu->nested_tb_offset;
+        if (now < spapr_cpu->nested_hdec_expiry) {
+            r3_return = 0;
+        }
+    }
+
 out_restore_l1:
     memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr));
     env->lr = spapr_cpu->nested_host_state->lr;
@@ -1825,6 +1864,8 @@ out_restore_l1:
 
     g_free(spapr_cpu->nested_host_state);
     spapr_cpu->nested_host_state = NULL;
+
+    migrate_del_blocker(spapr->nested_hv_migration_blocker);
 }
 
 static void hypercall_register_types(void)
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index 852fe61b36..70b330ef9a 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -266,6 +266,7 @@ struct SpaprMachineState {
     uint32_t FORM2_assoc_array[NUMA_NODES_MAX_NUM][FORM2_NUMA_ASSOC_SIZE];
 
     Error *fwnmi_migration_blocker;
+    Error *nested_hv_migration_blocker;
 };
 
 #define H_SUCCESS         0
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index b560514560..09da577ca1 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -53,6 +53,7 @@ typedef struct SpaprCpuState {
     bool in_nested; /* true while the L2 is executing */
     CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
     int64_t nested_tb_offset; /* L1->L2 TB offset */
+    uint64_t nested_hdec_expiry; /* L1 hdec expiry in absolute L1 TB */
 } SpaprCpuState;
 
 static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)
Cédric Le Goater Feb. 17, 2022, 5:11 p.m. UTC | #4
On 2/16/22 13:30, Nicholas Piggin wrote:
> Excerpts from Nicholas Piggin's message of February 16, 2022 9:38 pm:
>> Excerpts from Cédric Le Goater's message of February 16, 2022 8:52 pm:
>>> On 2/16/22 11:25, Nicholas Piggin wrote:
>>>> This implements the Nested KVM HV hcall API for spapr under TCG.
>>>>
>>>> The L2 is switched in when the H_ENTER_NESTED hcall is made, and the
>>>> L1 is switched back in, returning from the hcall, when an HV exception
>>>> is sent to the vhyp. Register state is copied in and out according to
>>>> the nested KVM HV hcall API specification.
>>>>
>>>> The hdecr timer is started when the L2 is switched in, and it provides
>>>> the HDEC / 0x980 return to L1.
>>>>
>>>> The MMU re-uses the bare metal radix 2-level page table walker by
>>>> using the get_pate method to point the MMU to the nested partition
>>>> table entry. MMU faults due to partition scope errors raise HV
>>>> exceptions and accordingly are routed back to the L1.
>>>>
>>>> The MMU does not tag translations for the L1 (direct) vs L2 (nested)
>>>> guests, so the TLB is flushed on any L1<->L2 transition (hcall entry
>>>> and exit).
>>>>
>>>> Reviewed-by: Fabiano Rosas <farosas@linux.ibm.com>
>>>> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
>>>
>>> Reviewed-by: Cédric Le Goater <clg@kaod.org>
>>>
>>> Some last comments below,
>>
>> [...]
>>
>>>> diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
>>>> index edbf3eeed0..852fe61b36 100644
>>>> --- a/include/hw/ppc/spapr.h
>>>> +++ b/include/hw/ppc/spapr.h
>>>> @@ -199,6 +199,9 @@ struct SpaprMachineState {
>>>>        bool has_graphics;
>>>>        uint32_t vsmt;       /* Virtual SMT mode (KVM's "core stride") */
>>>>    
>>>> +    /* Nested HV support (TCG only) */
>>>> +    uint64_t nested_ptcr;
>>>> +
>>>
>>> this is new state to migrate.
>>>
>>
>> [...]
>>
>>>> +/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
>>>> +struct kvmppc_pt_regs {
>>>> +    uint64_t gpr[32];
>>>> +    uint64_t nip;
>>>> +    uint64_t msr;
>>>> +    uint64_t orig_gpr3;    /* Used for restarting system calls */
>>>> +    uint64_t ctr;
>>>> +    uint64_t link;
>>>> +    uint64_t xer;
>>>> +    uint64_t ccr;
>>>> +    uint64_t softe;        /* Soft enabled/disabled */
>>>> +    uint64_t trap;         /* Reason for being here */
>>>> +    uint64_t dar;          /* Fault registers */
>>>> +    uint64_t dsisr;        /* on 4xx/Book-E used for ESR */
>>>> +    uint64_t result;       /* Result of a system call */
>>>> +};
>>>
>>> I think we need to start moving all the spapr hcall definitions under
>>> spapr_hcall.h. It can come later.
>>
>> Sure.
>>
>> [...]
>>
>>>> diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
>>>> index dab3dfc76c..b560514560 100644
>>>> --- a/include/hw/ppc/spapr_cpu_core.h
>>>> +++ b/include/hw/ppc/spapr_cpu_core.h
>>>> @@ -48,6 +48,11 @@ typedef struct SpaprCpuState {
>>>>        bool prod; /* not migrated, only used to improve dispatch latencies */
>>>>        struct ICPState *icp;
>>>>        struct XiveTCTX *tctx;
>>>> +
>>>> +    /* Fields for nested-HV support */
>>>> +    bool in_nested; /* true while the L2 is executing */
>>>> +    CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
>>>> +    int64_t nested_tb_offset; /* L1->L2 TB offset */
>>>
>>> This needs a new vmstate.
>>
>> How about, instead of the vmstate (we would need all the L1 state in
>> nested_host_state as well), we just add a migration blocker in the
>> L2 entry path? We could limit the max hdecr to, say, 1 second to
>> ensure it unblocks before long.
>>
>> I know migration blockers are not preferred but in this case it gives
>> us some iterations to debug and optimise first, which might change
>> the data to migrate.
> 
> This should be roughly the incremental patch to do this.

I think we can merge without it.

Adding support shouldn't be too complex, and TCG migration of an L1
running an L2 is not the most important feature today. It would be
better to have something clean (a blocker if incomplete, or decent
support) before 7.0 is released, though.

However, there is an issue with TCG migration, and it has been there
for a while:

https://lore.kernel.org/qemu-devel/fb2e56cc-15d1-65ee-9d9c-64223483ed01@kaod.org/

Thanks,

C.

Patch

diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 6fab70767f..87e68da77f 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1270,6 +1270,8 @@  static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
     /* The TCG path should also be holding the BQL at this point */
     g_assert(qemu_mutex_iothread_locked());
 
+    g_assert(!vhyp_cpu_in_nested(cpu));
+
     if (msr_pr) {
         hcall_dprintf("Hypercall made with MSR[PR]=1\n");
         env->gpr[3] = H_PRIVILEGE;
@@ -1313,12 +1315,34 @@  static bool spapr_get_pate(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu,
                            target_ulong lpid, ppc_v3_pate_t *entry)
 {
     SpaprMachineState *spapr = SPAPR_MACHINE(vhyp);
+    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
 
-    assert(lpid == 0);
+    if (!spapr_cpu->in_nested) {
+        assert(lpid == 0);
 
-    /* Copy PATE1:GR into PATE0:HR */
-    entry->dw0 = spapr->patb_entry & PATE0_HR;
-    entry->dw1 = spapr->patb_entry;
+        /* Copy PATE1:GR into PATE0:HR */
+        entry->dw0 = spapr->patb_entry & PATE0_HR;
+        entry->dw1 = spapr->patb_entry;
+
+    } else {
+        uint64_t patb, pats;
+
+        assert(lpid != 0);
+
+        patb = spapr->nested_ptcr & PTCR_PATB;
+        pats = spapr->nested_ptcr & PTCR_PATS;
+
+        /* Calculate number of entries */
+        pats = 1ull << (pats + 12 - 4);
+        if (pats <= lpid) {
+            return false;
+        }
+
+        /* Grab entry */
+        patb += 16 * lpid;
+        entry->dw0 = ldq_phys(CPU(cpu)->as, patb);
+        entry->dw1 = ldq_phys(CPU(cpu)->as, patb + 8);
+    }
 
     return true;
 }
@@ -4472,7 +4496,9 @@  PowerPCCPU *spapr_find_cpu(int vcpu_id)
 
 static bool spapr_cpu_in_nested(PowerPCCPU *cpu)
 {
-    return false;
+    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+
+    return spapr_cpu->in_nested;
 }
 
 static void spapr_cpu_exec_enter(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu)
@@ -4584,6 +4610,7 @@  static void spapr_machine_class_init(ObjectClass *oc, void *data)
     nc->nmi_monitor_handler = spapr_nmi;
     smc->phb_placement = spapr_phb_placement;
     vhc->cpu_in_nested = spapr_cpu_in_nested;
+    vhc->deliver_hv_excp = spapr_exit_nested;
     vhc->hypercall = emulate_spapr_hypercall;
     vhc->hpt_mask = spapr_hpt_mask;
     vhc->map_hptes = spapr_map_hptes;
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index e2412aaa57..6d74345930 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -444,19 +444,23 @@  static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
 {
     ERRP_GUARD();
     PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
+    CPUPPCState *env = &cpu->env;
 
     if (!val) {
         /* capability disabled by default */
         return;
     }
 
-    if (tcg_enabled()) {
-        error_setg(errp, "No Nested KVM-HV support in TCG");
+    if (!(env->insns_flags2 & PPC2_ISA300)) {
+        error_setg(errp, "Nested-HV only supported on POWER9 and later");
         error_append_hint(errp, "Try appending -machine cap-nested-hv=off\n");
-    } else if (kvm_enabled()) {
+        return;
+    }
+
+    if (kvm_enabled()) {
         if (!ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, 0,
                               spapr->max_compat_pvr)) {
-            error_setg(errp, "Nested KVM-HV only supported on POWER9");
+            error_setg(errp, "Nested-HV only supported on POWER9 and later");
             error_append_hint(errp,
                               "Try appending -machine max-cpu-compat=power9\n");
             return;
@@ -464,7 +468,7 @@  static void cap_nested_kvm_hv_apply(SpaprMachineState *spapr,
 
         if (!kvmppc_has_cap_nested_kvm_hv()) {
             error_setg(errp,
-                       "KVM implementation does not support Nested KVM-HV");
+                       "KVM implementation does not support Nested-HV");
             error_append_hint(errp,
                               "Try appending -machine cap-nested-hv=off\n");
         } else if (kvmppc_set_cap_nested_kvm_hv(val) < 0) {
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index fa60505315..e183892287 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -9,6 +9,7 @@ 
 #include "qemu/error-report.h"
 #include "exec/exec-all.h"
 #include "helper_regs.h"
+#include "hw/ppc/ppc.h"
 #include "hw/ppc/spapr.h"
 #include "hw/ppc/spapr_cpu_core.h"
 #include "mmu-hash64.h"
@@ -1499,6 +1500,333 @@  static void hypercall_register_softmmu(void)
 }
 #endif
 
+/* TCG only */
+#define PRTS_MASK      0x1f
+
+static target_ulong h_set_ptbl(PowerPCCPU *cpu,
+                               SpaprMachineState *spapr,
+                               target_ulong opcode,
+                               target_ulong *args)
+{
+    target_ulong ptcr = args[0];
+
+    if (!spapr_get_cap(spapr, SPAPR_CAP_NESTED_KVM_HV)) {
+        return H_FUNCTION;
+    }
+
+    if ((ptcr & PRTS_MASK) + 12 - 4 > 12) {
+        return H_PARAMETER;
+    }
+
+    spapr->nested_ptcr = ptcr; /* Save new partition table */
+
+    return H_SUCCESS;
+}
+
+static target_ulong h_tlb_invalidate(PowerPCCPU *cpu,
+                                     SpaprMachineState *spapr,
+                                     target_ulong opcode,
+                                     target_ulong *args)
+{
+    /*
+     * The spapr virtual hypervisor nested HV implementation retains no L2
+     * translation state except for TLB. And the TLB is always invalidated
+     * across L1<->L2 transitions, so nothing is required here.
+     */
+
+    return H_SUCCESS;
+}
+
+static target_ulong h_copy_tofrom_guest(PowerPCCPU *cpu,
+                                        SpaprMachineState *spapr,
+                                        target_ulong opcode,
+                                        target_ulong *args)
+{
+    /*
+     * This HCALL is not required, L1 KVM will take a slow path and walk the
+     * page tables manually to do the data copy.
+     */
+    return H_FUNCTION;
+}
+
+/*
+ * When this handler returns, the environment is switched to the L2 guest
+ * and TCG begins running that. spapr_exit_nested() performs the switch from
+ * L2 back to L1 and returns from the H_ENTER_NESTED hcall.
+ */
+static target_ulong h_enter_nested(PowerPCCPU *cpu,
+                                   SpaprMachineState *spapr,
+                                   target_ulong opcode,
+                                   target_ulong *args)
+{
+    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+    target_ulong hv_ptr = args[0];
+    target_ulong regs_ptr = args[1];
+    target_ulong hdec, now = cpu_ppc_load_tbl(env);
+    target_ulong lpcr, lpcr_mask;
+    struct kvmppc_hv_guest_state *hvstate;
+    struct kvmppc_hv_guest_state hv_state;
+    struct kvmppc_pt_regs *regs;
+    hwaddr len;
+    uint64_t cr;
+    int i;
+
+    if (spapr->nested_ptcr == 0) {
+        return H_NOT_AVAILABLE;
+    }
+
+    len = sizeof(*hvstate);
+    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, false,
+                                MEMTXATTRS_UNSPECIFIED);
+    if (len != sizeof(*hvstate)) {
+        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, false);
+        return H_PARAMETER;
+    }
+
+    memcpy(&hv_state, hvstate, len);
+
+    address_space_unmap(CPU(cpu)->as, hvstate, len, len, false);
+
+    /*
+     * We accept versions 1 and 2. Version 2 fields are unused because TCG
+     * does not implement DAWR*.
+     */
+    if (hv_state.version > HV_GUEST_STATE_VERSION) {
+        return H_PARAMETER;
+    }
+
+    spapr_cpu->nested_host_state = g_try_malloc(sizeof(CPUPPCState));
+    if (!spapr_cpu->nested_host_state) {
+        return H_NO_MEM;
+    }
+
+    memcpy(spapr_cpu->nested_host_state, env, sizeof(CPUPPCState));
+
+    len = sizeof(*regs);
+    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, false,
+                                MEMTXATTRS_UNSPECIFIED);
+    if (!regs || len != sizeof(*regs)) {
+        address_space_unmap(CPU(cpu)->as, regs, len, 0, false);
+        g_free(spapr_cpu->nested_host_state);
+        return H_P2;
+    }
+
+    len = sizeof(env->gpr);
+    assert(len == sizeof(regs->gpr));
+    memcpy(env->gpr, regs->gpr, len);
+
+    env->lr = regs->link;
+    env->ctr = regs->ctr;
+    cpu_write_xer(env, regs->xer);
+
+    cr = regs->ccr;
+    for (i = 7; i >= 0; i--) {
+        env->crf[i] = cr & 15;
+        cr >>= 4;
+    }
+
+    env->msr = regs->msr;
+    env->nip = regs->nip;
+
+    address_space_unmap(CPU(cpu)->as, regs, len, len, false);
+
+    env->cfar = hv_state.cfar;
+
+    assert(env->spr[SPR_LPIDR] == 0);
+    env->spr[SPR_LPIDR] = hv_state.lpid;
+
+    lpcr_mask = LPCR_DPFD | LPCR_ILE | LPCR_AIL | LPCR_LD | LPCR_MER;
+    lpcr = (env->spr[SPR_LPCR] & ~lpcr_mask) | (hv_state.lpcr & lpcr_mask);
+    lpcr |= LPCR_HR | LPCR_UPRT | LPCR_GTSE | LPCR_HVICE | LPCR_HDICE;
+    lpcr &= ~LPCR_LPES0;
+    env->spr[SPR_LPCR] = lpcr & pcc->lpcr_mask;
+
+    env->spr[SPR_PCR] = hv_state.pcr;
+    /* hv_state.amor is not used */
+    env->spr[SPR_DPDES] = hv_state.dpdes;
+    env->spr[SPR_HFSCR] = hv_state.hfscr;
+    hdec = hv_state.hdec_expiry - now;
+    spapr_cpu->nested_tb_offset = hv_state.tb_offset;
+    /* TCG does not implement DAWR*, CIABR, PURR, SPURR, IC, VTB, HEIR SPRs*/
+    env->spr[SPR_SRR0] = hv_state.srr0;
+    env->spr[SPR_SRR1] = hv_state.srr1;
+    env->spr[SPR_SPRG0] = hv_state.sprg[0];
+    env->spr[SPR_SPRG1] = hv_state.sprg[1];
+    env->spr[SPR_SPRG2] = hv_state.sprg[2];
+    env->spr[SPR_SPRG3] = hv_state.sprg[3];
+    env->spr[SPR_BOOKS_PID] = hv_state.pidr;
+    env->spr[SPR_PPR] = hv_state.ppr;
+
+    cpu_ppc_hdecr_init(env);
+    cpu_ppc_store_hdecr(env, hdec);
+
+    /*
+     * The hv_state.vcpu_token is not needed. It is used by the KVM
+     * implementation to remember which L2 vCPU last ran on which physical
+     * CPU so as to invalidate process scope translations if it is moved
+     * between physical CPUs. For now TLBs are always flushed on L1<->L2
+     * transitions so this is not a problem.
+     *
+     * Could validate that the same vcpu_token does not attempt to run on
+     * different L1 vCPUs at the same time, but that would be a L1 KVM bug
+     * and it's not obviously worth a new data structure to do it.
+     */
+
+    env->tb_env->tb_offset += spapr_cpu->nested_tb_offset;
+    spapr_cpu->in_nested = true;
+
+    hreg_compute_hflags(env);
+    tlb_flush(cs);
+    env->reserve_addr = -1; /* Reset the reservation */
+
+    /*
+     * The spapr hcall helper sets env->gpr[3] to the return value, but at
+     * this point the L1 is not returning from the hcall but rather we
+     * start running the L2, so r3 must not be clobbered, so return env->gpr[3]
+     * to leave it unchanged.
+     */
+    return env->gpr[3];
+}
+
+void spapr_exit_nested(PowerPCCPU *cpu, int excp)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
+    target_ulong r3_return = env->excp_vectors[excp]; /* hcall return value */
+    target_ulong hv_ptr = spapr_cpu->nested_host_state->gpr[4];
+    target_ulong regs_ptr = spapr_cpu->nested_host_state->gpr[5];
+    struct kvmppc_hv_guest_state *hvstate;
+    struct kvmppc_pt_regs *regs;
+    hwaddr len;
+    uint64_t cr;
+    int i;
+
+    assert(spapr_cpu->in_nested);
+
+    cpu_ppc_hdecr_exit(env);
+
+    len = sizeof(*hvstate);
+    hvstate = address_space_map(CPU(cpu)->as, hv_ptr, &len, true,
+                                MEMTXATTRS_UNSPECIFIED);
+    if (len != sizeof(*hvstate)) {
+        address_space_unmap(CPU(cpu)->as, hvstate, len, 0, true);
+        r3_return = H_PARAMETER;
+        goto out_restore_l1;
+    }
+
+    hvstate->cfar = env->cfar;
+    hvstate->lpcr = env->spr[SPR_LPCR];
+    hvstate->pcr = env->spr[SPR_PCR];
+    hvstate->dpdes = env->spr[SPR_DPDES];
+    hvstate->hfscr = env->spr[SPR_HFSCR];
+
+    if (excp == POWERPC_EXCP_HDSI) {
+        hvstate->hdar = env->spr[SPR_HDAR];
+        hvstate->hdsisr = env->spr[SPR_HDSISR];
+        hvstate->asdr = env->spr[SPR_ASDR];
+    } else if (excp == POWERPC_EXCP_HISI) {
+        hvstate->asdr = env->spr[SPR_ASDR];
+    }
+
+    /* HEIR should be implemented for HV mode and saved here. */
+    hvstate->srr0 = env->spr[SPR_SRR0];
+    hvstate->srr1 = env->spr[SPR_SRR1];
+    hvstate->sprg[0] = env->spr[SPR_SPRG0];
+    hvstate->sprg[1] = env->spr[SPR_SPRG1];
+    hvstate->sprg[2] = env->spr[SPR_SPRG2];
+    hvstate->sprg[3] = env->spr[SPR_SPRG3];
+    hvstate->pidr = env->spr[SPR_BOOKS_PID];
+    hvstate->ppr = env->spr[SPR_PPR];
+
+    /* Is it okay to specify write length larger than actual data written? */
+    address_space_unmap(CPU(cpu)->as, hvstate, len, len, true);
+
+    len = sizeof(*regs);
+    regs = address_space_map(CPU(cpu)->as, regs_ptr, &len, true,
+                                MEMTXATTRS_UNSPECIFIED);
+    if (!regs || len != sizeof(*regs)) {
+        address_space_unmap(CPU(cpu)->as, regs, len, 0, true);
+        r3_return = H_P2;
+        goto out_restore_l1;
+    }
+
+    len = sizeof(env->gpr);
+    assert(len == sizeof(regs->gpr));
+    memcpy(regs->gpr, env->gpr, len);
+
+    regs->link = env->lr;
+    regs->ctr = env->ctr;
+    regs->xer = cpu_read_xer(env);
+
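+    /* Pack the eight 4-bit CR fields into the architected 32-bit image. */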
+    cr = 0;
+    for (i = 0; i < 8; i++) {
+        cr |= (env->crf[i] & 15) << (4 * (7 - i));
+    }
+    regs->ccr = cr;
+
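+    /*
+     * Machine check, system reset and syscall report the interrupted
+     * state in SRR0/1; every other exit taken here is an HV interrupt
+     * and uses HSRR0/1.
+     */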
+    if (excp == POWERPC_EXCP_MCHECK ||
+        excp == POWERPC_EXCP_RESET ||
+        excp == POWERPC_EXCP_SYSCALL) {
+        regs->nip = env->spr[SPR_SRR0];
+        regs->msr = env->spr[SPR_SRR1] & env->msr_mask;
+    } else {
+        regs->nip = env->spr[SPR_HSRR0];
+        regs->msr = env->spr[SPR_HSRR1] & env->msr_mask;
+    }
+
+    /* Is it okay to specify write length larger than actual data written? */
+    address_space_unmap(CPU(cpu)->as, regs, len, len, true);
+
+out_restore_l1:
+    memcpy(env->gpr, spapr_cpu->nested_host_state->gpr, sizeof(env->gpr));
+    env->lr = spapr_cpu->nested_host_state->lr;
+    env->ctr = spapr_cpu->nested_host_state->ctr;
+    memcpy(env->crf, spapr_cpu->nested_host_state->crf, sizeof(env->crf));
+    env->cfar = spapr_cpu->nested_host_state->cfar;
+    env->xer = spapr_cpu->nested_host_state->xer;
+    env->so = spapr_cpu->nested_host_state->so;
+    env->ov = spapr_cpu->nested_host_state->ov;
+    env->ov32 = spapr_cpu->nested_host_state->ov32;
+    env->ca32 = spapr_cpu->nested_host_state->ca32;
+    env->msr = spapr_cpu->nested_host_state->msr;
+    env->nip = spapr_cpu->nested_host_state->nip;
+
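+    /* The L2 ran with a non-zero LPIDR; under vhyp the L1 always has 0. */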
+    assert(env->spr[SPR_LPIDR] != 0);
+    env->spr[SPR_LPCR] = spapr_cpu->nested_host_state->spr[SPR_LPCR];
+    env->spr[SPR_LPIDR] = spapr_cpu->nested_host_state->spr[SPR_LPIDR];
+    env->spr[SPR_PCR] = spapr_cpu->nested_host_state->spr[SPR_PCR];
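+    /*
+     * The L2's DPDES was saved to hvstate above; do not leak it into
+     * the L1.
+     */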
+    env->spr[SPR_DPDES] = 0;
+    env->spr[SPR_HFSCR] = spapr_cpu->nested_host_state->spr[SPR_HFSCR];
+    env->spr[SPR_SRR0] = spapr_cpu->nested_host_state->spr[SPR_SRR0];
+    env->spr[SPR_SRR1] = spapr_cpu->nested_host_state->spr[SPR_SRR1];
+    env->spr[SPR_SPRG0] = spapr_cpu->nested_host_state->spr[SPR_SPRG0];
+    env->spr[SPR_SPRG1] = spapr_cpu->nested_host_state->spr[SPR_SPRG1];
+    env->spr[SPR_SPRG2] = spapr_cpu->nested_host_state->spr[SPR_SPRG2];
+    env->spr[SPR_SPRG3] = spapr_cpu->nested_host_state->spr[SPR_SPRG3];
+    env->spr[SPR_BOOKS_PID] = spapr_cpu->nested_host_state->spr[SPR_BOOKS_PID];
+    env->spr[SPR_PPR] = spapr_cpu->nested_host_state->spr[SPR_PPR];
+
+    /*
+     * Return the interrupt vector address from H_ENTER_NESTED to the L1
+     * (or error code).
+     */
+    env->gpr[3] = r3_return;
+
+    env->tb_env->tb_offset -= spapr_cpu->nested_tb_offset;
+    spapr_cpu->in_nested = false;
+
+    hreg_compute_hflags(env);
+    tlb_flush(cs);
+    env->reserve_addr = -1; /* Reset the reservation */
+
+    g_free(spapr_cpu->nested_host_state);
+    spapr_cpu->nested_host_state = NULL;
+}
+
 static void hypercall_register_types(void)
 {
     hypercall_register_softmmu();
@@ -1554,6 +1882,11 @@  static void hypercall_register_types(void)
     spapr_register_hypercall(KVMPPC_H_CAS, h_client_architecture_support);
 
     spapr_register_hypercall(KVMPPC_H_UPDATE_DT, h_update_dt);
+
+    spapr_register_hypercall(KVMPPC_H_SET_PARTITION_TABLE, h_set_ptbl);
+    spapr_register_hypercall(KVMPPC_H_ENTER_NESTED, h_enter_nested);
+    spapr_register_hypercall(KVMPPC_H_TLB_INVALIDATE, h_tlb_invalidate);
+    spapr_register_hypercall(KVMPPC_H_COPY_TOFROM_GUEST, h_copy_tofrom_guest);
 }
 
 type_init(hypercall_register_types)
diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h
index edbf3eeed0..852fe61b36 100644
--- a/include/hw/ppc/spapr.h
+++ b/include/hw/ppc/spapr.h
@@ -199,6 +199,9 @@  struct SpaprMachineState {
     bool has_graphics;
     uint32_t vsmt;       /* Virtual SMT mode (KVM's "core stride") */
 
+    /* Nested HV support (TCG only) */
+    uint64_t nested_ptcr;
+
     Notifier epow_notifier;
     QTAILQ_HEAD(, SpaprEventLogEntry) pending_events;
     bool use_hotplug_event_source;
@@ -579,7 +582,14 @@  struct SpaprMachineState {
 #define KVMPPC_H_UPDATE_DT      (KVMPPC_HCALL_BASE + 0x3)
 /* 0x4 was used for KVMPPC_H_UPDATE_PHANDLE in SLOF */
 #define KVMPPC_H_VOF_CLIENT     (KVMPPC_HCALL_BASE + 0x5)
-#define KVMPPC_HCALL_MAX        KVMPPC_H_VOF_CLIENT
+
+/* Platform-specific hcalls used for nested HV KVM */
+#define KVMPPC_H_SET_PARTITION_TABLE   (KVMPPC_HCALL_BASE + 0x800)
+#define KVMPPC_H_ENTER_NESTED          (KVMPPC_HCALL_BASE + 0x804)
+#define KVMPPC_H_TLB_INVALIDATE        (KVMPPC_HCALL_BASE + 0x808)
+#define KVMPPC_H_COPY_TOFROM_GUEST     (KVMPPC_HCALL_BASE + 0x80C)
+
+#define KVMPPC_HCALL_MAX        KVMPPC_H_COPY_TOFROM_GUEST
 
 /*
  * The hcall range 0xEF00 to 0xEF80 is reserved for use in facilitating
@@ -589,6 +599,65 @@  struct SpaprMachineState {
 #define SVM_H_TPM_COMM              0xEF10
 #define SVM_HCALL_MAX               SVM_H_TPM_COMM
 
+/*
+ * Register state for entering a nested guest with H_ENTER_NESTED.
+ * New members must be added at the end.
+ */
+struct kvmppc_hv_guest_state {
+    uint64_t version;        /* version of this structure layout, must be first */
+    uint32_t lpid;
+    uint32_t vcpu_token;
+    /* These registers are hypervisor privileged (at least for writing) */
+    uint64_t lpcr;
+    uint64_t pcr;
+    uint64_t amor;
+    uint64_t dpdes;
+    uint64_t hfscr;
+    int64_t tb_offset;
+    uint64_t dawr0;
+    uint64_t dawrx0;
+    uint64_t ciabr;
+    uint64_t hdec_expiry;
+    uint64_t purr;
+    uint64_t spurr;
+    uint64_t ic;
+    uint64_t vtb;
+    uint64_t hdar;
+    uint64_t hdsisr;
+    uint64_t heir;
+    uint64_t asdr;
+    /* These are OS privileged but need to be set late in guest entry */
+    uint64_t srr0;
+    uint64_t srr1;
+    uint64_t sprg[4];
+    uint64_t pidr;
+    uint64_t cfar;
+    uint64_t ppr;
+    /* Version 1 ends here */
+    uint64_t dawr1;
+    uint64_t dawrx1;
+    /* Version 2 ends here */
+};
+
+/* Latest version of hv_guest_state structure */
+#define HV_GUEST_STATE_VERSION  2
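+
+/*
+ * An L1 built for an older layout passes a smaller version; only the
+ * fields covered by the advertised version may be consumed.
+ */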
+
+/* Linux 64-bit powerpc pt_regs struct, used by nested HV */
+struct kvmppc_pt_regs {
+    uint64_t gpr[32];
+    uint64_t nip;
+    uint64_t msr;
+    uint64_t orig_gpr3;    /* Used for restarting system calls */
+    uint64_t ctr;
+    uint64_t link;
+    uint64_t xer;
+    uint64_t ccr;
+    uint64_t softe;        /* Soft enabled/disabled */
+    uint64_t trap;         /* Reason for being here */
+    uint64_t dar;          /* Fault registers */
+    uint64_t dsisr;        /* on 4xx/Book-E used for ESR */
+    uint64_t result;       /* Result of a system call */
+};
 
 typedef struct SpaprDeviceTreeUpdateHeader {
     uint32_t version_id;
@@ -606,6 +675,9 @@  typedef target_ulong (*spapr_hcall_fn)(PowerPCCPU *cpu, SpaprMachineState *sm,
 void spapr_register_hypercall(target_ulong opcode, spapr_hcall_fn fn);
 target_ulong spapr_hypercall(PowerPCCPU *cpu, target_ulong opcode,
                              target_ulong *args);
+
+void spapr_exit_nested(PowerPCCPU *cpu, int excp);
+
 target_ulong softmmu_resize_hpt_prepare(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                          target_ulong shift);
 target_ulong softmmu_resize_hpt_commit(PowerPCCPU *cpu, SpaprMachineState *spapr,
diff --git a/include/hw/ppc/spapr_cpu_core.h b/include/hw/ppc/spapr_cpu_core.h
index dab3dfc76c..b560514560 100644
--- a/include/hw/ppc/spapr_cpu_core.h
+++ b/include/hw/ppc/spapr_cpu_core.h
@@ -48,6 +48,11 @@  typedef struct SpaprCpuState {
     bool prod; /* not migrated, only used to improve dispatch latencies */
     struct ICPState *icp;
     struct XiveTCTX *tctx;
+
+    /* Fields for nested-HV support */
+    bool in_nested; /* true while the L2 is executing */
+    CPUPPCState *nested_host_state; /* holds the L1 state while L2 executes */
+    int64_t nested_tb_offset; /* L1->L2 TB offset */
 } SpaprCpuState;
 
 static inline SpaprCpuState *spapr_cpu_state(PowerPCCPU *cpu)