
[RFC,v3,3/4] hw/intc/arm_gicv3_kvm: Implement get/put functions

Message ID: 835124713dcd2ff8240460ae3df401ccb5f1f0ee.1445522263.git.p.fedin@samsung.com
State: New

Commit Message

Pavel Fedin Oct. 22, 2015, 2:02 p.m. UTC
This actually implements pre_save and post_load methods for in-kernel
vGICv3.

Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
---
 hw/intc/arm_gicv3_kvm.c | 456 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 452 insertions(+), 4 deletions(-)

Comments

Peter Maydell Oct. 23, 2015, 1:57 p.m. UTC | #1
On 22 October 2015 at 15:02, Pavel Fedin <p.fedin@samsung.com> wrote:
> This actually implements pre_save and post_load methods for in-kernel
> vGICv3.
>
> Signed-off-by: Pavel Fedin <p.fedin@samsung.com>
> ---
>  hw/intc/arm_gicv3_kvm.c | 456 +++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 452 insertions(+), 4 deletions(-)
>
> diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
> index b48f78f..ce8d2a0 100644
> --- a/hw/intc/arm_gicv3_kvm.c
> +++ b/hw/intc/arm_gicv3_kvm.c
> @@ -21,8 +21,11 @@
>
>  #include "hw/intc/arm_gicv3_common.h"
>  #include "hw/sysbus.h"
> +#include "migration/migration.h"
> +#include "qemu/error-report.h"
>  #include "sysemu/kvm.h"
>  #include "kvm_arm.h"
> +#include "gicv3_internal.h"
>  #include "vgic_common.h"
>
>  #ifdef DEBUG_GICV3_KVM
> @@ -41,6 +44,23 @@
>  #define KVM_ARM_GICV3_GET_CLASS(obj) \
>       OBJECT_GET_CLASS(KVMARMGICv3Class, (obj), TYPE_KVM_ARM_GICV3)
>
> +#define ICC_PMR_EL1     \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b0100, 0b0110, 0b000)
> +#define ICC_BPR0_EL1    \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1000, 0b011)
> +#define ICC_APR0_EL1(n) \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1000, 0b100 | n)
> +#define ICC_APR1_EL1(n) \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1001, 0b000 | n)
> +#define ICC_BPR1_EL1    \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b011)
> +#define ICC_CTLR_EL1    \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b100)
> +#define ICC_IGRPEN0_EL1 \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b110)
> +#define ICC_IGRPEN1_EL1 \
> +    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b111)

Nice, but I'm not sure binary literals are supported by all the compilers
we use (we don't use them anywhere else in qemu as far as I can see),
and the other code we have in helper.c for system register numbers
consistently uses decimal, so probably better to stick with that.
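
For reference, a decimal transliteration of the same encodings (the op0/op1/CRn/CRm/op2 values above) would look like the sketch below; this is just an illustration of the suggestion, not part of the posted patch:

#define ICC_PMR_EL1     KVM_DEV_ARM_VGIC_SYSREG(3, 0, 4, 6, 0)
#define ICC_BPR0_EL1    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 3)
#define ICC_APR0_EL1(n) KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 4 | (n))
#define ICC_APR1_EL1(n) KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 9, (n))
#define ICC_BPR1_EL1    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 3)
#define ICC_CTLR_EL1    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 4)
#define ICC_IGRPEN0_EL1 KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 6)
#define ICC_IGRPEN1_EL1 KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 7)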

> +
>  typedef struct KVMARMGICv3Class {
>      ARMGICv3CommonClass parent_class;
>      DeviceRealize parent_realize;
> @@ -54,16 +74,431 @@ static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
>      kvm_arm_gic_set_irq(s->num_irq, irq, level);
>  }
>
> +#define VGIC_CPUID(cpuid) ((((cpuid) & ARM_AFF3_MASK) >> 8) | \
> +                           ((cpuid) & ARM32_AFFINITY_MASK))
> +#define KVM_VGIC_ATTR(reg, cpuid) \
> +    ((VGIC_CPUID(cpuid) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | (reg))
> +
> +static inline void kvm_gicd_access(GICv3State *s, int offset, int cpu,
> +                                   uint64_t *val, bool write)
> +{
> +    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
> +                      KVM_VGIC_ATTR(offset, s->cpu[cpu].affinity_id),
> +                      val, write);
> +}
> +
> +static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
> +                                   uint64_t *val, bool write)
> +{
> +    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
> +                      KVM_VGIC_ATTR(offset, s->cpu[cpu].affinity_id),
> +                      val, write);
> +}
> +
> +static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
> +                                   uint64_t *val, bool write)
> +{
> +    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
> +                      KVM_VGIC_ATTR(reg, s->cpu[cpu].affinity_id),
> +                      val, write);
> +}
> +
> +/*
> + * Translate from the in-kernel field for an IRQ value to/from the qemu
> + * representation.
> + */
> +typedef void (*vgic_translate_fn)(GICv3State *s, int irq, int cpu,
> +                                  uint32_t *field, bool to_kernel);
> +
> +/* synthetic translate function used for clear/set registers to completely
> + * clear a setting using a clear-register before setting the remaining bits
> + * using a set-register */
> +static void translate_clear(GICv3State *s, int irq, int cpu,
> +                            uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = ~0;
> +    } else {
> +        /* does not make sense: qemu model doesn't use set/clear regs */
> +        abort();
> +    }
> +}
> +
> +static void translate_enabled(GICv3State *s, int irq, int cpu,
> +                              uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = GIC_TEST_ENABLED(irq, cpu);
> +    } else {
> +        GIC_REPLACE_ENABLED(irq, cpu, *field);
> +    }
> +}
> +
> +static void translate_group(GICv3State *s, int irq, int cpu,
> +                            uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = GIC_TEST_GROUP(irq, cpu);
> +    } else {
> +        GIC_REPLACE_GROUP(irq, cpu, *field);
> +    }
> +}
> +
> +static void translate_trigger(GICv3State *s, int irq, int cpu,
> +                              uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = GIC_TEST_EDGE_TRIGGER(irq, cpu) ? 2 : 0;
> +    } else {
> +        GIC_REPLACE_EDGE_TRIGGER(irq, cpu, *field & 2);
> +    }
> +}
> +
> +static void translate_pending(GICv3State *s, int irq, int cpu,
> +                              uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = gic_test_pending(s, irq, cpu);
> +    } else {
> +        GIC_REPLACE_PENDING(irq, cpu, *field);
> +        /* TODO: Capture if level-line is held high in the kernel */
> +    }
> +}
> +
> +static void translate_active(GICv3State *s, int irq, int cpu,
> +                             uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = GIC_TEST_ACTIVE(irq, cpu);
> +    } else {
> +        GIC_REPLACE_ACTIVE(irq, cpu, *field);
> +    }
> +}
> +
> +static void translate_priority(GICv3State *s, int irq, int cpu,
> +                               uint32_t *field, bool to_kernel)
> +{
> +    if (to_kernel) {
> +        *field = GIC_GET_PRIORITY(irq, cpu);
> +    } else {
> +        GIC_SET_PRIORITY(irq, cpu, *field);
> +    }
> +}
> +
> +#define for_each_irq_reg(_irq, _max, _field_width) \
> +    for (_irq = 0; _irq < _max; _irq += (32 / _field_width))
> +
> +/* Read a register group from the kernel VGIC */
> +static void kvm_dist_get(GICv3State *s, uint32_t offset, int width,
> +                         vgic_translate_fn translate_fn)
> +{
> +    uint64_t reg;
> +    int j;
> +    int irq, cpu, maxcpu;
> +    uint32_t field;
> +    int regsz = 32 / width; /* irqs per kernel register */
> +
> +    for_each_irq_reg(irq, s->num_irq, width) {
> +        maxcpu = irq < GIC_INTERNAL ? s->num_cpu : 1;
> +        for (cpu = 0; cpu < maxcpu; cpu++) {
> +            /* In GICv3 SGI/PPIs are stored in redistributor
> +             * Offsets in SGI area are the same as in distributor
> +             */
> +            if (irq < GIC_INTERNAL) {
> +                kvm_gicr_access(s, offset + GICR_SGI_OFFSET, cpu, &reg, false);
> +            } else {
> +                kvm_gicd_access(s, offset, cpu, &reg, false);
> +            }

This looks very odd. Rather than saying "if this is a GIC internal
interrupt then the state lives in the redistributor, otherwise it's
in the distributor", we should just first transfer all the
distributor state (once), and then transfer the redistributor state
for each CPU. (This also relates to my comment on patch 1 about
not wanting a generic "GIC_GET/SET_FOO" set of macros which
don't care about where the state is.)
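
To make the suggestion concrete, the split could look roughly like the sketch below: the distributor walk covers SPIs only and never loops over CPUs, while a separate per-CPU redistributor walk handles SGIs/PPIs. The kvm_redist_get name and the exact shape are illustrative only, built on the helpers already in this patch:

static void kvm_dist_get(GICv3State *s, uint32_t offset, int width,
                         vgic_translate_fn translate_fn)
{
    uint64_t reg;
    uint32_t field;
    int irq, j;
    int regsz = 32 / width; /* irqs per kernel register */

    /* SPIs only: distributor state is transferred exactly once */
    for (irq = GIC_INTERNAL; irq < s->num_irq; irq += regsz) {
        kvm_gicd_access(s, offset + (irq / regsz) * 4, 0, &reg, false);
        for (j = 0; j < regsz; j++) {
            field = extract32(reg, j * width, width);
            translate_fn(s, irq + j, 0, &field, false);
        }
    }
}

static void kvm_redist_get(GICv3State *s, uint32_t offset, int width,
                           vgic_translate_fn translate_fn)
{
    uint64_t reg;
    uint32_t field;
    int cpu, irq, j;
    int regsz = 32 / width;

    /* SGIs/PPIs: per-CPU state lives in the redistributor SGI frame */
    for (cpu = 0; cpu < s->num_cpu; cpu++) {
        for (irq = 0; irq < GIC_INTERNAL; irq += regsz) {
            kvm_gicr_access(s, offset + GICR_SGI_OFFSET + (irq / regsz) * 4,
                            cpu, &reg, false);
            for (j = 0; j < regsz; j++) {
                field = extract32(reg, j * width, width);
                translate_fn(s, irq + j, cpu, &field, false);
            }
        }
    }
}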

> +            for (j = 0; j < regsz; j++) {
> +                field = extract32(reg, j * width, width);
> +                translate_fn(s, irq + j, cpu, &field, false);
> +            }
> +        }
> +        offset += 4;
> +    }
> +}
> +
> +/* Write a register group to the kernel VGIC */
> +static void kvm_dist_put(GICv3State *s, uint32_t offset, int width,
> +                         vgic_translate_fn translate_fn)
> +{
> +    uint64_t reg;
> +    int j;
> +    int irq, cpu, maxcpu;
> +    uint32_t field;
> +    int regsz = 32 / width; /* irqs per kernel register */
> +
> +    for_each_irq_reg(irq, s->num_irq, width) {
> +        maxcpu = irq < GIC_INTERNAL ? s->num_cpu : 1;
> +        for (cpu = 0; cpu < maxcpu; cpu++) {
> +            reg = 0;
> +            for (j = 0; j < regsz; j++) {
> +                translate_fn(s, irq + j, cpu, &field, true);
> +                reg = deposit32(reg, j * width, width, field);
> +            }
> +            /* In GICv3 SGI/PPIs are stored in redistributor
> +             * Offsets in SGI area are the same as in distributor
> +             */
> +            if (irq < GIC_INTERNAL) {
> +                kvm_gicr_access(s, offset + GICR_SGI_OFFSET, cpu, &reg, true);
> +            } else {
> +                kvm_gicd_access(s, offset, cpu, &reg, true);
> +            }
> +        }
> +        offset += 4;
> +    }
> +}
> +
> +static void kvm_arm_gicv3_check(GICv3State *s)
> +{
> +    uint64_t reg;
> +    uint32_t num_irq;
> +
> +    /* Sanity checking s->num_irq */
> +    kvm_gicd_access(s, GICD_TYPER, 0, &reg, false);
> +    num_irq = ((reg & 0x1f) + 1) * 32;
> +
> +    if (num_irq < s->num_irq) {
> +        error_report("Model requests %u IRQs, but kernel supports max %u\n",
> +                     s->num_irq, num_irq);
> +        abort();
> +    }
> +
> +    /* TODO: Consider checking compatibility with the IIDR ? */
> +}
> +
>  static void kvm_arm_gicv3_put(GICv3State *s)
>  {
> -    /* TODO */
> -    DPRINTF("Cannot put kernel gic state, no kernel interface\n");
> +    uint64_t reg, redist_typer;
> +    int ncpu, i;
> +
> +    kvm_arm_gicv3_check(s);
> +
> +    kvm_gicr_access(s, GICR_TYPER, 0, &redist_typer, false);
> +
> +    /*****************************************************************
> +     * (Re)distributor State
> +     */
> +
> +    reg = s->ctlr;
> +    kvm_gicd_access(s, GICD_CTLR, 0, &reg, true);
> +
> +    if (redist_typer & GICR_TYPER_PLPIS) {
> +        /* Set base addresses before LPIs are enabled by GICR_CTLR write */
> +        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
> +            GICv3CPUState *c = &s->cpu[ncpu];
> +
> +            reg = c->propbaser & (GICR_PROPBASER_OUTER_CACHEABILITY_MASK |
> +                                  GICR_PROPBASER_ADDR_MASK |
> +                                  GICR_PROPBASER_SHAREABILITY_MASK |
> +                                  GICR_PROPBASER_CACHEABILITY_MASK |
> +                                  GICR_PROPBASER_IDBITS_MASK);
> +            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, true);
> +
> +            reg = c->pendbaser & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
> +                                  GICR_PENDBASER_ADDR_MASK |
> +                                  GICR_PENDBASER_SHAREABILITY_MASK |
> +                                  GICR_PENDBASER_CACHEABILITY_MASK);
> +            if (!c->redist_ctlr & GICR_CTLR_ENABLE_LPIS) {
> +                reg |= GICR_PENDBASER_PTZ;
> +            }

Why does the state of the pendbaser register depend on state in the
redist_ctlr ? Worth a comment, whatever the answer is.

> +            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, true);
> +        }
> +    }
> +
> +    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
> +        GICv3CPUState *c = &s->cpu[ncpu];
> +
> +        reg = c->redist_ctlr & (GICR_CTLR_ENABLE_LPIS | GICR_CTLR_DPG0 |
> +                                GICR_CTLR_DPG1NS | GICR_CTLR_DPG1S);
> +        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);
> +
> +        reg = c->cpu_enabled ? 0 : GICR_WAKER_ProcessorSleep;
> +        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);
> +    }
> +
> +    /* irq_state[n].enabled -> GICD_ISENABLERn */
> +    kvm_dist_put(s, GICD_ICENABLER, 1, translate_clear);
> +    kvm_dist_put(s, GICD_ISENABLER, 1, translate_enabled);
> +
> +    /* irq_state[n].group -> GICD_IGROUPRn */
> +    kvm_dist_put(s, GICD_IGROUPR, 1, translate_group);
> +
> +    /* Restore targets before pending to ensure the pending state is set on
> +     * the appropriate CPU interfaces in the kernel */
> +
> +    /* s->route[irq] -> GICD_IROUTERn */
> +    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
> +        uint32_t offset = GICD_IROUTER + (sizeof(reg) * i);
> +
> +        reg = s->irq_route[i - GIC_INTERNAL];
> +        kvm_gicd_access(s, offset, 0, &reg, true);
> +    }
> +
> +    /* irq_state[n].trigger -> GICD_ICFGRn
> +     * (restore configuration registers before pending IRQs so we treat
> +     * level/edge correctly) */
> +    kvm_dist_put(s, GICD_ICFGR, 2, translate_trigger);
> +
> +    /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
> +    kvm_dist_put(s, GICD_ICPENDR, 1, translate_clear);
> +    kvm_dist_put(s, GICD_ISPENDR, 1, translate_pending);
> +
> +    /* irq_state[n].active -> GICD_ISACTIVERn */
> +    kvm_dist_put(s, GICD_ICACTIVER, 1, translate_clear);
> +    kvm_dist_put(s, GICD_ISACTIVER, 1, translate_active);
> +
> +    /* s->priorityX[irq] -> ICD_IPRIORITYRn */
> +    kvm_dist_put(s, GICD_IPRIORITYR, 8, translate_priority);
> +
> +    /*****************************************************************
> +     * CPU Interface(s) State
> +     */
> +
> +    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
> +        GICv3CPUState *c = &s->cpu[ncpu];
> +
> +        reg = c->ctlr[1] & (ICC_CTLR_CBPR | ICC_CTLR_EOIMODE | ICC_CTLR_PMHE);
> +        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &reg, true);
> +
> +        reg = gicv3_get_igrpen0(s, ncpu);
> +        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, &reg, true);
> +
> +        reg = gicv3_get_igrpen1(s, ncpu);
> +        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, &reg, true);
> +
> +        reg = c->priority_mask;
> +        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &reg, true);
> +
> +        reg = c->bpr[0];
> +        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &reg, true);
> +
> +        reg = c->bpr[1];
> +        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &reg, true);
> +
> +        for (i = 0; i < 4; i++) {
> +            reg = c->apr[i][0];
> +            kvm_gicc_access(s, ICC_APR0_EL1(i), ncpu, &reg, true);
> +        }
> +
> +        for (i = 0; i < 4; i++) {
> +            reg = c->apr[i][1];
> +            kvm_gicc_access(s, ICC_APR1_EL1(i), ncpu, &reg, true);
> +        }
> +    }
>  }
>
>  static void kvm_arm_gicv3_get(GICv3State *s)
>  {
> -    /* TODO */
> -    DPRINTF("Cannot get kernel gic state, no kernel interface\n");
> +    uint64_t reg, redist_typer;
> +    int ncpu, i;
> +
> +    kvm_arm_gicv3_check(s);
> +
> +    kvm_gicr_access(s, GICR_TYPER, 0, &redist_typer, false);
> +
> +    /*****************************************************************
> +     * (Re)distributor State
> +     */
> +
> +    /* GICD_CTLR -> s->ctlr */
> +    kvm_gicd_access(s, GICD_CTLR, 0, &reg, false);
> +    s->ctlr = reg;
> +
> +    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
> +        GICv3CPUState *c = &s->cpu[ncpu];
> +
> +        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
> +        c->redist_ctlr = reg & (GICR_CTLR_ENABLE_LPIS | GICR_CTLR_DPG0 |
> +                                GICR_CTLR_DPG1NS | GICR_CTLR_DPG1S);
> +
> +        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
> +        c->cpu_enabled = !(reg & GICR_WAKER_ProcessorSleep);

If you take my suggestion in patch 1 of just having GICR_WAKER
in the state struct, this code becomes simpler.
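
For illustration, with a raw GICR_WAKER value in the per-CPU state (a hypothetical c->gicr_waker field, per the patch 1 discussion), the round trip would reduce to:

/* get side */
kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
c->gicr_waker = reg;

/* put side */
reg = c->gicr_waker;
kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);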

> +    }
> +
> +    if (redist_typer & GICR_TYPER_PLPIS) {
> +        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
> +            GICv3CPUState *c = &s->cpu[ncpu];
> +
> +            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, false);
> +            c->propbaser = reg & (GICR_PROPBASER_OUTER_CACHEABILITY_MASK |
> +                                  GICR_PROPBASER_ADDR_MASK |
> +                                  GICR_PROPBASER_SHAREABILITY_MASK |
> +                                  GICR_PROPBASER_CACHEABILITY_MASK |
> +                                  GICR_PROPBASER_IDBITS_MASK);
> +
> +            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, false);
> +            c->pendbaser = reg & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
> +                                  GICR_PENDBASER_ADDR_MASK |
> +                                  GICR_PENDBASER_SHAREABILITY_MASK |
> +                                  GICR_PENDBASER_CACHEABILITY_MASK);

Why do we need to mask these values?

> +        }
> +    }
> +
> +    /* GICD_IIDR -> ? */
> +    /* kvm_gicd_access(s, GICD_IIDR, 0, &reg, false); */
> +
> +    /* GICD_IGROUPRn -> irq_state[n].group */
> +    kvm_dist_get(s, GICD_IGROUPR, 1, translate_group);

These comments about 'irq_state[n]' don't match where the state
actually is in the struct definitions from patch 1.

> +
> +    /* GICD_ISENABLERn -> irq_state[n].enabled */
> +    kvm_dist_get(s, GICD_ISENABLER, 1, translate_enabled);
> +
> +    /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
> +    kvm_dist_get(s, GICD_ISPENDR, 1, translate_pending);
> +
> +    /* GICD_ISACTIVERn -> irq_state[n].active */
> +    kvm_dist_get(s, GICD_ISACTIVER, 1, translate_active);
> +
> +    /* GICD_ICFRn -> irq_state[n].trigger */
> +    kvm_dist_get(s, GICD_ICFGR, 2, translate_trigger);
> +
> +    /* GICD_IPRIORITYRn -> s->priorityX[irq] */
> +    kvm_dist_get(s, GICD_IPRIORITYR, 8, translate_priority);
> +
> +    /* GICD_IROUTERn -> s->route[irq] */
> +    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
> +        uint32_t offset = GICD_IROUTER + (sizeof(reg) * i);
> +
> +        kvm_gicd_access(s, offset, 0, &reg, false);
> +        s->irq_route[i - GIC_INTERNAL] = reg;
> +    }

Missing code to transfer GICD_IGRPMODR<n> ?
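
If the common code grows storage for the group-modifier bits, the transfer could mirror the other register groups. The GIC_TEST_GRPMOD/GIC_REPLACE_GRPMOD helpers and the GICD_IGRPMODR offset below are assumptions about what patch 1 and gicv3_internal.h would provide, not existing definitions:

static void translate_grpmod(GICv3State *s, int irq, int cpu,
                             uint32_t *field, bool to_kernel)
{
    if (to_kernel) {
        *field = GIC_TEST_GRPMOD(irq, cpu);      /* hypothetical helper */
    } else {
        GIC_REPLACE_GRPMOD(irq, cpu, *field);    /* hypothetical helper */
    }
}

/* in kvm_arm_gicv3_get(): GICD_IGRPMODRn -> group-modifier state */
kvm_dist_get(s, GICD_IGRPMODR, 1, translate_grpmod);
/* and kvm_dist_put(s, GICD_IGRPMODR, 1, translate_grpmod) on the put side */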

> +
> +    /*****************************************************************
> +     * CPU Interface(s) State
> +     */
> +
> +    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
> +        GICv3CPUState *c = &s->cpu[ncpu];
> +
> +        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &reg, false);
> +        c->ctlr[1] = reg & (ICC_CTLR_CBPR | ICC_CTLR_EOIMODE | ICC_CTLR_PMHE);
> +
> +        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, &reg, false);
> +        gicv3_set_igrpen0(s, ncpu, reg);
> +
> +        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, &reg, false);
> +        gicv3_set_igrpen1(s, ncpu, reg);
> +
> +        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &reg, false);
> +        c->priority_mask = reg & ICC_PMR_PRIORITY_MASK;
> +
> +        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &reg, false);
> +        c->bpr[0] = reg & ICC_BPR_BINARYPOINT_MASK;
> +
> +        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &reg, false);
> +        c->bpr[1] = reg & ICC_BPR_BINARYPOINT_MASK;

Do we need to mask these out? We could just trust that the state the
kernel has is valid...

> +
> +        for (i = 0; i < 4; i++) {
> +            kvm_gicc_access(s, ICC_APR0_EL1(i), ncpu, &reg, false);
> +            c->apr[i][0] = reg;
> +        }
> +
> +        for (i = 0; i < 4; i++) {
> +            kvm_gicc_access(s, ICC_APR1_EL1(i), ncpu, &reg, false);
> +            c->apr[i][1] = reg;
> +        }

Do we not transfer ICC_SRE_EL1 because it's implemented as RO?
(I think that's right for no-irq/fiq-bypass, sysregs only.)

> +    }
>  }
>
>  static void kvm_arm_gicv3_reset(DeviceState *dev)
> @@ -74,6 +509,12 @@ static void kvm_arm_gicv3_reset(DeviceState *dev)
>      DPRINTF("Reset\n");
>
>      kgc->parent_reset(dev);
> +
> +    if (s->migration_blocker) {
> +        DPRINTF("Cannot put kernel gic state, no kernel interface\n");
> +        return;
> +    }
> +
>      kvm_arm_gicv3_put(s);
>  }
>
> @@ -117,6 +558,13 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
>                              KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
>      kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
>                              KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);
> +
> +    if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
> +                               GICD_CTLR)) {
> +        error_setg(&s->migration_blocker, "This operating system kernel does "
> +                                          "not support vGICv3 migration");
> +        migrate_add_blocker(s->migration_blocker);
> +    }
>  }
>
>  static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
> --
> 2.4.4

thanks
-- PMM
Pavel Fedin Oct. 26, 2015, 7:59 a.m. UTC | #2
Hello!

> > +            reg = c->pendbaser & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
> > +                                  GICR_PENDBASER_ADDR_MASK |
> > +                                  GICR_PENDBASER_SHAREABILITY_MASK |
> > +                                  GICR_PENDBASER_CACHEABILITY_MASK);
> > +            if (!c->redist_ctlr & GICR_CTLR_ENABLE_LPIS) {
> > +                reg |= GICR_PENDBASER_PTZ;
> > +            }
> 
> Why does the state of the pendbaser register depend on state in the
> redist_ctlr ?

 The PTZ bit is write-only; we cannot read it back. And the spec says that setting PTZ is advised while LPIs are not enabled, because it shortens the time of GIC initialization. So I had to implement this small heuristic here. Is this approach OK?

> Worth a comment, whatever the answer is.

 I will.

> > +            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, false);
> > +            c->pendbaser = reg & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
> > +                                  GICR_PENDBASER_ADDR_MASK |
> > +                                  GICR_PENDBASER_SHAREABILITY_MASK |
> > +                                  GICR_PENDBASER_CACHEABILITY_MASK);
> 
> Why do we need to mask these values?

 I decided to do this at least for the case of KVM->TCG migration (as far as I understand, such things are possible). In this case I think we should not pollute our state with read-only bits that get added by the emulation code itself.

> Do we not transfer ICC_SRE_EL1 because it's implemented as RO?
> (I think that's right for no-irq/fiq-bypass, sysregs only.)

 Yes, and also because it looks like KVM is not going to implement GICv3 with non-SRE mode; instead, if we want to run a legacy guest, we just configure the host to provide GICv2 for it.
 I actually migrate only those CPU interface registers that are saved by the kernel code as part of the guest's context.

Kind regards,
Pavel Fedin
Expert Engineer
Samsung Electronics Research Center Russia
Peter Maydell Oct. 26, 2015, 11:09 a.m. UTC | #3
On 26 October 2015 at 07:59, Pavel Fedin <p.fedin@samsung.com> wrote:
>  Hello!
>
>> > +            reg = c->pendbaser & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
>> > +                                  GICR_PENDBASER_ADDR_MASK |
>> > +                                  GICR_PENDBASER_SHAREABILITY_MASK |
>> > +                                  GICR_PENDBASER_CACHEABILITY_MASK);
>> > +            if (!c->redist_ctlr & GICR_CTLR_ENABLE_LPIS) {
>> > +                reg |= GICR_PENDBASER_PTZ;
>> > +            }
>>
>> Why does the state of the pendbaser register depend on state in the
>> redist_ctlr ?
>
>  The PTZ bit is write-only; we cannot read it back. And the spec says that setting PTZ is advised while LPIs are not enabled, because it shortens the time of GIC initialization. So I had to implement this small heuristic here. Is this approach OK?

OK, with a comment to say that's what we're doing. (I assume that
when we support LPIs we'll then set PTZ appropriately, so this
code will change later.)
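
A comment along these lines (wording illustrative only) would capture the heuristic being agreed on here:

/* GICR_PENDBASER.PTZ is write-only and reads back as zero, so we
 * cannot migrate it directly.  The spec recommends setting PTZ when
 * LPIs are disabled (the pending table is then known to be clear,
 * which also speeds up GIC initialisation), so approximate it that way.
 */
if (!(c->redist_ctlr & GICR_CTLR_ENABLE_LPIS)) {
    reg |= GICR_PENDBASER_PTZ;
}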

>> Worth a comment, whatever the answer is.
>
>  I will.
>
>> > +            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, false);
>> > +            c->pendbaser = reg & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
>> > +                                  GICR_PENDBASER_ADDR_MASK |
>> > +                                  GICR_PENDBASER_SHAREABILITY_MASK |
>> > +                                  GICR_PENDBASER_CACHEABILITY_MASK);
>>
>> Why do we need to mask these values?
>
>  I decided to do this at least for the case of KVM->TCG migration (as far as i understand, such things are possible). In this case i think we should not pollute our state with read-only bits, which get added by the emulation code itself.

We don't do this for other system registers which might contain
RO bits, so I think for consistency we shouldn't mask bits out
here either.

(Transferring RO bits in migration state gives the destination
an opportunity in theory to reject a migration which is for
a config it can't handle. And reserved bits may end up having a
use in a future GIC version, so it's nice not to have to do a
QEMU update just to remove them from the mask.)
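
Dropping the masks, the get side would simply store whatever the kernel reports, e.g.:

kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, false);
c->propbaser = reg;

kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, false);
c->pendbaser = reg;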

thanks
-- PMM

Patch

diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c
index b48f78f..ce8d2a0 100644
--- a/hw/intc/arm_gicv3_kvm.c
+++ b/hw/intc/arm_gicv3_kvm.c
@@ -21,8 +21,11 @@ 
 
 #include "hw/intc/arm_gicv3_common.h"
 #include "hw/sysbus.h"
+#include "migration/migration.h"
+#include "qemu/error-report.h"
 #include "sysemu/kvm.h"
 #include "kvm_arm.h"
+#include "gicv3_internal.h"
 #include "vgic_common.h"
 
 #ifdef DEBUG_GICV3_KVM
@@ -41,6 +44,23 @@ 
 #define KVM_ARM_GICV3_GET_CLASS(obj) \
      OBJECT_GET_CLASS(KVMARMGICv3Class, (obj), TYPE_KVM_ARM_GICV3)
 
+#define ICC_PMR_EL1     \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b0100, 0b0110, 0b000)
+#define ICC_BPR0_EL1    \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1000, 0b011)
+#define ICC_APR0_EL1(n) \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1000, 0b100 | n)
+#define ICC_APR1_EL1(n) \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1001, 0b000 | n)
+#define ICC_BPR1_EL1    \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b011)
+#define ICC_CTLR_EL1    \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b100)
+#define ICC_IGRPEN0_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b110)
+#define ICC_IGRPEN1_EL1 \
+    KVM_DEV_ARM_VGIC_SYSREG(0b11, 0b000, 0b1100, 0b1100, 0b111)
+
 typedef struct KVMARMGICv3Class {
     ARMGICv3CommonClass parent_class;
     DeviceRealize parent_realize;
@@ -54,16 +74,431 @@  static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
     kvm_arm_gic_set_irq(s->num_irq, irq, level);
 }
 
+#define VGIC_CPUID(cpuid) ((((cpuid) & ARM_AFF3_MASK) >> 8) | \
+                           ((cpuid) & ARM32_AFFINITY_MASK))
+#define KVM_VGIC_ATTR(reg, cpuid) \
+    ((VGIC_CPUID(cpuid) << KVM_DEV_ARM_VGIC_CPUID_SHIFT) | (reg))
+
+static inline void kvm_gicd_access(GICv3State *s, int offset, int cpu,
+                                   uint64_t *val, bool write)
+{
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+                      KVM_VGIC_ATTR(offset, s->cpu[cpu].affinity_id),
+                      val, write);
+}
+
+static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
+                                   uint64_t *val, bool write)
+{
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
+                      KVM_VGIC_ATTR(offset, s->cpu[cpu].affinity_id),
+                      val, write);
+}
+
+static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
+                                   uint64_t *val, bool write)
+{
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
+                      KVM_VGIC_ATTR(reg, s->cpu[cpu].affinity_id),
+                      val, write);
+}
+
+/*
+ * Translate from the in-kernel field for an IRQ value to/from the qemu
+ * representation.
+ */
+typedef void (*vgic_translate_fn)(GICv3State *s, int irq, int cpu,
+                                  uint32_t *field, bool to_kernel);
+
+/* synthetic translate function used for clear/set registers to completely
+ * clear a setting using a clear-register before setting the remaining bits
+ * using a set-register */
+static void translate_clear(GICv3State *s, int irq, int cpu,
+                            uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = ~0;
+    } else {
+        /* does not make sense: qemu model doesn't use set/clear regs */
+        abort();
+    }
+}
+
+static void translate_enabled(GICv3State *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_ENABLED(irq, cpu);
+    } else {
+        GIC_REPLACE_ENABLED(irq, cpu, *field);
+    }
+}
+
+static void translate_group(GICv3State *s, int irq, int cpu,
+                            uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_GROUP(irq, cpu);
+    } else {
+        GIC_REPLACE_GROUP(irq, cpu, *field);
+    }
+}
+
+static void translate_trigger(GICv3State *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_EDGE_TRIGGER(irq, cpu) ? 2 : 0;
+    } else {
+        GIC_REPLACE_EDGE_TRIGGER(irq, cpu, *field & 2);
+    }
+}
+
+static void translate_pending(GICv3State *s, int irq, int cpu,
+                              uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = gic_test_pending(s, irq, cpu);
+    } else {
+        GIC_REPLACE_PENDING(irq, cpu, *field);
+        /* TODO: Capture if level-line is held high in the kernel */
+    }
+}
+
+static void translate_active(GICv3State *s, int irq, int cpu,
+                             uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_TEST_ACTIVE(irq, cpu);
+    } else {
+        GIC_REPLACE_ACTIVE(irq, cpu, *field);
+    }
+}
+
+static void translate_priority(GICv3State *s, int irq, int cpu,
+                               uint32_t *field, bool to_kernel)
+{
+    if (to_kernel) {
+        *field = GIC_GET_PRIORITY(irq, cpu);
+    } else {
+        GIC_SET_PRIORITY(irq, cpu, *field);
+    }
+}
+
+#define for_each_irq_reg(_irq, _max, _field_width) \
+    for (_irq = 0; _irq < _max; _irq += (32 / _field_width))
+
+/* Read a register group from the kernel VGIC */
+static void kvm_dist_get(GICv3State *s, uint32_t offset, int width,
+                         vgic_translate_fn translate_fn)
+{
+    uint64_t reg;
+    int j;
+    int irq, cpu, maxcpu;
+    uint32_t field;
+    int regsz = 32 / width; /* irqs per kernel register */
+
+    for_each_irq_reg(irq, s->num_irq, width) {
+        maxcpu = irq < GIC_INTERNAL ? s->num_cpu : 1;
+        for (cpu = 0; cpu < maxcpu; cpu++) {
+            /* In GICv3 SGI/PPIs are stored in redistributor
+             * Offsets in SGI area are the same as in distributor
+             */
+            if (irq < GIC_INTERNAL) {
+                kvm_gicr_access(s, offset + GICR_SGI_OFFSET, cpu, &reg, false);
+            } else {
+                kvm_gicd_access(s, offset, cpu, &reg, false);
+            }
+            for (j = 0; j < regsz; j++) {
+                field = extract32(reg, j * width, width);
+                translate_fn(s, irq + j, cpu, &field, false);
+            }
+        }
+        offset += 4;
+    }
+}
+
+/* Write a register group to the kernel VGIC */
+static void kvm_dist_put(GICv3State *s, uint32_t offset, int width,
+                         vgic_translate_fn translate_fn)
+{
+    uint64_t reg;
+    int j;
+    int irq, cpu, maxcpu;
+    uint32_t field;
+    int regsz = 32 / width; /* irqs per kernel register */
+
+    for_each_irq_reg(irq, s->num_irq, width) {
+        maxcpu = irq < GIC_INTERNAL ? s->num_cpu : 1;
+        for (cpu = 0; cpu < maxcpu; cpu++) {
+            reg = 0;
+            for (j = 0; j < regsz; j++) {
+                translate_fn(s, irq + j, cpu, &field, true);
+                reg = deposit32(reg, j * width, width, field);
+            }
+            /* In GICv3 SGI/PPIs are stored in redistributor
+             * Offsets in SGI area are the same as in distributor
+             */
+            if (irq < GIC_INTERNAL) {
+                kvm_gicr_access(s, offset + GICR_SGI_OFFSET, cpu, &reg, true);
+            } else {
+                kvm_gicd_access(s, offset, cpu, &reg, true);
+            }
+        }
+        offset += 4;
+    }
+}
+
+static void kvm_arm_gicv3_check(GICv3State *s)
+{
+    uint64_t reg;
+    uint32_t num_irq;
+
+    /* Sanity checking s->num_irq */
+    kvm_gicd_access(s, GICD_TYPER, 0, &reg, false);
+    num_irq = ((reg & 0x1f) + 1) * 32;
+
+    if (num_irq < s->num_irq) {
+        error_report("Model requests %u IRQs, but kernel supports max %u\n",
+                     s->num_irq, num_irq);
+        abort();
+    }
+
+    /* TODO: Consider checking compatibility with the IIDR ? */
+}
+
 static void kvm_arm_gicv3_put(GICv3State *s)
 {
-    /* TODO */
-    DPRINTF("Cannot put kernel gic state, no kernel interface\n");
+    uint64_t reg, redist_typer;
+    int ncpu, i;
+
+    kvm_arm_gicv3_check(s);
+
+    kvm_gicr_access(s, GICR_TYPER, 0, &redist_typer, false);
+
+    /*****************************************************************
+     * (Re)distributor State
+     */
+
+    reg = s->ctlr;
+    kvm_gicd_access(s, GICD_CTLR, 0, &reg, true);
+
+    if (redist_typer & GICR_TYPER_PLPIS) {
+        /* Set base addresses before LPIs are enabled by GICR_CTLR write */
+        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+            GICv3CPUState *c = &s->cpu[ncpu];
+
+            reg = c->propbaser & (GICR_PROPBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_ADDR_MASK |
+                                  GICR_PROPBASER_SHAREABILITY_MASK |
+                                  GICR_PROPBASER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_IDBITS_MASK);
+            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, true);
+
+            reg = c->pendbaser & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PENDBASER_ADDR_MASK |
+                                  GICR_PENDBASER_SHAREABILITY_MASK |
+                                  GICR_PENDBASER_CACHEABILITY_MASK);
+            if (!c->redist_ctlr & GICR_CTLR_ENABLE_LPIS) {
+                reg |= GICR_PENDBASER_PTZ;
+            }
+            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, true);
+        }
+    }
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        reg = c->redist_ctlr & (GICR_CTLR_ENABLE_LPIS | GICR_CTLR_DPG0 |
+                                GICR_CTLR_DPG1NS | GICR_CTLR_DPG1S);
+        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);
+
+        reg = c->cpu_enabled ? 0 : GICR_WAKER_ProcessorSleep;
+        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);
+    }
+
+    /* irq_state[n].enabled -> GICD_ISENABLERn */
+    kvm_dist_put(s, GICD_ICENABLER, 1, translate_clear);
+    kvm_dist_put(s, GICD_ISENABLER, 1, translate_enabled);
+
+    /* irq_state[n].group -> GICD_IGROUPRn */
+    kvm_dist_put(s, GICD_IGROUPR, 1, translate_group);
+
+    /* Restore targets before pending to ensure the pending state is set on
+     * the appropriate CPU interfaces in the kernel */
+
+    /* s->route[irq] -> GICD_IROUTERn */
+    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+        uint32_t offset = GICD_IROUTER + (sizeof(reg) * i);
+
+        reg = s->irq_route[i - GIC_INTERNAL];
+        kvm_gicd_access(s, offset, 0, &reg, true);
+    }
+
+    /* irq_state[n].trigger -> GICD_ICFGRn
+     * (restore configuration registers before pending IRQs so we treat
+     * level/edge correctly) */
+    kvm_dist_put(s, GICD_ICFGR, 2, translate_trigger);
+
+    /* irq_state[n].pending + irq_state[n].level -> GICD_ISPENDRn */
+    kvm_dist_put(s, GICD_ICPENDR, 1, translate_clear);
+    kvm_dist_put(s, GICD_ISPENDR, 1, translate_pending);
+
+    /* irq_state[n].active -> GICD_ISACTIVERn */
+    kvm_dist_put(s, GICD_ICACTIVER, 1, translate_clear);
+    kvm_dist_put(s, GICD_ISACTIVER, 1, translate_active);
+
+    /* s->priorityX[irq] -> ICD_IPRIORITYRn */
+    kvm_dist_put(s, GICD_IPRIORITYR, 8, translate_priority);
+
+    /*****************************************************************
+     * CPU Interface(s) State
+     */
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        reg = c->ctlr[1] & (ICC_CTLR_CBPR | ICC_CTLR_EOIMODE | ICC_CTLR_PMHE);
+        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &reg, true);
+
+        reg = gicv3_get_igrpen0(s, ncpu);
+        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, &reg, true);
+
+        reg = gicv3_get_igrpen1(s, ncpu);
+        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, &reg, true);
+
+        reg = c->priority_mask;
+        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &reg, true);
+
+        reg = c->bpr[0];
+        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &reg, true);
+
+        reg = c->bpr[1];
+        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &reg, true);
+
+        for (i = 0; i < 4; i++) {
+            reg = c->apr[i][0];
+            kvm_gicc_access(s, ICC_APR0_EL1(i), ncpu, &reg, true);
+        }
+
+        for (i = 0; i < 4; i++) {
+            reg = c->apr[i][1];
+            kvm_gicc_access(s, ICC_APR1_EL1(i), ncpu, &reg, true);
+        }
+    }
 }
 
 static void kvm_arm_gicv3_get(GICv3State *s)
 {
-    /* TODO */
-    DPRINTF("Cannot get kernel gic state, no kernel interface\n");
+    uint64_t reg, redist_typer;
+    int ncpu, i;
+
+    kvm_arm_gicv3_check(s);
+
+    kvm_gicr_access(s, GICR_TYPER, 0, &redist_typer, false);
+
+    /*****************************************************************
+     * (Re)distributor State
+     */
+
+    /* GICD_CTLR -> s->ctlr */
+    kvm_gicd_access(s, GICD_CTLR, 0, &reg, false);
+    s->ctlr = reg;
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
+        c->redist_ctlr = reg & (GICR_CTLR_ENABLE_LPIS | GICR_CTLR_DPG0 |
+                                GICR_CTLR_DPG1NS | GICR_CTLR_DPG1S);
+
+        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
+        c->cpu_enabled = !(reg & GICR_WAKER_ProcessorSleep);
+    }
+
+    if (redist_typer & GICR_TYPER_PLPIS) {
+        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+            GICv3CPUState *c = &s->cpu[ncpu];
+
+            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &reg, false);
+            c->propbaser = reg & (GICR_PROPBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_ADDR_MASK |
+                                  GICR_PROPBASER_SHAREABILITY_MASK |
+                                  GICR_PROPBASER_CACHEABILITY_MASK |
+                                  GICR_PROPBASER_IDBITS_MASK);
+
+            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &reg, false);
+            c->pendbaser = reg & (GICR_PENDBASER_OUTER_CACHEABILITY_MASK |
+                                  GICR_PENDBASER_ADDR_MASK |
+                                  GICR_PENDBASER_SHAREABILITY_MASK |
+                                  GICR_PENDBASER_CACHEABILITY_MASK);
+        }
+    }
+
+    /* GICD_IIDR -> ? */
+    /* kvm_gicd_access(s, GICD_IIDR, 0, &reg, false); */
+
+    /* GICD_IGROUPRn -> irq_state[n].group */
+    kvm_dist_get(s, GICD_IGROUPR, 1, translate_group);
+
+    /* GICD_ISENABLERn -> irq_state[n].enabled */
+    kvm_dist_get(s, GICD_ISENABLER, 1, translate_enabled);
+
+    /* GICD_ISPENDRn -> irq_state[n].pending + irq_state[n].level */
+    kvm_dist_get(s, GICD_ISPENDR, 1, translate_pending);
+
+    /* GICD_ISACTIVERn -> irq_state[n].active */
+    kvm_dist_get(s, GICD_ISACTIVER, 1, translate_active);
+
+    /* GICD_ICFRn -> irq_state[n].trigger */
+    kvm_dist_get(s, GICD_ICFGR, 2, translate_trigger);
+
+    /* GICD_IPRIORITYRn -> s->priorityX[irq] */
+    kvm_dist_get(s, GICD_IPRIORITYR, 8, translate_priority);
+
+    /* GICD_IROUTERn -> s->route[irq] */
+    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+        uint32_t offset = GICD_IROUTER + (sizeof(reg) * i);
+
+        kvm_gicd_access(s, offset, 0, &reg, false);
+        s->irq_route[i - GIC_INTERNAL] = reg;
+    }
+
+    /*****************************************************************
+     * CPU Interface(s) State
+     */
+
+    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+        GICv3CPUState *c = &s->cpu[ncpu];
+
+        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &reg, false);
+        c->ctlr[1] = reg & (ICC_CTLR_CBPR | ICC_CTLR_EOIMODE | ICC_CTLR_PMHE);
+
+        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu, &reg, false);
+        gicv3_set_igrpen0(s, ncpu, reg);
+
+        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu, &reg, false);
+        gicv3_set_igrpen1(s, ncpu, reg);
+
+        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &reg, false);
+        c->priority_mask = reg & ICC_PMR_PRIORITY_MASK;
+
+        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &reg, false);
+        c->bpr[0] = reg & ICC_BPR_BINARYPOINT_MASK;
+
+        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &reg, false);
+        c->bpr[1] = reg & ICC_BPR_BINARYPOINT_MASK;
+
+        for (i = 0; i < 4; i++) {
+            kvm_gicc_access(s, ICC_APR0_EL1(i), ncpu, &reg, false);
+            c->apr[i][0] = reg;
+        }
+
+        for (i = 0; i < 4; i++) {
+            kvm_gicc_access(s, ICC_APR1_EL1(i), ncpu, &reg, false);
+            c->apr[i][1] = reg;
+        }
+    }
 }
 
 static void kvm_arm_gicv3_reset(DeviceState *dev)
@@ -74,6 +509,12 @@  static void kvm_arm_gicv3_reset(DeviceState *dev)
     DPRINTF("Reset\n");
 
     kgc->parent_reset(dev);
+
+    if (s->migration_blocker) {
+        DPRINTF("Cannot put kernel gic state, no kernel interface\n");
+        return;
+    }
+
     kvm_arm_gicv3_put(s);
 }
 
@@ -117,6 +558,13 @@  static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
                             KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
     kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                             KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);
+
+    if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
+                               GICD_CTLR)) {
+        error_setg(&s->migration_blocker, "This operating system kernel does "
+                                          "not support vGICv3 migration");
+        migrate_add_blocker(s->migration_blocker);
+    }
 }
 
 static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)