diff mbox series

[RFC,8/8] hw/arm/virt: Allow up to 512 vcpus along with KVM VGICv3

Message ID 1522160122-10744-9-git-send-email-eric.auger@redhat.com
State New
Headers show
Series KVM/ARM: Relax the max 123 vcpus limitation along with KVM GICv3 | expand

Commit Message

Eric Auger March 27, 2018, 2:15 p.m. UTC
With KVM acceleration, and if the KVM VGICv3 supports setting multiple
redistributor regions, we now allow up to 512 vcpus.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
 hw/arm/virt.c         | 17 ++++++++++++++++-
 include/hw/arm/virt.h |  1 +
 2 files changed, 17 insertions(+), 1 deletion(-)

Comments

Shannon Zhao March 28, 2018, 4:02 a.m. UTC | #1
On 2018/3/27 22:15, Eric Auger wrote:
> With KVM acceleration and if KVM VGICV3 supports to set multiple
> redistributor regions, we now allow up to 512 vcpus.
> 
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
> ---
>  hw/arm/virt.c         | 17 ++++++++++++++++-
>  include/hw/arm/virt.h |  1 +
>  2 files changed, 17 insertions(+), 1 deletion(-)
> 
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index 8258f6f..cdb1a75 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -149,8 +149,10 @@ static const MemMapEntry a15memmap[] = {
>      [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
>      [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
>      [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES },
> +    /* Allows 512 - 123 additional vcpus (each 2x64kB) */
> +    [VIRT_GIC_REDIST2] =        { 0x4000000000ULL, 0x30A0000LL },
One concern is that this will limit the guest RAM size to RAMLIMIT_BYTES.
If we want to support a larger RAM size in the future, this may be a problem.

>      /* Second PCIe window, 512GB wide at the 512GB boundary */
> -    [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000ULL, 0x8000000000ULL },
> +    [VIRT_PCIE_MMIO_HIGH] =     { 0x8000000000ULL, 0x8000000000ULL },
>  };
>  
>  static const int a15irqmap[] = {
> @@ -553,6 +555,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
>          agcc->register_redist_region((GICv3State *)gicdev,
>                                   vms->memmap[VIRT_GIC_REDIST].base,
>                                   vms->memmap[VIRT_GIC_REDIST].size >> 17);
> +        if (vms->smp_cpus > 123) {
> +            agcc->register_redist_region((GICv3State *)gicdev,
> +                     vms->memmap[VIRT_GIC_REDIST2].base,
> +                                 vms->memmap[VIRT_GIC_REDIST2].size >> 17);
> +        }
>      } else {
>          sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
>      }
> @@ -1284,6 +1291,14 @@ static void machvirt_init(MachineState *machine)
>       */
>      if (vms->gic_version == 3) {
>          virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
> +        if (kvm_max_vcpus(kvm_state) > 255) {
> +            /*
> +             * VGICv3 KVM device capability to set multiple redistributor
> +             * was introduced at the same time KVM_MAX_VCPUS was bumped
> +             * from 255 to 512
> +             */
> +            virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / 0x20000;
> +        }
>      } else {
>          virt_max_cpus = GIC_NCPU;
>      }
> diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
> index d168291..801a4ad 100644
> --- a/include/hw/arm/virt.h
> +++ b/include/hw/arm/virt.h
> @@ -60,6 +60,7 @@ enum {
>      VIRT_GIC_V2M,
>      VIRT_GIC_ITS,
>      VIRT_GIC_REDIST,
> +    VIRT_GIC_REDIST2,
>      VIRT_UART,
>      VIRT_MMIO,
>      VIRT_RTC,
>
Eric Auger March 28, 2018, 6:47 a.m. UTC | #2
Hi Shannon,

On 28/03/18 06:02, Shannon Zhao wrote:
> 
> 
> On 2018/3/27 22:15, Eric Auger wrote:
>> With KVM acceleration and if KVM VGICV3 supports to set multiple
>> redistributor regions, we now allow up to 512 vcpus.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>> ---
>>  hw/arm/virt.c         | 17 ++++++++++++++++-
>>  include/hw/arm/virt.h |  1 +
>>  2 files changed, 17 insertions(+), 1 deletion(-)
>>
>> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
>> index 8258f6f..cdb1a75 100644
>> --- a/hw/arm/virt.c
>> +++ b/hw/arm/virt.c
>> @@ -149,8 +149,10 @@ static const MemMapEntry a15memmap[] = {
>>      [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
>>      [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
>>      [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES },
>> +    /* Allows 512 - 123 additional vcpus (each 2x64kB) */
>> +    [VIRT_GIC_REDIST2] =        { 0x4000000000ULL, 0x30A0000LL },
> One concern that this will limit the guest ram size to RAMLIMIT_BYTES.
> If we want to support larger ram size in the future, this may be a problem.
There is a comment before  #define RAMLIMIT_GB 255
saying that RAM extension should rather happen at the 2TB base:
"../.. We don't want to fill all the way up to 512GB with RAM because
 * we might want it for non-RAM purposes later"

This REDIST2 region could also be allocated at the top of the [256GB -
512GB] region.

I will start working on extending the RAM as suggested by this comment.

Thanks

Eric

> 
>>      /* Second PCIe window, 512GB wide at the 512GB boundary */
>> -    [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000ULL, 0x8000000000ULL },
>> +    [VIRT_PCIE_MMIO_HIGH] =     { 0x8000000000ULL, 0x8000000000ULL },
>>  };
>>  
>>  static const int a15irqmap[] = {
>> @@ -553,6 +555,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
>>          agcc->register_redist_region((GICv3State *)gicdev,
>>                                   vms->memmap[VIRT_GIC_REDIST].base,
>>                                   vms->memmap[VIRT_GIC_REDIST].size >> 17);
>> +        if (vms->smp_cpus > 123) {
>> +            agcc->register_redist_region((GICv3State *)gicdev,
>> +                     vms->memmap[VIRT_GIC_REDIST2].base,
>> +                                 vms->memmap[VIRT_GIC_REDIST2].size >> 17);
>> +        }
>>      } else {
>>          sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
>>      }
>> @@ -1284,6 +1291,14 @@ static void machvirt_init(MachineState *machine)
>>       */
>>      if (vms->gic_version == 3) {
>>          virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
>> +        if (kvm_max_vcpus(kvm_state) > 255) {
>> +            /*
>> +             * VGICv3 KVM device capability to set multiple redistributor
>> +             * was introduced at the same time KVM_MAX_VCPUS was bumped
>> +             * from 255 to 512
>> +             */
>> +            virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / 0x20000;
>> +        }
>>      } else {
>>          virt_max_cpus = GIC_NCPU;
>>      }
>> diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
>> index d168291..801a4ad 100644
>> --- a/include/hw/arm/virt.h
>> +++ b/include/hw/arm/virt.h
>> @@ -60,6 +60,7 @@ enum {
>>      VIRT_GIC_V2M,
>>      VIRT_GIC_ITS,
>>      VIRT_GIC_REDIST,
>> +    VIRT_GIC_REDIST2,
>>      VIRT_UART,
>>      VIRT_MMIO,
>>      VIRT_RTC,
>>
>
Peter Maydell April 13, 2018, 1:41 p.m. UTC | #3
On 27 March 2018 at 15:15, Eric Auger <eric.auger@redhat.com> wrote:
> With KVM acceleration and if KVM VGICV3 supports to set multiple
> redistributor regions, we now allow up to 512 vcpus.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
> ---
>  hw/arm/virt.c         | 17 ++++++++++++++++-
>  include/hw/arm/virt.h |  1 +
>  2 files changed, 17 insertions(+), 1 deletion(-)
>
> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
> index 8258f6f..cdb1a75 100644
> --- a/hw/arm/virt.c
> +++ b/hw/arm/virt.c
> @@ -149,8 +149,10 @@ static const MemMapEntry a15memmap[] = {
>      [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
>      [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
>      [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES },
> +    /* Allows 512 - 123 additional vcpus (each 2x64kB) */
> +    [VIRT_GIC_REDIST2] =        { 0x4000000000ULL, 0x30A0000LL },

Maybe we should make the 2nd redist region go up to a slightly
rounder address rather than making it exactly big enough to get
us up to 512 CPUs? (Why 512, by the way?)

>      /* Second PCIe window, 512GB wide at the 512GB boundary */
> -    [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000ULL, 0x8000000000ULL },
> +    [VIRT_PCIE_MMIO_HIGH] =     { 0x8000000000ULL, 0x8000000000ULL },
>  };
>
>  static const int a15irqmap[] = {
> @@ -553,6 +555,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
>          agcc->register_redist_region((GICv3State *)gicdev,
>                                   vms->memmap[VIRT_GIC_REDIST].base,
>                                   vms->memmap[VIRT_GIC_REDIST].size >> 17);
> +        if (vms->smp_cpus > 123) {
> +            agcc->register_redist_region((GICv3State *)gicdev,
> +                     vms->memmap[VIRT_GIC_REDIST2].base,
> +                                 vms->memmap[VIRT_GIC_REDIST2].size >> 17);
> +        }
>      } else {
>          sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
>      }
> @@ -1284,6 +1291,14 @@ static void machvirt_init(MachineState *machine)
>       */
>      if (vms->gic_version == 3) {
>          virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
> +        if (kvm_max_vcpus(kvm_state) > 255) {
> +            /*
> +             * VGICv3 KVM device capability to set multiple redistributor
> +             * was introduced at the same time KVM_MAX_VCPUS was bumped
> +             * from 255 to 512
> +             */
> +            virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / 0x20000;

I think it would be better to explicitly check "do we have
support for split redistributors" rather than looking at
KVM_MAX_VCPUS. It's not impossible that a distro kernel
might have chosen to backport support for one but not the
other.

> +        }
>      } else {
>          virt_max_cpus = GIC_NCPU;
>      }
> diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
> index d168291..801a4ad 100644
> --- a/include/hw/arm/virt.h
> +++ b/include/hw/arm/virt.h
> @@ -60,6 +60,7 @@ enum {
>      VIRT_GIC_V2M,
>      VIRT_GIC_ITS,
>      VIRT_GIC_REDIST,
> +    VIRT_GIC_REDIST2,
>      VIRT_UART,
>      VIRT_MMIO,
>      VIRT_RTC,
> --
> 2.5.5

thanks
-- PMM
Eric Auger April 13, 2018, 2:01 p.m. UTC | #4
Hi Peter,

On 13/04/18 15:41, Peter Maydell wrote:
> On 27 March 2018 at 15:15, Eric Auger <eric.auger@redhat.com> wrote:
>> With KVM acceleration and if KVM VGICV3 supports to set multiple
>> redistributor regions, we now allow up to 512 vcpus.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>> ---
>>  hw/arm/virt.c         | 17 ++++++++++++++++-
>>  include/hw/arm/virt.h |  1 +
>>  2 files changed, 17 insertions(+), 1 deletion(-)
>>
>> diff --git a/hw/arm/virt.c b/hw/arm/virt.c
>> index 8258f6f..cdb1a75 100644
>> --- a/hw/arm/virt.c
>> +++ b/hw/arm/virt.c
>> @@ -149,8 +149,10 @@ static const MemMapEntry a15memmap[] = {
>>      [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
>>      [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
>>      [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES },
>> +    /* Allows 512 - 123 additional vcpus (each 2x64kB) */
>> +    [VIRT_GIC_REDIST2] =        { 0x4000000000ULL, 0x30A0000LL },
> 
> Maybe we should make the 2nd redist region go up to a slightly
> rounder address rather than making it exactly big enough to get
> us up to 512 CPUs? (Why 512, by the way?)
> 
>>      /* Second PCIe window, 512GB wide at the 512GB boundary */
>> -    [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000ULL, 0x8000000000ULL },
>> +    [VIRT_PCIE_MMIO_HIGH] =     { 0x8000000000ULL, 0x8000000000ULL },
>>  };
>>
>>  static const int a15irqmap[] = {
>> @@ -553,6 +555,11 @@ static void create_gic(VirtMachineState *vms, qemu_irq *pic)
>>          agcc->register_redist_region((GICv3State *)gicdev,
>>                                   vms->memmap[VIRT_GIC_REDIST].base,
>>                                   vms->memmap[VIRT_GIC_REDIST].size >> 17);
>> +        if (vms->smp_cpus > 123) {
>> +            agcc->register_redist_region((GICv3State *)gicdev,
>> +                     vms->memmap[VIRT_GIC_REDIST2].base,
>> +                                 vms->memmap[VIRT_GIC_REDIST2].size >> 17);
>> +        }
>>      } else {
>>          sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
>>      }
>> @@ -1284,6 +1291,14 @@ static void machvirt_init(MachineState *machine)
>>       */
>>      if (vms->gic_version == 3) {
>>          virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
>> +        if (kvm_max_vcpus(kvm_state) > 255) {
>> +            /*
>> +             * VGICv3 KVM device capability to set multiple redistributor
>> +             * was introduced at the same time KVM_MAX_VCPUS was bumped
>> +             * from 255 to 512
>> +             */
>> +            virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / 0x20000;
> 
> I think it would be better to explicitly check "do we have
> support for split redistributors" rather than looking at
> KVM_MAX_VCPUS. It's not impossible that a distro kernel
> might have chosen to backport support for one but not the
> other.

The issue is we have a race here:
to check whether the new ATTR is supported I need to query the GICv3 KVM
device. The latter is created in the GICv3 initialize. But to initialize
the VGIC I need the VCPUs to be created. And to create the VCPUs I need
to check their max number. Or am I missing something?

The KVM device create dry-run mode cannot be used here. I would need to
really create the KVM device and then delete it. The issue is there is
no kvm device DELETE ioctl at kernel level since KVM devices are deleted
on VM deletion if I understand correctly. Adding a KVM device delete at
kernel level may not be straightforward as I suspect some
simplifications could be made in KVM device deletion knowing the VM was
under deletion.

Thanks

Eric
> 
>> +        }
>>      } else {
>>          virt_max_cpus = GIC_NCPU;
>>      }
>> diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
>> index d168291..801a4ad 100644
>> --- a/include/hw/arm/virt.h
>> +++ b/include/hw/arm/virt.h
>> @@ -60,6 +60,7 @@ enum {
>>      VIRT_GIC_V2M,
>>      VIRT_GIC_ITS,
>>      VIRT_GIC_REDIST,
>> +    VIRT_GIC_REDIST2,
>>      VIRT_UART,
>>      VIRT_MMIO,
>>      VIRT_RTC,
>> --
>> 2.5.5
> 
> thanks
> -- PMM
>
Peter Maydell April 13, 2018, 2:06 p.m. UTC | #5
On 13 April 2018 at 15:01, Auger Eric <eric.auger@redhat.com> wrote:
> On 13/04/18 15:41, Peter Maydell wrote:
>> I think it would be better to explicitly check "do we have
>> support for split redistributors" rather than looking at
>> KVM_MAX_VCPUS. It's not impossible that a distro kernel
>> might have chosen to backport support for one but not the
>> other.
>
> The issue is we have race here:
> to check whether the new ATTR is supported I need to query the GICV3 KVM
> device. This later is created in the GICv3 initialize. But to initialize
> the VGIC I need the VCPUs to be created. And to create the VCPUs I need
> to check their max number. Or do I miss somethinh?
>
> The KVM device create dry-run mode cannot be used here. I would need to
> really create the KVM device and then delete it. The issue is there is
> no kvm device DELETE ioctl at kernel level since KVM devices are deleted
> on VM deletion if I understand correctly. Adding a KVM device delete at
> kernel level may not be straightforward as I suspect some
> simplifications could be made in KVM device deletion knowing the VM was
> under deletion.

Two possibilities:

(1) do like we do in kvm_arm_get_host_cpu_features() where we
create a scratch VM to interrogate it and then throw it away
when we have the info we need

(2) add an API to the kernel to make it easier to query it
for this sort of feature

thanks
-- PMM
Eric Auger April 13, 2018, 2:11 p.m. UTC | #6
Hi Peter,

On 13/04/18 16:06, Peter Maydell wrote:
> On 13 April 2018 at 15:01, Auger Eric <eric.auger@redhat.com> wrote:
>> On 13/04/18 15:41, Peter Maydell wrote:
>>> I think it would be better to explicitly check "do we have
>>> support for split redistributors" rather than looking at
>>> KVM_MAX_VCPUS. It's not impossible that a distro kernel
>>> might have chosen to backport support for one but not the
>>> other.
>>
>> The issue is we have race here:
>> to check whether the new ATTR is supported I need to query the GICV3 KVM
>> device. This later is created in the GICv3 initialize. But to initialize
>> the VGIC I need the VCPUs to be created. And to create the VCPUs I need
>> to check their max number. Or do I miss somethinh?
>>
>> The KVM device create dry-run mode cannot be used here. I would need to
>> really create the KVM device and then delete it. The issue is there is
>> no kvm device DELETE ioctl at kernel level since KVM devices are deleted
>> on VM deletion if I understand correctly. Adding a KVM device delete at
>> kernel level may not be straightforward as I suspect some
>> simplifications could be made in KVM device deletion knowing the VM was
>> under deletion.
> 
> Two possibilities:
> 
> (1) do like we do in kvm_arm_get_host_cpu_features() where we
> create a scratch VM to interrogate it and then throw it away
> when we have the info we need
Oh OK. I did not know this was in place. At first sight, this looks
simpler than (2) to me. I will investigate this solution.

Thanks!

Eric
> 
> (2) add an API to the kernel to make it easier to query it
> for this sort of feature
> 
> thanks
> -- PMM
>
Andrew Jones April 16, 2018, 9:19 a.m. UTC | #7
On Fri, Apr 13, 2018 at 04:11:24PM +0200, Auger Eric wrote:
> Hi Peter,
> 
> On 13/04/18 16:06, Peter Maydell wrote:
> > On 13 April 2018 at 15:01, Auger Eric <eric.auger@redhat.com> wrote:
> >> On 13/04/18 15:41, Peter Maydell wrote:
> >>> I think it would be better to explicitly check "do we have
> >>> support for split redistributors" rather than looking at
> >>> KVM_MAX_VCPUS. It's not impossible that a distro kernel
> >>> might have chosen to backport support for one but not the
> >>> other.
> >>
> >> The issue is we have race here:
> >> to check whether the new ATTR is supported I need to query the GICV3 KVM
> >> device. This later is created in the GICv3 initialize. But to initialize
> >> the VGIC I need the VCPUs to be created. And to create the VCPUs I need
> >> to check their max number. Or do I miss somethinh?
> >>
> >> The KVM device create dry-run mode cannot be used here. I would need to
> >> really create the KVM device and then delete it. The issue is there is
> >> no kvm device DELETE ioctl at kernel level since KVM devices are deleted
> >> on VM deletion if I understand correctly. Adding a KVM device delete at
> >> kernel level may not be straightforward as I suspect some
> >> simplifications could be made in KVM device deletion knowing the VM was
> >> under deletion.
> > 
> > Two possibilities:
> > 
> > (1) do like we do in kvm_arm_get_host_cpu_features() where we
> > create a scratch VM to interrogate it and then throw it away
> > when we have the info we need
> Oh OK. I did not know this was in place. At first sight, this looks
> simpler than (2) to me. I will investigate this solution.

But (1) isn't awesome. It'd be better if we could avoid scratch VMs
altogether as they slow VM launch and create confusing traces when
debugging. Also, it's just not a pretty way to do things. If we must
keep a scratch VM, then let's at least make sure we only have one,
i.e. put all probing into a single scratch VM.

> 
> Thanks!
> 
> Eric
> > 
> > (2) add an API to the kernel to make it easier to query it
> > for this sort of feature

This is the cleaner choice, but I wasn't opposed to the max-vcpus
query doing double duty, though. While the backporting argument is
a good point, I'm not sure we've ever really cared about what
issues backporters have had to endure when they cherry-pick features
before.

Thanks,
drew


> > 
> > thanks
> > -- PMM
> > 
>
Peter Maydell April 16, 2018, 10:58 a.m. UTC | #8
On 16 April 2018 at 10:19, Andrew Jones <drjones@redhat.com> wrote:
> This is the cleaner choice, but I wasn't opposed to the max-vcpus
> query getting double-duty though. While the backporting argument is
> a good point, I'm not sure we've ever really cared about what
> issues backporters have had to endure when they cherry-pick features
> before.

Well, mostly I didn't like the entangling of two different
features, so we look for the presence of one by checking
for the other. That's confusing going forward as well as
awkward for backporting. Plus if we have a difficulty with
detection of new kernel features like this we're probably going
to run into it again in the future, so we might as well sort
it out now...

thanks
-- PMM
diff mbox series

Patch

diff --git a/hw/arm/virt.c b/hw/arm/virt.c
index 8258f6f..cdb1a75 100644
--- a/hw/arm/virt.c
+++ b/hw/arm/virt.c
@@ -149,8 +149,10 @@  static const MemMapEntry a15memmap[] = {
     [VIRT_PCIE_PIO] =           { 0x3eff0000, 0x00010000 },
     [VIRT_PCIE_ECAM] =          { 0x3f000000, 0x01000000 },
     [VIRT_MEM] =                { 0x40000000, RAMLIMIT_BYTES },
+    /* Allows 512 - 123 additional vcpus (each 2x64kB) */
+    [VIRT_GIC_REDIST2] =        { 0x4000000000ULL, 0x30A0000LL },
     /* Second PCIe window, 512GB wide at the 512GB boundary */
-    [VIRT_PCIE_MMIO_HIGH] =   { 0x8000000000ULL, 0x8000000000ULL },
+    [VIRT_PCIE_MMIO_HIGH] =     { 0x8000000000ULL, 0x8000000000ULL },
 };
 
 static const int a15irqmap[] = {
@@ -553,6 +555,11 @@  static void create_gic(VirtMachineState *vms, qemu_irq *pic)
         agcc->register_redist_region((GICv3State *)gicdev,
                                  vms->memmap[VIRT_GIC_REDIST].base,
                                  vms->memmap[VIRT_GIC_REDIST].size >> 17);
+        if (vms->smp_cpus > 123) {
+            agcc->register_redist_region((GICv3State *)gicdev,
+                     vms->memmap[VIRT_GIC_REDIST2].base,
+                                 vms->memmap[VIRT_GIC_REDIST2].size >> 17);
+        }
     } else {
         sysbus_mmio_map(gicbusdev, 1, vms->memmap[VIRT_GIC_CPU].base);
     }
@@ -1284,6 +1291,14 @@  static void machvirt_init(MachineState *machine)
      */
     if (vms->gic_version == 3) {
         virt_max_cpus = vms->memmap[VIRT_GIC_REDIST].size / 0x20000;
+        if (kvm_max_vcpus(kvm_state) > 255) {
+            /*
+             * VGICv3 KVM device capability to set multiple redistributor
+             * was introduced at the same time KVM_MAX_VCPUS was bumped
+             * from 255 to 512
+             */
+            virt_max_cpus += vms->memmap[VIRT_GIC_REDIST2].size / 0x20000;
+        }
     } else {
         virt_max_cpus = GIC_NCPU;
     }
diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h
index d168291..801a4ad 100644
--- a/include/hw/arm/virt.h
+++ b/include/hw/arm/virt.h
@@ -60,6 +60,7 @@  enum {
     VIRT_GIC_V2M,
     VIRT_GIC_ITS,
     VIRT_GIC_REDIST,
+    VIRT_GIC_REDIST2,
     VIRT_UART,
     VIRT_MMIO,
     VIRT_RTC,