
[qemu,v2] vfio/spapr: Allow backing bigger guest IOMMU pages with smaller physical pages

Message ID 20180620091013.2224-1-aik@ozlabs.ru
State New
Series [qemu,v2] vfio/spapr: Allow backing bigger guest IOMMU pages with smaller physical pages

Commit Message

Alexey Kardashevskiy June 20, 2018, 9:10 a.m. UTC
At the moment the PPC64/pseries guest only supports 4K/64K/16M IOMMU
pages, and the POWER8 CPU supports exactly the same set of page sizes,
so things have worked fine so far.

However, POWER9 supports a different set of sizes - 4K/64K/2M/1G - and
the last two - 2M and 1G - are not even allowed in the paravirt interface
(RTAS DDW), so we always end up using 64K IOMMU pages, although we could
back the guest's 16MB IOMMU pages with 2MB pages on the host.

This stores the mask of supported host IOMMU page sizes in VFIOContainer
and uses it later when creating a new DMA window, taking the system page
size (normally 64K, or 2M/16M/1G if hugepages are used) as the upper limit
of the IOMMU page size.

This changes the type of @pagesize to uint64_t as this is what
memory_region_iommu_get_min_page_size() returns and clz64() takes.

There should be no behavioral changes on platforms other than pseries.
The guest will keep using the IOMMU page size selected by the PHB pagesize
property as this only changes the underlying hardware TCE table
granularity.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
Changes:
v2:
* fixed the calculation of the biggest page size not exceeding the requested one
* limited the IOMMU page size to the system page size
---
 include/hw/vfio/vfio-common.h |  1 +
 hw/vfio/common.c              |  3 +++
 hw/vfio/spapr.c               | 21 ++++++++++++++++++++-
 3 files changed, 24 insertions(+), 1 deletion(-)
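
Below is a minimal standalone sketch (not part of the patch) of the selection
arithmetic described above: ANDing the host-supported page size mask with
(pagesize | (pagesize - 1)) keeps only the sizes that do not exceed the
requested one, and 1ULL << (63 - clz64(...)) isolates the largest of them.
The pick_pagesize() helper and the example mask are illustrative only, and
clz64() below is a stand-in for QEMU's helper from "qemu/host-utils.h".

#include <inttypes.h>
#include <stdio.h>

/* Stand-in for QEMU's clz64() from "qemu/host-utils.h" */
static int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/* Biggest host-supported IOMMU page size not exceeding the requested one */
static uint64_t pick_pagesize(uint64_t supported_mask, uint64_t requested)
{
    /* requested | (requested - 1) sets every bit up to 'requested' inclusive */
    uint64_t candidates = supported_mask & (requested | (requested - 1));

    if (!candidates) {
        return 0; /* nothing fits; the patch turns this into -EINVAL */
    }
    /* the highest set bit is the largest supported size <= 'requested' */
    return 1ULL << (63 - clz64(candidates));
}

int main(void)
{
    /* Illustrative POWER9-style host mask: 4K | 64K | 2M | 1G */
    uint64_t mask = 0x1000ULL | 0x10000ULL | 0x200000ULL | 0x40000000ULL;

    /* A 16M guest IOMMU page falls back to 2M on the host: prints 0x200000 */
    printf("0x%" PRIx64 "\n", pick_pagesize(mask, 16 * 1024 * 1024));
    /* 64K is supported directly: prints 0x10000 */
    printf("0x%" PRIx64 "\n", pick_pagesize(mask, 0x10000));
    return 0;
}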

Comments

Alexey Kardashevskiy July 20, 2018, 6:09 a.m. UTC | #1
On 20/6/18 7:10 pm, Alexey Kardashevskiy wrote:

Ping?

David Gibson July 23, 2018, 3:11 a.m. UTC | #2
On Wed, Jun 20, 2018 at 07:10:12PM +1000, Alexey Kardashevskiy wrote:

Sorry, this fell off my radar for a bit.

I've now applied it to ppc-for-3.1.

Alexey Kardashevskiy July 24, 2018, 3:56 a.m. UTC | #3
On 23/07/2018 13:11, David Gibson wrote:
> Sorry, this fell off my radar for a bit.
> 
> I've now applied it to ppc-for-3.1.


Alex might object that as it does not have his "rb" ;)


David Gibson July 26, 2018, 1:41 a.m. UTC | #4
On Tue, Jul 24, 2018 at 01:56:54PM +1000, Alexey Kardashevskiy wrote:
> 
> 
> On 23/07/2018 13:11, David Gibson wrote:
> > Sorry, this fell off my radar for a bit.
> > 
> > I've now applied it to ppc-for-3.1.
> 
> 
> Alex might object that as it does not have his "rb" ;)

Well, if he does I'll yank it from my tree.  It won't be going
upstream for a while yet.


Patch

diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index a903692..c20524d 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -73,6 +73,7 @@  typedef struct VFIOContainer {
     unsigned iommu_type;
     int error;
     bool initialized;
+    unsigned long pgsizes;
     /*
      * This assumes the host IOMMU can support only a single
      * contiguous IOVA window.  We may need to generalize that in
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index fb396cf..40f0356 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -1108,6 +1108,7 @@  static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
             info.iova_pgsizes = 4096;
         }
         vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
+        container->pgsizes = info.iova_pgsizes;
     } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU) ||
                ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_v2_IOMMU)) {
         struct vfio_iommu_spapr_tce_info info;
@@ -1172,6 +1173,7 @@  static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
         }
 
         if (v2) {
+            container->pgsizes = info.ddw.pgsizes;
             /*
              * There is a default window in just created container.
              * To make region_add/del simpler, we better remove this
@@ -1186,6 +1188,7 @@  static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
             }
         } else {
             /* The default table uses 4K pages */
+            container->pgsizes = 0x1000;
             vfio_host_win_add(container, info.dma32_window_start,
                               info.dma32_window_start +
                               info.dma32_window_size - 1,
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index 259397c..becf71a 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -15,6 +15,7 @@ 
 
 #include "hw/vfio/vfio-common.h"
 #include "hw/hw.h"
+#include "exec/ram_addr.h"
 #include "qemu/error-report.h"
 #include "trace.h"
 
@@ -144,9 +145,27 @@  int vfio_spapr_create_window(VFIOContainer *container,
 {
     int ret;
     IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
-    unsigned pagesize = memory_region_iommu_get_min_page_size(iommu_mr);
+    uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr);
     unsigned entries, pages;
     struct vfio_iommu_spapr_tce_create create = { .argsz = sizeof(create) };
+    long systempagesize = qemu_getrampagesize();
+
+    /*
+     * The host might not support the guest supported IOMMU page size,
+     * so we will use smaller physical IOMMU pages to back them.
+     */
+    if (pagesize > systempagesize) {
+        pagesize = systempagesize;
+    }
+    pagesize = 1ULL << (63 - clz64(container->pgsizes &
+                                   (pagesize | (pagesize - 1))));
+    if (!pagesize) {
+        error_report("Host doesn't support page size 0x%"PRIx64
+                     ", the supported mask is 0x%lx",
+                     memory_region_iommu_get_min_page_size(iommu_mr),
+                     container->pgsizes);
+        return -EINVAL;
+    }
 
     /*
      * FIXME: For VFIO iommu types which have KVM acceleration to
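
For an end-to-end picture of the new vfio_spapr_create_window() behaviour, the
sketch below extends the earlier pick_pagesize() example with the system page
size clamp and the failure case; window_pagesize() is illustrative only (the
masks and sizes are taken from the commit message, except the hypothetical
failure scenario), and clz64() again stands in for QEMU's helper.

#include <inttypes.h>
#include <stdio.h>

/* Stand-in for QEMU's clz64() from "qemu/host-utils.h" */
static int clz64(uint64_t val)
{
    return val ? __builtin_clzll(val) : 64;
}

/*
 * Mirror of the logic added to vfio_spapr_create_window(): clamp the guest
 * IOMMU page size to the system (RAM) page size, then take the largest
 * host-supported size that is not bigger; 0 here corresponds to the
 * error_report()/-EINVAL path in the patch.
 */
static uint64_t window_pagesize(uint64_t host_mask, uint64_t guest_ps,
                                uint64_t system_ps)
{
    uint64_t ps = guest_ps > system_ps ? system_ps : guest_ps;
    uint64_t candidates = host_mask & (ps | (ps - 1));

    return candidates ? 1ULL << (63 - clz64(candidates)) : 0;
}

int main(void)
{
    /* POWER9-style host mask: 4K | 64K | 2M | 1G */
    const uint64_t p9 = 0x1000ULL | 0x10000ULL | 0x200000ULL | 0x40000000ULL;

    /* 16M guest IOMMU pages, RAM backed by 2M hugepages: 2M TCEs (0x200000) */
    printf("0x%" PRIx64 "\n", window_pagesize(p9, 0x1000000, 0x200000));
    /* 16M guest IOMMU pages, plain 64K system pages: 64K TCEs (0x10000) */
    printf("0x%" PRIx64 "\n", window_pagesize(p9, 0x1000000, 0x10000));
    /* Hypothetical mask with nothing at or below the request: 0, error path */
    printf("0x%" PRIx64 "\n", window_pagesize(0x10000, 0x1000, 0x10000));
    return 0;
}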