@@ -312,6 +312,16 @@ out:
rcu_read_unlock();
}
+/*
+ * Return the page mask to use for DMA mapping of @mr: for an IOMMU
+ * region, derive it from the smallest page size the IOMMU supports
+ * (the supported-sizes value is a 64-bit mask, so use ctz64() —
+ * ffs() takes an int and would truncate page sizes >= 4GiB);
+ * otherwise fall back to the host page mask.
+ */
+static hwaddr vfio_iommu_page_mask(MemoryRegion *mr)
+{
+    if (memory_region_is_iommu(mr)) {
+        /* NOTE(review): assumes the mask is non-zero; ctz64(0) == 64
+         * would make the shift below undefined — confirm callers. */
+        int smallest = ctz64(memory_region_iommu_get_page_sizes(mr));
+
+        return ~((1ULL << smallest) - 1);
+    }
+    return qemu_real_host_page_mask;
+}
+
static void vfio_listener_region_add(VFIOMemoryListener *vlistener,
MemoryRegionSection *section)
{
@@ -320,6 +330,7 @@ static void vfio_listener_region_add(VFIOMemoryListener *vlistener,
Int128 llend;
void *vaddr;
int ret;
+ hwaddr page_mask = vfio_iommu_page_mask(section->mr);
if (vfio_listener_skipped_section(section)) {
trace_vfio_listener_region_add_skip(
@@ -329,16 +340,16 @@ static void vfio_listener_region_add(VFIOMemoryListener *vlistener,
return;
}
- if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
- (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+ if (unlikely((section->offset_within_address_space & ~page_mask) !=
+ (section->offset_within_region & ~page_mask))) {
error_report("%s received unaligned region", __func__);
return;
}
- iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+ iova = ROUND_UP(section->offset_within_address_space, ~page_mask + 1);
llend = int128_make64(section->offset_within_address_space);
llend = int128_add(llend, section->size);
- llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+ llend = int128_and(llend, int128_exts64(page_mask));
if (int128_ge(int128_make64(iova), llend)) {
return;
@@ -421,6 +432,7 @@ static void vfio_listener_region_del(VFIOMemoryListener *vlistener,
VFIOContainer *container = vlistener->container;
hwaddr iova, end;
int ret;
+ hwaddr page_mask = vfio_iommu_page_mask(section->mr);
if (vfio_listener_skipped_section(section)) {
trace_vfio_listener_region_del_skip(
@@ -430,8 +442,8 @@ static void vfio_listener_region_del(VFIOMemoryListener *vlistener,
return;
}
- if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
- (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+ if (unlikely((section->offset_within_address_space & ~page_mask) !=
+ (section->offset_within_region & ~page_mask))) {
error_report("%s received unaligned region", __func__);
return;
}
@@ -457,9 +469,9 @@ static void vfio_listener_region_del(VFIOMemoryListener *vlistener,
*/
}
- iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+ iova = ROUND_UP(section->offset_within_address_space, ~page_mask + 1);
end = (section->offset_within_address_space + int128_get64(section->size)) &
- TARGET_PAGE_MASK;
+ page_mask;
if (iova >= end) {
return;
The existing memory listener is called on RAM or PCI address space, which implies potentially different page sizes. This uses the new memory_region_iommu_get_page_sizes() for IOMMU regions, or falls back to qemu_real_host_page_mask for RAM. Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru> --- Changes: * use the smallest page size for the mask, as an IOMMU MR can support multiple page sizes --- hw/vfio/common.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-)