
[v2] spapr_iommu: Enable multiple TCE requests

Message ID 1400685781-8545-1-git-send-email-aik@ozlabs.ru
State New

Commit Message

Alexey Kardashevskiy May 21, 2014, 3:23 p.m. UTC
Currently only a single TCE entry per request is supported (H_PUT_TCE).
However, the PAPR+ specification allows multiple-entry requests such as
H_PUT_TCE_INDIRECT and H_STUFF_TCE. By requiring fewer transitions to the
host kernel via ioctls, support for these calls can accelerate IOMMU
operations.

This implements H_STUFF_TCE and H_PUT_TCE_INDIRECT.

This advertises the "multi-tce" capability to the guest if the host kernel
supports it (KVM_CAP_SPAPR_MULTITCE) or the guest is running in TCG mode.

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
---
Changes:
v2:
* multi-tce enabled explicitly for TCG (it was implicit before)
* kvmppc_spapr_use_multitce() does not handle TCG anymore

v1:
* removed checks for liobn as the check is already performed in
spapr_tce_find_by_liobn
* added hcall-multi-tce if the host kernel supports the capability
---
 hw/ppc/spapr.c       |  3 ++
 hw/ppc/spapr_iommu.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 target-ppc/kvm.c     |  7 +++++
 target-ppc/kvm_ppc.h |  6 ++++
 trace-events         |  2 ++
 5 files changed, 96 insertions(+)
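
As an illustration of the batched interface described above (not part of the
patch itself), a guest that negotiates "hcall-multi-tce" could map a run of
pages roughly as sketched below. The hcall() and virt_to_phys() helpers are
hypothetical placeholders; the argument layout (liobn, ioba, TCE-list address,
npages) and the 512-entry cap follow the h_put_tce_indirect() handler added by
this patch.

#include <stdint.h>

#define H_PUT_TCE_INDIRECT 0x13C        /* hcall number as defined in hw/ppc/spapr.h */
#define TCE_PAGE_SIZE      4096ULL
#define TCE_PERM_RW        0x3ULL       /* read + write permission bits */

extern long hcall(unsigned long opcode, ...);      /* hypothetical hypercall wrapper */
extern uint64_t virt_to_phys(const void *va);      /* hypothetical VA->guest-physical helper */

static long map_range(uint32_t liobn, uint64_t ioba,
                      uint64_t start_gpa, unsigned long npages)
{
    /* one 4K page of guest memory holds up to 512 eight-byte TCEs */
    static uint64_t tce_page[512] __attribute__((aligned(4096)));
    long rc = 0;

    while (npages && !rc) {
        unsigned long n = npages > 512 ? 512 : npages;

        for (unsigned long i = 0; i < n; i++) {
            tce_page[i] = (start_gpa + i * TCE_PAGE_SIZE) | TCE_PERM_RW;
        }
        /* one guest->hypervisor transition covers up to 512 mappings */
        rc = hcall(H_PUT_TCE_INDIRECT, liobn, ioba,
                   virt_to_phys(tce_page), n);

        ioba      += n * TCE_PAGE_SIZE;
        start_gpa += n * TCE_PAGE_SIZE;
        npages    -= n;
    }
    return rc;
}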

Comments

Alexey Kardashevskiy May 21, 2014, 4:03 p.m. UTC | #1
On 05/22/2014 01:23 AM, Alexey Kardashevskiy wrote:
> Currently only a single TCE entry per request is supported (H_PUT_TCE).
> However, the PAPR+ specification allows multiple-entry requests such as
> H_PUT_TCE_INDIRECT and H_STUFF_TCE. By requiring fewer transitions to the
> host kernel via ioctls, support for these calls can accelerate IOMMU operations.
> 
> This implements H_STUFF_TCE and H_PUT_TCE_INDIRECT.
> 
> This advertises the "multi-tce" capability to the guest if the host kernel
> supports it (KVM_CAP_SPAPR_MULTITCE) or the guest is running in TCG mode.
> 
> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
> ---
> Changes:
> v2:
> * multi-tce enabled explicitly for TCG (it was implicit before)
> * kvmppc_spapr_use_multitce() does not handle TCG anymore
> 
> v1:
> * removed checks for liobn as the check is already performed in
> spapr_tce_find_by_liobn
> * added hcall-multi-tce if the host kernel supports the capability
> ---
>  hw/ppc/spapr.c       |  3 ++
>  hw/ppc/spapr_iommu.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>  target-ppc/kvm.c     |  7 +++++
>  target-ppc/kvm_ppc.h |  6 ++++
>  trace-events         |  2 ++
>  5 files changed, 96 insertions(+)
> 
> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
> index 3b28211..697fba6 100644
> --- a/hw/ppc/spapr.c
> +++ b/hw/ppc/spapr.c
> @@ -498,6 +498,9 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
>      /* RTAS */
>      _FDT((fdt_begin_node(fdt, "rtas")));
>  
> +    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
> +        add_str(hypertas, "hcall-multi-tce");
> +    }
>      _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
>                         hypertas->len)));
>      g_string_free(hypertas, TRUE);
> diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
> index 72493d8..ab5037c 100644
> --- a/hw/ppc/spapr_iommu.c
> +++ b/hw/ppc/spapr_iommu.c
> @@ -224,6 +224,82 @@ static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
>      return H_SUCCESS;
>  }
>  
> +static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
> +                                       sPAPREnvironment *spapr,
> +                                       target_ulong opcode, target_ulong *args)
> +{
> +    int i;
> +    target_ulong liobn = args[0];
> +    target_ulong ioba = args[1];
> +    target_ulong ioba1 = ioba;
> +    target_ulong tce_list = args[2];
> +    target_ulong npages = args[3];
> +    target_ulong ret = H_PARAMETER;
> +    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
> +    CPUState *cs = CPU(cpu);
> +
> +    if (!tcet) {
> +        return H_PARAMETER;
> +    }
> +
> +    if (npages > 512) {
> +        return H_PARAMETER;
> +    }
> +
> +    ioba &= ~SPAPR_TCE_PAGE_MASK;
> +    tce_list &= ~SPAPR_TCE_PAGE_MASK;
> +
> +    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
> +        target_ulong tce = ldq_phys(cs->as, tce_list +
> +                                    i * sizeof(target_ulong));

Sorry, it is too late here, forgot to comment :)

I cannot use rtas_ld straight away as it is 32-bit and here I need 64-bit.
Anyway, this is a hypercall and it is called from guest virtual mode, so I
do not think the rule about masking the top bits for RTAS applies here.

SPAPR says about it:

uint64 TCE, /* The logical address of a page (4 K long on a 4 K boundary)
of TCE contents to be stored in the TCE table (contains the logical address
of the storage page to be mapped) */

I believe "logical address" in this context is a "guest physical" address.
Does it help? Does not help me :)
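
To make the width point concrete, here is a small standalone sketch (plain C,
not QEMU code): each entry of the H_PUT_TCE_INDIRECT list is a 64-bit
big-endian value carrying a guest physical page address plus permission bits,
so a 32-bit accessor in the style of rtas_ld would only ever see half of it.
The byte buffer below stands in for the guest page at tce_list; the field
layout is the usual "page address | permission bits" convention, shown for
illustration only.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t ldq_be(const uint8_t *p)        /* 64-bit big-endian load */
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {
        v = (v << 8) | p[i];
    }
    return v;
}

static uint32_t ldl_be(const uint8_t *p)        /* 32-bit big-endian load, rtas_ld-style width */
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
    /* one TCE: guest physical page 0x123456000 (above 4 GiB), read+write bits set */
    uint8_t tce_list[8] = { 0x00, 0x00, 0x00, 0x01, 0x23, 0x45, 0x60, 0x03 };

    printf("64-bit load: 0x%016" PRIx64 "\n", ldq_be(tce_list));  /* 0x0000000123456003 */
    printf("32-bit load: 0x%08"  PRIx32 "  (page address lost)\n", ldl_be(tce_list));
    return 0;
}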



> +        ret = put_tce_emu(tcet, ioba, tce);
> +        if (ret) {
> +            break;
> +        }
> +    }
> +
> +    /* Trace last successful or the first problematic entry */
> +    i = i ? (i - 1) : 0;
> +    trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
> +                               ldq_phys(cs->as,
> +                               tce_list + i * sizeof(target_ulong)),
> +                               ret);
> +
> +    return ret;
> +}
> +
> +static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
> +                              target_ulong opcode, target_ulong *args)
> +{
> +    int i;
> +    target_ulong liobn = args[0];
> +    target_ulong ioba = args[1];
> +    target_ulong tce_value = args[2];
> +    target_ulong npages = args[3];
> +    target_ulong ret = H_PARAMETER;
> +    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
> +
> +    if (!tcet) {
> +        return H_PARAMETER;
> +    }
> +
> +    if (npages > tcet->nb_table) {
> +        return H_PARAMETER;
> +    }
> +
> +    ioba &= ~SPAPR_TCE_PAGE_MASK;
> +
> +    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
> +        ret = put_tce_emu(tcet, ioba, tce_value);
> +        if (ret) {
> +            break;
> +        }
> +    }
> +    trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
> +
> +    return ret;
> +}
> +
>  static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
>                                target_ulong opcode, target_ulong *args)
>  {
> @@ -332,6 +408,8 @@ static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
>      /* hcall-tce */
>      spapr_register_hypercall(H_PUT_TCE, h_put_tce);
>      spapr_register_hypercall(H_GET_TCE, h_get_tce);
> +    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
> +    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
>  }
>  
>  static TypeInfo spapr_tce_table_info = {
> diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
> index ca31027..bcf2db8 100644
> --- a/target-ppc/kvm.c
> +++ b/target-ppc/kvm.c
> @@ -62,6 +62,7 @@ static int cap_booke_sregs;
>  static int cap_ppc_smt;
>  static int cap_ppc_rma;
>  static int cap_spapr_tce;
> +static int cap_spapr_multitce;
>  static int cap_hior;
>  static int cap_one_reg;
>  static int cap_epr;
> @@ -98,6 +99,7 @@ int kvm_arch_init(KVMState *s)
>      cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
>      cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
>      cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
> +    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
>      cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
>      cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
>      cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
> @@ -1608,6 +1610,11 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
>  }
>  #endif
>  
> +bool kvmppc_spapr_use_multitce(void)
> +{
> +    return cap_spapr_multitce;
> +}
> +
>  void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
>  {
>      struct kvm_create_spapr_tce args = {
> diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
> index ff077ec..b90d31b 100644
> --- a/target-ppc/kvm_ppc.h
> +++ b/target-ppc/kvm_ppc.h
> @@ -31,6 +31,7 @@ int kvmppc_set_tcr(PowerPCCPU *cpu);
>  int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
>  #ifndef CONFIG_USER_ONLY
>  off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem);
> +bool kvmppc_spapr_use_multitce(void);
>  void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd);
>  int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
>  int kvmppc_reset_htab(int shift_hint);
> @@ -130,6 +131,11 @@ static inline off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
>      return 0;
>  }
>  
> +static inline bool kvmppc_spapr_use_multitce(void)
> +{
> +    return false;
> +}
> +
>  static inline void *kvmppc_create_spapr_tce(uint32_t liobn,
>                                              uint32_t window_size, int *fd)
>  {
> diff --git a/trace-events b/trace-events
> index 5997846..89719c7 100644
> --- a/trace-events
> +++ b/trace-events
> @@ -1186,6 +1186,8 @@ xics_ics_eoi(int nr) "ics_eoi: irq %#x"
>  # hw/ppc/spapr_iommu.c
>  spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64
>  spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64
> +spapr_iommu_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t iobaN, uint64_t tceN, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcelist=0x%"PRIx64" iobaN=0x%"PRIx64" tceN=0x%"PRIx64" ret=%"PRId64
> +spapr_iommu_stuff(uint64_t liobn, uint64_t ioba, uint64_t tce_value, uint64_t npages, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcevalue=0x%"PRIx64" npages=%"PRId64" ret=%"PRId64
>  spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, unsigned pgsize) "liobn=%"PRIx64" 0x%"PRIx64" -> 0x%"PRIx64" perm=%u mask=%x"
>  spapr_iommu_new_table(uint64_t liobn, void *tcet, void *table, int fd) "liobn=%"PRIx64" tcet=%p table=%p fd=%d"
>  
>
Alexander Graf May 21, 2014, 9:54 p.m. UTC | #2
On 21.05.14 18:03, Alexey Kardashevskiy wrote:
> On 05/22/2014 01:23 AM, Alexey Kardashevskiy wrote:
>> Currently only a single TCE entry per request is supported (H_PUT_TCE).
>> However, the PAPR+ specification allows multiple-entry requests such as
>> H_PUT_TCE_INDIRECT and H_STUFF_TCE. By requiring fewer transitions to the
>> host kernel via ioctls, support for these calls can accelerate IOMMU operations.
>>
>> This implements H_STUFF_TCE and H_PUT_TCE_INDIRECT.
>>
>> This advertises the "multi-tce" capability to the guest if the host kernel
>> supports it (KVM_CAP_SPAPR_MULTITCE) or the guest is running in TCG mode.
>>
>> Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
>> ---
>> Changes:
>> v2:
>> * multi-tce enabled explicitly for TCG (it was implicit before)
>> * kvmppc_spapr_use_multitce() does not handle TCG anymore
>>
>> v1:
>> * removed checks for liobn as the check is already performed in
>> spapr_tce_find_by_liobn
>> * added hcall-multi-tce if the host kernel supports the capability
>> ---
>>   hw/ppc/spapr.c       |  3 ++
>>   hw/ppc/spapr_iommu.c | 78 ++++++++++++++++++++++++++++++++++++++++++++++++++++
>>   target-ppc/kvm.c     |  7 +++++
>>   target-ppc/kvm_ppc.h |  6 ++++
>>   trace-events         |  2 ++
>>   5 files changed, 96 insertions(+)
>>
>> diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
>> index 3b28211..697fba6 100644
>> --- a/hw/ppc/spapr.c
>> +++ b/hw/ppc/spapr.c
>> @@ -498,6 +498,9 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
>>       /* RTAS */
>>       _FDT((fdt_begin_node(fdt, "rtas")));
>>   
>> +    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
>> +        add_str(hypertas, "hcall-multi-tce");
>> +    }
>>       _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
>>                          hypertas->len)));
>>       g_string_free(hypertas, TRUE);
>> diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
>> index 72493d8..ab5037c 100644
>> --- a/hw/ppc/spapr_iommu.c
>> +++ b/hw/ppc/spapr_iommu.c
>> @@ -224,6 +224,82 @@ static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
>>       return H_SUCCESS;
>>   }
>>   
>> +static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
>> +                                       sPAPREnvironment *spapr,
>> +                                       target_ulong opcode, target_ulong *args)
>> +{
>> +    int i;
>> +    target_ulong liobn = args[0];
>> +    target_ulong ioba = args[1];
>> +    target_ulong ioba1 = ioba;
>> +    target_ulong tce_list = args[2];
>> +    target_ulong npages = args[3];
>> +    target_ulong ret = H_PARAMETER;
>> +    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
>> +    CPUState *cs = CPU(cpu);
>> +
>> +    if (!tcet) {
>> +        return H_PARAMETER;
>> +    }
>> +
>> +    if (npages > 512) {
>> +        return H_PARAMETER;
>> +    }
>> +
>> +    ioba &= ~SPAPR_TCE_PAGE_MASK;
>> +    tce_list &= ~SPAPR_TCE_PAGE_MASK;
>> +
>> +    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
>> +        target_ulong tce = ldq_phys(cs->as, tce_list +
>> +                                    i * sizeof(target_ulong));
> Sorry, it is too late here, forgot to comment :)
>
> I cannot use rtas_ld straight away as it is 32-bit and here I need 64-bit.
> Anyway, this is a hypercall and it is called from guest virtual mode, so I
> do not think the rule about masking the top bits for RTAS applies here.
>
> SPAPR says about it:
>
> uint64 TCE, /* The logical address of a page (4 K long on a 4 K boundary)
> of TCE contents to be stored in the TCE table (contains the logical address
> of the storage page to be mapped) */
>
> I believe "logical address" in this context is a "guest physical" address.
> Does it help? Does not help me :)

Yeah, I think it's safe to assume that "logical address" basically is 
our guest physical. So using ldq_phys is correct.


Alex
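
For completeness, here is a sketch (not from this patch) of how a guest could
detect the "hcall-multi-tce" string that the hw/ppc/spapr.c hunk appends to
ibm,hypertas-functions, assuming direct libfdt access to the flattened device
tree. Real guests go through their own firmware/device-tree layer, but the
property contents are the same: a list of NUL-separated feature strings under
the /rtas node.

#include <libfdt.h>
#include <string.h>
#include <stdbool.h>

static bool has_multitce(const void *fdt)
{
    int node = fdt_path_offset(fdt, "/rtas");
    const char *prop, *end;
    int len;

    if (node < 0) {
        return false;
    }
    prop = fdt_getprop(fdt, node, "ibm,hypertas-functions", &len);
    if (!prop) {
        return false;
    }
    /* walk the NUL-separated string list */
    for (end = prop + len; prop < end; prop += strlen(prop) + 1) {
        if (!strcmp(prop, "hcall-multi-tce")) {
            return true;
        }
    }
    return false;
}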

Patch

diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 3b28211..697fba6 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -498,6 +498,9 @@  static void *spapr_create_fdt_skel(hwaddr initrd_base,
     /* RTAS */
     _FDT((fdt_begin_node(fdt, "rtas")));
 
+    if (!kvm_enabled() || kvmppc_spapr_use_multitce()) {
+        add_str(hypertas, "hcall-multi-tce");
+    }
     _FDT((fdt_property(fdt, "ibm,hypertas-functions", hypertas->str,
                        hypertas->len)));
     g_string_free(hypertas, TRUE);
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index 72493d8..ab5037c 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -224,6 +224,82 @@  static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
     return H_SUCCESS;
 }
 
+static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
+                                       sPAPREnvironment *spapr,
+                                       target_ulong opcode, target_ulong *args)
+{
+    int i;
+    target_ulong liobn = args[0];
+    target_ulong ioba = args[1];
+    target_ulong ioba1 = ioba;
+    target_ulong tce_list = args[2];
+    target_ulong npages = args[3];
+    target_ulong ret = H_PARAMETER;
+    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+    CPUState *cs = CPU(cpu);
+
+    if (!tcet) {
+        return H_PARAMETER;
+    }
+
+    if (npages > 512) {
+        return H_PARAMETER;
+    }
+
+    ioba &= ~SPAPR_TCE_PAGE_MASK;
+    tce_list &= ~SPAPR_TCE_PAGE_MASK;
+
+    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
+        target_ulong tce = ldq_phys(cs->as, tce_list +
+                                    i * sizeof(target_ulong));
+        ret = put_tce_emu(tcet, ioba, tce);
+        if (ret) {
+            break;
+        }
+    }
+
+    /* Trace last successful or the first problematic entry */
+    i = i ? (i - 1) : 0;
+    trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
+                               ldq_phys(cs->as,
+                               tce_list + i * sizeof(target_ulong)),
+                               ret);
+
+    return ret;
+}
+
+static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+                              target_ulong opcode, target_ulong *args)
+{
+    int i;
+    target_ulong liobn = args[0];
+    target_ulong ioba = args[1];
+    target_ulong tce_value = args[2];
+    target_ulong npages = args[3];
+    target_ulong ret = H_PARAMETER;
+    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+
+    if (!tcet) {
+        return H_PARAMETER;
+    }
+
+    if (npages > tcet->nb_table) {
+        return H_PARAMETER;
+    }
+
+    ioba &= ~SPAPR_TCE_PAGE_MASK;
+
+    for (i = 0; i < npages; ++i, ioba += SPAPR_TCE_PAGE_SIZE) {
+        ret = put_tce_emu(tcet, ioba, tce_value);
+        if (ret) {
+            break;
+        }
+    }
+    trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
+
+    return ret;
+}
+
 static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                               target_ulong opcode, target_ulong *args)
 {
@@ -332,6 +408,8 @@  static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
     /* hcall-tce */
     spapr_register_hypercall(H_PUT_TCE, h_put_tce);
     spapr_register_hypercall(H_GET_TCE, h_get_tce);
+    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
+    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
 }
 
 static TypeInfo spapr_tce_table_info = {
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index ca31027..bcf2db8 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -62,6 +62,7 @@  static int cap_booke_sregs;
 static int cap_ppc_smt;
 static int cap_ppc_rma;
 static int cap_spapr_tce;
+static int cap_spapr_multitce;
 static int cap_hior;
 static int cap_one_reg;
 static int cap_epr;
@@ -98,6 +99,7 @@  int kvm_arch_init(KVMState *s)
     cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
     cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
     cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+    cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
     cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
     cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
     cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
@@ -1608,6 +1610,11 @@  uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
 }
 #endif
 
+bool kvmppc_spapr_use_multitce(void)
+{
+    return cap_spapr_multitce;
+}
+
 void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
 {
     struct kvm_create_spapr_tce args = {
diff --git a/target-ppc/kvm_ppc.h b/target-ppc/kvm_ppc.h
index ff077ec..b90d31b 100644
--- a/target-ppc/kvm_ppc.h
+++ b/target-ppc/kvm_ppc.h
@@ -31,6 +31,7 @@  int kvmppc_set_tcr(PowerPCCPU *cpu);
 int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
 #ifndef CONFIG_USER_ONLY
 off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem);
+bool kvmppc_spapr_use_multitce(void);
 void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd);
 int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
 int kvmppc_reset_htab(int shift_hint);
@@ -130,6 +131,11 @@  static inline off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
     return 0;
 }
 
+static inline bool kvmppc_spapr_use_multitce(void)
+{
+    return false;
+}
+
 static inline void *kvmppc_create_spapr_tce(uint32_t liobn,
                                             uint32_t window_size, int *fd)
 {
diff --git a/trace-events b/trace-events
index 5997846..89719c7 100644
--- a/trace-events
+++ b/trace-events
@@ -1186,6 +1186,8 @@  xics_ics_eoi(int nr) "ics_eoi: irq %#x"
 # hw/ppc/spapr_iommu.c
 spapr_iommu_put(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tce=0x%"PRIx64" ret=%"PRId64
 spapr_iommu_get(uint64_t liobn, uint64_t ioba, uint64_t ret, uint64_t tce) "liobn=%"PRIx64" ioba=0x%"PRIx64" ret=%"PRId64" tce=0x%"PRIx64
+spapr_iommu_indirect(uint64_t liobn, uint64_t ioba, uint64_t tce, uint64_t iobaN, uint64_t tceN, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcelist=0x%"PRIx64" iobaN=0x%"PRIx64" tceN=0x%"PRIx64" ret=%"PRId64
+spapr_iommu_stuff(uint64_t liobn, uint64_t ioba, uint64_t tce_value, uint64_t npages, uint64_t ret) "liobn=%"PRIx64" ioba=0x%"PRIx64" tcevalue=0x%"PRIx64" npages=%"PRId64" ret=%"PRId64
 spapr_iommu_xlate(uint64_t liobn, uint64_t ioba, uint64_t tce, unsigned perm, unsigned pgsize) "liobn=%"PRIx64" 0x%"PRIx64" -> 0x%"PRIx64" perm=%u mask=%x"
 spapr_iommu_new_table(uint64_t liobn, void *tcet, void *table, int fd) "liobn=%"PRIx64" tcet=%p table=%p fd=%d"