
target/hppa: Optimize ldcw/ldcd instruction translation

Message ID ZQHLcL6E5uNvjkaN@p100
State New
Series target/hppa: Optimize ldcw/ldcd instruction translation

Commit Message

Helge Deller Sept. 13, 2023, 2:47 p.m. UTC
ldcw (load word and clear) is the only atomic memory instruction of
the hppa architecture and is thus heavily used by the Linux and
HP/UX kernels to implement locks.

Since ldcw always writes a zero, optimize it to skip the store when the
memory already contains zero (i.e. the lock is already taken). This
reduces memory contention.
Furthermore, use a native qatomic_xchg() call to write the zero, which
gives a small performance improvement and avoids additional atomic
locking in the target.

Signed-off-by: Helge Deller <deller@gmx.de>
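
A minimal C11 sketch of the intended fast path (illustration only, not the
QEMU code; the actual helper below uses probe_access() and qatomic_xchg(),
and ldcw_fastpath here is a hypothetical name):

#include <stdatomic.h>
#include <stdint.h>

/* Peek at the lock word and skip the store if it is already zero,
 * otherwise atomically exchange it with zero.  Note that the early
 * return performs no memory barrier, which is the point raised in the
 * review below. */
static uint32_t ldcw_fastpath(_Atomic uint32_t *lock_word)
{
    uint32_t old = atomic_load_explicit(lock_word, memory_order_relaxed);

    if (old == 0) {
        return 0;                          /* lock already held: skip the store */
    }
    return atomic_exchange(lock_word, 0);  /* seq_cst read-modify-write */
}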

Comments

Richard Henderson Sept. 13, 2023, 4:55 p.m. UTC | #1
On 9/13/23 07:47, Helge Deller wrote:
> +        haddr = (uint32_t *)((uintptr_t)vaddr);
> +        old = *haddr;

This is horribly incorrect, both for user-only and system mode.

> +        /* if already zero, do not write 0 again to reduce memory pressure */
> +        if (old == 0) {
> +            return 0;
> +        }
> +        old = qatomic_xchg(haddr, (uint32_t) 0);

You're also dropping the required host memory barrier.

Frankly, the current tcg_gen_atomic_xchg_reg is as optimized as you'll be able to do.  I 
really doubt the "avoid write 0" is measurable at all.


r~
Helge Deller Sept. 13, 2023, 5:19 p.m. UTC | #2
On 9/13/23 18:55, Richard Henderson wrote:
> On 9/13/23 07:47, Helge Deller wrote:
>> +        haddr = (uint32_t *)((uintptr_t)vaddr);
>> +        old = *haddr;
>
> This is horribly incorrect, both for user-only and system mode.

Richard, thank you for the review!
But would you mind explaining why this is incorrect?
I thought the "vaddr = probe_access()" calculates the host address, so
shouldn't it be the right address?

>> +        /* if already zero, do not write 0 again to reduce memory pressure */
>> +        if (old == 0) {
>> +            return 0;
>> +        }
>> +        old = qatomic_xchg(haddr, (uint32_t) 0);
>
> You're also dropping the required host memory barrier.

?

> Frankly, the current tcg_gen_atomic_xchg_reg is as optimized as you'll be able to do.  I really doubt the "avoid write 0" is measurable at all.

Helge
Richard Henderson Sept. 13, 2023, 8:30 p.m. UTC | #3
On 9/13/23 10:19, Helge Deller wrote:
> On 9/13/23 18:55, Richard Henderson wrote:
>> On 9/13/23 07:47, Helge Deller wrote:
>>> +        haddr = (uint32_t *)((uintptr_t)vaddr);
>>> +        old = *haddr;
>>
>> This is horribly incorrect, both for user-only and system mode.
> 
> Richard, thank you for the review!
> But would you mind explaining why this is incorrect?
> I thought the "vaddr = probe_access()" calculates the host address, so
> shouldn't it be the right address?

The vaddr name is confusing (since it implies virtual address, which the return from 
probe_access is not), as are the casts, which are unnecessary.



> 
>>> +        /* if already zero, do not write 0 again to reduce memory pressure */
>>> +        if (old == 0) {
>>> +            return 0;
>>> +        }
>>> +        old = qatomic_xchg(haddr, (uint32_t) 0);
>>
>> You're also dropping the required host memory barrier.
> 
> ?

The path through the read+test+return, without the qatomic_xchg, has no host memory 
barrier to provide sequential consistency of the entire operation.


r~
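
As a sketch of what keeping that ordering could look like inside the helper
(hypothetical, not part of the patch or this thread; it assumes QEMU's
qatomic_read() and smp_mb() primitives):

    old = qatomic_read(haddr);
    if (old == 0) {
        /* Preserve the full barrier the xchg path would have provided,
         * even though the store itself is skipped. */
        smp_mb();
        return 0;
    }
    old = qatomic_xchg(haddr, 0);
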
Helge Deller Sept. 14, 2023, 9:19 p.m. UTC | #4
Hi Richard,

On 9/13/23 22:30, Richard Henderson wrote:
> On 9/13/23 10:19, Helge Deller wrote:
>> On 9/13/23 18:55, Richard Henderson wrote:
>>> On 9/13/23 07:47, Helge Deller wrote:
>>>> +        haddr = (uint32_t *)((uintptr_t)vaddr);
>>>> +        old = *haddr;
>>>
>>> This is horribly incorrect, both for user-only and system mode.
>>
>> Richard, thank you for the review!
>> But would you mind explaining why this is incorrect?
>> I thought the "vaddr = probe_access()" calculates the host address, so
>> shouldn't it be the right address?
>
> The vaddr name is confusing (since it implies virtual address, which
> the return from probe_access is not) as are the casts, which are
> unnecessary.

Still, I think my code isn't as wrong as you said.

But I tend to agree with you on this:
> Frankly, the current tcg_gen_atomic_xchg_reg is as optimized as
> you'll be able to do.
tcg_gen_atomic_xchg_reg() seems to generate this on x86-64:

0000000000525160 <helper_atomic_xchgl_be>:
   525160:       53                      push   %rbx
   525161:       4c 8b 44 24 08          mov    0x8(%rsp),%r8
   525166:       89 d3                   mov    %edx,%ebx
   525168:       89 ca                   mov    %ecx,%edx
   52516a:       b9 04 00 00 00          mov    $0x4,%ecx
   52516f:       e8 1c a6 ff ff          call   51f790 <atomic_mmu_lookup>
   525174:       48 89 c2                mov    %rax,%rdx
   525177:       89 d8                   mov    %ebx,%eax
   525179:       0f c8                   bswap  %eax
   52517b:       87 02                   xchg   %eax,(%rdx)
   52517d:       5b                      pop    %rbx
   52517e:       0f c8                   bswap  %eax
   525180:       c3                      ret

and atomic_mmu_lookup() is basically the same as probe_access(),
so there is probably no gain in my patch.

Please ignore my patch.

Thank you!
Helge

Patch

diff --git a/target/hppa/helper.h b/target/hppa/helper.h
index c7e35ce8c7..26d022c714 100644
--- a/target/hppa/helper.h
+++ b/target/hppa/helper.h
@@ -16,7 +16,7 @@  DEF_HELPER_FLAGS_3(stby_b_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
 DEF_HELPER_FLAGS_3(stby_e, TCG_CALL_NO_WG, void, env, tl, tr)
 DEF_HELPER_FLAGS_3(stby_e_parallel, TCG_CALL_NO_WG, void, env, tl, tr)
 
-DEF_HELPER_FLAGS_1(ldc_check, TCG_CALL_NO_RWG, void, tl)
+DEF_HELPER_FLAGS_3(ldc, TCG_CALL_NO_WG, tr, env, tl, i32)
 
 DEF_HELPER_FLAGS_4(probe, TCG_CALL_NO_WG, tr, env, tl, i32, i32)
 
diff --git a/target/hppa/op_helper.c b/target/hppa/op_helper.c
index f25a5a72aa..d61f067e71 100644
--- a/target/hppa/op_helper.c
+++ b/target/hppa/op_helper.c
@@ -155,13 +155,63 @@  void HELPER(stby_e_parallel)(CPUHPPAState *env, target_ulong addr,
     do_stby_e(env, addr, val, true, GETPC());
 }
 
-void HELPER(ldc_check)(target_ulong addr)
+target_ureg HELPER(ldc)(CPUHPPAState *env, target_ulong addr, uint32_t size)
 {
+    uintptr_t ra = GETPC();
+    int mmu_idx = cpu_mmu_index(env, 0);
+    void *vaddr;
+
+    /*
+     * For hppa1.1, LDCW is undefined unless aligned mod 16.
+     * However actual hardware succeeds with aligned mod 4.
+     * Detect this case and log a GUEST_ERROR.
+     *
+     * TODO: HPPA64 relaxes the over-alignment requirement
+     * with the ,co completer.
+     */
     if (unlikely(addr & 0xf)) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "Undefined ldc to unaligned address mod 16: "
                       TARGET_FMT_lx "\n", addr);
     }
+
+    vaddr = probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, ra);
+    if (vaddr == NULL) {
+        cpu_loop_exit_restore(env_cpu(env), ra);
+    }
+
+    if (size == 4) {
+        /* 32-bit ldcw */
+        uint32_t old, *haddr;
+
+        haddr = (uint32_t *)((uintptr_t)vaddr);
+        old = *haddr;
+
+        /* if already zero, do not write 0 again to reduce memory pressure */
+        if (old == 0) {
+            return 0;
+        }
+        old = qatomic_xchg(haddr, (uint32_t) 0);
+        return be32_to_cpu(old);
+    } else {
+        /* 64-bit ldcd */
+#ifdef TARGET_HPPA64
+        uint64_t old, *haddr;
+
+        haddr = (uint64_t *)((uintptr_t)vaddr);
+        old = *haddr;
+
+        /* if already zero, do not write 0 again to reduce memory pressure */
+        if (old == 0) {
+            return 0;
+        }
+        old = qatomic_xchg(haddr, (uint64_t) 0);
+        return be64_to_cpu(old);
+#else
+        hppa_dynamic_excp(env, EXCP_ILL, ra);
+        return 0;
+#endif
+    }
 }
 
 target_ureg HELPER(probe)(CPUHPPAState *env, target_ulong addr,
diff --git a/target/hppa/translate.c b/target/hppa/translate.c
index c04dc15228..c96691ab62 100644
--- a/target/hppa/translate.c
+++ b/target/hppa/translate.c
@@ -2857,9 +2857,9 @@  static bool trans_st(DisasContext *ctx, arg_ldst *a)
 
 static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
 {
-    MemOp mop = MO_TE | MO_ALIGN | a->size;
-    TCGv_reg zero, dest, ofs;
+    TCGv_reg dest, ofs;
     TCGv_tl addr;
+    TCGv_i32 sz;
 
     nullify_over(ctx);
 
@@ -2874,18 +2874,8 @@  static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
     form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
              a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
 
-    /*
-     * For hppa1.1, LDCW is undefined unless aligned mod 16.
-     * However actual hardware succeeds with aligned mod 4.
-     * Detect this case and log a GUEST_ERROR.
-     *
-     * TODO: HPPA64 relaxes the over-alignment requirement
-     * with the ,co completer.
-     */
-    gen_helper_ldc_check(addr);
-
-    zero = tcg_constant_reg(0);
-    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
+    sz = tcg_constant_i32((a->size == MO_32) ? 4 : 8);
+    gen_helper_ldc(dest, cpu_env, addr, sz);
 
     if (a->m) {
         save_gpr(ctx, a->b, ofs);