diff mbox series

[v3,5/5] powerpc/bpf: use patch_instructions()

Message ID 20230825151810.164418-6-hbathini@linux.ibm.com (mailing list archive)
State Superseded
Headers show
Series powerpc/bpf: use BPF prog pack allocator | expand

Checks

Context Check Description
snowpatch_ozlabs/github-powerpc_ppctests success Successfully ran 8 jobs.
snowpatch_ozlabs/github-powerpc_selftests success Successfully ran 8 jobs.
snowpatch_ozlabs/github-powerpc_sparse success Successfully ran 4 jobs.
snowpatch_ozlabs/github-powerpc_clang success Successfully ran 6 jobs.
snowpatch_ozlabs/github-powerpc_kernel_qemu success Successfully ran 23 jobs.

Commit Message

Hari Bathini Aug. 25, 2023, 3:18 p.m. UTC
Use the newly introduced patch_instructions() that handles patching
multiple instructions with one call. This improves speed of execution
for JIT'ing bpf programs.

Without this patch (on a POWER9 lpar):

  # time modprobe test_bpf
  real    2m59.681s
  user    0m0.000s
  sys     1m44.160s
  #

With this patch (on a POWER9 lpar):

  # time modprobe test_bpf
  real    0m5.013s
  user    0m0.000s
  sys     0m4.216s
  #

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp.c | 30 ++++--------------------------
 1 file changed, 4 insertions(+), 26 deletions(-)

Comments

Christophe Leroy Aug. 25, 2023, 3:46 p.m. UTC | #1
Le 25/08/2023 à 17:18, Hari Bathini a écrit :
> Use the newly introduced patch_instructions() that handles patching
> multiple instructions with one call. This improves speed of exectution
> for JIT'ing bpf programs.
> 
> Without this patch (on a POWER9 lpar):
> 
>    # time modprobe test_bpf
>    real    2m59.681s
>    user    0m0.000s
>    sys     1m44.160s
>    #
> 
> With this patch (on a POWER9 lpar):
> 
>    # time modprobe test_bpf
>    real    0m5.013s
>    user    0m0.000s
>    sys     0m4.216s
>    #

Right, significant improvement. Forget my comment on patch 1, I should 
have read the series up to the end. Just wondering why you don't just 
put patch 4 up front?

Christophe

> 
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
> ---
>   arch/powerpc/net/bpf_jit_comp.c | 30 ++++--------------------------
>   1 file changed, 4 insertions(+), 26 deletions(-)
> 
> diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
> index c60d7570e05d..1e5000d18321 100644
> --- a/arch/powerpc/net/bpf_jit_comp.c
> +++ b/arch/powerpc/net/bpf_jit_comp.c
> @@ -26,28 +26,6 @@ static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
>   	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
>   }
>   
> -/*
> - * Patch 'len' bytes of instructions from opcode to addr, one instruction
> - * at a time. Returns addr on success. ERR_PTR(-EINVAL), otherwise.
> - */
> -static void *bpf_patch_instructions(void *addr, void *opcode, size_t len, bool fill_insn)
> -{
> -	while (len > 0) {
> -		ppc_inst_t insn = ppc_inst_read(opcode);
> -		int ilen = ppc_inst_len(insn);
> -
> -		if (patch_instruction(addr, insn))
> -			return ERR_PTR(-EINVAL);
> -
> -		len -= ilen;
> -		addr = addr + ilen;
> -		if (!fill_insn)
> -			opcode = opcode + ilen;
> -	}
> -
> -	return addr;
> -}
> -
>   int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
>   {
>   	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
> @@ -330,16 +308,16 @@ int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass
>   
>   void *bpf_arch_text_copy(void *dst, void *src, size_t len)
>   {
> -	void *ret;
> +	int err;
>   
>   	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
>   		return ERR_PTR(-EINVAL);
>   
>   	mutex_lock(&text_mutex);
> -	ret = bpf_patch_instructions(dst, src, len, false);
> +	err = patch_instructions(dst, src, len, false);
>   	mutex_unlock(&text_mutex);
>   
> -	return ret;
> +	return err ? ERR_PTR(err) : dst;
>   }
>   
>   int bpf_arch_text_invalidate(void *dst, size_t len)
> @@ -351,7 +329,7 @@ int bpf_arch_text_invalidate(void *dst, size_t len)
>   		return -EINVAL;
>   
>   	mutex_lock(&text_mutex);
> -	ret = IS_ERR(bpf_patch_instructions(dst, &insn, len, true));
> +	ret = patch_instructions(dst, &insn, len, true);
>   	mutex_unlock(&text_mutex);
>   
>   	return ret;
Hari Bathini Aug. 25, 2023, 5:43 p.m. UTC | #2
On 25/08/23 9:16 pm, Christophe Leroy wrote:
> 
> 
> Le 25/08/2023 à 17:18, Hari Bathini a écrit :
>> Use the newly introduced patch_instructions() that handles patching
>> multiple instructions with one call. This improves speed of exectution
>> for JIT'ing bpf programs.
>>
>> Without this patch (on a POWER9 lpar):
>>
>>     # time modprobe test_bpf
>>     real    2m59.681s
>>     user    0m0.000s
>>     sys     1m44.160s
>>     #
>>
>> With this patch (on a POWER9 lpar):
>>
>>     # time modprobe test_bpf
>>     real    0m5.013s
>>     user    0m0.000s
>>     sys     0m4.216s
>>     #
> 
> Right, significant improvement. Forget by comment to patch 1, I should
> have read the series up to the end. Just wondering why you don't just
> put patch 4 up front ?

I wanted to remove the dependency for bpf_prog_pack enablement
patches with this improvement, just in case..

- Hari
diff mbox series

Patch

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index c60d7570e05d..1e5000d18321 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -26,28 +26,6 @@  static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
 	memset32(area, BREAKPOINT_INSTRUCTION, size / 4);
 }
 
-/*
- * Patch 'len' bytes of instructions from opcode to addr, one instruction
- * at a time. Returns addr on success. ERR_PTR(-EINVAL), otherwise.
- */
-static void *bpf_patch_instructions(void *addr, void *opcode, size_t len, bool fill_insn)
-{
-	while (len > 0) {
-		ppc_inst_t insn = ppc_inst_read(opcode);
-		int ilen = ppc_inst_len(insn);
-
-		if (patch_instruction(addr, insn))
-			return ERR_PTR(-EINVAL);
-
-		len -= ilen;
-		addr = addr + ilen;
-		if (!fill_insn)
-			opcode = opcode + ilen;
-	}
-
-	return addr;
-}
-
 int bpf_jit_emit_exit_insn(u32 *image, struct codegen_context *ctx, int tmp_reg, long exit_addr)
 {
 	if (!exit_addr || is_offset_in_branch_range(exit_addr - (ctx->idx * 4))) {
@@ -330,16 +308,16 @@  int bpf_add_extable_entry(struct bpf_prog *fp, u32 *image, u32 *fimage, int pass
 
 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
 {
-	void *ret;
+	int err;
 
 	if (WARN_ON_ONCE(core_kernel_text((unsigned long)dst)))
 		return ERR_PTR(-EINVAL);
 
 	mutex_lock(&text_mutex);
-	ret = bpf_patch_instructions(dst, src, len, false);
+	err = patch_instructions(dst, src, len, false);
 	mutex_unlock(&text_mutex);
 
-	return ret;
+	return err ? ERR_PTR(err) : dst;
 }
 
 int bpf_arch_text_invalidate(void *dst, size_t len)
@@ -351,7 +329,7 @@  int bpf_arch_text_invalidate(void *dst, size_t len)
 		return -EINVAL;
 
 	mutex_lock(&text_mutex);
-	ret = IS_ERR(bpf_patch_instructions(dst, &insn, len, true));
+	ret = patch_instructions(dst, &insn, len, true);
 	mutex_unlock(&text_mutex);
 
 	return ret;