
[4/5] bpf ppc32: add support for BPF_ATOMIC bitwise operations

Message ID: 20220512074546.231616-5-hbathini@linux.ibm.com (mailing list archive)
State: Changes Requested
Series: Atomics support for eBPF on powerpc

Commit Message

Hari Bathini May 12, 2022, 7:45 a.m. UTC
Adding instructions for ppc32 for

atomic_and
atomic_or
atomic_xor
atomic_fetch_add
atomic_fetch_and
atomic_fetch_or
atomic_fetch_xor

Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
---
 arch/powerpc/net/bpf_jit_comp32.c | 45 +++++++++++++++++++++----------
 1 file changed, 31 insertions(+), 14 deletions(-)
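For context, the operations listed above are encoded as BPF_STX | BPF_ATOMIC instructions, with the operation selected by the imm field. A minimal sketch of what such instructions look like, using the BPF_ATOMIC_OP() helper from tools/include/linux/filter.h (illustrative only, not part of this patch):

	/* *(u32 *)(r1 + 0) &= r2; with BPF_FETCH the old value lands in r2 */
	BPF_ATOMIC_OP(BPF_W, BPF_AND | BPF_FETCH, BPF_REG_1, BPF_REG_2, 0),
	/* *(u32 *)(r1 + 0) |= r2; without BPF_FETCH, r2 is left untouched */
	BPF_ATOMIC_OP(BPF_W, BPF_OR, BPF_REG_1, BPF_REG_2, 0),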

Comments

Christophe Leroy May 13, 2022, 6:58 a.m. UTC | #1
On 12/05/2022 at 09:45, Hari Bathini wrote:
> Adding instructions for ppc32 for
> 
> atomic_and
> atomic_or
> atomic_xor
> atomic_fetch_add
> atomic_fetch_and
> atomic_fetch_or
> atomic_fetch_xor
> 
> Signed-off-by: Hari Bathini <hbathini@linux.ibm.com>
> ---
>   arch/powerpc/net/bpf_jit_comp32.c | 45 +++++++++++++++++++++----------
>   1 file changed, 31 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/powerpc/net/bpf_jit_comp32.c b/arch/powerpc/net/bpf_jit_comp32.c
> index e46ed1e8c6ca..5604ae1b60ab 100644
> --- a/arch/powerpc/net/bpf_jit_comp32.c
> +++ b/arch/powerpc/net/bpf_jit_comp32.c
> @@ -798,25 +798,42 @@ int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, struct codegen_context *
>   		 * BPF_STX ATOMIC (atomic ops)
>   		 */
>   		case BPF_STX | BPF_ATOMIC | BPF_W:
> -			if (imm != BPF_ADD) {
> -				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
> -						   code, i);
> -				return -ENOTSUPP;
> -			}
> -
> -			/* *(u32 *)(dst + off) += src */
> -
>   			bpf_set_seen_register(ctx, tmp_reg);
>   			/* Get offset into TMP_REG */
>   			EMIT(PPC_RAW_LI(tmp_reg, off));
> +			tmp_idx = ctx->idx * 4;
>   			/* load value from memory into r0 */
>   			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
> -			/* add value from src_reg into this */
> -			EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
> -			/* store result back */
> -			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
> -			/* we're done if this succeeded */
> -			PPC_BCC_SHORT(COND_NE, (ctx->idx - 3) * 4);
> +			switch (imm) {
> +			case BPF_ADD:
> +			case BPF_ADD | BPF_FETCH:
> +				EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
> +				goto atomic_ops;
> +			case BPF_AND:
> +			case BPF_AND | BPF_FETCH:
> +				EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
> +				goto atomic_ops;
> +			case BPF_OR:
> +			case BPF_OR | BPF_FETCH:
> +				EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
> +				goto atomic_ops;
> +			case BPF_XOR:
> +			case BPF_XOR | BPF_FETCH:
> +				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
> +atomic_ops:

This looks like an odd construct.

The default case doesn't fall through, so the common tail below could go after the switch and all the cases could simply break instead of goto atomic_ops.
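Something like this, reusing the patch's EMIT()/PPC_RAW_*() helpers (a sketch of the suggested restructuring; the BPF_FETCH handling is left out here since it is discussed separately below):

			switch (imm) {
			case BPF_ADD:
			case BPF_ADD | BPF_FETCH:
				EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));
				break;
			case BPF_AND:
			case BPF_AND | BPF_FETCH:
				EMIT(PPC_RAW_AND(_R0, _R0, src_reg));
				break;
			case BPF_OR:
			case BPF_OR | BPF_FETCH:
				EMIT(PPC_RAW_OR(_R0, _R0, src_reg));
				break;
			case BPF_XOR:
			case BPF_XOR | BPF_FETCH:
				EMIT(PPC_RAW_XOR(_R0, _R0, src_reg));
				break;
			default:
				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
						   code, i);
				return -EOPNOTSUPP;
			}
			/* common tail: store result back, retry if the reservation was lost */
			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
			PPC_BCC_SHORT(COND_NE, tmp_idx);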

> +				/* For the BPF_FETCH variant, get old data into src_reg */
> +				if (imm & BPF_FETCH)
> +					EMIT(PPC_RAW_LWARX(src_reg, tmp_reg, dst_reg, 0));

I think this is wrong. By doing a new LWARX you kill the reservation established by the previous one, so if the data changed between the first LWARX and the STWCX, the change goes undetected.

It should be a LWZX, I believe.
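That is, a plain indexed load instead, something like (assuming the PPC_RAW_LWZX() helper from ppc-opcode.h):

				if (imm & BPF_FETCH)
					EMIT(PPC_RAW_LWZX(src_reg, tmp_reg, dst_reg));

which reads the memory without replacing the reservation held since the first LWARX.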

But there is another problem: you clobber src_reg, so what happens if STWCX fails and it loops back to tmp_idx?
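One way to address both issues at once (a sketch only, assuming the JIT can spare an extra scratch register, here called fetch_reg, and using the existing PPC_RAW_MR() helper): stash the old value out of r0 before it is clobbered, and only move it into src_reg once the STWCX has succeeded:

			tmp_idx = ctx->idx * 4;
			/* load old value and establish the reservation */
			EMIT(PPC_RAW_LWARX(_R0, tmp_reg, dst_reg, 0));
			/* save the old value before r0 is overwritten by the op */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(fetch_reg, _R0));
			EMIT(PPC_RAW_ADD(_R0, _R0, src_reg));	/* or AND/OR/XOR */
			/* store result back; loop back to the LWARX on failure */
			EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
			PPC_BCC_SHORT(COND_NE, tmp_idx);
			/* expose the old value only after the loop has succeeded */
			if (imm & BPF_FETCH)
				EMIT(PPC_RAW_MR(src_reg, fetch_reg));

This keeps a single LWARX per iteration, so the reservation is never replaced, and src_reg stays intact across retries.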

> +				/* store result back */
> +				EMIT(PPC_RAW_STWCX(_R0, tmp_reg, dst_reg));
> +				/* we're done if this succeeded */
> +				PPC_BCC_SHORT(COND_NE, tmp_idx);
> +				break;
> +			default:
> +				pr_err_ratelimited("eBPF filter atomic op code %02x (@%d) unsupported\n",
> +						   code, i);
> +				return -EOPNOTSUPP;
> +			}
>   			break;
>   
>   		case BPF_STX | BPF_ATOMIC | BPF_DW: /* *(u64 *)(dst + off) += src */