
[bpf-next,v2,13/16] ppc: bpf: implement jitting of JMP32

Message ID: 1548076553-31268-14-git-send-email-jiong.wang@netronome.com
State: Changes Requested
Delegated to: BPF Maintainers
Series: bpf: propose new jmp32 instructions

Commit Message

Jiong Wang Jan. 21, 2019, 1:15 p.m. UTC
This patch implements code-gen for new JMP32 instructions on ppc.

For JMP32 | JSET, an instruction encoding for PPC_RLWINM_DOT is added so that
the result of ANDing the low 32 bits of the operands can be checked.
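
The emitted sequence is "and tmp, dst, src" followed by
"rlwinm. tmp, tmp, 0, 0, 31", so the branch condition depends only on the low
32 bits of the AND result. A minimal C model of what that sequence computes
(illustrative only, not part of the patch; the helper name is made up):

	#include <stdbool.h>
	#include <stdint.h>

	bool jmp32_jset_taken(uint64_t dst_reg, uint64_t src_reg)
	{
		uint64_t tmp = dst_reg & src_reg;	/* PPC_AND(tmp, dst, src) */

		/*
		 * rlwinm. tmp, tmp, 0, 0, 31: keep only the low 32 bits and
		 * set CR0 from the zero-extended result.
		 */
		tmp &= 0xffffffffULL;

		return tmp != 0;	/* branch taken when CR0 reports "not equal" */
	}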

Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
Cc: Sandipan Das <sandipan@linux.ibm.com>
Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
---
 arch/powerpc/include/asm/ppc-opcode.h |  1 +
 arch/powerpc/net/bpf_jit.h            |  4 ++
 arch/powerpc/net/bpf_jit_comp64.c     | 98 ++++++++++++++++++++++++++++++-----
 3 files changed, 89 insertions(+), 14 deletions(-)

Comments

Sandipan Das Jan. 24, 2019, 4:58 a.m. UTC | #1
Hi Jiong,

On 21/01/19 6:45 PM, Jiong Wang wrote:
> This patch implements code-gen for new JMP32 instructions on ppc.
> 
> For JMP32 | JSET, instruction encoding for PPC_RLWINM_DOT is added to check
> the result of ANDing low 32-bit of operands.
> 
> Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
> Cc: Sandipan Das <sandipan@linux.ibm.com>
> Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
> ---
>  arch/powerpc/include/asm/ppc-opcode.h |  1 +
>  arch/powerpc/net/bpf_jit.h            |  4 ++
>  arch/powerpc/net/bpf_jit_comp64.c     | 98 ++++++++++++++++++++++++++++++-----
>  3 files changed, 89 insertions(+), 14 deletions(-)
> [...]

I ran the verifier selftests on a ppc64 test system and found that the jmp32 tests
were failing because the instructions were not being decoded by the JIT compiler.
The codegen logic looks good to me and the tests passed after I fixed the decoding
logic. Here are the changes that I had to make:

diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 2e40c2b251ba..15bba765fa79 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -768,36 +768,58 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSGT | BPF_K:
                case BPF_JMP | BPF_JSGT | BPF_X:
+               case BPF_JMP32 | BPF_JGT | BPF_K:
+               case BPF_JMP32 | BPF_JGT | BPF_X:
+               case BPF_JMP32 | BPF_JSGT | BPF_K:
+               case BPF_JMP32 | BPF_JSGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_JMP | BPF_JLT | BPF_K:
                case BPF_JMP | BPF_JLT | BPF_X:
                case BPF_JMP | BPF_JSLT | BPF_K:
                case BPF_JMP | BPF_JSLT | BPF_X:
+               case BPF_JMP32 | BPF_JLT | BPF_K:
+               case BPF_JMP32 | BPF_JLT | BPF_X:
+               case BPF_JMP32 | BPF_JSLT | BPF_K:
+               case BPF_JMP32 | BPF_JSLT | BPF_X:
                        true_cond = COND_LT;
                        goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JSGE | BPF_K:
                case BPF_JMP | BPF_JSGE | BPF_X:
+               case BPF_JMP32 | BPF_JGE | BPF_K:
+               case BPF_JMP32 | BPF_JGE | BPF_X:
+               case BPF_JMP32 | BPF_JSGE | BPF_K:
+               case BPF_JMP32 | BPF_JSGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_JMP | BPF_JLE | BPF_K:
                case BPF_JMP | BPF_JLE | BPF_X:
                case BPF_JMP | BPF_JSLE | BPF_K:
                case BPF_JMP | BPF_JSLE | BPF_X:
+               case BPF_JMP32 | BPF_JLE | BPF_K:
+               case BPF_JMP32 | BPF_JLE | BPF_X:
+               case BPF_JMP32 | BPF_JSLE | BPF_K:
+               case BPF_JMP32 | BPF_JSLE | BPF_X:
                        true_cond = COND_LE;
                        goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
+               case BPF_JMP32 | BPF_JEQ | BPF_K:
+               case BPF_JMP32 | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_JMP | BPF_JNE | BPF_K:
                case BPF_JMP | BPF_JNE | BPF_X:
+               case BPF_JMP32 | BPF_JNE | BPF_K:
+               case BPF_JMP32 | BPF_JNE | BPF_X:
                        true_cond = COND_NE;
                        goto cond_branch;
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
+               case BPF_JMP32 | BPF_JSET | BPF_K:
+               case BPF_JMP32 | BPF_JSET | BPF_X:
                        true_cond = COND_NE;
                        /* Fall through */
 

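(For anyone following along: the switch keys on the raw one-byte opcode, which
packs instruction class, operation and source into a single value, so every
BPF_JMP32 combination is a distinct case value and needs its own label. A
standalone sketch of that decomposition, with the constants copied from
include/uapi/linux/bpf_common.h and include/uapi/linux/bpf.h rather than
included:)

	#include <stdio.h>

	#define BPF_CLASS(code)	((code) & 0x07)
	#define BPF_JMP		0x05
	#define BPF_JMP32	0x06	/* new instruction class added by this series */
	#define BPF_JGT		0x20
	#define BPF_X		0x08

	int main(void)
	{
		unsigned int jmp = BPF_JMP | BPF_JGT | BPF_X;		/* 0x2d */
		unsigned int jmp32 = BPF_JMP32 | BPF_JGT | BPF_X;	/* 0x2e */

		/* Different opcodes, different classes: prints same-class=0 */
		printf("0x%02x 0x%02x same-class=%d\n",
		       jmp, jmp32, BPF_CLASS(jmp) == BPF_CLASS(jmp32));
		return 0;
	}
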
--
With Regards,
Sandipan
Jiong Wang Jan. 24, 2019, 9:53 a.m. UTC | #2
Sandipan Das writes:

> Hi Jiong,
>
> On 21/01/19 6:45 PM, Jiong Wang wrote:
>> This patch implements code-gen for new JMP32 instructions on ppc.
>> 
>> For JMP32 | JSET, instruction encoding for PPC_RLWINM_DOT is added to check
>> the result of ANDing low 32-bit of operands.
>> 
>> Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
>> Cc: Sandipan Das <sandipan@linux.ibm.com>
>> Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
>> ---
>>  arch/powerpc/include/asm/ppc-opcode.h |  1 +
>>  arch/powerpc/net/bpf_jit.h            |  4 ++
>>  arch/powerpc/net/bpf_jit_comp64.c     | 98 ++++++++++++++++++++++++++++++-----
>>  3 files changed, 89 insertions(+), 14 deletions(-)
>> [...]
>
> I ran the verifier selftests on a ppc64 test system and found that the jmp32 tests
> were failing because the instructions were not being decoded by the JIT compiler.
> The codegen logic looks good to me and the tests passed after I fixed the decoding
> logic. Here are the changes that I had to make:

Ah, I missed the outer-layer decoding logic.

Thanks for the review, testing and fix. Will integrate the following in v3.

Regards,
Jiong

> [...]

Patch

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 19a8834..f9513ad 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -337,6 +337,7 @@ 
 #define PPC_INST_DIVWU			0x7c000396
 #define PPC_INST_DIVD			0x7c0003d2
 #define PPC_INST_RLWINM			0x54000000
+#define PPC_INST_RLWINM_DOT		0x54000001
 #define PPC_INST_RLWIMI			0x50000000
 #define PPC_INST_RLDICL			0x78000000
 #define PPC_INST_RLDICR			0x78000004
diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
index c2d5192..549e949 100644
--- a/arch/powerpc/net/bpf_jit.h
+++ b/arch/powerpc/net/bpf_jit.h
@@ -165,6 +165,10 @@ 
 #define PPC_RLWINM(d, a, i, mb, me)	EMIT(PPC_INST_RLWINM | ___PPC_RA(d) | \
 					___PPC_RS(a) | __PPC_SH(i) |	      \
 					__PPC_MB(mb) | __PPC_ME(me))
+#define PPC_RLWINM_DOT(d, a, i, mb, me)	EMIT(PPC_INST_RLWINM_DOT |	      \
+					___PPC_RA(d) | ___PPC_RS(a) |	      \
+					__PPC_SH(i) | __PPC_MB(mb) |	      \
+					__PPC_ME(me))
 #define PPC_RLWIMI(d, a, i, mb, me)	EMIT(PPC_INST_RLWIMI | ___PPC_RA(d) | \
 					___PPC_RS(a) | __PPC_SH(i) |	      \
 					__PPC_MB(mb) | __PPC_ME(me))
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 7ce57657..2e40c2b 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -809,18 +809,44 @@  static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			case BPF_JMP | BPF_JLE | BPF_X:
 			case BPF_JMP | BPF_JEQ | BPF_X:
 			case BPF_JMP | BPF_JNE | BPF_X:
+			case BPF_JMP32 | BPF_JGT | BPF_X:
+			case BPF_JMP32 | BPF_JLT | BPF_X:
+			case BPF_JMP32 | BPF_JGE | BPF_X:
+			case BPF_JMP32 | BPF_JLE | BPF_X:
+			case BPF_JMP32 | BPF_JEQ | BPF_X:
+			case BPF_JMP32 | BPF_JNE | BPF_X:
 				/* unsigned comparison */
-				PPC_CMPLD(dst_reg, src_reg);
+				if (BPF_CLASS(code) == BPF_JMP32)
+					PPC_CMPLW(dst_reg, src_reg);
+				else
+					PPC_CMPLD(dst_reg, src_reg);
 				break;
 			case BPF_JMP | BPF_JSGT | BPF_X:
 			case BPF_JMP | BPF_JSLT | BPF_X:
 			case BPF_JMP | BPF_JSGE | BPF_X:
 			case BPF_JMP | BPF_JSLE | BPF_X:
+			case BPF_JMP32 | BPF_JSGT | BPF_X:
+			case BPF_JMP32 | BPF_JSLT | BPF_X:
+			case BPF_JMP32 | BPF_JSGE | BPF_X:
+			case BPF_JMP32 | BPF_JSLE | BPF_X:
 				/* signed comparison */
-				PPC_CMPD(dst_reg, src_reg);
+				if (BPF_CLASS(code) == BPF_JMP32)
+					PPC_CMPW(dst_reg, src_reg);
+				else
+					PPC_CMPD(dst_reg, src_reg);
 				break;
 			case BPF_JMP | BPF_JSET | BPF_X:
-				PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
+			case BPF_JMP32 | BPF_JSET | BPF_X:
+				if (BPF_CLASS(code) == BPF_JMP) {
+					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
+						    src_reg);
+				} else {
+					int tmp_reg = b2p[TMP_REG_1];
+
+					PPC_AND(tmp_reg, dst_reg, src_reg);
+					PPC_RLWINM_DOT(tmp_reg, tmp_reg, 0, 0,
+						       31);
+				}
 				break;
 			case BPF_JMP | BPF_JNE | BPF_K:
 			case BPF_JMP | BPF_JEQ | BPF_K:
@@ -828,43 +854,87 @@  static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			case BPF_JMP | BPF_JLT | BPF_K:
 			case BPF_JMP | BPF_JGE | BPF_K:
 			case BPF_JMP | BPF_JLE | BPF_K:
+			case BPF_JMP32 | BPF_JNE | BPF_K:
+			case BPF_JMP32 | BPF_JEQ | BPF_K:
+			case BPF_JMP32 | BPF_JGT | BPF_K:
+			case BPF_JMP32 | BPF_JLT | BPF_K:
+			case BPF_JMP32 | BPF_JGE | BPF_K:
+			case BPF_JMP32 | BPF_JLE | BPF_K:
+			{
+				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
+
 				/*
 				 * Need sign-extended load, so only positive
 				 * values can be used as imm in cmpldi
 				 */
-				if (imm >= 0 && imm < 32768)
-					PPC_CMPLDI(dst_reg, imm);
-				else {
+				if (imm >= 0 && imm < 32768) {
+					if (is_jmp32)
+						PPC_CMPLWI(dst_reg, imm);
+					else
+						PPC_CMPLDI(dst_reg, imm);
+				} else {
 					/* sign-extending load */
 					PPC_LI32(b2p[TMP_REG_1], imm);
 					/* ... but unsigned comparison */
-					PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
+					if (is_jmp32)
+						PPC_CMPLW(dst_reg,
+							  b2p[TMP_REG_1]);
+					else
+						PPC_CMPLD(dst_reg,
+							  b2p[TMP_REG_1]);
 				}
 				break;
+			}
 			case BPF_JMP | BPF_JSGT | BPF_K:
 			case BPF_JMP | BPF_JSLT | BPF_K:
 			case BPF_JMP | BPF_JSGE | BPF_K:
 			case BPF_JMP | BPF_JSLE | BPF_K:
+			case BPF_JMP32 | BPF_JSGT | BPF_K:
+			case BPF_JMP32 | BPF_JSLT | BPF_K:
+			case BPF_JMP32 | BPF_JSGE | BPF_K:
+			case BPF_JMP32 | BPF_JSLE | BPF_K:
+			{
+				bool is_jmp32 = BPF_CLASS(code) == BPF_JMP32;
+
 				/*
 				 * signed comparison, so any 16-bit value
 				 * can be used in cmpdi
 				 */
-				if (imm >= -32768 && imm < 32768)
-					PPC_CMPDI(dst_reg, imm);
-				else {
+				if (imm >= -32768 && imm < 32768) {
+					if (is_jmp32)
+						PPC_CMPWI(dst_reg, imm);
+					else
+						PPC_CMPDI(dst_reg, imm);
+				} else {
 					PPC_LI32(b2p[TMP_REG_1], imm);
-					PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
+					if (is_jmp32)
+						PPC_CMPW(dst_reg,
+							 b2p[TMP_REG_1]);
+					else
+						PPC_CMPD(dst_reg,
+							 b2p[TMP_REG_1]);
 				}
 				break;
+			}
 			case BPF_JMP | BPF_JSET | BPF_K:
+			case BPF_JMP32 | BPF_JSET | BPF_K:
 				/* andi does not sign-extend the immediate */
 				if (imm >= 0 && imm < 32768)
 					/* PPC_ANDI is _only/always_ dot-form */
 					PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
 				else {
-					PPC_LI32(b2p[TMP_REG_1], imm);
-					PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
-						    b2p[TMP_REG_1]);
+					int tmp_reg = b2p[TMP_REG_1];
+
+					PPC_LI32(tmp_reg, imm);
+					if (BPF_CLASS(code) == BPF_JMP) {
+						PPC_AND_DOT(tmp_reg, dst_reg,
+							    tmp_reg);
+					} else {
+						PPC_AND(tmp_reg, dst_reg,
+							tmp_reg);
+						PPC_RLWINM_DOT(tmp_reg, tmp_reg,
+							       0, 0, 31);
+					}
 				}
 				break;
 			}