Patchwork [net-next-2.6] filter: cleanup codes[] init

login
register
mail settings
Submitter Eric Dumazet
Date Nov. 19, 2010, 7:56 a.m.
Message ID <1290153398.29509.7.camel@edumazet-laptop>
Download mbox | patch
Permalink /patch/72223/
State Accepted
Delegated to: David Miller
Headers show

Comments

Eric Dumazet - Nov. 19, 2010, 7:56 a.m.
Starting the translated instructions at 1 instead of 0 allows us to
remove one decrement at check time and makes the codes[] array init
cleaner.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
---
 net/core/filter.c |   95 +++++++++++++++++++++-----------------------
 1 file changed, 47 insertions(+), 48 deletions(-)



--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Changli Gao - Nov. 19, 2010, 8:38 a.m.
On Fri, Nov 19, 2010 at 3:56 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
> Starting the translated instructions at 1 instead of 0 allows us to
> remove one decrement at check time and makes the codes[] array init
> cleaner.
>
> Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
> ---
>  net/core/filter.c |   95 +++++++++++++++++++++-----------------------
>  1 file changed, 47 insertions(+), 48 deletions(-)
>
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 15a545d..a1edb5d 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -39,7 +39,7 @@
>  #include <linux/filter.h>
>
>  enum {
> -       BPF_S_RET_K = 0,
> +       BPF_S_RET_K = 1,
>        BPF_S_RET_A,
>        BPF_S_ALU_ADD_K,
>        BPF_S_ALU_ADD_X,
> @@ -436,51 +436,51 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
>         * Invalid instructions are initialized to 0.
>         */
>        static const u8 codes[] = {
> -               [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K + 1,
> -               [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X + 1,
> -               [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K + 1,
> -               [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X + 1,
> -               [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K + 1,
> -               [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X + 1,
> -               [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X + 1,
> -               [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K + 1,
> -               [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X + 1,
> -               [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K + 1,
> -               [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X + 1,
> -               [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K + 1,
> -               [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X + 1,
> -               [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K + 1,
> -               [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X + 1,
> -               [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG + 1,
> -               [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS + 1,
> -               [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS + 1,
> -               [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS + 1,
> -               [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN + 1,
> -               [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND + 1,
> -               [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND + 1,
> -               [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND + 1,
> -               [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM + 1,
> -               [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN + 1,
> -               [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH + 1,
> -               [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM + 1,
> -               [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX + 1,
> -               [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA + 1,
> -               [BPF_RET|BPF_K]          = BPF_S_RET_K + 1,
> -               [BPF_RET|BPF_A]          = BPF_S_RET_A + 1,
> -               [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K + 1,
> -               [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM + 1,
> -               [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM + 1,
> -               [BPF_ST]                 = BPF_S_ST + 1,
> -               [BPF_STX]                = BPF_S_STX + 1,
> -               [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA + 1,
> -               [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K + 1,
> -               [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X + 1,
> -               [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K + 1,
> -               [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X + 1,
> -               [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K + 1,
> -               [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X + 1,
> -               [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1,
> -               [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1,
> +               [BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
> +               [BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
> +               [BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
> +               [BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
> +               [BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
> +               [BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
> +               [BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
> +               [BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
> +               [BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
> +               [BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
> +               [BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
> +               [BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
> +               [BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
> +               [BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
> +               [BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
> +               [BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
> +               [BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
> +               [BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
> +               [BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
> +               [BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
> +               [BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
> +               [BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
> +               [BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
> +               [BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
> +               [BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
> +               [BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
> +               [BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
> +               [BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
> +               [BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
> +               [BPF_RET|BPF_K]          = BPF_S_RET_K,
> +               [BPF_RET|BPF_A]          = BPF_S_RET_A,
> +               [BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
> +               [BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
> +               [BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
> +               [BPF_ST]                 = BPF_S_ST,
> +               [BPF_STX]                = BPF_S_STX,
> +               [BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
> +               [BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
> +               [BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
> +               [BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
> +               [BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
> +               [BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
> +               [BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
> +               [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
> +               [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
>        };
>        int pc;
>
> @@ -495,8 +495,7 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
>                if (code >= ARRAY_SIZE(codes))
>                        return -EINVAL;
>                code = codes[code];
> -               /* Undo the '+ 1' in codes[] after validation. */
> -               if (!code--)
> +               if (!code)
>                        return -EINVAL;
>                /* Some instructions need special checks */
>                switch (code) {
>
>
>

I compared the asm code of sk_run_filter.

Here is the original:

                switch (fentry->code) {
ffffffff8138c70c:       66 83 38 2c             cmpw   $0x2c,(%rax)
        /*
         * Process array of filter instructions.
         */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *fentry = &filter[pc];
                u32 f_k = fentry->k;
ffffffff8138c710:       44 8b 78 04             mov    0x4(%rax),%r15d

                switch (fentry->code) {
ffffffff8138c714:       0f 87 53 02 00 00       ja
ffffffff8138c96d <sk_run_filter+0x2a8>
ffffffff8138c71a:       0f b7 10                movzwl (%rax),%edx
ffffffff8138c71d:       ff 24 d5 00 00 00 00    jmpq   *0x0(,%rdx,8)

And here is the patched:

                switch (fentry->code) {
ffffffff8138c708:       8b 10                   mov    (%rax),%edx
        /*
         * Process array of filter instructions.
         */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *fentry = &filter[pc];
                u32 f_k = fentry->k;
ffffffff8138c70a:       44 8b 78 04             mov    0x4(%rax),%r15d

                switch (fentry->code) {
ffffffff8138c70e:       ff ca                   dec    %edx
ffffffff8138c710:       66 83 fa 2c             cmp    $0x2c,%dx
ffffffff8138c714:       0f 87 53 02 00 00       ja
ffffffff8138c96d <sk_run_filter+0x2ac>
ffffffff8138c71a:       0f b7 d2                movzwl %dx,%edx
ffffffff8138c71d:       ff 24 d5 00 00 00 00    jmpq   *0x0(,%rdx,8)

As you can see, an additional 'dec %edx' instruction is inserted.
sk_chk_filter() only runs once, so I think we can afford the 'dec
instruction' and 'dirty' code there, but sk_run_filter() runs much more
often, and this additional dec instruction isn't affordable.
Eric Dumazet - Nov. 19, 2010, 9:54 a.m.
Le vendredi 19 novembre 2010 à 16:38 +0800, Changli Gao a écrit :

> I compared the asm code of sk_run_filter.

> As you can see, an additional 'dec %edx' instruction is inserted.
> sk_chk_filter() only runs once, so I think we can afford the 'dec
> instruction' and 'dirty' code there, but sk_run_filter() runs much more
> often, and this additional dec instruction isn't affordable.
> 

Maybe on your setup. By the way, the 

u32 f_k = fentry->k;

that David added in commit 57fe93b374a6b871

was much more a problem on arches with not enough registers.

x86_32 for example : compiler use a register (%esi on my gcc-4.5.1) to
store f_k, and more important A register is now stored in stack instead
of a cpu register.



On my compilers

 gcc version 4.4.5 (Ubuntu/Linaro 4.4.4-14ubuntu5) 64bit
 gcc-4.5.1 (self compiled) 32bit

result code was the same, before and after patch

Most probably you have "CONFIG_CC_OPTIMIZE_FOR_SIZE=y" which
unfortunately is known to generate poor looking code.

 39b:	49 8d 14 c6          	lea    (%r14,%rax,8),%rdx
 39f:	66 83 3a 2d          	cmpw   $0x2d,(%rdx)
 3a3:	8b 42 04             	mov    0x4(%rdx),%eax             // f_k = fentry->k;
 3a6:	76 28                	jbe    3d0 <sk_run_filter+0x70>

 3d0:	0f b7 0a             	movzwl (%rdx),%ecx
 3d3:	ff 24 cd 00 00 00 00 	jmpq   *0x0(,%rcx,8)


32bit code:

 2e0:	8d 04 df             	lea    (%edi,%ebx,8),%eax
 2e3:	66 83 38 2d          	cmpw   $0x2d,(%eax)
 2e7:	8b 70 04             	mov    0x4(%eax),%esi  // f_k = fentry->k;
 2ea:	76 1c                	jbe    308 <sk_run_filter+0x58>

 308:	0f b7 10             	movzwl (%eax),%edx
 30b:	ff 24 95 38 00 00 00 	jmp    *0x38(,%edx,4)


DIV_X instruction :
 480:	8b 45 a4             	mov    -0x5c(%ebp),%eax
 483:	85 c0                	test   %eax,%eax
 485:	0f 84 9d fe ff ff    	je     328 <sk_run_filter+0x78>
 48b:	8b 45 ac             	mov    -0x54(%ebp),%eax   // A
 48e:	31 d2                	xor    %edx,%edx
 490:	f7 75 a4             	divl   -0x5c(%ebp)
 493:	89 45 ac             	mov    %eax,-0x54(%ebp)  // A
 496:	e9 85 fe ff ff       	jmp    320 <sk_run_filter+0x70>


I believe we should revert the u32 f_k = fentry->k; part

fentry->k is as fast as f_k if stored on the stack, and avoids one
instruction if fentry->k is not needed.



--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Changli Gao - Nov. 19, 2010, 12:21 p.m.
On Fri, Nov 19, 2010 at 5:54 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>
> Most probably you have "CONFIG_CC_OPTIMIZE_FOR_SIZE=y" which
> unfortunately is known to generate poor looking code.

Yes. So

Acked-by: Changli Gao <xiaosuo@gmail.com>
David Miller - Nov. 19, 2010, 6:07 p.m.
From: Changli Gao <xiaosuo@gmail.com>
Date: Fri, 19 Nov 2010 20:21:57 +0800

> On Fri, Nov 19, 2010 at 5:54 PM, Eric Dumazet <eric.dumazet@gmail.com> wrote:
>>
>> Most probably you have "CONFIG_CC_OPTIMIZE_FOR_SIZE=y" which
>> unfortunately is known to generate poor looking code.
> 
> Yes. So
> 
> Acked-by: Changli Gao <xiaosuo@gmail.com>

Applied.
--
To unsubscribe from this list: send the line "unsubscribe netdev" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Patch

diff --git a/net/core/filter.c b/net/core/filter.c
index 15a545d..a1edb5d 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -39,7 +39,7 @@ 
 #include <linux/filter.h>
 
 enum {
-	BPF_S_RET_K = 0,
+	BPF_S_RET_K = 1,
 	BPF_S_RET_A,
 	BPF_S_ALU_ADD_K,
 	BPF_S_ALU_ADD_X,
@@ -436,51 +436,51 @@  int sk_chk_filter(struct sock_filter *filter, int flen)
 	 * Invalid instructions are initialized to 0.
 	 */
 	static const u8 codes[] = {
-		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K + 1,
-		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X + 1,
-		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K + 1,
-		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X + 1,
-		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K + 1,
-		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X + 1,
-		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X + 1,
-		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K + 1,
-		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X + 1,
-		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K + 1,
-		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X + 1,
-		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K + 1,
-		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X + 1,
-		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K + 1,
-		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X + 1,
-		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG + 1,
-		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS + 1,
-		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS + 1,
-		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS + 1,
-		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN + 1,
-		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND + 1,
-		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND + 1,
-		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND + 1,
-		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM + 1,
-		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN + 1,
-		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH + 1,
-		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM + 1,
-		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX + 1,
-		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA + 1,
-		[BPF_RET|BPF_K]          = BPF_S_RET_K + 1,
-		[BPF_RET|BPF_A]          = BPF_S_RET_A + 1,
-		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K + 1,
-		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM + 1,
-		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM + 1,
-		[BPF_ST]                 = BPF_S_ST + 1,
-		[BPF_STX]                = BPF_S_STX + 1,
-		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA + 1,
-		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K + 1,
-		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X + 1,
-		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K + 1,
-		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X + 1,
-		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K + 1,
-		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X + 1,
-		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K + 1,
-		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X + 1,
+		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
+		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
+		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
+		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
+		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
+		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
+		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
+		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
+		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
+		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
+		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
+		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
+		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
+		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
+		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
+		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
+		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
+		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
+		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
+		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
+		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
+		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
+		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
+		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
+		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
+		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
+		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
+		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
+		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
+		[BPF_RET|BPF_K]          = BPF_S_RET_K,
+		[BPF_RET|BPF_A]          = BPF_S_RET_A,
+		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
+		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
+		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
+		[BPF_ST]                 = BPF_S_ST,
+		[BPF_STX]                = BPF_S_STX,
+		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
+		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
+		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
+		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
+		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
+		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
+		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
+		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
+		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
 	};
 	int pc;
 
@@ -495,8 +495,7 @@  int sk_chk_filter(struct sock_filter *filter, int flen)
 		if (code >= ARRAY_SIZE(codes))
 			return -EINVAL;
 		code = codes[code];
-		/* Undo the '+ 1' in codes[] after validation. */
-		if (!code--)
+		if (!code)
 			return -EINVAL;
 		/* Some instructions need special checks */
 		switch (code) {