diff mbox series

Squash-to: "sock: Make sk_protocol a 16-bit value"

Message ID 9b1dae4f99342be4d34072d10f08ab1650d47895.1578404341.git.pabeni@redhat.com
State Superseded, archived
Delegated to: Matthieu Baerts
Headers show
Series Squash-to: "sock: Make sk_protocol a 16-bit value" | expand

Commit Message

Paolo Abeni Jan. 7, 2020, 2:05 p.m. UTC
We need to update the ebpf accessors for both sk_type and
sk_protocol. The code previously in place has some special
hack to take care of such fields being part of a bitfield.

The fix actually cleans up the existing code a bit.

Note: I'm not 100% sure about dropping the 'target_size' handling.
This changes the 'target_size' from 1 to 2 for sk_protocol.
Should not matter, as the ebpf target field is 4 bytes wide.

To be appended to the orig commit message:
"""
Take care of BPF field access for sk_type/sk_protocol.
Both of them are now outside the bitfield, so we can use
load instructions without further shifting/masking.

v5 -> v6:
 - update eBPF accessors, too.
"""

Additionally, Eric's Reviewed-by should be dropped.
---
 include/net/sock.h | 15 ------------
 net/core/filter.c  | 58 ++++++++++++++++------------------------------
 2 files changed, 20 insertions(+), 53 deletions(-)

Comments

Matthieu Baerts Jan. 7, 2020, 3:08 p.m. UTC | #1
Hi Paolo,

On 07/01/2020 15:05, Paolo Abeni wrote:
> We need to update the ebpf accessors for both sk_type and
> sk_protocol. The code previously in place has some special
> hack to take care such fields being part of a bitfield.
> 
> The fix actually clean-up a bit the existing code.

Thank you for looking at that!

I have 3 questions below.

> Note: I'm not 100% sure about dropping the 'target_size' handling.
> This changes the 'target_size' from 1 to 2 for sk_protocol.
> Should not matter, as the ebpf target field is 4 bytes wide.
> 
> To be appended to the orig commit message:
> """
> Take care of BPF field access for sk_type/sk_protocol.
> Both of them are now outside the bitfield, so we can use
> load instructions without fourther shifting/masking.

(detail: s/fourther/further)

> 
> v5 -> v6:
>   - update eBPF accessors, too.
> """
> 
> Additionally, Eric's Reviewed-by should be dropped.

Good point!

> ---
>   include/net/sock.h | 15 ------------
>   net/core/filter.c  | 58 ++++++++++++++++------------------------------
>   2 files changed, 20 insertions(+), 53 deletions(-)
> 
> diff --git a/include/net/sock.h b/include/net/sock.h
> index c63e9088b4e3..8766f9bc3e70 100644
> --- a/include/net/sock.h
> +++ b/include/net/sock.h
> @@ -436,21 +436,6 @@ struct sock {
>   	 * Because of non atomicity rules, all
>   	 * changes are protected by socket lock.
>   	 */
> -	unsigned int		__sk_flags_offset[0];
> -#ifdef __BIG_ENDIAN_BITFIELD
> -#define SK_FL_PROTO_SHIFT  16
> -#define SK_FL_PROTO_MASK   0x00ff0000
> -
> -#define SK_FL_TYPE_SHIFT   0
> -#define SK_FL_TYPE_MASK    0x0000ffff
> -#else
> -#define SK_FL_PROTO_SHIFT  8
> -#define SK_FL_PROTO_MASK   0x0000ff00
> -
> -#define SK_FL_TYPE_SHIFT   16
> -#define SK_FL_TYPE_MASK    0xffff0000
> -#endif
> -
>   	u8			sk_padding : 1,
>   				sk_kern_sock : 1,
>   				sk_no_check_tx : 1,
> diff --git a/net/core/filter.c b/net/core/filter.c
> index 42fd17c48c5f..37759d6ce3ea 100644
> --- a/net/core/filter.c
> +++ b/net/core/filter.c
> @@ -7607,21 +7607,19 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
>   		break;
>   
>   	case offsetof(struct bpf_sock, type):
> -		BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);

Should we not keep it?

I see that some others are still doing that:

     BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);

But not all, e.g. for sk_state, sk_num, sk_dport.

> -		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -				      offsetof(struct sock, __sk_flags_offset));
> -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
> -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
> -		*target_size = 2;
> +		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,

Instead of BPF_H, should we use this macro?

     BPF_FIELD_SIZEOF(struct sock, sk_type)

> +				      bpf_target_off(struct sock, sk_type,
> +						     sizeof_field(struct sock,
> +								  sk_type),
> +						     target_size));
>   		break;
>   
>   	case offsetof(struct bpf_sock, protocol):
> -		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
> -		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> -				      offsetof(struct sock, __sk_flags_offset));
> -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
> -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
> -		*target_size = 1;
> +		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
> +				      bpf_target_off(struct sock, sk_protocol,
> +						     sizeof_field(struct sock,
> +								  sk_protocol),
> +						     target_size));
>   		break;
>   
>   	case offsetof(struct bpf_sock, src_ip4):
> @@ -7903,20 +7901,13 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
>   		break;
>   
>   	case offsetof(struct bpf_sock_addr, type):
> -		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
> -			struct bpf_sock_addr_kern, struct sock, sk,
> -			__sk_flags_offset, BPF_W, 0);
> -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
> -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
> +		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
> +					    struct sock, sk, sk_type);
>   		break;
>   
>   	case offsetof(struct bpf_sock_addr, protocol):
> -		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
> -			struct bpf_sock_addr_kern, struct sock, sk,
> -			__sk_flags_offset, BPF_W, 0);
> -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
> -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
> -					SK_FL_PROTO_SHIFT);
> +		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
> +					    struct sock, sk, sk_protocol);
>   		break;
>   
>   	case offsetof(struct bpf_sock_addr, msg_src_ip4):
> @@ -8835,11 +8826,11 @@ sk_reuseport_is_valid_access(int off, int size,
>   				    skb,				\
>   				    SKB_FIELD)
>   
> -#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
> -	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	\
> -					     struct sock,		\
> -					     sk,			\
> -					     SK_FIELD, BPF_SIZE, EXTRA_OFF)
> +#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD)				\
> +	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
> +				    struct sock,			\
> +				    sk,					\
> +				    SK_FIELD)

Why did you have to change that too?

>   
>   static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
>   					   const struct bpf_insn *si,
> @@ -8863,16 +8854,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
>   		break;
>   
>   	case offsetof(struct sk_reuseport_md, ip_protocol):
> -		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
> -		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
> -						    BPF_W, 0);
> -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
> -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
> -					SK_FL_PROTO_SHIFT);
> -		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
> -		 * aware.  No further narrowing or masking is needed.
> -		 */
> -		*target_size = 1;
> +		SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);

Detail for me: why did they not modify the bitfield instead of adding 
__sk_flags_offset[0] in include/net/sock.h ?

Cheers,
Matt
Paolo Abeni Jan. 7, 2020, 3:21 p.m. UTC | #2
On Tue, 2020-01-07 at 16:08 +0100, Matthieu Baerts wrote:
> Hi Paolo,
> 
> On 07/01/2020 15:05, Paolo Abeni wrote:
> > We need to update the ebpf accessors for both sk_type and
> > sk_protocol. The code previously in place has some special
> > hack to take care such fields being part of a bitfield.
> > 
> > The fix actually clean-up a bit the existing code.
> 
> Thank you for looking at that!
> 
> I have 3 questions below.
> 
> > Note: I'm not 100% sure about dropping the 'target_size' handling.
> > This changes the 'target_size' from 1 to 2 for sk_protocol.
> > Should not matter, as the ebpf target field is 4 bytes wide.
> > 
> > To be appended to the orig commit message:
> > """
> > Take care of BPF field access for sk_type/sk_protocol.
> > Both of them are now outside the bitfield, so we can use
> > load instructions without fourther shifting/masking.
> 
> (detail: s/fourther/further)
> 
> > v5 -> v6:
> >   - update eBPF accessors, too.
> > """
> > 
> > Additionally, Eric's Reviewed-by should be dropped.
> 
> Good point!
> 
> > ---
> >   include/net/sock.h | 15 ------------
> >   net/core/filter.c  | 58 ++++++++++++++++------------------------------
> >   2 files changed, 20 insertions(+), 53 deletions(-)
> > 
> > diff --git a/include/net/sock.h b/include/net/sock.h
> > index c63e9088b4e3..8766f9bc3e70 100644
> > --- a/include/net/sock.h
> > +++ b/include/net/sock.h
> > @@ -436,21 +436,6 @@ struct sock {
> >   	 * Because of non atomicity rules, all
> >   	 * changes are protected by socket lock.
> >   	 */
> > -	unsigned int		__sk_flags_offset[0];
> > -#ifdef __BIG_ENDIAN_BITFIELD
> > -#define SK_FL_PROTO_SHIFT  16
> > -#define SK_FL_PROTO_MASK   0x00ff0000
> > -
> > -#define SK_FL_TYPE_SHIFT   0
> > -#define SK_FL_TYPE_MASK    0x0000ffff
> > -#else
> > -#define SK_FL_PROTO_SHIFT  8
> > -#define SK_FL_PROTO_MASK   0x0000ff00
> > -
> > -#define SK_FL_TYPE_SHIFT   16
> > -#define SK_FL_TYPE_MASK    0xffff0000
> > -#endif
> > -
> >   	u8			sk_padding : 1,
> >   				sk_kern_sock : 1,
> >   				sk_no_check_tx : 1,
> > diff --git a/net/core/filter.c b/net/core/filter.c
> > index 42fd17c48c5f..37759d6ce3ea 100644
> > --- a/net/core/filter.c
> > +++ b/net/core/filter.c
> > @@ -7607,21 +7607,19 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
> >   		break;
> >   
> >   	case offsetof(struct bpf_sock, type):
> > -		BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
> 
> Should we not keep it?
> 
> I see that some others are still doing that:
> 
>      BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);

The first BUILD_BUG reported above regards only the type mask. This
patch remove such definition, so we can't keep the build bug as-is.

The second one instead is still present, via the 'bpf_target_off()'
macro call.

> But not all, e.g. for sk_state, sk_num, sk_dport.
> 
> > -		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -				      offsetof(struct sock, __sk_flags_offset));
> > -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
> > -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
> > -		*target_size = 2;
> > +		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
> 
> Instead of BPF_H, should we use this macro?
> 
>      BPF_FIELD_SIZEOF(struct sock, sk_type)

You are right, it's more consistent with the nearby code.

> > +				      bpf_target_off(struct sock, sk_type,
> > +						     sizeof_field(struct sock,
> > +								  sk_type),
> > +						     target_size));
> >   		break;
> >   
> >   	case offsetof(struct bpf_sock, protocol):
> > -		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
> > -		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
> > -				      offsetof(struct sock, __sk_flags_offset));
> > -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
> > -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
> > -		*target_size = 1;
> > +		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
> > +				      bpf_target_off(struct sock, sk_protocol,
> > +						     sizeof_field(struct sock,
> > +								  sk_protocol),
> > +						     target_size));
> >   		break;
> >   
> >   	case offsetof(struct bpf_sock, src_ip4):
> > @@ -7903,20 +7901,13 @@ static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
> >   		break;
> >   
> >   	case offsetof(struct bpf_sock_addr, type):
> > -		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
> > -			struct bpf_sock_addr_kern, struct sock, sk,
> > -			__sk_flags_offset, BPF_W, 0);
> > -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
> > -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
> > +		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
> > +					    struct sock, sk, sk_type);
> >   		break;
> >   
> >   	case offsetof(struct bpf_sock_addr, protocol):
> > -		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
> > -			struct bpf_sock_addr_kern, struct sock, sk,
> > -			__sk_flags_offset, BPF_W, 0);
> > -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
> > -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
> > -					SK_FL_PROTO_SHIFT);
> > +		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
> > +					    struct sock, sk, sk_protocol);
> >   		break;
> >   
> >   	case offsetof(struct bpf_sock_addr, msg_src_ip4):
> > @@ -8835,11 +8826,11 @@ sk_reuseport_is_valid_access(int off, int size,
> >   				    skb,				\
> >   				    SKB_FIELD)
> >   
> > -#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
> > -	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	\
> > -					     struct sock,		\
> > -					     sk,			\
> > -					     SK_FIELD, BPF_SIZE, EXTRA_OFF)
> > +#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD)				\
> > +	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
> > +				    struct sock,			\
> > +				    sk,					\
> > +				    SK_FIELD)
> 
> Why did you have to change that too?

SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF() is unused after this patch.
So I crafted another helper to make sk_protocol access easier.

> 
> >   
> >   static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
> >   					   const struct bpf_insn *si,
> > @@ -8863,16 +8854,7 @@ static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
> >   		break;
> >   
> >   	case offsetof(struct sk_reuseport_md, ip_protocol):
> > -		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
> > -		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
> > -						    BPF_W, 0);
> > -		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
> > -		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
> > -					SK_FL_PROTO_SHIFT);
> > -		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
> > -		 * aware.  No further narrowing or masking is needed.
> > -		 */
> > -		*target_size = 1;
> > +		SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
> 
> Detail for me: why did they not modify the bitfield instead of adding 
> __sk_flags_offset[0] in include/net/sock.h ?

I guess to avoid touching a long established and delicate piece of
code. You know, you get quite serious feedback when doing that kind of
change ;)

I'll send v1 with the cleanup hinted by the 2nd question after doing
some tests.

/P
Mat Martineau Jan. 7, 2020, 5:24 p.m. UTC | #3
On Tue, 7 Jan 2020, Paolo Abeni wrote:

> On Tue, 2020-01-07 at 16:08 +0100, Matthieu Baerts wrote:
>> Hi Paolo,
>>
>> On 07/01/2020 15:05, Paolo Abeni wrote:
>>> We need to update the ebpf accessors for both sk_type and
>>> sk_protocol. The code previously in place has some special
>>> hack to take care such fields being part of a bitfield.
>>>
>>> The fix actually clean-up a bit the existing code.
>>
>> Thank you for looking at that!
>>
>> I have 3 questions below.
>>
>>> Note: I'm not 100% sure about dropping the 'target_size' handling.
>>> This changes the 'target_size' from 1 to 2 for sk_protocol.
>>> Should not matter, as the ebpf target field is 4 bytes wide.

It looks like your handling of target_size is consistent with other 16-bit 
fields (like skc_dport), so it seems like the right way to go.

>>>
>>> To be appended to the orig commit message:
>>> """
>>> Take care of BPF field access for sk_type/sk_protocol.
>>> Both of them are now outside the bitfield, so we can use
>>> load instructions without fourther shifting/masking.
>>
>> (detail: s/fourther/further)
>>
>>> v5 -> v6:
>>>   - update eBPF accessors, too.
>>> """
>>>
>>> Additionally, Eric's Reviewed-by should be dropped.
>>
>> Good point!
>>
>>> ---
>>>   include/net/sock.h | 15 ------------
>>>   net/core/filter.c  | 58 ++++++++++++++++------------------------------
>>>   2 files changed, 20 insertions(+), 53 deletions(-)
>>>

...

>
> I'll send v1 with the cleanup hinted by the 2nd question after doing
> some tests.
>

I don't have anything to add to Matthieu's comments for now, and thanks 
for tracking this down Paolo! Will watch for next version.

--
Mat Martineau
Intel
diff mbox series

Patch

diff --git a/include/net/sock.h b/include/net/sock.h
index c63e9088b4e3..8766f9bc3e70 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -436,21 +436,6 @@  struct sock {
 	 * Because of non atomicity rules, all
 	 * changes are protected by socket lock.
 	 */
-	unsigned int		__sk_flags_offset[0];
-#ifdef __BIG_ENDIAN_BITFIELD
-#define SK_FL_PROTO_SHIFT  16
-#define SK_FL_PROTO_MASK   0x00ff0000
-
-#define SK_FL_TYPE_SHIFT   0
-#define SK_FL_TYPE_MASK    0x0000ffff
-#else
-#define SK_FL_PROTO_SHIFT  8
-#define SK_FL_PROTO_MASK   0x0000ff00
-
-#define SK_FL_TYPE_SHIFT   16
-#define SK_FL_TYPE_MASK    0xffff0000
-#endif
-
 	u8			sk_padding : 1,
 				sk_kern_sock : 1,
 				sk_no_check_tx : 1,
diff --git a/net/core/filter.c b/net/core/filter.c
index 42fd17c48c5f..37759d6ce3ea 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -7607,21 +7607,19 @@  u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct bpf_sock, type):
-		BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-				      offsetof(struct sock, __sk_flags_offset));
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
-		*target_size = 2;
+		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+				      bpf_target_off(struct sock, sk_type,
+						     sizeof_field(struct sock,
+								  sk_type),
+						     target_size));
 		break;
 
 	case offsetof(struct bpf_sock, protocol):
-		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
-		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
-				      offsetof(struct sock, __sk_flags_offset));
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
-		*target_size = 1;
+		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
+				      bpf_target_off(struct sock, sk_protocol,
+						     sizeof_field(struct sock,
+								  sk_protocol),
+						     target_size));
 		break;
 
 	case offsetof(struct bpf_sock, src_ip4):
@@ -7903,20 +7901,13 @@  static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct bpf_sock_addr, type):
-		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
-			struct bpf_sock_addr_kern, struct sock, sk,
-			__sk_flags_offset, BPF_W, 0);
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
+		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
+					    struct sock, sk, sk_type);
 		break;
 
 	case offsetof(struct bpf_sock_addr, protocol):
-		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
-			struct bpf_sock_addr_kern, struct sock, sk,
-			__sk_flags_offset, BPF_W, 0);
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
-					SK_FL_PROTO_SHIFT);
+		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
+					    struct sock, sk, sk_protocol);
 		break;
 
 	case offsetof(struct bpf_sock_addr, msg_src_ip4):
@@ -8835,11 +8826,11 @@  sk_reuseport_is_valid_access(int off, int size,
 				    skb,				\
 				    SKB_FIELD)
 
-#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
-	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	\
-					     struct sock,		\
-					     sk,			\
-					     SK_FIELD, BPF_SIZE, EXTRA_OFF)
+#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD)				\
+	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		\
+				    struct sock,			\
+				    sk,					\
+				    SK_FIELD)
 
 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
 					   const struct bpf_insn *si,
@@ -8863,16 +8854,7 @@  static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct sk_reuseport_md, ip_protocol):
-		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
-		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
-						    BPF_W, 0);
-		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
-		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
-					SK_FL_PROTO_SHIFT);
-		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
-		 * aware.  No further narrowing or masking is needed.
-		 */
-		*target_size = 1;
+		SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
 		break;
 
 	case offsetof(struct sk_reuseport_md, data_end):