Message ID | 20c53e88-acf5-4c4f-cea9-4dd8745814b5@akamai.com (mailing list archive) |
---|---|
State | Not Applicable |
Headers | show |
Jason Baron <jbaron@akamai.com> writes: > On 02/20/2017 10:05 PM, Sachin Sant wrote: >> >>> On 20-Feb-2017, at 8:27 PM, Jason Baron <jbaron@akamai.com >>> <mailto:jbaron@akamai.com>> wrote: >>> >>> Hi, >>> >>> On 02/19/2017 09:07 AM, Sachin Sant wrote: >>>> While booting next-20170217 on a POWER8 LPAR following >>>> warning is displayed. >>>> >>>> Reverting the following commit helps boot cleanly. >>>> commit 3821fd35b5 : jump_label: Reduce the size of struct static_key >>>> >>>> [ 11.393008] ------------[ cut here ]------------ >>>> [ 11.393031] WARNING: CPU: 5 PID: 2890 at kernel/jump_label.c:287 >>>> static_key_set_entries.isra.10+0x3c/0x50 >>> >>> Thanks for the report. So this is saying that the jump_entry table is >>> not at least 4-byte aligned. I wonder if this fixes it up? >>> >> >> Yes. With this patch the warning is gone. > > Hi, > > Thanks for testing. We probably need something like the following to > make sure we don't hit this on other arches. Steve - I will send 4 > separate patches for this to get arch maintainers' acks for this? What's the 4 byte alignment requirement from? On 64-bit our JUMP_ENTRY_TYPE is 8 bytes, should we be aligning to 8 bytes? > diff --git a/arch/powerpc/include/asm/jump_label.h > b/arch/powerpc/include/asm/jump_label.h > index 9a287e0ac8b1..f870a85bac46 100644 > --- a/arch/powerpc/include/asm/jump_label.h > +++ b/arch/powerpc/include/asm/jump_label.h > @@ -24,6 +24,7 @@ static __always_inline bool arch_static_branch(struct > static_key *key, bool bran > asm_volatile_goto("1:\n\t" > "nop # arch_static_branch\n\t" > ".pushsection __jump_table, \"aw\"\n\t" > + ".balign 4 \n\t" Can you line those up vertically? 
(That may just be an email artifact) > JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" > ".popsection \n\t" > : : "i" (&((char *)key)[branch]) : : l_yes); > @@ -38,6 +39,7 @@ static __always_inline bool > arch_static_branch_jump(struct static_key *key, bool > asm_volatile_goto("1:\n\t" > "b %l[l_yes] # arch_static_branch_jump\n\t" > ".pushsection __jump_table, \"aw\"\n\t" > + ".balign 4 \n\t" > JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" > ".popsection \n\t" > : : "i" (&((char *)key)[branch]) : : l_yes); > @@ -63,6 +65,7 @@ struct jump_entry { > #define ARCH_STATIC_BRANCH(LABEL, KEY) \ > 1098: nop; \ > .pushsection __jump_table, "aw"; \ > + .balign 4; \ > FTR_ENTRY_LONG 1098b, LABEL, KEY; \ > .popsection > #endif Otherwise that looks fine assuming 4 bytes is the correct alignment. cheers
On 02/22/2017 12:38 AM, Michael Ellerman wrote: > Jason Baron <jbaron@akamai.com> writes: > >> On 02/20/2017 10:05 PM, Sachin Sant wrote: >>> >>>> On 20-Feb-2017, at 8:27 PM, Jason Baron <jbaron@akamai.com >>>> <mailto:jbaron@akamai.com>> wrote: >>>> >>>> Hi, >>>> >>>> On 02/19/2017 09:07 AM, Sachin Sant wrote: >>>>> While booting next-20170217 on a POWER8 LPAR following >>>>> warning is displayed. >>>>> >>>>> Reverting the following commit helps boot cleanly. >>>>> commit 3821fd35b5 : jump_label: Reduce the size of struct static_key >>>>> >>>>> [ 11.393008] ------------[ cut here ]------------ >>>>> [ 11.393031] WARNING: CPU: 5 PID: 2890 at kernel/jump_label.c:287 >>>>> static_key_set_entries.isra.10+0x3c/0x50 >>>> >>>> Thanks for the report. So this is saying that the jump_entry table is >>>> not at least 4-byte aligned. I wonder if this fixes it up? >>>> >>> >>> Yes. With this patch the warning is gone. >> >> Hi, >> >> Thanks for testing. We probably need something like the following to >> make sure we don't hit this on other arches. Steve - I will send 4 >> separate patches for this to get arch maintainers' acks for this? > > What's the 4 byte alignment requirement from? > The 4 byte alignment is coming from this patch in linux-next: https://lkml.org/lkml/2017/2/3/558 It reduces the size of 'struct static_key' by making use of the two least significant bits of the static_key::entry pointer. Thus, the jump_entry table needs to be 4 byte aligned to make it work. I added a WARN_ON() to make sure the jump_entry table is in fact 4 byte aligned, and that is what we hit here. > On 64-bit our JUMP_ENTRY_TYPE is 8 bytes, should we be aligning to 8 > bytes? > 4 bytes should be sufficient and apparently fixes the WARN_ON() that was hit. 
>> diff --git a/arch/powerpc/include/asm/jump_label.h >> b/arch/powerpc/include/asm/jump_label.h >> index 9a287e0ac8b1..f870a85bac46 100644 >> --- a/arch/powerpc/include/asm/jump_label.h >> +++ b/arch/powerpc/include/asm/jump_label.h >> @@ -24,6 +24,7 @@ static __always_inline bool arch_static_branch(struct >> static_key *key, bool bran >> asm_volatile_goto("1:\n\t" >> "nop # arch_static_branch\n\t" >> ".pushsection __jump_table, \"aw\"\n\t" >> + ".balign 4 \n\t" > > Can you line those up vertically? > > (That may just be an email artifact) sure will fix. Thanks, -Jason > >> JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" >> ".popsection \n\t" >> : : "i" (&((char *)key)[branch]) : : l_yes); >> @@ -38,6 +39,7 @@ static __always_inline bool >> arch_static_branch_jump(struct static_key *key, bool >> asm_volatile_goto("1:\n\t" >> "b %l[l_yes] # arch_static_branch_jump\n\t" >> ".pushsection __jump_table, \"aw\"\n\t" >> + ".balign 4 \n\t" >> JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" >> ".popsection \n\t" >> : : "i" (&((char *)key)[branch]) : : l_yes); >> @@ -63,6 +65,7 @@ struct jump_entry { >> #define ARCH_STATIC_BRANCH(LABEL, KEY) \ >> 1098: nop; \ >> .pushsection __jump_table, "aw"; \ >> + .balign 4; \ >> FTR_ENTRY_LONG 1098b, LABEL, KEY; \ >> .popsection >> #endif > > Otherwise that looks fine assuming 4 bytes is the correct alignment. > > cheers >
On Tue, 21 Feb 2017 11:37:21 -0500 Jason Baron <jbaron@akamai.com> wrote: > Thanks for testing. We probably need something like the following to > make sure we don't hit this on other arches. Steve - I will send 4 > separate patches for this to get arch maintainers' acks for this? > I just got back from ELC, so sorry for the late reply. I think you can just send this as one patch, as it is a single fix. Cc each of the arch maintainers though. Unless you think it will have conflicts with other changes in those archs. But I'm assuming there won't be any conflicts. This is a fix for something that I just pushed to Linus, thus it can still get in after the merge window closes. -- Steve
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h index 34f7b6980d21..9720c2f1850b 100644 --- a/arch/arm/include/asm/jump_label.h +++ b/arch/arm/include/asm/jump_label.h @@ -13,6 +13,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("1:\n\t" WASM(nop) "\n\t" ".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" ".word 1b, %l[l_yes], %c0\n\t" ".popsection\n\t" : : "i" (&((char *)key)[branch]) : : l_yes); @@ -27,6 +28,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("1:\n\t" WASM(b) " %l[l_yes]\n\t" ".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" ".word 1b, %l[l_yes], %c0\n\t" ".popsection\n\t" : : "i" (&((char *)key)[branch]) : : l_yes); diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h index e77672539e8e..51ce97dda3cc 100644 --- a/arch/mips/include/asm/jump_label.h +++ b/arch/mips/include/asm/jump_label.h @@ -31,6 +31,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("1:\t" NOP_INSN "\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" : : "i" (&((char *)key)[branch]) : : l_yes); @@ -45,6 +46,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("1:\tj %l[l_yes]\n\t" "nop\n\t" ".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" WORD_INSN " 1b, %l[l_yes], %0\n\t" ".popsection\n\t" : : "i" (&((char *)key)[branch]) : : l_yes); diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h index 9a287e0ac8b1..f870a85bac46 100644 --- a/arch/powerpc/include/asm/jump_label.h +++ b/arch/powerpc/include/asm/jump_label.h @@ -24,6 +24,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran asm_volatile_goto("1:\n\t" "nop # arch_static_branch\n\t" 
".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" ".popsection \n\t" : : "i" (&((char *)key)[branch]) : : l_yes); @@ -38,6 +39,7 @@ static __always_inline bool arch_static_branch_jump(struct static_key *key, bool asm_volatile_goto("1:\n\t" "b %l[l_yes] # arch_static_branch_jump\n\t" ".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t" ".popsection \n\t" : : "i" (&((char *)key)[branch]) : : l_yes); @@ -63,6 +65,7 @@ struct jump_entry { #define ARCH_STATIC_BRANCH(LABEL, KEY) \ 1098: nop; \ .pushsection __jump_table, "aw"; \ + .balign 4; \ FTR_ENTRY_LONG 1098b, LABEL, KEY; \ .popsection #endif diff --git a/arch/tile/include/asm/jump_label.h b/arch/tile/include/asm/jump_label.h index cde7573f397b..c9f6125c41ef 100644 --- a/arch/tile/include/asm/jump_label.h +++ b/arch/tile/include/asm/jump_label.h @@ -25,6 +25,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, asm_volatile_goto("1:\n\t" "nop" "\n\t" ".pushsection __jump_table, \"aw\"\n\t" + ".balign 4 \n\t" ".quad 1b, %l[l_yes], %0 + %1 \n\t" ".popsection\n\t"