Message ID | 1469629097-30859-12-git-send-email-mpe@ellerman.id.au (mailing list archive) |
---|---|
State | Accepted |
Headers | show |
On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote: > > diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h > b/arch/powerpc/include/asm/book3s/64/mmu.h > index 70c995870297..6deda6ecc4f7 100644 > --- a/arch/powerpc/include/asm/book3s/64/mmu.h > +++ b/arch/powerpc/include/asm/book3s/64/mmu.h > @@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void); > extern void radix__early_init_mmu_secondary(void); > static inline void early_init_mmu_secondary(void) > { > - if (radix_enabled()) > + if (__radix_enabled()) > return radix__early_init_mmu_secondary(); > return hash__early_init_mmu_secondary(); > } This one can go, no ? Cheers, Ben.
On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote: > --- a/arch/powerpc/mm/hash_utils_64.c > +++ b/arch/powerpc/mm/hash_utils_64.c > @@ -530,7 +530,7 @@ static bool might_have_hea(void) > * we will never see an HEA ethernet device. > */ > #ifdef CONFIG_IBMEBUS > - return !cpu_has_feature(CPU_FTR_ARCH_207S) && > + return !__cpu_has_feature(CPU_FTR_ARCH_207S) && > !firmware_has_feature(FW_FEATURE_SPLPAR); > #else All these could go if that function was split. The part that reads the DT stays in early_init_mmu_devtree (basically up to "found:" and then the bit at the end that scans the huge pages). The rest, which just assigns the various mmu_*_psize can go into early_init_mmu(). That means the only conversion needed is the one below: > return false; > @@ -561,7 +561,7 @@ static void __init htab_init_page_sizes(void) > * Not in the device-tree, let's fallback on known size > * list for 16M capable GP & GR > */ > - if (mmu_has_feature(MMU_FTR_16M_PAGE)) > + if (__mmu_has_feature(MMU_FTR_16M_PAGE)) > memcpy(mmu_psize_defs, mmu_psize_defaults_gp, > sizeof(mmu_psize_defaults_gp)); > found: And the rest can remain. > @@ -591,7 +591,7 @@ found: > mmu_vmalloc_psize = MMU_PAGE_64K; > if (mmu_linear_psize == MMU_PAGE_4K) > mmu_linear_psize = MMU_PAGE_64K; > - if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { > + if (__mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { > /* > * When running on pSeries using 64k pages > for ioremap > * would stop us accessing the HEA ethernet. > So if we
On Thu, 28 Jul 2016 00:18:08 +1000 Michael Ellerman <mpe@ellerman.id.au> wrote: > From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> > > This switches early feature checks to use the non static key variant > of the function. In later patches we will be switching > cpu_has_feature() and mmu_has_feature() to use static keys and we can > use them only after static key/jump label is initialized. Any check > for feature before jump label init should be done using this new > helper. Can't convince you to call it *_has_feature_early()? Any point to a WARN_ON_ONCE() in these guys that trips if they are used after the jump labels are set up? Thanks, Nick
Benjamin Herrenschmidt <benh@kernel.crashing.org> writes: > On Thu, 2016-07-28 at 00:18 +1000, Michael Ellerman wrote: >> >> diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h >> b/arch/powerpc/include/asm/book3s/64/mmu.h >> index 70c995870297..6deda6ecc4f7 100644 >> --- a/arch/powerpc/include/asm/book3s/64/mmu.h >> +++ b/arch/powerpc/include/asm/book3s/64/mmu.h >> @@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void); >> extern void radix__early_init_mmu_secondary(void); >> static inline void early_init_mmu_secondary(void) >> { >> - if (radix_enabled()) >> + if (__radix_enabled()) >> return radix__early_init_mmu_secondary(); >> return hash__early_init_mmu_secondary(); >> } > > This one can go, no ? Yep. cheers
Nicholas Piggin <npiggin@gmail.com> writes: > On Thu, 28 Jul 2016 00:18:08 +1000 > Michael Ellerman <mpe@ellerman.id.au> wrote: > >> From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com> >> >> This switches early feature checks to use the non static key variant >> of the function. In later patches we will be switching >> cpu_has_feature() and mmu_has_feature() to use static keys and we can >> use them only after static key/jump label is initialized. Any check >> for feature before jump label init should be done using this new >> helper. > > Can't convince you to call it *_has_feature_early()? Hmmm, I'll go with early_cpu_has_feature(). Otherwise it reads "does the CPU have feature 'early'". > Any point to a WARN_ON_ONCE() in these guys that trips if they are > used after the jump labels are set up? See patch 20. Though we can't use WARN() as it may be too early in boot to WARN(), so it just prints() and dumps stack. cheers
diff --git a/arch/powerpc/include/asm/book3s/64/mmu.h b/arch/powerpc/include/asm/book3s/64/mmu.h index 70c995870297..6deda6ecc4f7 100644 --- a/arch/powerpc/include/asm/book3s/64/mmu.h +++ b/arch/powerpc/include/asm/book3s/64/mmu.h @@ -116,7 +116,7 @@ extern void hash__early_init_mmu_secondary(void); extern void radix__early_init_mmu_secondary(void); static inline void early_init_mmu_secondary(void) { - if (radix_enabled()) + if (__radix_enabled()) return radix__early_init_mmu_secondary(); return hash__early_init_mmu_secondary(); } @@ -128,7 +128,7 @@ extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base, static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base, phys_addr_t first_memblock_size) { - if (radix_enabled()) + if (__radix_enabled()) return radix__setup_initial_memory_limit(first_memblock_base, first_memblock_size); return hash__setup_initial_memory_limit(first_memblock_base, diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index 93dae296b6be..1b0b89e80824 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c @@ -184,7 +184,7 @@ void setup_paca(struct paca_struct *new_paca) * if we do a GET_PACA() before the feature fixups have been * applied */ - if (cpu_has_feature(CPU_FTR_HVMODE)) + if (__cpu_has_feature(CPU_FTR_HVMODE)) mtspr(SPRN_SPRG_HPACA, local_paca); #endif mtspr(SPRN_SPRG_PACA, local_paca); diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 984696136f96..86ffab4c427b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -227,8 +227,8 @@ static void __init configure_exceptions(void) opal_configure_cores(); /* Enable AIL if supported, and we are in hypervisor mode */ - if (cpu_has_feature(CPU_FTR_HVMODE) && - cpu_has_feature(CPU_FTR_ARCH_207S)) { + if (__cpu_has_feature(CPU_FTR_HVMODE) && + __cpu_has_feature(CPU_FTR_ARCH_207S)) { unsigned long lpcr = mfspr(SPRN_LPCR); mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); 
} diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 5f922e93af25..3aad12fb9d2f 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -530,7 +530,7 @@ static bool might_have_hea(void) * we will never see an HEA ethernet device. */ #ifdef CONFIG_IBMEBUS - return !cpu_has_feature(CPU_FTR_ARCH_207S) && + return !__cpu_has_feature(CPU_FTR_ARCH_207S) && !firmware_has_feature(FW_FEATURE_SPLPAR); #else return false; @@ -561,7 +561,7 @@ static void __init htab_init_page_sizes(void) * Not in the device-tree, let's fallback on known size * list for 16M capable GP & GR */ - if (mmu_has_feature(MMU_FTR_16M_PAGE)) + if (__mmu_has_feature(MMU_FTR_16M_PAGE)) memcpy(mmu_psize_defs, mmu_psize_defaults_gp, sizeof(mmu_psize_defaults_gp)); found: @@ -591,7 +591,7 @@ found: mmu_vmalloc_psize = MMU_PAGE_64K; if (mmu_linear_psize == MMU_PAGE_4K) mmu_linear_psize = MMU_PAGE_64K; - if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { + if (__mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { /* * When running on pSeries using 64k pages for ioremap * would stop us accessing the HEA ethernet. So if we diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 6259f5db525b..c21d160088fa 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -427,7 +427,7 @@ void __init mmu_early_init_devtree(void) if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; - if (radix_enabled()) + if (__radix_enabled()) radix__early_init_devtree(); else hash__early_init_devtree();