@@ -77,9 +77,10 @@ static inline pte_t __rpte_to_pte(real_pte_t rpte)
* Trick: we set __end to va + 64k, which happens works for
* a 16M page as well as we want only one iteration
*/
-#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) \
+#define pte_iterate_hashed_subpages(vpn, psize, shift) \
do { \
- unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
+ unsigned long index; \
+ unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT)); \
shift = mmu_psize_defs[psize].shift; \
for (index = 0; vpn < __end; index++, \
vpn += (1L << (shift - VPN_SHIFT))) { \
@@ -52,10 +52,9 @@
#endif
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- index = 0; \
- shift = mmu_psize_defs[psize].shift; \
+#define pte_iterate_hashed_subpages(vpn, psize, shift) \
+ do { \
+ shift = mmu_psize_defs[psize].shift; \
#define pte_iterate_hashed_end() } while(0)
@@ -123,10 +123,9 @@
#endif
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> _PAGE_F_GIX_SHIFT)
-#define pte_iterate_hashed_subpages(rpte, psize, va, index, shift) \
- do { \
- index = 0; \
- shift = mmu_psize_defs[psize].shift; \
+#define pte_iterate_hashed_subpages(vpn, psize, shift) \
+ do { \
+ shift = mmu_psize_defs[psize].shift; \
#define pte_iterate_hashed_end() } while(0)
@@ -646,7 +646,7 @@ static void native_hpte_clear(void)
static void native_flush_hash_range(unsigned long number, int local)
{
unsigned long vpn;
- unsigned long hash, index, hidx, shift, slot;
+ unsigned long hash, hidx, shift, slot;
struct hash_pte *hptep;
unsigned long hpte_v;
unsigned long want_v;
@@ -665,7 +665,7 @@ static void native_flush_hash_range(unsigned long number, int local)
vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ pte_iterate_hashed_subpages(vpn, psize, shift) {
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, hash, vpn, ssize, &valid_slot);
if (!valid_slot)
@@ -693,8 +693,7 @@ static void native_flush_hash_range(unsigned long number, int local)
vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize,
- vpn, index, shift) {
+ pte_iterate_hashed_subpages(vpn, psize, shift) {
/*
* We are not looking at subpage valid here
*/
@@ -713,8 +712,7 @@ static void native_flush_hash_range(unsigned long number, int local)
vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize,
- vpn, index, shift) {
+ pte_iterate_hashed_subpages(vpn, psize, shift) {
/*
* We are not looking at subpage valid here
*/
@@ -1298,11 +1298,11 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
unsigned long flags)
{
bool valid_slot;
- unsigned long hash, index, shift, hidx, slot;
+ unsigned long hash, shift, hidx, slot;
int local = flags & HPTE_LOCAL_UPDATE;
DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
- pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ pte_iterate_hashed_subpages(vpn, psize, shift) {
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, hash, vpn, ssize, &valid_slot);
if (!valid_slot)
@@ -1311,7 +1311,7 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
hash = ~hash;
slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
slot += hidx & _PTEIDX_GROUP_IX;
- DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx);
+ DBG_LOW(" hash=%lx, hidx=%lx\n", slot, hidx);
/*
* We use same base page size and actual psize, because we don't
* use these functions for hugepage
@@ -534,7 +534,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
unsigned long param[9];
- unsigned long hash, index, shift, hidx, slot;
+ unsigned long hash, shift, hidx, slot;
real_pte_t pte;
int psize, ssize;
@@ -549,7 +549,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
vpn = batch->vpn[i];
pte = batch->pte[i];
- pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
+ pte_iterate_hashed_subpages(vpn, psize, shift) {
hash = hpt_hash(vpn, shift, ssize);
hidx = __rpte_to_hidx(pte, hash, vpn, ssize, &valid_slot);
if (!valid_slot)
Now that we don't really use real_pte_t, drop it from the iterator
argument list. A follow-up patch will remove real_pte_t completely.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hash-64k.h |  5 +++--
 arch/powerpc/include/asm/book3s/64/pgtable.h  |  7 +++----
 arch/powerpc/include/asm/nohash/64/pgtable.h  |  7 +++----
 arch/powerpc/mm/hash_native_64.c              | 10 ++++------
 arch/powerpc/mm/hash_utils_64.c               |  6 +++---
 arch/powerpc/platforms/pseries/lpar.c         |  4 ++--
 6 files changed, 18 insertions(+), 21 deletions(-)