@@ -78,7 +78,7 @@
* generic accessors and iterators here
*/
#define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(unsigned long addr, pte_t pte, pte_t *ptep)
{
real_pte_t rpte;
@@ -44,10 +44,10 @@
#ifndef __real_pte
#ifdef CONFIG_STRICT_MM_TYPECHECKS
-#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __real_pte(a,e,p) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#else
-#define __real_pte(e,p) (e)
+#define __real_pte(a,e,p) (e)
#define __rpte_to_pte(r) (__pte(r))
#endif
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
@@ -115,10 +115,10 @@
#ifndef __real_pte
#ifdef CONFIG_STRICT_MM_TYPECHECKS
-#define __real_pte(e,p) ((real_pte_t){(e)})
+#define __real_pte(a,e,p) ((real_pte_t){(e)})
#define __rpte_to_pte(r) ((r).pte)
#else
-#define __real_pte(e,p) (e)
+#define __real_pte(a,e,p) (e)
#define __rpte_to_pte(r) (__pte(r))
#endif
#define __rpte_to_hidx(r,index) (pte_val(__rpte_to_pte(r)) >> 12)
@@ -90,7 +90,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
vpn = hpt_vpn(ea, vsid, ssize);
- rpte = __real_pte(__pte(old_pte), ptep);
+ rpte = __real_pte(ea, __pte(old_pte), ptep);
/*
*None of the sub 4k page is hashed
*/
@@ -89,7 +89,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
}
WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize);
- rpte = __real_pte(__pte(pte), ptep);
+ rpte = __real_pte(addr, __pte(pte), ptep);
/*
* Check if we have an active batch on this CPU. If not, just
We will use this in a later patch to compute the right hash index.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/hash-64k.h | 2 +-
 arch/powerpc/include/asm/book3s/64/pgtable.h  | 4 ++--
 arch/powerpc/include/asm/nohash/64/pgtable.h  | 4 ++--
 arch/powerpc/mm/hash64_64k.c                  | 2 +-
 arch/powerpc/mm/tlb_hash64.c                  | 2 +-
 5 files changed, 7 insertions(+), 7 deletions(-)