===================================================================
@@ -15,6 +15,14 @@
#ifndef __ASSEMBLY__
+static inline int pfn_overflow(phys_addr_t phys_addr)
+{
+ phys_addr_t real_pfn = phys_addr >> PAGE_SHIFT;
+ unsigned long pfn = (unsigned long)real_pfn;
+
+ return pfn != real_pfn; /* nonzero when the pfn cannot fit in unsigned long */
+}
+
struct page;
#include <linux/range.h>
===================================================================
@@ -355,9 +355,9 @@ static inline pgprotval_t massage_pgprot
return protval;
}
-static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
+static inline pte_t pfn_pte(phys_addr_t page_nr, pgprot_t pgprot)
{
- return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
+ return __pte((page_nr << PAGE_SHIFT) |
massage_pgprot(pgprot));
}
===================================================================
@@ -122,7 +122,9 @@ static void __iomem *__ioremap_caller(re
if (ram_region < 0) {
pfn = phys_addr >> PAGE_SHIFT;
last_pfn = last_addr >> PAGE_SHIFT;
- if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ /* Skip the RAM check when the pfn cannot fit in unsigned long */
+ if (!pfn_overflow(last_addr) &&
+ walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
__ioremap_check_ram) == 1)
return NULL;
}
@@ -130,7 +132,7 @@ static void __iomem *__ioremap_caller(re
* Mappings have to be page-aligned
*/
offset = phys_addr & ~PAGE_MASK;
- phys_addr &= PHYSICAL_PAGE_MASK;
+ phys_addr -= offset;
size = PAGE_ALIGN(last_addr+1) - phys_addr;
retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
===================================================================
@@ -299,6 +299,9 @@ static int pat_pagerange_is_ram(resource
unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
struct pagerange_state state = {start_pfn, 0, 0};
+ /* pfn would not fit in unsigned long; report the range as not RAM */
+ if (pfn_overflow(end + PAGE_SIZE - 1))
+ return 0;
/*
* For legacy reasons, physical address range in the legacy ISA
* region is tracked as non-RAM. This will allow users of