[SRU,Xenial,3/3] x86/power/64: Always create temporary identity mapping correctly

Message ID: 20170425102009.9978-4-kai.heng.feng@canonical.com
State: New

Commit Message

Kai-Heng Feng April 25, 2017, 10:20 a.m. UTC
From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>

BugLink: https://bugs.launchpad.net/bugs/1686061

The low-level resume-from-hibernation code on x86-64 uses
kernel_ident_mapping_init() to create the temporary identity mapping,
but that function assumes that the offset between kernel virtual
addresses and physical addresses is aligned on the PGD level.

However, with a randomized identity mapping base, it may be aligned
on the PUD level and if that happens, the temporary identity mapping
created by set_up_temporary_mappings() will not reflect the actual
kernel identity mapping and the image restoration will fail as a
result (leading to a kernel panic most of the time).

To fix this problem, rework kernel_ident_mapping_init() to support
unaligned offsets between KVA and PA up to the PMD level and make
set_up_temporary_mappings() use it as appropriate.

Reported-and-tested-by: Thomas Garnier <thgarnie@google.com>
Reported-by: Borislav Petkov <bp@suse.de>
Suggested-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Yinghai Lu <yinghai@kernel.org>
(cherry picked from commit e4630fdd47637168927905983205d7b7c5c08c09)
Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
---
 arch/x86/include/asm/init.h   |  4 ++--
 arch/x86/mm/ident_map.c       | 19 +++++++++++--------
 arch/x86/power/hibernate_64.c |  2 +-
 3 files changed, 14 insertions(+), 11 deletions(-)
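
The failure mode is easy to see as index arithmetic. The old code
selected the PGD slot as pgd_index(pa) + pgd_index(__PAGE_OFFSET),
which equals pgd_index(pa + __PAGE_OFFSET) only when the offset is
PGD-aligned; a base that is only PUD-aligned both lets the low bits
carry into the PGD index and shifts the PUD index within a slot, which
a PGD-level correction cannot express at all. A standalone sketch of
the carry (illustrative constants, not kernel code):

#include <stdio.h>

#define PGDIR_SHIFT	39
#define PUD_SHIFT	30
#define PTRS_PER_PGD	512UL
#define pgd_index(a)	(((a) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

int main(void)
{
	/* PUD-aligned but not PGD-aligned base, as with KASLR memory
	 * randomization; the exact value here is made up. */
	unsigned long page_offset = (0x110UL << PGDIR_SHIFT) + (3UL << PUD_SHIFT);
	/* Physical address near the end of the first 512 GiB PGD slot. */
	unsigned long pa = 510UL << PUD_SHIFT;

	unsigned long old_idx = pgd_index(pa) + pgd_index(page_offset);
	unsigned long new_idx = pgd_index(pa + page_offset);

	/* Prints "old 272, new 273": the old scheme lands one slot off. */
	printf("old %lu, new %lu\n", old_idx, new_idx);
	return 0;
}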

Comments

Colin Ian King April 25, 2017, 11:07 a.m. UTC | #1
On 25/04/17 11:20, Kai-Heng Feng wrote:
> From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1686061
> 
> The low-level resume-from-hibernation code on x86-64 uses
> kernel_ident_mapping_init() to create the temporary identity mapping,
> but that function assumes that the offset between kernel virtual
> addresses and physical addresses is aligned on the PGD level.
> 
> However, with a randomized identity mapping base, it may be aligned
> on the PUD level and if that happens, the temporary identity mapping
> created by set_up_temporary_mappings() will not reflect the actual
> kernel identity mapping and the image restoration will fail as a
> result (leading to a kernel panic most of the time).
> 
> To fix this problem, rework kernel_ident_mapping_init() to support
> unaligned offsets between KVA and PA up to the PMD level and make
> set_up_temporary_mappings() use it as appropriate.
> 
> Reported-and-tested-by: Thomas Garnier <thgarnie@google.com>
> Reported-by: Borislav Petkov <bp@suse.de>
> Suggested-by: Yinghai Lu <yinghai@kernel.org>
> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
> Acked-by: Yinghai Lu <yinghai@kernel.org>
> (cherry picked from commit e4630fdd47637168927905983205d7b7c5c08c09)
> Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
> ---
>  arch/x86/include/asm/init.h   |  4 ++--
>  arch/x86/mm/ident_map.c       | 19 +++++++++++--------
>  arch/x86/power/hibernate_64.c |  2 +-
>  3 files changed, 14 insertions(+), 11 deletions(-)
> 
This is a clean upstream cherry pick of a fix that has existed for a
while, so it has a good history of usage by now.  It fixes a
resume-from-hibernation issue, so I'm happy with this fix.

Acked-by: Colin Ian King <colin.king@canonical.com>

Patch

diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 223042086f4e..737da62bfeb0 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,10 +5,10 @@ struct x86_mapping_info {
 	void *(*alloc_pgt_page)(void *); /* allocate buf for page table */
 	void *context;			 /* context for alloc_pgt_page */
 	unsigned long pmd_flag;		 /* page flag for PMD entry */
-	bool kernel_mapping;		 /* kernel mapping or ident mapping */
+	unsigned long offset;		 /* ident mapping offset */
 };
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-				unsigned long addr, unsigned long end);
+				unsigned long pstart, unsigned long pend);
 
 #endif /* _ASM_X86_INIT_H */
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index 751ca920773a..8886e3e6cc65 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -1,13 +1,15 @@
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
 	addr &= PMD_MASK;
 	for (; addr < end; addr += PMD_SIZE) {
 		pmd_t *pmd = pmd_page + pmd_index(addr);
 
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+		if (pmd_present(*pmd))
+			continue;
+
+		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
 	}
 }
 static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
@@ -25,13 +27,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -39,14 +41,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
+			      unsigned long pstart, unsigned long pend)
 {
+	unsigned long addr = pstart + info->offset;
+	unsigned long end = pend + info->offset;
 	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pgd_t *pgd = pgd_page + pgd_index(addr);
 		pud_t *pud;
 
 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index f2b5e6a5cf95..aabc7b4f95c4 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -87,7 +87,7 @@ static int set_up_temporary_mappings(void)
 	struct x86_mapping_info info = {
 		.alloc_pgt_page	= alloc_pgt_page,
 		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
-		.kernel_mapping = true,
+		.offset		= __PAGE_OFFSET,
 	};
 	unsigned long mstart, mend;
 	int result;
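
For reference, the net effect of the reworked walk is that page-table
indices are computed from the virtual address (pstart + info->offset)
while the leaf entry stores the physical address (addr - info->offset).
A minimal userspace mock of the PMD-level step, with made-up constants
and a single flat table standing in for the real page tables:

#include <stdio.h>

#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define PTRS_PER_PMD	512UL
#define pmd_index(a)	(((a) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

int main(void)
{
	unsigned long pmd_page[PTRS_PER_PMD] = { 0 };
	/* Historical non-randomized direct-map base, used only as an example. */
	unsigned long offset = 0xffff880000000000UL;
	unsigned long pstart = 0x200000UL, pend = 0x600000UL;

	/* Walk in virtual addresses, as the patched code does. */
	unsigned long addr = (pstart + offset) & PMD_MASK;
	unsigned long end = pend + offset;

	for (; addr < end; addr += PMD_SIZE)
		pmd_page[pmd_index(addr)] = addr - offset;	/* store the PA */

	for (unsigned long i = 0; i < PTRS_PER_PMD; i++)
		if (pmd_page[i])
			printf("pmd[%lu] -> PA %#lx\n", i, pmd_page[i]);
	return 0;
}

This prints pmd[1] -> PA 0x200000 and pmd[2] -> PA 0x400000: the
indices come from the kernel virtual address, the stored values are
physical, so the temporary tables match the real kernel identity
mapping regardless of how the base was randomized.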