
[SRU,HWE,BIONIC,DISCO,V2] x86/mm: Sync also unmappings in vmalloc_sync_all()

Message ID 20190728130600.10637-1-colin.king@canonical.com
State New
Series [SRU,HWE,BIONIC,DISCO,V2] x86/mm: Sync also unmappings in vmalloc_sync_all()

Commit Message

Colin Ian King July 28, 2019, 1:06 p.m. UTC
From: Colin Ian King <colin.king@canonical.com>

BugLink: https://bugs.launchpad.net/bugs/1838115

With huge-page ioremap areas the unmappings also need to be synced between
all page-tables. Otherwise it can cause data corruption when a region is
unmapped and later re-used.

Make the vmalloc_sync_one() function ready to sync unmappings and make sure
vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
is found.
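
For reference, here is a minimal sketch of the resulting PMD handling
(the helper name is hypothetical and the usual kernel page-table helpers
from <asm/pgtable.h> are assumed; the authoritative change is the
vmalloc_sync_one() hunk in the diff below):

	/*
	 * Copy the PMD entry whenever the two page-tables disagree, so that
	 * an unmapping (entry cleared in the reference page-table) is
	 * propagated just like a new mapping.  NULL is only returned when
	 * the address is genuinely unmapped in the reference page-table.
	 */
	static inline pmd_t *sync_pmd_sketch(pmd_t *pmd, pmd_t *pmd_k)
	{
		if (pmd_present(*pmd) != pmd_present(*pmd_k))
			set_pmd(pmd, *pmd_k);

		if (!pmd_present(*pmd_k))
			return NULL;

		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
		return pmd_k;
	}

Because vmalloc_sync_one() no longer uses a NULL return as a "stop here"
signal, vmalloc_sync_all() has to keep walking the whole pgd_list rather
than breaking out at the first unmapped PMD; that is the second hunk of
the diff.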

Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
(backported from commit 8e998fc24de47c55b47a887f6c95ab91acd4a720)
Signed-off-by: Colin Ian King <colin.king@canonical.com>
---
 arch/x86/mm/fault.c | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

Comments

Stefan Bader July 29, 2019, 8:56 a.m. UTC | #1
On 28.07.19 15:06, Colin King wrote:
> From: Colin Ian King <colin.king@canonical.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1838115
> 
> With huge-page ioremap areas the unmappings also need to be synced between
> all page-tables. Otherwise it can cause data corruption when a region is
> unmapped and later re-used.
> 
> Make the vmalloc_sync_one() function ready to sync unmappings and make sure
> vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
> is found.
> 
> Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
> Signed-off-by: Joerg Roedel <jroedel@suse.de>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
> Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
> (backported from commit 8e998fc24de47c55b47a887f6c95ab91acd4a720)
> Signed-off-by: Colin Ian King <colin.king@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
> ---

Added a disco nomination to the bug report. This probably also affects Eoan,
as long as it has not yet been upgraded to 5.3.

-Stefan

>  arch/x86/mm/fault.c | 13 +++++--------
>  1 file changed, 5 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index 9d5c75f..eb246b3 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -193,11 +193,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
>  
>  	pmd = pmd_offset(pud, address);
>  	pmd_k = pmd_offset(pud_k, address);
> -	if (!pmd_present(*pmd_k))
> -		return NULL;
>  
> -	if (!pmd_present(*pmd))
> +	if (pmd_present(*pmd) != pmd_present(*pmd_k))
>  		set_pmd(pmd, *pmd_k);
> +
> +	if (!pmd_present(*pmd_k))
> +		return NULL;
>  	else
>  		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
>  
> @@ -219,17 +220,13 @@ void vmalloc_sync_all(void)
>  		spin_lock(&pgd_lock);
>  		list_for_each_entry(page, &pgd_list, lru) {
>  			spinlock_t *pgt_lock;
> -			pmd_t *ret;
>  
>  			/* the pgt_lock only for Xen */
>  			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
>  
>  			spin_lock(pgt_lock);
> -			ret = vmalloc_sync_one(page_address(page), address);
> +			vmalloc_sync_one(page_address(page), address);
>  			spin_unlock(pgt_lock);
> -
> -			if (!ret)
> -				break;
>  		}
>  		spin_unlock(&pgd_lock);
>  	}
>
Stefan Bader July 29, 2019, 9:49 a.m. UTC | #2
On 29.07.19 10:56, Stefan Bader wrote:
> On 28.07.19 15:06, Colin King wrote:
>> From: Colin Ian King <colin.king@canonical.com>
>>
>> BugLink: https://bugs.launchpad.net/bugs/1838115
>>
>> With huge-page ioremap areas the unmappings also need to be synced between
>> all page-tables. Otherwise it can cause data corruption when a region is
>> unmapped and later re-used.
>>
>> Make the vmalloc_sync_one() function ready to sync unmappings and make sure
>> vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
>> is found.
>>
>> Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
>> Signed-off-by: Joerg Roedel <jroedel@suse.de>
>> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
>> Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
>> Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
>> (backported from commit 8e998fc24de47c55b47a887f6c95ab91acd4a720)
>> Signed-off-by: Colin Ian King <colin.king@canonical.com>
> Acked-by: Stefan Bader <stefan.bader@canonical.com>
>> ---
> 
> Added a disco nomination to the bug report. This probably also affects Eoan,
> as long as it has not yet been upgraded to 5.3.
> 

I might have been too quick. This patch seems to be 1/3 of a series in which
all three claim to fix the same commit, and they were committed right next to
each other. It would probably be better to pick them all.

> -Stefan
> 
>>  arch/x86/mm/fault.c | 13 +++++--------
>>  1 file changed, 5 insertions(+), 8 deletions(-)
>>
>> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
>> index 9d5c75f..eb246b3 100644
>> --- a/arch/x86/mm/fault.c
>> +++ b/arch/x86/mm/fault.c
>> @@ -193,11 +193,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
>>  
>>  	pmd = pmd_offset(pud, address);
>>  	pmd_k = pmd_offset(pud_k, address);
>> -	if (!pmd_present(*pmd_k))
>> -		return NULL;
>>  
>> -	if (!pmd_present(*pmd))
>> +	if (pmd_present(*pmd) != pmd_present(*pmd_k))
>>  		set_pmd(pmd, *pmd_k);
>> +
>> +	if (!pmd_present(*pmd_k))
>> +		return NULL;
>>  	else
>>  		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
>>  
>> @@ -219,17 +220,13 @@ void vmalloc_sync_all(void)
>>  		spin_lock(&pgd_lock);
>>  		list_for_each_entry(page, &pgd_list, lru) {
>>  			spinlock_t *pgt_lock;
>> -			pmd_t *ret;
>>  
>>  			/* the pgt_lock only for Xen */
>>  			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
>>  
>>  			spin_lock(pgt_lock);
>> -			ret = vmalloc_sync_one(page_address(page), address);
>> +			vmalloc_sync_one(page_address(page), address);
>>  			spin_unlock(pgt_lock);
>> -
>> -			if (!ret)
>> -				break;
>>  		}
>>  		spin_unlock(&pgd_lock);
>>  	}
>>
Andy Whitcroft July 29, 2019, 10:25 a.m. UTC | #3
On Sun, Jul 28, 2019 at 02:06:00PM +0100, Colin King wrote:
> From: Colin Ian King <colin.king@canonical.com>
> 
> BugLink: https://bugs.launchpad.net/bugs/1838115
> 
> With huge-page ioremap areas the unmappings also need to be synced between
> all page-tables. Otherwise it can cause data corruption when a region is
> unmapped and later re-used.
> 
> Make the vmalloc_sync_one() function ready to sync unmappings and make sure
> vmalloc_sync_all() iterates over all page-tables even when an unmapped PMD
> is found.
> 
> Fixes: 5d72b4fba40ef ('x86, mm: support huge I/O mapping capability I/F')
> Signed-off-by: Joerg Roedel <jroedel@suse.de>
> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
> Reviewed-by: Dave Hansen <dave.hansen@linux.intel.com>
> Link: https://lkml.kernel.org/r/20190719184652.11391-3-joro@8bytes.org
> (backported from commit 8e998fc24de47c55b47a887f6c95ab91acd4a720)
> Signed-off-by: Colin Ian King <colin.king@canonical.com>
> ---
>  arch/x86/mm/fault.c | 13 +++++--------
>  1 file changed, 5 insertions(+), 8 deletions(-)
> 
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index 9d5c75f..eb246b3 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -193,11 +193,12 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
>  
>  	pmd = pmd_offset(pud, address);
>  	pmd_k = pmd_offset(pud_k, address);
> -	if (!pmd_present(*pmd_k))
> -		return NULL;
>  
> -	if (!pmd_present(*pmd))
> +	if (pmd_present(*pmd) != pmd_present(*pmd_k))
>  		set_pmd(pmd, *pmd_k);
> +
> +	if (!pmd_present(*pmd_k))
> +		return NULL;
>  	else
>  		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
>  
> @@ -219,17 +220,13 @@ void vmalloc_sync_all(void)
>  		spin_lock(&pgd_lock);
>  		list_for_each_entry(page, &pgd_list, lru) {
>  			spinlock_t *pgt_lock;
> -			pmd_t *ret;
>  
>  			/* the pgt_lock only for Xen */
>  			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
>  
>  			spin_lock(pgt_lock);
> -			ret = vmalloc_sync_one(page_address(page), address);
> +			vmalloc_sync_one(page_address(page), address);
>  			spin_unlock(pgt_lock);
> -
> -			if (!ret)
> -				break;
>  		}
>  		spin_unlock(&pgd_lock);
>  	}
> -- 

This patch looks good, but I think we may have found a couple of related
patches.

So 'soft' nacking till we confirm.

-apw

Patch

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d5c75f..eb246b3 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -193,11 +193,12 @@  static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
 
 	pmd = pmd_offset(pud, address);
 	pmd_k = pmd_offset(pud_k, address);
-	if (!pmd_present(*pmd_k))
-		return NULL;
 
-	if (!pmd_present(*pmd))
+	if (pmd_present(*pmd) != pmd_present(*pmd_k))
 		set_pmd(pmd, *pmd_k);
+
+	if (!pmd_present(*pmd_k))
+		return NULL;
 	else
 		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
 
@@ -219,17 +220,13 @@  void vmalloc_sync_all(void)
 		spin_lock(&pgd_lock);
 		list_for_each_entry(page, &pgd_list, lru) {
 			spinlock_t *pgt_lock;
-			pmd_t *ret;
 
 			/* the pgt_lock only for Xen */
 			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
 
 			spin_lock(pgt_lock);
-			ret = vmalloc_sync_one(page_address(page), address);
+			vmalloc_sync_one(page_address(page), address);
 			spin_unlock(pgt_lock);
-
-			if (!ret)
-				break;
 		}
 		spin_unlock(&pgd_lock);
 	}