
[v2,2/2] powerpc/mm: Tracking vDSO remap

Message ID 25152b76585716dc635945c3455ab9b49e645f6d.1427280806.git.ldufour@linux.vnet.ibm.com (mailing list archive)
State Superseded

Commit Message

Laurent Dufour March 25, 2015, 11:06 a.m. UTC
Some processes (CRIU) move the vDSO area using the mremap system
call. As a consequence, the kernel's reference to the vDSO base address
is no longer valid, and the signal return frames built once the vDSO
has been moved do not point to the new sigreturn address.

This patch handles vDSO remapping and unmapping.

Signed-off-by: Laurent Dufour <ldufour@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/mmu_context.h | 36 +++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)
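
For illustration, here is a minimal userland sketch of the kind of move
CRIU performs (the destination address and the two-page size are
assumptions made up for the example; real code takes the actual vDSO
range from /proc/self/maps):

#define _GNU_SOURCE		/* for mremap() */
#include <stdio.h>
#include <sys/auxv.h>		/* getauxval, AT_SYSINFO_EHDR */
#include <sys/mman.h>		/* mremap, MREMAP_*, MAP_FAILED */

int main(void)
{
	/* The kernel publishes the vDSO base in the auxiliary vector. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);
	void *dest = (void *)0x70000000000UL;	/* arbitrary free slot */

	if (mremap((void *)vdso, 2 * 4096, 2 * 4096,
		   MREMAP_MAYMOVE | MREMAP_FIXED, dest) == MAP_FAILED) {
		perror("mremap");
		return 1;
	}
	/* Without the tracking added by this patch, a signal delivered
	 * from here on would get a return frame pointing at the old,
	 * stale range. */
	printf("vDSO moved from %#lx to %p\n", vdso, dest);
	return 0;
}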

Comments

Ingo Molnar March 25, 2015, 12:11 p.m. UTC | #1
* Laurent Dufour <ldufour@linux.vnet.ibm.com> wrote:

> [...]
> +static inline void arch_remap(struct mm_struct *mm,
> +			      unsigned long old_start, unsigned long old_end,
> +			      unsigned long new_start, unsigned long new_end)
> +{
> +	/*
> +	 * mremap don't allow moving multiple vma so we can limit the check
> +	 * to old_start == vdso_base.

s/mremap don't allow moving multiple vma
  mremap() doesn't allow moving multiple vmas

right?

Thanks,

	Ingo
Laurent Dufour March 25, 2015, 1:25 p.m. UTC | #2
On 25/03/2015 13:11, Ingo Molnar wrote:
> 
> * Laurent Dufour <ldufour@linux.vnet.ibm.com> wrote:
> 
>> [...]
>> +static inline void arch_remap(struct mm_struct *mm,
>> +			      unsigned long old_start, unsigned long old_end,
>> +			      unsigned long new_start, unsigned long new_end)
>> +{
>> +	/*
>> +	 * mremap don't allow moving multiple vma so we can limit the check
>> +	 * to old_start == vdso_base.
> 
> s/mremap don't allow moving multiple vma
>   mremap() doesn't allow moving multiple vmas
> 
> right?

Sure, you're right.

I'll provide a v3 fixing that comment.

Thanks,
Laurent.
Laurent Dufour March 25, 2015, 1:53 p.m. UTC | #3
CRIU recreates the process memory layout by remapping the checkpointee's
memory areas on top of the current process (criu). This includes remapping
the vDSO to the place it occupied at checkpoint time.

However, some architectures, such as powerpc, keep a reference to the vDSO
base address in order to build the signal return stack frame, which calls the
vDSO sigreturn service. So once the vDSO has been moved, this reference is no
longer valid and the signal frames built afterwards are not usable.
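
Concretely, the shape of that logic on 64-bit powerpc is roughly the
following (simplified from arch/powerpc/kernel/signal_64.c; exact
variable names may differ):

/* When the kernel still knows where the vDSO is, the signal return
 * address (LR) is pointed at its sigreturn trampoline; otherwise a
 * trampoline is written on the signal stack. */
if (vdso64_rt_sigtramp && current->mm->context.vdso_base)
	regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
else
	regs->link = (unsigned long)&frame->tramp[0];

This fallback is presumably also why arch_unmap() in the patch can
simply zero vdso_base: later signals then use the stack trampoline
instead of jumping to a stale address.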

This patch series introduces a new mm hook, 'arch_remap', which is called
once mremap is done, while the mm lock is still held. The second patch adds
vDSO remap and unmap tracking to the powerpc architecture.
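
Patch 1/2 is not reproduced on this page; going by the description above
and the diffstat below, its generic side presumably reduces to something
like the sketch that follows (the empty fallback and the exact call site
inside move_vma() are assumptions, not the actual patch):

/* Fallback: architectures that do not define __HAVE_ARCH_REMAP get an
 * empty inline, so generic code can call the hook unconditionally. */
#ifndef __HAVE_ARCH_REMAP
static inline void arch_remap(struct mm_struct *mm,
			      unsigned long old_start, unsigned long old_end,
			      unsigned long new_start, unsigned long new_end)
{
}
#endif

/* In mm/mremap.c:move_vma(), after the pages have been moved and with
 * the mm lock still held, both ranges are handed to the architecture: */
arch_remap(mm, old_addr, old_addr + old_len,
	   new_addr, new_addr + new_len);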

Changes in v3:
--------------
- Fixed a grammatical error in a comment of the second patch.
  Thanks again, Ingo.

Changes in v2:
--------------
- Following Ingo Molnar's advice, the call to arch_remap is now enabled
  through the __HAVE_ARCH_REMAP macro. This considerably reduces the first
  patch.

Laurent Dufour (2):
  mm: Introducing arch_remap hook
  powerpc/mm: Tracking vDSO remap

 arch/powerpc/include/asm/mmu_context.h | 36 +++++++++++++++++++++++++++++++++-
 mm/mremap.c                            | 11 +++++++++--
 2 files changed, 44 insertions(+), 3 deletions(-)

Patch

diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 73382eba02dc..be5dca3f7826 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -8,7 +8,6 @@
 #include <linux/spinlock.h>
 #include <asm/mmu.h>	
 #include <asm/cputable.h>
-#include <asm-generic/mm_hooks.h>
 #include <asm/cputhreads.h>
 
 /*
@@ -109,5 +108,40 @@ static inline void enter_lazy_tlb(struct mm_struct *mm,
 #endif
 }
 
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+				 struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+static inline void arch_unmap(struct mm_struct *mm,
+			struct vm_area_struct *vma,
+			unsigned long start, unsigned long end)
+{
+	if (start <= mm->context.vdso_base && mm->context.vdso_base < end)
+		mm->context.vdso_base = 0;
+}
+
+static inline void arch_bprm_mm_init(struct mm_struct *mm,
+				     struct vm_area_struct *vma)
+{
+}
+
+#define __HAVE_ARCH_REMAP
+static inline void arch_remap(struct mm_struct *mm,
+			      unsigned long old_start, unsigned long old_end,
+			      unsigned long new_start, unsigned long new_end)
+{
+	/*
+	 * mremap don't allow moving multiple vma so we can limit the check
+	 * to old_start == vdso_base.
+	 */
+	if (old_start == mm->context.vdso_base)
+		mm->context.vdso_base = new_start;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_POWERPC_MMU_CONTEXT_H */