diff mbox series

[RFC,03/14] powerpc/tm: Recheckpoint when exiting from kernel

Message ID 1541508028-31865-4-git-send-email-leitao@debian.org (mailing list archive)
State RFC
Headers show
Series New TM Model | expand

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success next/apply_patch Successfully applied
snowpatch_ozlabs/checkpatch success Test checkpatch on branch next

Commit Message

Breno Leitao Nov. 6, 2018, 12:40 p.m. UTC
This is the only place we are going to recheckpoint now. Now the task
needs to have TIF_RESTORE_TM flag set, which will get into
restore_tm_state() at exception exit path, and execute the recheckpoint
depending on the MSR.

Every time a task is required to recheckpoint, or just have the TM SPRs
restored, the TIF_RESTORE_TM flag should be set and the task MSR should
properly be in a transactional state, which will be checked by
restore_tm_state().

After the facility registers are recheckpointed, they are clobbered with
the values that were recheckpointed (and are now also in the checkpoint
area).

If a facility is enabled in the MSR that is being returned to user space,
then the facility registers need to be restored, otherwise userspace will
see invalid values.

This patch simplifies restore_tm_state() to just restore the facility
registers that are enabled when returning to userspace, i.e. the MSR will
be the same that will be put into SRR1, which will be the MSR after RFID.

Signed-off-by: Breno Leitao <leitao@debian.org>
---
 arch/powerpc/kernel/process.c | 38 ++++++++++++++++++++++++-----------
 1 file changed, 26 insertions(+), 12 deletions(-)

Comments

Michael Neuling Nov. 15, 2018, 12:11 a.m. UTC | #1
On Tue, 2018-11-06 at 10:40 -0200, Breno Leitao wrote:
> This is the only place we are going to recheckpoint now. Now the task
> needs to have TIF_RESTORE_TM flag set, which will get into
> restore_tm_state() at exception exit path, and execute the recheckpoint
> depending on the MSR.
> 
> Every time a task is required to recheckpoint, or just have the TM SPRs
> restore, the TIF_RESTORE_TM flag should be set and the task MSR should
> properly be in a transactional state, which will be checked by
> restore_tm_state().
> 
> After the facility registers are recheckpointed, they are clobbered with
> the values that were recheckpointed (and are now also in the checkpoint
> area).

Which facility registers? I don't understand this.

> If facility is enabled at MSR that is being returned to user space, then
> the facility registers need to be restored, otherwise userspace will see
> invalid values.
> 
> This patch simplify the restore_tm_state() to just restore the facility
> registers that are enabled when returning to userspace, i.e. the MSR will
> be the same that will be put into SRR1, which will be the MSR after RFID.
> 
> Signed-off-by: Breno Leitao <leitao@debian.org>
> ---
>  arch/powerpc/kernel/process.c | 38 ++++++++++++++++++++++++-----------
>  1 file changed, 26 insertions(+), 12 deletions(-)
> 
> diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
> index 4d5322cfad25..c7e758a42b8f 100644
> --- a/arch/powerpc/kernel/process.c
> +++ b/arch/powerpc/kernel/process.c
> @@ -1049,8 +1049,6 @@ static inline void __switch_to_tm(struct task_struct
> *prev,
>   */
>  void restore_tm_state(struct pt_regs *regs)
>  {
> -	unsigned long msr_diff;
> -
>  	/*
>  	 * This is the only moment we should clear TIF_RESTORE_TM as
>  	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
> @@ -1061,19 +1059,35 @@ void restore_tm_state(struct pt_regs *regs)
>  	if (!MSR_TM_ACTIVE(regs->msr))
>  		return;
>  
> -	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
> -	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
> +	tm_enable();
> +	/* The only place we recheckpoint */
> +	tm_recheckpoint(&current->thread);
>  
> -	/* Ensure that restore_math() will restore */
> -	if (msr_diff & MSR_FP)
> -		current->thread.load_fp = 1;
> +	/*
> +	 * Restore the facility registers that were clobbered during
> +	 * recheckpoint.
> +	 */
> +	if (regs->msr & MSR_FP) {
> +		/*
> +		 * Using load_fp_state() instead of restore_fp() because we
> +		 * want to force the restore, independent of
> +		 * tsk->thread.load_fp. Same for other cases below.
> +		 */
> +		load_fp_state(&current->thread.fp_state);
> +	}
>  #ifdef CONFIG_ALTIVEC
> -	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
> -		current->thread.load_vec = 1;
> +	if (cpu_has_feature(CPU_FTR_ALTIVEC) && regs->msr & MSR_VEC)
> +		load_vr_state(&current->thread.vr_state);
> +#endif
> +#ifdef CONFIG_VSX
> +	if (cpu_has_feature(CPU_FTR_VSX) && regs->msr & MSR_VSX) {
> +		/*
> +		 * If VSX is enabled, it is expected that VEC and FP are
> +		 * also enabled and already restored the full register set.
> +		 * Cause a warning if that is not the case.
> +		 */
> +		WARN_ON(!(regs->msr & MSR_VEC) || !(regs->msr & MSR_FP)); }
>  #endif
> -	restore_math(regs);
> -
> -	regs->msr |= msr_diff;
>  }
>  
>  #else
diff mbox series

Patch

diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 4d5322cfad25..c7e758a42b8f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1049,8 +1049,6 @@  static inline void __switch_to_tm(struct task_struct *prev,
  */
 void restore_tm_state(struct pt_regs *regs)
 {
-	unsigned long msr_diff;
-
 	/*
 	 * This is the only moment we should clear TIF_RESTORE_TM as
 	 * it is here that ckpt_regs.msr and pt_regs.msr become the same
@@ -1061,19 +1059,35 @@  void restore_tm_state(struct pt_regs *regs)
 	if (!MSR_TM_ACTIVE(regs->msr))
 		return;
 
-	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
-	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
+	tm_enable();
+	/* The only place we recheckpoint */
+	tm_recheckpoint(&current->thread);
 
-	/* Ensure that restore_math() will restore */
-	if (msr_diff & MSR_FP)
-		current->thread.load_fp = 1;
+	/*
+	 * Restore the facility registers that were clobbered during
+	 * recheckpoint.
+	 */
+	if (regs->msr & MSR_FP) {
+		/*
+		 * Using load_fp_state() instead of restore_fp() because we
+		 * want to force the restore, independent of
+		 * tsk->thread.load_fp. Same for other cases below.
+		 */
+		load_fp_state(&current->thread.fp_state);
+	}
 #ifdef CONFIG_ALTIVEC
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
-		current->thread.load_vec = 1;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && regs->msr & MSR_VEC)
+		load_vr_state(&current->thread.vr_state);
+#endif
+#ifdef CONFIG_VSX
+	if (cpu_has_feature(CPU_FTR_VSX) && regs->msr & MSR_VSX) {
+		/*
+		 * If VSX is enabled, it is expected that VEC and FP are
+		 * also enabled and already restored the full register set.
+		 * Cause a warning if that is not the case.
+		 */
+		WARN_ON(!(regs->msr & MSR_VEC) || !(regs->msr & MSR_FP)); }
 #endif
-	restore_math(regs);
-
-	regs->msr |= msr_diff;
 }
 
 #else