[v5,18/26] powerpc/book3s64/keys/kuap: Reset AMR/IAMR values on kexec

Message ID 20200619135850.47155-19-aneesh.kumar@linux.ibm.com (mailing list archive)
State Superseded
Series powerpc/book3s/64/pkeys: Simplify the code

Checks

Context                      | Check   | Description
snowpatch_ozlabs/apply_patch | success | Successfully applied on branch powerpc/merge (4885fd794229380fe6d6f5913d9f13670593a627)
snowpatch_ozlabs/checkpatch  | warning | Test checkpatch on branch powerpc/merge
snowpatch_ozlabs/needsstable | success | Patch has no Fixes tags

Commit Message

Aneesh Kumar K V June 19, 2020, 1:58 p.m. UTC
As we kexec across kernels that use AMR/IAMR for different purposes,
we need to ensure that new kernels get kexec'd with a reset value
of AMR/IAMR. For example, the new kernel can use key 0 for its kernel
mapping, and the old AMR value would prevent access to key 0.

This patch also removes the reset of IAMR and AMOR in kexec_sequence.
The AMOR reset is not needed, and the IAMR reset is partial (it doesn't
reset secondary CPUs) and is redundant with this patch.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
---
 .../powerpc/include/asm/book3s/64/kup-radix.h | 20 +++++++++++++++++++
 arch/powerpc/include/asm/kup.h                | 14 +++++++++++++
 arch/powerpc/kernel/misc_64.S                 | 14 -------------
 arch/powerpc/kexec/core_64.c                  |  3 +++
 arch/powerpc/mm/book3s64/pgtable.c            |  3 +++
 5 files changed, 40 insertions(+), 14 deletions(-)
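
For readers new to the register layout: AMR holds two permission bits per
protection key, with key 0 in the most-significant pair. Below is a minimal
editorial sketch of the failure mode the commit message describes, using masks
that mirror the AMR_KUAP_BLOCK_READ/WRITE definitions in kup-radix.h; the
exact names and the helper are illustrative assumptions, not code from the
patch.

/*
 * Sketch only, not part of the patch.  A kernel running with radix
 * KUAP leaves key 0 blocked in AMR:
 */
#define AMR_KEY0_BLOCK_READ	0x4000000000000000UL
#define AMR_KEY0_BLOCK_WRITE	0x8000000000000000UL

/*
 * A kexec'd kernel that maps its own text/data with key 0 would then
 * fault on its very first access, so the kexec path must hand over
 * with AMR cleared:
 */
static inline void kexec_reset_amr(void)	/* hypothetical helper */
{
	mtspr(SPRN_AMR, 0);	/* open all keys, including key 0 */
	isync();		/* order the SPR write before later accesses */
}

The patch achieves this via reset_kuap()/reset_kuep(), gated on the MMU
features, as shown in the diff below.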

Comments

Michael Ellerman July 6, 2020, 12:29 p.m. UTC | #1
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
> As we kexec across kernels that use AMR/IAMR for different purposes,
> we need to ensure that new kernels get kexec'd with a reset value
> of AMR/IAMR. For example, the new kernel can use key 0 for its kernel
> mapping, and the old AMR value would prevent access to key 0.
>
> This patch also removes the reset of IAMR and AMOR in kexec_sequence.
> The AMOR reset is not needed, and the IAMR reset is partial (it doesn't
> reset secondary CPUs) and is redundant with this patch.

I like the idea of cleaning this stuff up.

But I think tying it into kup/kuep/kuap and the MMU features and ifdefs
and so on is overly complicated.

We should just have eg:

void reset_sprs(void)
{
	if (early_cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_AMR, 0);
		mtspr(SPRN_UAMOR, 0);
	}

	if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
		mtspr(SPRN_IAMR, 0);
	}
}

And call that from the kexec paths.

cheers
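
A minimal sketch of what that could look like, placed where the posted patch
calls reset_kuap()/reset_kuep(). The function body is abridged from
arch/powerpc/kexec/core_64.c, and reset_sprs() is the helper proposed above,
so treat this as an assumption rather than what was merged:

/* Sketch: wiring the suggested helper into the secondary-CPU kexec path. */
static void kexec_smp_down(void *arg)
{
	/* ... existing IRQ and CPU teardown elided ... */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	reset_sprs();	/* clear AMR/IAMR/UAMOR before the new kernel runs */

	kexec_smp_wait();
	/* NOTREACHED */
}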

Aneesh Kumar K V July 6, 2020, 2:39 p.m. UTC | #2
On 7/6/20 5:59 PM, Michael Ellerman wrote:
> "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
>> As we kexec across kernels that use AMR/IAMR for different purposes,
>> we need to ensure that new kernels get kexec'd with a reset value
>> of AMR/IAMR. For example, the new kernel can use key 0 for its kernel
>> mapping, and the old AMR value would prevent access to key 0.
>>
>> This patch also removes the reset of IAMR and AMOR in kexec_sequence.
>> The AMOR reset is not needed, and the IAMR reset is partial (it doesn't
>> reset secondary CPUs) and is redundant with this patch.
> 
> I like the idea of cleaning this stuff up.
> 
> But I think tying it into kup/kuep/kuap and the MMU features and ifdefs
> and so on is overly complicated.
> 
> We should just have eg:
> 
> void reset_sprs(void)
> {
> 	if (early_cpu_has_feature(CPU_FTR_ARCH_206)) {
> 		mtspr(SPRN_AMR, 0);
> 		mtspr(SPRN_UAMOR, 0);
> 	}
>
> 	if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
> 		mtspr(SPRN_IAMR, 0);
> 	}
> }
> 
> And call that from the kexec paths.
>

Will fix. Should that be early_cpu_has_feature()? cpu_has_feature()
should work there, right?

-aneesh
Michael Ellerman July 7, 2020, 1:07 a.m. UTC | #3
"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
> On 7/6/20 5:59 PM, Michael Ellerman wrote:
>> "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:
>>> As we kexec across kernels that use AMR/IAMR for different purposes,
>>> we need to ensure that new kernels get kexec'd with a reset value
>>> of AMR/IAMR. For example, the new kernel can use key 0 for its kernel
>>> mapping, and the old AMR value would prevent access to key 0.
>>>
>>> This patch also removes the reset of IAMR and AMOR in kexec_sequence.
>>> The AMOR reset is not needed, and the IAMR reset is partial (it doesn't
>>> reset secondary CPUs) and is redundant with this patch.
>> 
>> I like the idea of cleaning this stuff up.
>> 
>> But I think tying it into kup/kuep/kuap and the MMU features and ifdefs
>> and so on is overly complicated.
>> 
>> We should just have eg:
>> 
>> void reset_sprs(void)
>> {
>> 	if (early_cpu_has_feature(CPU_FTR_ARCH_206)) {
>> 		mtspr(SPRN_AMR, 0);
>> 		mtspr(SPRN_UAMOR, 0);
>> 	}
>>
>> 	if (early_cpu_has_feature(CPU_FTR_ARCH_207S)) {
>> 		mtspr(SPRN_IAMR, 0);
>> 	}
>> }
>> 
>> And call that from the kexec paths.
>
> Will fix. Should that be early_cpu_has_feature()? cpu_has_feature()
> should work there, right?

Yeah, I guess. I was thinking of the case where we crash before code
patching is done, but if that happens we can't kdump anyway. So I'm
probably being overly paranoid.

cheers
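
For context on the question: early_cpu_has_feature() reads the feature word
directly and is safe before feature sections and static keys are patched,
whereas cpu_has_feature() may compile to a static branch that is only correct
after patching. A simplified sketch approximating
arch/powerpc/include/asm/cpu_has_feature.h at the time (details may differ by
tree):

/* Always tests the feature mask directly; safe at any point in boot. */
static __always_inline bool early_cpu_has_feature(unsigned long feature)
{
	return !!((CPU_FTRS_ALWAYS & feature) ||
		  (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
}

/*
 * With CONFIG_JUMP_LABEL_FEATURE_CHECKS this becomes a static branch:
 * faster, but only valid once feature patching has run.
 */
static __always_inline bool cpu_has_feature(unsigned long feature)
{
	int i;

	if (CPU_FTRS_ALWAYS & feature)
		return true;
	if (!(CPU_FTRS_POSSIBLE & feature))
		return false;

	i = __builtin_ctzl(feature);
	return static_branch_likely(&cpu_feature_keys[i]);
}

By the time the kexec paths run, feature patching has long completed, which is
why cpu_has_feature() works there too; the early variant only matters for a
crash before patching, as noted above.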

Patch

diff --git a/arch/powerpc/include/asm/book3s/64/kup-radix.h b/arch/powerpc/include/asm/book3s/64/kup-radix.h
index 3ee1ec60be84..c57063c35833 100644
--- a/arch/powerpc/include/asm/book3s/64/kup-radix.h
+++ b/arch/powerpc/include/asm/book3s/64/kup-radix.h
@@ -180,6 +180,26 @@  static inline unsigned long kuap_get_and_check_amr(void)
 }
 #endif /* CONFIG_PPC_KUAP */
 
+#define reset_kuap reset_kuap
+static inline void reset_kuap(void)
+{
+	if (mmu_has_feature(MMU_FTR_RADIX_KUAP)) {
+		mtspr(SPRN_AMR, 0);
+		/*  Do we need isync()? We are going via a kexec reset */
+		isync();
+	}
+}
+
+#define reset_kuep reset_kuep
+static inline void reset_kuep(void)
+{
+	if (mmu_has_feature(MMU_FTR_KUEP)) {
+		mtspr(SPRN_IAMR, 0);
+		/*  Do we need isync()? We are going via a kexec reset */
+		isync();
+	}
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_BOOK3S_64_KUP_RADIX_H */
diff --git a/arch/powerpc/include/asm/kup.h b/arch/powerpc/include/asm/kup.h
index c745ee41ad66..4dc23a706910 100644
--- a/arch/powerpc/include/asm/kup.h
+++ b/arch/powerpc/include/asm/kup.h
@@ -113,6 +113,20 @@  static inline void prevent_current_write_to_user(void)
 	prevent_user_access(NULL, NULL, ~0UL, KUAP_CURRENT_WRITE);
 }
 
+#ifndef reset_kuap
+#define reset_kuap reset_kuap
+static inline void reset_kuap(void)
+{
+}
+#endif
+
+#ifndef reset_kuep
+#define reset_kuep reset_kuep
+static inline void reset_kuep(void)
+{
+}
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_POWERPC_KUAP_H_ */
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 1864605eca29..7bb46ad98207 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -413,20 +413,6 @@  _GLOBAL(kexec_sequence)
 	li	r0,0
 	std	r0,16(r1)
 
-BEGIN_FTR_SECTION
-	/*
-	 * This is the best time to turn AMR/IAMR off.
-	 * key 0 is used in radix for supervisor<->user
-	 * protection, but on hash key 0 is reserved
-	 * ideally we want to enter with a clean state.
-	 * NOTE, we rely on r0 being 0 from above.
-	 */
-	mtspr	SPRN_IAMR,r0
-BEGIN_FTR_SECTION_NESTED(42)
-	mtspr	SPRN_AMOR,r0
-END_FTR_SECTION_NESTED_IFSET(CPU_FTR_HVMODE, 42)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-
 	/* save regs for local vars on new stack.
 	 * yes, we won't go back, but ...
 	 */
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index b4184092172a..a124715f33ea 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -152,6 +152,9 @@  static void kexec_smp_down(void *arg)
 	if (ppc_md.kexec_cpu_down)
 		ppc_md.kexec_cpu_down(0, 1);
 
+	reset_kuap();
+	reset_kuep();
+
 	kexec_smp_wait();
 	/* NOTREACHED */
 }
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index c58ad1049909..9673f4b74c9a 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -165,6 +165,9 @@  void mmu_cleanup_all(void)
 		radix__mmu_cleanup_all();
 	else if (mmu_hash_ops.hpte_clear_all)
 		mmu_hash_ops.hpte_clear_all();
+
+	reset_kuap();
+	reset_kuep();
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG