[1/1] Disable protected execution facility

Message ID 20200205204813.17948-2-grimm@linux.ibm.com
State Accepted
Headers show
Series Disable PEF in BML

Checks

Context Check Description
snowpatch_ozlabs/apply_patch success Successfully applied on branch master (179d53dfcca30436777b0c748d530a979bbc8a45)
snowpatch_ozlabs/snowpatch_job_snowpatch-skiboot success Test snowpatch/job/snowpatch-skiboot on branch master
snowpatch_ozlabs/snowpatch_job_snowpatch-skiboot-dco success Signed-off-by present

Commit Message

Ryan Grimm Feb. 5, 2020, 8:48 p.m. UTC
This patch disables the Protected Execution Facility (PEF).

This software procedure is needed for the lab because Cronus will be
configured to bring the machine up with PEF on.  Hostboot has a similar
procedure for running with PEF off.

Skiboot can run with PEF on but the kernel cannot; the kernel will take
a machine check when trying to write a protected resource, such as the
PTCR.

So, use this until we have an ultravisor, or if we want to use BML with
Cronus without UV = 1.

Signed-off-by: Ryan Grimm <grimm@linux.ibm.com>
---
 asm/misc.S          | 37 ++++++++++++++++++++++++++++
 core/cpu.c          | 59 +++++++++++++++++++++++++++++++++++++++++++++
 core/init.c         |  3 +++
 include/cpu.h       |  3 +++
 include/processor.h |  4 +++
 5 files changed, 106 insertions(+)
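
A note on the MSR_S test used throughout the patch: skiboot numbers register
bits IBM-style, so PPC_BIT(0) is the most significant bit of a 64-bit value.
Below is a minimal standalone sketch of the guard at the top of
cpu_disable_pef(), assuming skiboot's PPC_BIT() definition from
include/bitutils.h:

	#include <stdint.h>
	#include <stdio.h>

	/* IBM bit numbering: bit 0 is the MSB of a 64-bit value */
	#define PPC_BIT(bit)	(0x8000000000000000UL >> (bit))
	#define MSR_S		PPC_BIT(41)	/* Secure mode = 0x0000000000400000 */

	int main(void)
	{
		uint64_t msr = MSR_S;	/* pretend we booted with PEF on */

		/* mirrors the early-exit check in cpu_disable_pef() */
		if (!(msr & MSR_S))
			printf("UV mode already off, nothing to do\n");
		else
			printf("UV mode on, PEF must be disabled\n");
		return 0;
	}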

Comments

Alistair Popple Feb. 18, 2020, 5:29 a.m. UTC | #1
We've run into this issue with Simics and this works there as well, so:

Tested-by: Alistair Popple <alistair@popple.id.au>

Couple of questions though:

> +.global exit_uv_mode
> +exit_uv_mode:
> +	mfmsr	%r4
> +	LOAD_IMM64(%r5, ~MSR_S)
> +	and	%r4,%r4,%r5
> +	mtspr	SPR_USRR1,%r4
> +
> +	mfspr   %r4,SPR_HSRR1
> +	and     %r4,%r4,%r5
> +	mtspr   SPR_HSRR1,%r3
> +
> +	mfspr   %r4,SPR_SRR1
> +	and     %r4,%r4,%r5
> +	mtspr   SPR_SRR1,%r4

Is there a reason we need to update [H]SRR1 as well? I doubt we'd be running 
this in the context of an exception and other uses of SRR1 tend to set it 
explicitly rather than relying on existing values, although I may be missing 
something.

> +	cmpdi	%r3,1
> +	bne	1f
> +	mfspr   %r4, SPR_SMFCTRL
> +	LOAD_IMM64(%r5, ~PPC_BIT(0))
> +	and     %r4,%r4,%r5
> +	mtspr   SPR_SMFCTRL,%r4
> +1:
> +	isync
> +
> +	mflr	%r4
> +	mtspr	SPR_USRR0,%r4
> +
> +	urfid
> diff --git a/core/cpu.c b/core/cpu.c
> index d5b7d623..1adf16cc 100644
> --- a/core/cpu.c
> +++ b/core/cpu.c
> @@ -1644,3 +1644,62 @@ static int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr)
>  	return rc;
>  }
>  opal_call(OPAL_NMMU_SET_PTCR, opal_nmmu_set_ptcr, 2);
> +
> +static void _exit_uv_mode(void *data __unused)
> +{
> +	prlog(PR_DEBUG, "Exit uv mode on cpu pir 0x%04x\n", this_cpu()->pir);
> +	/* HW has smfctrl shared between threads but on Mambo it is per-thread */
> +	if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
> +		exit_uv_mode(1);
> +	else
> +		exit_uv_mode(cpu_is_thread0(this_cpu()));
> +}
> +
> +void cpu_disable_pef(void)
> +{
> +	struct cpu_thread *cpu;
> +	struct cpu_job **jobs;
> +
> +	if (!(mfmsr() & MSR_S)) {
> +		prlog(PR_DEBUG, "UV mode off on cpu pir 0x%04x\n", this_cpu()->pir);
> +		return;
> +	}
> +
> +	jobs = zalloc(sizeof(struct cpu_job *) * (cpu_max_pir + 1));
> +	assert(jobs);
> +
> +	/* Exit uv mode on all secondary threads before touching
> +	 * smfctrl on thread 0 */

Do we need to separate things out this way though? It seems like it should
have much the same effect to clear SMFCTRL and UV mode on every thread. Doing
so might simplify the code a bit as you could just call exit_uv_mode()
directly from main/secondary_cpu_init().
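
The simplification being suggested would look roughly like this sketch (the
helper name is hypothetical and the exact entry points in core/init.c may
differ; Ryan's reply further down explains why the patch keeps the
secondary-threads-first split instead):

	/* Hypothetical sketch: every thread drops out of UV mode itself,
	 * clearing SMFCTRL as it goes, with no job queueing needed. */
	static void cpu_exit_uv(void)
	{
		if (mfmsr() & MSR_S)
			exit_uv_mode(1);	/* clear MSR_S and SMFCTRL */
	}
	/* ...called from both main_cpu_entry() and secondary_cpu_entry() */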

> +	for_each_available_cpu(cpu) {
> +		if (cpu == this_cpu())
> +			continue;
> +
> +		if (!cpu_is_thread0(cpu))
> +			jobs[cpu->pir] = cpu_queue_job(cpu, "exit_uv_mode",
> +					_exit_uv_mode, NULL);
> +	}
> +
> +	for_each_available_cpu(cpu)
> +		if (jobs[cpu->pir]) {
> +			cpu_wait_job(jobs[cpu->pir], true);
> +			jobs[cpu->pir] = NULL;
> +		}
> +
> +	/* Exit uv mode and disable smfctrl on primary threads */
> +	for_each_available_cpu(cpu) {

Bit of a nit-pick but you could use for_each_available_core_in_chip() instead.

- Alistair

> +		if (cpu == this_cpu())
> +			continue;
> +
> +		if (cpu_is_thread0(cpu))
> +			jobs[cpu->pir] = cpu_queue_job(cpu, "exit_uv_mode",
> +					_exit_uv_mode, NULL);
> +	}
> +
> +	for_each_available_cpu(cpu)
> +		if (jobs[cpu->pir])
> +			cpu_wait_job(jobs[cpu->pir], true);
> +
> +	free(jobs);
> +
> +	_exit_uv_mode(NULL);
> +}
> diff --git a/core/init.c b/core/init.c
> index 339462e5..0d993abb 100644
> --- a/core/init.c
> +++ b/core/init.c
> @@ -1354,6 +1354,9 @@ void __noreturn __nomcount main_cpu_entry(const void *fdt)
>  	/* Add the list of interrupts going to OPAL */
>  	add_opal_interrupts();
> 
> +	/* Disable protected execution facility in BML */
> +	cpu_disable_pef();
> +
>  	/* Now release parts of memory nodes we haven't used ourselves... */
>  	mem_region_release_unused();
> 
> diff --git a/include/cpu.h b/include/cpu.h
> index 686310d7..cab63360 100644
> --- a/include/cpu.h
> +++ b/include/cpu.h
> @@ -309,4 +309,7 @@ int dctl_set_special_wakeup(struct cpu_thread *t);
>  int dctl_clear_special_wakeup(struct cpu_thread *t);
>  int dctl_core_is_gated(struct cpu_thread *t);
> 
> +extern void exit_uv_mode(int);
> +void cpu_disable_pef(void);
> +
>  #endif /* __CPU_H */
> diff --git a/include/processor.h b/include/processor.h
> index a0c2864a..1fdcc02b 100644
> --- a/include/processor.h
> +++ b/include/processor.h
> @@ -11,6 +11,7 @@
>  #define MSR_HV		PPC_BIT(3)	/* Hypervisor mode */
>  #define MSR_VEC		PPC_BIT(38)	/* VMX enable */
>  #define MSR_VSX		PPC_BIT(40)	/* VSX enable */
> +#define MSR_S		PPC_BIT(41)	/* Secure mode */
>  #define MSR_EE		PPC_BIT(48)	/* External Int. Enable */
>  #define MSR_PR		PPC_BIT(49)       	/* Problem state */
>  #define MSR_FP		PPC_BIT(50)	/* Floating Point Enable */
> @@ -65,6 +66,9 @@
>  #define SPR_HMEER	0x151	/* HMER interrupt enable mask */
>  #define SPR_PCR		0x152
>  #define SPR_AMOR	0x15d
> +#define SPR_USRR0	0x1fa   /* RW: Ultravisor Save/Restore Register 0 */
> +#define SPR_USRR1	0x1fb   /* RW: Ultravisor Save/Restore Register 1 */
> +#define SPR_SMFCTRL	0x1ff   /* RW: Secure Memory Facility Control */
>  #define SPR_PSSCR	0x357   /* RW: Stop status and control (ISA 3) */
>  #define SPR_TSCR	0x399
>  #define SPR_HID0	0x3f0
Alistair Popple Feb. 21, 2020, 12:53 a.m. UTC | #2
Also I think Oliver wanted to know what would happen if Hostboot boots Skiboot
expecting an Ultravisor (i.e. with secure memory set up and the xscom base
address in secure memory). I assume we would crash and burn in some rather
unfortunate way? At the very least we should probably print a dirty big
warning if we're supposed to be starting an Ultravisor but instead just
disable SMF and continue.

Also I guess this is unlikely, but do we need to worry about ever being booted
with URMOR being set?

- Alistair
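
A sketch of the kind of guard being described, purely illustrative: the
device-tree path and message are assumptions, not something from the posted
patch (dt_find_by_path() and prlog() are existing skiboot helpers):

	/* Hypothetical: warn loudly if the platform was configured to
	 * start an ultravisor but we are disabling SMF instead. The
	 * "/ibm,ultravisor" node name is assumed for illustration. */
	if (dt_find_by_path(dt_root, "/ibm,ultravisor"))
		prlog(PR_WARNING, "UV: ultravisor expected but not loaded, "
		      "disabling SMF and continuing in HV mode\n");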

On Tuesday, 18 February 2020 4:29:35 PM AEDT Alistair Popple wrote:
> [full quote of message #1 snipped -- identical to the review above]
Ryan Grimm April 20, 2020, 4:02 p.m. UTC | #3
On Tue, 2020-02-18 at 16:29 +1100, Alistair Popple wrote:
> We've run into this issue with Simics and this works there as well, so:
> 
> Tested-by: Alistair Popple <alistair@popple.id.au>
> 
> Couple of questions though:
> 
> > +.global exit_uv_mode
> > +exit_uv_mode:
> > +	mfmsr	%r4
> > +	LOAD_IMM64(%r5, ~MSR_S)
> > +	and	%r4,%r4,%r5
> > +	mtspr	SPR_USRR1,%r4
> > +
> > +	mfspr   %r4,SPR_HSRR1
> > +	and     %r4,%r4,%r5
> > +	mtspr   SPR_HSRR1,%r3
> > +
> > +	mfspr   %r4,SPR_SRR1
> > +	and     %r4,%r4,%r5
> > +	mtspr   SPR_SRR1,%r4
> 
> Is there a reason we need to update [H]SRR1 as well? I doubt we'd be
> running 
> this in the context of an exception and other uses of SRR1 tend to
> set it 
> explicitly rather than relying on existing values, although I may be
> missing 
> something.
> 

This sequence is documented in the P9 user manual, section 25.3.7
"Firmware code sequence for disabling SMF".  We have to do it in such
an explicit way because of the hardware.  I'll add this info to the
patch.
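
For reference, an annotated restatement of that sequence (the comments are
explanatory additions; per the Power ISA, urfid loads the MSR from USRR1 and
resumes at the address in USRR0, and mtmsrd cannot alter MSR_S, which is why
the cleared MSR has to be staged in USRR1):

	mfmsr	%r4			# current MSR, MSR_S still set
	LOAD_IMM64(%r5, ~MSR_S)
	and	%r4,%r4,%r5		# clear the secure-mode bit
	mtspr	SPR_USRR1,%r4		# urfid restores MSR from USRR1
	...
	mflr	%r4
	mtspr	SPR_USRR0,%r4		# urfid resumes at USRR0 (our caller)
	urfid				# leave ultravisor mode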


> > +	cmpdi	%r3,1
> > +	bne	1f
> > +	mfspr   %r4, SPR_SMFCTRL
> > +	LOAD_IMM64(%r5, ~PPC_BIT(0))
> > +	and     %r4,%r4,%r5
> > +	mtspr   SPR_SMFCTRL,%r4
> > +1:
> > +	isync
> > +
> > +	mflr	%r4
> > +	mtspr	SPR_USRR0,%r4
> > +
> > +	urfid
> > diff --git a/core/cpu.c b/core/cpu.c
> > index d5b7d623..1adf16cc 100644
> > --- a/core/cpu.c
> > +++ b/core/cpu.c
> > @@ -1644,3 +1644,62 @@ static int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr)
> >  	return rc;
> >  }
> >  opal_call(OPAL_NMMU_SET_PTCR, opal_nmmu_set_ptcr, 2);
> > +
> > +static void _exit_uv_mode(void *data __unused)
> > +{
> > +	prlog(PR_DEBUG, "Exit uv mode on cpu pir 0x%04x\n", this_cpu()->pir);
> > +	/* HW has smfctrl shared between threads but on Mambo it is per-thread */
> > +	if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
> > +		exit_uv_mode(1);
> > +	else
> > +		exit_uv_mode(cpu_is_thread0(this_cpu()));
> > +}
> > +
> > +void cpu_disable_pef(void)
> > +{
> > +	struct cpu_thread *cpu;
> > +	struct cpu_job **jobs;
> > +
> > +	if (!(mfmsr() & MSR_S)) {
> > +		prlog(PR_DEBUG, "UV mode off on cpu pir 0x%04x\n", this_cpu()->pir);
> > +		return;
> > +	}
> > +
> > +	jobs = zalloc(sizeof(struct cpu_job *) * (cpu_max_pir + 1));
> > +	assert(jobs);
> > +
> > +	/* Exit uv mode on all secondary threads before touching
> > +	 * smfctrl on thread 0 */
> 
> Do we need to separate things out this way though? It seems like it should
> have much the same effect to clear SMFCTRL and UV mode on every thread.
> Doing so might simplify the code a bit as you could just call
> exit_uv_mode() directly from main/secondary_cpu_init().

Well, the sequence doesn't say what you're describing won't work.  But
firmware does it in SMT1.  The sequence wasn't tested on SMT2 and SMT4.
I tested clearing the bit in smfctrl on every thread concurrently on
P9 Nimbus DD2.3 with threads enabled and the result was a core logic
checkstop.

We could go to smt1 mode before we run it, but I'm going on gut
instinct to say that sounds like more of a pain.  And now trying to
think about it more, that doesn't even make sense for skiboot to do,
right? 

I tinkered with coding it all in assembly early on in asm/head.S and
abandoned that plan.

> 
> > +	for_each_available_cpu(cpu) {
> > +		if (cpu == this_cpu())
> > +			continue;
> > +
> > +		if (!cpu_is_thread0(cpu))
> > +			jobs[cpu->pir] = cpu_queue_job(cpu,
> > "exit_uv_mode",
> > +					_exit_uv_mode, NULL);
> > +	}
> > +
> > +	for_each_available_cpu(cpu)
> > +		if (jobs[cpu->pir]) {
> > +			cpu_wait_job(jobs[cpu->pir], true);
> > +			jobs[cpu->pir] = NULL;
> > +		}
> > +
> > +	/* Exit uv mode and disable smfctrl on primary threads */
> > +	for_each_available_cpu(cpu) {
> 
> Bit of a nit-pick but you could use for_each_available_core_in_chip()
> instead.
> 

The code would then require another local variable, chip, and would add
an extra line; how is that better?

-Ryan
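
For comparison, the for_each_available_core_in_chip() shape suggested above
would look roughly like this (a sketch only, using skiboot's for_each_chip()
iterator from include/chip.h; note the extra chip variable Ryan mentions):

	struct proc_chip *chip;

	/* Queue the job on the primary thread of every available core */
	for_each_chip(chip) {
		for_each_available_core_in_chip(cpu, chip->id) {
			if (cpu == this_cpu())
				continue;
			jobs[cpu->pir] = cpu_queue_job(cpu, "exit_uv_mode",
					_exit_uv_mode, NULL);
		}
	}
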
Oliver O'Halloran June 5, 2020, 6:53 a.m. UTC | #4
On Thu, Feb 6, 2020 at 7:48 AM Ryan Grimm <grimm@linux.ibm.com> wrote:
>
> This patch disables the Protected Execution Facility (PEF).
>
> This software procedure is needed for the lab because Cronus will be
> configured to bring the machine up with PEF on.  Hostboot has a similar
> procedure for running with PEF off.
>
> Skiboot can run with PEF on but the kernel cannot; the kernel will take
> a machine check when trying to write a protected resource, such as the
> PTCR.
>
> So, use this until we have an ultravisor, or if we want to use BML with
> Cronus without UV = 1.
>
> Signed-off-by: Ryan Grimm <grimm@linux.ibm.com>

Thanks, merged as 37a369bbcb5a3f37de3affb77fc774375b83783e with a
small fix that replaces the urfid instruction with a macro to make it
build on older toolchains and LLVM.

Oliver
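
The macro Oliver mentions is not shown in this archive; a plausible shape,
following the PPC_INST_STOP pattern already used in asm/misc.S, would be to
emit the urfid encoding directly (0x4c000264: primary opcode 19, XL-form,
extended opcode 306; the exact macro name in the merged commit may differ):

	/* include/processor.h (sketch) */
	#define PPC_INST_URFID		.long 0x4c000264

	/* asm/misc.S would then end exit_uv_mode with: */
		PPC_INST_URFID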

Patch

diff --git a/asm/misc.S b/asm/misc.S
index 647f60b2..55a9a99c 100644
--- a/asm/misc.S
+++ b/asm/misc.S
@@ -255,3 +255,40 @@  enter_p9_pm_state:
 	mtspr	SPR_PSSCR,%r3
 	PPC_INST_STOP
 	b	.
+
+/* Exit UV mode and disable Protected Execution Facility
+ * For each core, this should be run on all secondary threads first to bring
+ * them out of UV mode.  Then, it is called by the primary thread to disable
+ * PEF and bring it out of UV mode.  All threads will then be running in HV
+ * mode.  The only way to reenable UV mode is with a reboot.
+ * r3 = 1 if primary thread
+ *      0 if secondary thread
+ */
+.global exit_uv_mode
+exit_uv_mode:
+	mfmsr	%r4
+	LOAD_IMM64(%r5, ~MSR_S)
+	and	%r4,%r4,%r5
+	mtspr	SPR_USRR1,%r4
+
+	mfspr   %r4,SPR_HSRR1
+	and     %r4,%r4,%r5
+	mtspr   SPR_HSRR1,%r3
+
+	mfspr   %r4,SPR_SRR1
+	and     %r4,%r4,%r5
+	mtspr   SPR_SRR1,%r4
+
+	cmpdi	%r3,1
+	bne	1f
+	mfspr   %r4, SPR_SMFCTRL
+	LOAD_IMM64(%r5, ~PPC_BIT(0))
+	and     %r4,%r4,%r5
+	mtspr   SPR_SMFCTRL,%r4
+1:
+	isync
+
+	mflr	%r4
+	mtspr	SPR_USRR0,%r4
+
+	urfid
diff --git a/core/cpu.c b/core/cpu.c
index d5b7d623..1adf16cc 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -1644,3 +1644,62 @@  static int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr)
 	return rc;
 }
 opal_call(OPAL_NMMU_SET_PTCR, opal_nmmu_set_ptcr, 2);
+
+static void _exit_uv_mode(void *data __unused)
+{
+	prlog(PR_DEBUG, "Exit uv mode on cpu pir 0x%04x\n", this_cpu()->pir);
+	/* HW has smfctrl shared between threads but on Mambo it is per-thread */
+	if (chip_quirk(QUIRK_MAMBO_CALLOUTS))
+		exit_uv_mode(1);
+	else
+		exit_uv_mode(cpu_is_thread0(this_cpu()));
+}
+
+void cpu_disable_pef(void)
+{
+	struct cpu_thread *cpu;
+	struct cpu_job **jobs;
+
+	if (!(mfmsr() & MSR_S)) {
+		prlog(PR_DEBUG, "UV mode off on cpu pir 0x%04x\n", this_cpu()->pir);
+		return;
+	}
+
+	jobs = zalloc(sizeof(struct cpu_job *) * (cpu_max_pir + 1));
+	assert(jobs);
+
+	/* Exit uv mode on all secondary threads before touching
+	 * smfctrl on thread 0 */
+	for_each_available_cpu(cpu) {
+		if (cpu == this_cpu())
+			continue;
+
+		if (!cpu_is_thread0(cpu))
+			jobs[cpu->pir] = cpu_queue_job(cpu, "exit_uv_mode",
+					_exit_uv_mode, NULL);
+	}
+
+	for_each_available_cpu(cpu)
+		if (jobs[cpu->pir]) {
+			cpu_wait_job(jobs[cpu->pir], true);
+			jobs[cpu->pir] = NULL;
+		}
+
+	/* Exit uv mode and disable smfctrl on primary threads */
+	for_each_available_cpu(cpu) {
+		if (cpu == this_cpu())
+			continue;
+
+		if (cpu_is_thread0(cpu))
+			jobs[cpu->pir] = cpu_queue_job(cpu, "exit_uv_mode",
+					_exit_uv_mode, NULL);
+	}
+
+	for_each_available_cpu(cpu)
+		if (jobs[cpu->pir])
+			cpu_wait_job(jobs[cpu->pir], true);
+
+	free(jobs);
+
+	_exit_uv_mode(NULL);
+}
diff --git a/core/init.c b/core/init.c
index 339462e5..0d993abb 100644
--- a/core/init.c
+++ b/core/init.c
@@ -1354,6 +1354,9 @@  void __noreturn __nomcount main_cpu_entry(const void *fdt)
 	/* Add the list of interrupts going to OPAL */
 	add_opal_interrupts();
 
+	/* Disable protected execution facility in BML */
+	cpu_disable_pef();
+
 	/* Now release parts of memory nodes we haven't used ourselves... */
 	mem_region_release_unused();
 
diff --git a/include/cpu.h b/include/cpu.h
index 686310d7..cab63360 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -309,4 +309,7 @@  int dctl_set_special_wakeup(struct cpu_thread *t);
 int dctl_clear_special_wakeup(struct cpu_thread *t);
 int dctl_core_is_gated(struct cpu_thread *t);
 
+extern void exit_uv_mode(int);
+void cpu_disable_pef(void);
+
 #endif /* __CPU_H */
diff --git a/include/processor.h b/include/processor.h
index a0c2864a..1fdcc02b 100644
--- a/include/processor.h
+++ b/include/processor.h
@@ -11,6 +11,7 @@ 
 #define MSR_HV		PPC_BIT(3)	/* Hypervisor mode */
 #define MSR_VEC		PPC_BIT(38)	/* VMX enable */
 #define MSR_VSX		PPC_BIT(40)	/* VSX enable */
+#define MSR_S		PPC_BIT(41)	/* Secure mode */
 #define MSR_EE		PPC_BIT(48)	/* External Int. Enable */
 #define MSR_PR		PPC_BIT(49)       	/* Problem state */
 #define MSR_FP		PPC_BIT(50)	/* Floating Point Enable */
@@ -65,6 +66,9 @@ 
 #define SPR_HMEER	0x151	/* HMER interrupt enable mask */
 #define SPR_PCR		0x152
 #define SPR_AMOR	0x15d
+#define SPR_USRR0	0x1fa   /* RW: Ultravisor Save/Restore Register 0 */
+#define SPR_USRR1	0x1fb   /* RW: Ultravisor Save/Restore Register 1 */
+#define SPR_SMFCTRL	0x1ff   /* RW: Secure Memory Facility Control */
 #define SPR_PSSCR	0x357   /* RW: Stop status and control (ISA 3) */
 #define SPR_TSCR	0x399
 #define SPR_HID0	0x3f0