
[U-Boot,1/2] armv8: Support loading 32-bit OS in AArch32 execution state

Message ID 1463128808-46730-2-git-send-email-b18965@freescale.com
State Superseded

Commit Message

Alison Wang May 13, 2016, 8:40 a.m. UTC
To support loading a 32-bit OS, the execution state is switched from
AArch64 to AArch32 when jumping to the kernel.

The architecture information is obtained by checking the FIT
image, and U-Boot then loads a 32-bit or 64-bit OS automatically.

Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
Signed-off-by: Alison Wang <alison.wang@nxp.com>
Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
---
 arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++
 arch/arm/include/asm/system.h   |   2 +
 arch/arm/lib/bootm.c            |  20 +++++++-
 common/image-fit.c              |  12 ++++-
 4 files changed, 131 insertions(+), 3 deletions(-)

Comments

Alexander Graf May 13, 2016, 11:11 a.m. UTC | #1
On 13.05.16 10:40, Alison Wang wrote:
> To support loading a 32-bit OS, the execution state will change from
> AArch64 to AArch32 when jumping to kernel.
> 
> The architecture information will be got through checking FIT
> image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
> 
> Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
> Signed-off-by: Alison Wang <alison.wang@nxp.com>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
> ---
>  arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++
>  arch/arm/include/asm/system.h   |   2 +
>  arch/arm/lib/bootm.c            |  20 +++++++-
>  common/image-fit.c              |  12 ++++-
>  4 files changed, 131 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
> index 253a39b..9d7a17a 100644
> --- a/arch/arm/cpu/armv8/transition.S
> +++ b/arch/arm/cpu/armv8/transition.S
> @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1)
>  0:	ret
>  1:	armv8_switch_to_el1_m x0, x1
>  ENDPROC(armv8_switch_to_el1)
> +
> +/*
> + * x0: kernel entry point
> + * x1: machine nr
> + * x2: fdt address
> + */
> +ENTRY(armv8_switch_to_el2_aarch32)
> +	switch_el x3, 1f, 0f, 0f
> +0:	ret
> +1:
> +	mov	x7, x0
> +	mov	x8, x1
> +	mov	x9, x2
> +
> +	/* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
> +	mov	x1, 0x1b1
> +	msr	scr_el3, x1
> +	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
> +	mov	x1, 0x33ff
> +	msr	cptr_el2, x1	/* Disable coprocessor traps to EL2 */
> +
> +	/* Initialize Generic Timers */
> +	msr	cntvoff_el2, xzr
> +
> +	mov	x1, #0x0830
> +	movk	x1, #0x30c5, lsl #16
> +	msr	sctlr_el2, x1

Why is this necessary?

> +
> +	/* Return to AArch32 Hypervisor mode */
> +	mov	x1, sp
> +	msr	sp_el2, x1
> +	mrs	x1, vbar_el3
> +	msr	vbar_el2, x1	/* Migrate VBAR */
> +	mov	x1, #0x1da
> +	msr	spsr_el3, x1
> +	msr	elr_el3, x7
> +
> +	mov	x0, #0
> +	mov	x1, x8
> +	mov	x2, x9
> +
> +	eret
> +ENDPROC(armv8_switch_to_el2_aarch32)

This whole thing looks like a copy of armv8_switch_to_el2_m. Just
parameterize that one and put the few bits that are different in macro ifs.

> +
> +/*
> + * x0: kernel entry point
> + * x1: machine nr
> + * x2: fdt address
> + */
> +ENTRY(armv8_switch_to_el1_aarch32)
> +	switch_el x3, 0f, 1f, 0f
> +0:	ret
> +1:
> +	mov	x7, x0
> +	mov	x8, x1
> +	mov	x9, x2
> +
> +	/* Initialize Generic Timers */
> +	mrs	x0, cnthctl_el2
> +	orr	x0, x0, #0x3		/* Enable EL1 access to timers */
> +	msr	cnthctl_el2, x0
> +	msr	cntvoff_el2, xzr
> +
> +        /* Initialize MPID/MPIDR registers */
> +	mrs	x0, midr_el1
> +	mrs	x1, mpidr_el1
> +	msr	vpidr_el2, x0
> +	msr	vmpidr_el2, x1
> +
> +        /* Disable coprocessor traps */
> +	mov	x0, #0x33ff
> +	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */
> +        msr	hstr_el2, xzr		/* Disable coprocessor traps to EL2 */
> +        mov	x0, #3 << 20
> +        msr	cpacr_el1, x0		/* Enable FP/SIMD at EL1 */
> +
> +	/* Initialize HCR_EL2 */
> +	mov	x0, #(0 << 31)		/* 32bit EL1 */
> +	orr	x0, x0, #(1 << 29)	/* Disable HVC */
> +	msr	hcr_el2, x0
> +
> +	mov	x0, #0x0800
> +	movk	x0, #0x30d0, lsl #16
> +	msr	sctlr_el1, x0
> +
> +	/* Return to AArch32 Supervisor mode */
> +	mov	x0, sp
> +	msr	sp_el1, x0		/* Migrate SP */
> +	mrs	x0, vbar_el2
> +	msr	vbar_el1, x0		/* Migrate VBAR */
> +	mov     x0, #0x1d3
> +	msr	spsr_el2, x0
> +	msr	elr_el2, x7
> +
> +	mov	x0, #0
> +	mov	x1, x8
> +	mov	x2, x9
> +
> +	eret
> +ENDPROC(armv8_switch_to_el1_aarch32)

Does anybody really care about jumping to el1?

> diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
> index 9ae890a..bb87cf0 100644
> --- a/arch/arm/include/asm/system.h
> +++ b/arch/arm/include/asm/system.h
> @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
>  
>  void armv8_switch_to_el2(void);
>  void armv8_switch_to_el1(void);
> +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr);
> +void armv8_switch_to_el1_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr);
>  void gic_init(void);
>  void gic_send_sgi(unsigned long sgino);
>  void wait_for_wakeup(void);
> diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
> index 0838d89..a39c3d2 100644
> --- a/arch/arm/lib/bootm.c
> +++ b/arch/arm/lib/bootm.c
> @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t *images, int flag)
>  	announce_and_cleanup(fake);
>  
>  	if (!fake) {
> -		do_nonsec_virt_switch();
> -		kernel_entry(images->ft_addr, NULL, NULL, NULL);
> +		if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
> +		    (images->os.arch == IH_ARCH_ARM)) {
> +			smp_kick_all_cpus();
> +			dcache_disable();
> +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> +			armv8_switch_to_el2();
> +			armv8_switch_to_el1_aarch32((u64)images->ep,
> +						    (u64)gd->bd->bi_arch_number,
> +						    (u64)images->ft_addr);
> +#else
> +			armv8_switch_to_el2_aarch32((u64)images->ep,
> +						    (u64)gd->bd->bi_arch_number,
> +						    (u64)images->ft_addr);
> +#endif

Does this compile on 32bit targets?


Alex
York Sun May 13, 2016, 4:34 p.m. UTC | #2
On 05/13/2016 01:50 AM, Alison Wang wrote:
> To support loading a 32-bit OS, the execution state will change from
> AArch64 to AArch32 when jumping to kernel.
> 
> The architecture information will be got through checking FIT
> image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
> 
> Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
> Signed-off-by: Alison Wang <alison.wang@nxp.com>
> Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
> ---
>  arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++
>  arch/arm/include/asm/system.h   |   2 +
>  arch/arm/lib/bootm.c            |  20 +++++++-
>  common/image-fit.c              |  12 ++++-
>  4 files changed, 131 insertions(+), 3 deletions(-)
> 

If you respin the patch for any reason, please remember to add the correct
version number and a change log.

York
Alison Wang May 16, 2016, 5:28 a.m. UTC | #3
> On 13.05.16 10:40, Alison Wang wrote:
> > To support loading a 32-bit OS, the execution state will change from
> > AArch64 to AArch32 when jumping to kernel.
> >
> > The architecture information will be got through checking FIT image,
> > then U-Boot will load 32-bit OS or 64-bit OS automatically.
> >
> > Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
> > Signed-off-by: Alison Wang <alison.wang@nxp.com>
> > Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
> > ---
> >  arch/arm/cpu/armv8/transition.S | 100
> ++++++++++++++++++++++++++++++++++++++++
> >  arch/arm/include/asm/system.h   |   2 +
> >  arch/arm/lib/bootm.c            |  20 +++++++-
> >  common/image-fit.c              |  12 ++++-
> >  4 files changed, 131 insertions(+), 3 deletions(-)
> >
> > diff --git a/arch/arm/cpu/armv8/transition.S
> > b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644
> > --- a/arch/arm/cpu/armv8/transition.S
> > +++ b/arch/arm/cpu/armv8/transition.S
> > @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1)
> >  0:	ret
> >  1:	armv8_switch_to_el1_m x0, x1
> >  ENDPROC(armv8_switch_to_el1)
> > +
> > +/*
> > + * x0: kernel entry point
> > + * x1: machine nr
> > + * x2: fdt address
> > + */
> > +ENTRY(armv8_switch_to_el2_aarch32)
> > +	switch_el x3, 1f, 0f, 0f
> > +0:	ret
> > +1:
> > +	mov	x7, x0
> > +	mov	x8, x1
> > +	mov	x9, x2
> > +
> > +	/* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1
> */
> > +	mov	x1, 0x1b1
> > +	msr	scr_el3, x1
> > +	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
> > +	mov	x1, 0x33ff
> > +	msr	cptr_el2, x1	/* Disable coprocessor traps to EL2 */
> > +
> > +	/* Initialize Generic Timers */
> > +	msr	cntvoff_el2, xzr
> > +
> > +	mov	x1, #0x0830
> > +	movk	x1, #0x30c5, lsl #16
> > +	msr	sctlr_el2, x1
> 
> Why is this necessary?
[Alison Wang] SCTLR_EL2 is architecturally mapped to the AArch32 register HSCTLR.
HSCTLR provides control of system operation in Hyp mode.
> 
> > +
> > +	/* Return to AArch32 Hypervisor mode */
> > +	mov	x1, sp
> > +	msr	sp_el2, x1
> > +	mrs	x1, vbar_el3
> > +	msr	vbar_el2, x1	/* Migrate VBAR */
> > +	mov	x1, #0x1da
> > +	msr	spsr_el3, x1
> > +	msr	elr_el3, x7
> > +
> > +	mov	x0, #0
> > +	mov	x1, x8
> > +	mov	x2, x9
> > +
> > +	eret
> > +ENDPROC(armv8_switch_to_el2_aarch32)
> 
> This whole thing looks like a copy of armv8_switch_to_el2_m. Just
> parameterize that one and put the few bits that are different in macro
> ifs.
[Alison Wang] Yes, they are similar because they both switch from EL3 to EL2.
But some bits are different, because one switches from AArch64 EL3 to AArch64 EL2
and the other from AArch64 EL3 to AArch32 EL2. The parameters need to be
passed as well.
> 
> > +
> > +/*
> > + * x0: kernel entry point
> > + * x1: machine nr
> > + * x2: fdt address
> > + */
> > +ENTRY(armv8_switch_to_el1_aarch32)
> > +	switch_el x3, 0f, 1f, 0f
> > +0:	ret
> > +1:
> > +	mov	x7, x0
> > +	mov	x8, x1
> > +	mov	x9, x2
> > +
> > +	/* Initialize Generic Timers */
> > +	mrs	x0, cnthctl_el2
> > +	orr	x0, x0, #0x3		/* Enable EL1 access to timers */
> > +	msr	cnthctl_el2, x0
> > +	msr	cntvoff_el2, xzr
> > +
> > +        /* Initialize MPID/MPIDR registers */
> > +	mrs	x0, midr_el1
> > +	mrs	x1, mpidr_el1
> > +	msr	vpidr_el2, x0
> > +	msr	vmpidr_el2, x1
> > +
> > +        /* Disable coprocessor traps */
> > +	mov	x0, #0x33ff
> > +	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */
> > +        msr	hstr_el2, xzr		/* Disable coprocessor traps
> to EL2 */
> > +        mov	x0, #3 << 20
> > +        msr	cpacr_el1, x0		/* Enable FP/SIMD at EL1 */
> > +
> > +	/* Initialize HCR_EL2 */
> > +	mov	x0, #(0 << 31)		/* 32bit EL1 */
> > +	orr	x0, x0, #(1 << 29)	/* Disable HVC */
> > +	msr	hcr_el2, x0
> > +
> > +	mov	x0, #0x0800
> > +	movk	x0, #0x30d0, lsl #16
> > +	msr	sctlr_el1, x0
> > +
> > +	/* Return to AArch32 Supervisor mode */
> > +	mov	x0, sp
> > +	msr	sp_el1, x0		/* Migrate SP */
> > +	mrs	x0, vbar_el2
> > +	msr	vbar_el1, x0		/* Migrate VBAR */
> > +	mov     x0, #0x1d3
> > +	msr	spsr_el2, x0
> > +	msr	elr_el2, x7
> > +
> > +	mov	x0, #0
> > +	mov	x1, x8
> > +	mov	x2, x9
> > +
> > +	eret
> > +ENDPROC(armv8_switch_to_el1_aarch32)
> 
> Does anybody really care about jumping to el1?
[Alison Wang] I am not sure whether anybody will jump to EL1, but I provide
this support anyway. If anybody wants to jump to EL1, they can just define
CONFIG_ARMV8_SWITCH_TO_EL1.
> 
> > diff --git a/arch/arm/include/asm/system.h
> > b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644
> > --- a/arch/arm/include/asm/system.h
> > +++ b/arch/arm/include/asm/system.h
> > @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
> >
> >  void armv8_switch_to_el2(void);
> >  void armv8_switch_to_el1(void);
> > +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64
> > +fdt_addr); void armv8_switch_to_el1_aarch32(u64 entry_point, u64
> > +mach_nr, u64 fdt_addr);
> >  void gic_init(void);
> >  void gic_send_sgi(unsigned long sgino);  void wait_for_wakeup(void);
> > diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index
> > 0838d89..a39c3d2 100644
> > --- a/arch/arm/lib/bootm.c
> > +++ b/arch/arm/lib/bootm.c
> > @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t
> *images, int flag)
> >  	announce_and_cleanup(fake);
> >
> >  	if (!fake) {
> > -		do_nonsec_virt_switch();
> > -		kernel_entry(images->ft_addr, NULL, NULL, NULL);
> > +		if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
> > +		    (images->os.arch == IH_ARCH_ARM)) {
> > +			smp_kick_all_cpus();
> > +			dcache_disable();
> > +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> > +			armv8_switch_to_el2();
> > +			armv8_switch_to_el1_aarch32((u64)images->ep,
> > +						    (u64)gd->bd->bi_arch_number,
> > +						    (u64)images->ft_addr);
> > +#else
> > +			armv8_switch_to_el2_aarch32((u64)images->ep,
> > +						    (u64)gd->bd->bi_arch_number,
> > +						    (u64)images->ft_addr);
> > +#endif
> 
> Does this compile on 32bit targets?
[Alison Wang] This is compiled for 64-bit targets, to support a 64-bit U-Boot
booting a 32-bit kernel. It will not affect 32-bit targets.


Best Regards,
Alison Wang
Alison Wang May 16, 2016, 5:29 a.m. UTC | #4
> On 05/13/2016 01:50 AM, Alison Wang wrote:
> > To support loading a 32-bit OS, the execution state will change from
> > AArch64 to AArch32 when jumping to kernel.
> >
> > The architecture information will be got through checking FIT image,
> > then U-Boot will load 32-bit OS or 64-bit OS automatically.
> >
> > Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
> > Signed-off-by: Alison Wang <alison.wang@nxp.com>
> > Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
> > ---
> >  arch/arm/cpu/armv8/transition.S | 100
> ++++++++++++++++++++++++++++++++++++++++
> >  arch/arm/include/asm/system.h   |   2 +
> >  arch/arm/lib/bootm.c            |  20 +++++++-
> >  common/image-fit.c              |  12 ++++-
> >  4 files changed, 131 insertions(+), 3 deletions(-)
> >
> 
> If you repsin the patch for any reason, please remember to add correct
> version number and change log.
> 
[Alison Wang] Yes. This is the first version I sent upstream; I will add a
version number and change log when I send the next version.

Thanks.

Best Regards,
Alison Wang
Alexander Graf May 16, 2016, 10:30 a.m. UTC | #5
On 16.05.16 07:28, Huan Wang wrote:
>> On 13.05.16 10:40, Alison Wang wrote:
>>> To support loading a 32-bit OS, the execution state will change from
>>> AArch64 to AArch32 when jumping to kernel.
>>>
>>> The architecture information will be got through checking FIT image,
>>> then U-Boot will load 32-bit OS or 64-bit OS automatically.
>>>
>>> Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
>>> Signed-off-by: Alison Wang <alison.wang@nxp.com>
>>> Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
>>> ---
>>>  arch/arm/cpu/armv8/transition.S | 100
>> ++++++++++++++++++++++++++++++++++++++++
>>>  arch/arm/include/asm/system.h   |   2 +
>>>  arch/arm/lib/bootm.c            |  20 +++++++-
>>>  common/image-fit.c              |  12 ++++-
>>>  4 files changed, 131 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/arch/arm/cpu/armv8/transition.S
>>> b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644
>>> --- a/arch/arm/cpu/armv8/transition.S
>>> +++ b/arch/arm/cpu/armv8/transition.S
>>> @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1)
>>>  0:	ret
>>>  1:	armv8_switch_to_el1_m x0, x1
>>>  ENDPROC(armv8_switch_to_el1)
>>> +
>>> +/*
>>> + * x0: kernel entry point
>>> + * x1: machine nr
>>> + * x2: fdt address
>>> + */
>>> +ENTRY(armv8_switch_to_el2_aarch32)
>>> +	switch_el x3, 1f, 0f, 0f
>>> +0:	ret
>>> +1:
>>> +	mov	x7, x0
>>> +	mov	x8, x1
>>> +	mov	x9, x2
>>> +
>>> +	/* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1
>> */
>>> +	mov	x1, 0x1b1
>>> +	msr	scr_el3, x1
>>> +	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
>>> +	mov	x1, 0x33ff
>>> +	msr	cptr_el2, x1	/* Disable coprocessor traps to EL2 */
>>> +
>>> +	/* Initialize Generic Timers */
>>> +	msr	cntvoff_el2, xzr
>>> +
>>> +	mov	x1, #0x0830
>>> +	movk	x1, #0x30c5, lsl #16
>>> +	msr	sctlr_el2, x1
>>
>> Why is this necessary?
> [Alison Wang] SCTLR_EL2 is architecturally mapped to AArch32 register HSCTLR.
> HSCTLR will provide control of the system operation in Hyp mode.

It still doesn't explain why you move magical values into a random
register that is not set in the 64-bit path.

Please make this code more readable :). Try to #define values for the
bits that you set. Add comments explaining why you do what you do.
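
As a rough sketch (the names below are invented for illustration, not existing
U-Boot symbols), the magic numbers in the AArch32 path could be spelled out
like this:

	#define SCR_EL3_NS		(1 << 0)	/* EL0/EL1 are Non-secure */
	#define SCR_EL3_RES1		(3 << 4)
	#define SCR_EL3_SMD		(1 << 7)	/* SMC disabled */
	#define SCR_EL3_HCE		(1 << 8)	/* HVC enabled */
	/* RW (bit 10) left 0: the lower ELs are AArch32 -> 0x1b1 */
	#define SCR_EL3_EL2_AARCH32	(SCR_EL3_HCE | SCR_EL3_SMD | \
					 SCR_EL3_RES1 | SCR_EL3_NS)

	#define SPSR_A32_F_MASK		(1 << 6)	/* FIQ masked */
	#define SPSR_A32_I_MASK		(1 << 7)	/* IRQ masked */
	#define SPSR_A32_A_MASK		(1 << 8)	/* SError masked */
	#define SPSR_A32_M_HYP		0x1a		/* AArch32 Hyp mode */
	#define SPSR_A32_M_SVC		0x13		/* AArch32 Supervisor mode */
	/* 0x1da: return to AArch32 Hyp with A/I/F masked */
	#define SPSR_EL3_A32_HYP	(SPSR_A32_A_MASK | SPSR_A32_I_MASK | \
					 SPSR_A32_F_MASK | SPSR_A32_M_HYP)
	/* 0x1d3: return to AArch32 Supervisor with A/I/F masked */
	#define SPSR_EL2_A32_SVC	(SPSR_A32_A_MASK | SPSR_A32_I_MASK | \
					 SPSR_A32_F_MASK | SPSR_A32_M_SVC)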

>>
>>> +
>>> +	/* Return to AArch32 Hypervisor mode */
>>> +	mov	x1, sp
>>> +	msr	sp_el2, x1
>>> +	mrs	x1, vbar_el3
>>> +	msr	vbar_el2, x1	/* Migrate VBAR */
>>> +	mov	x1, #0x1da
>>> +	msr	spsr_el3, x1
>>> +	msr	elr_el3, x7
>>> +
>>> +	mov	x0, #0
>>> +	mov	x1, x8
>>> +	mov	x2, x9
>>> +
>>> +	eret
>>> +ENDPROC(armv8_switch_to_el2_aarch32)
>>
>> This whole thing looks like a copy of armv8_switch_to_el2_m. Just
>> parameterize that one and put the few bits that are different in macro
>> ifs.
> [Alison Wang] Yes, they are similar because they both switch from EL3 to EL2.
> But some bits are different because one switch from AArch64 EL3 to AArch64 EL2
> and the other switch from AArch64 EL3 to AArch32 EL2. The parameters need to
> use too.

Yes, so I think it makes a lot of sense to combine the
jump-to-64-bit-el2 and jump-to-32-bit-el2 functions into a single
implementation. That way there's less chance an accidental difference
creeps in.
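
For reference, a rough sketch of that kind of parameterization (illustrative
only: the common CPTR/CNTVOFF/SCTLR_EL2/SP/VBAR setup is elided, and the
0x5b1/0x3c9 values are the ones the existing 64-bit macro already uses):

	.macro armv8_switch_to_el2_m, xreg1, aarch32=0
		/* ... common EL3 -> EL2 setup as today ... */
	.if \aarch32
		mov	\xreg1, #0x1b1	/* SCR_EL3.RW = 0: EL2 is AArch32 */
		msr	scr_el3, \xreg1
		mov	\xreg1, #0x1da	/* SPSR: AArch32 Hyp mode, A/I/F masked */
	.else
		mov	\xreg1, #0x5b1	/* SCR_EL3.RW = 1: EL2 is AArch64 */
		msr	scr_el3, \xreg1
		mov	\xreg1, #0x3c9	/* SPSR: AArch64 EL2h, D/A/I/F masked */
	.endif
		msr	spsr_el3, \xreg1
		/* elr_el3 and the argument registers are set up by the caller */
		eret
	.endm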

>>
>>> +
>>> +/*
>>> + * x0: kernel entry point
>>> + * x1: machine nr
>>> + * x2: fdt address
>>> + */
>>> +ENTRY(armv8_switch_to_el1_aarch32)
>>> +	switch_el x3, 0f, 1f, 0f
>>> +0:	ret
>>> +1:
>>> +	mov	x7, x0
>>> +	mov	x8, x1
>>> +	mov	x9, x2
>>> +
>>> +	/* Initialize Generic Timers */
>>> +	mrs	x0, cnthctl_el2
>>> +	orr	x0, x0, #0x3		/* Enable EL1 access to timers */
>>> +	msr	cnthctl_el2, x0
>>> +	msr	cntvoff_el2, xzr
>>> +
>>> +        /* Initialize MPID/MPIDR registers */
>>> +	mrs	x0, midr_el1
>>> +	mrs	x1, mpidr_el1
>>> +	msr	vpidr_el2, x0
>>> +	msr	vmpidr_el2, x1
>>> +
>>> +        /* Disable coprocessor traps */
>>> +	mov	x0, #0x33ff
>>> +	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */
>>> +        msr	hstr_el2, xzr		/* Disable coprocessor traps
>> to EL2 */
>>> +        mov	x0, #3 << 20
>>> +        msr	cpacr_el1, x0		/* Enable FP/SIMD at EL1 */
>>> +
>>> +	/* Initialize HCR_EL2 */
>>> +	mov	x0, #(0 << 31)		/* 32bit EL1 */
>>> +	orr	x0, x0, #(1 << 29)	/* Disable HVC */
>>> +	msr	hcr_el2, x0
>>> +
>>> +	mov	x0, #0x0800
>>> +	movk	x0, #0x30d0, lsl #16
>>> +	msr	sctlr_el1, x0
>>> +
>>> +	/* Return to AArch32 Supervisor mode */
>>> +	mov	x0, sp
>>> +	msr	sp_el1, x0		/* Migrate SP */
>>> +	mrs	x0, vbar_el2
>>> +	msr	vbar_el1, x0		/* Migrate VBAR */
>>> +	mov     x0, #0x1d3
>>> +	msr	spsr_el2, x0
>>> +	msr	elr_el2, x7
>>> +
>>> +	mov	x0, #0
>>> +	mov	x1, x8
>>> +	mov	x2, x9
>>> +
>>> +	eret
>>> +ENDPROC(armv8_switch_to_el1_aarch32)
>>
>> Does anybody really care about jumping to el1?
> [Alison Wang] I am not sure if anybody will jump to el1. Anyway, I provide
> this support. If anybody want to jump to el1, he can just define
> CONFIG_ARMV8_SWITCH_TO_EL1.

Well, yes, I'm actually questioning the existence of the define. Why did
it get introduced? Is there any case where it's actually sensible?

>>
>>> diff --git a/arch/arm/include/asm/system.h
>>> b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644
>>> --- a/arch/arm/include/asm/system.h
>>> +++ b/arch/arm/include/asm/system.h
>>> @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
>>>
>>>  void armv8_switch_to_el2(void);
>>>  void armv8_switch_to_el1(void);
>>> +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64
>>> +fdt_addr); void armv8_switch_to_el1_aarch32(u64 entry_point, u64
>>> +mach_nr, u64 fdt_addr);
>>>  void gic_init(void);
>>>  void gic_send_sgi(unsigned long sgino);  void wait_for_wakeup(void);
>>> diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index
>>> 0838d89..a39c3d2 100644
>>> --- a/arch/arm/lib/bootm.c
>>> +++ b/arch/arm/lib/bootm.c
>>> @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t
>> *images, int flag)
>>>  	announce_and_cleanup(fake);
>>>
>>>  	if (!fake) {
>>> -		do_nonsec_virt_switch();
>>> -		kernel_entry(images->ft_addr, NULL, NULL, NULL);
>>> +		if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
>>> +		    (images->os.arch == IH_ARCH_ARM)) {
>>> +			smp_kick_all_cpus();
>>> +			dcache_disable();
>>> +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
>>> +			armv8_switch_to_el2();
>>> +			armv8_switch_to_el1_aarch32((u64)images->ep,
>>> +						    (u64)gd->bd->bi_arch_number,
>>> +						    (u64)images->ft_addr);
>>> +#else
>>> +			armv8_switch_to_el2_aarch32((u64)images->ep,
>>> +						    (u64)gd->bd->bi_arch_number,
>>> +						    (u64)images->ft_addr);
>>> +#endif
>>
>> Does this compile on 32bit targets?
> [Alison Wang] This compile for 64bit target to support 64-bit U-Boot and
> 32-bit kernel. It will not affect 32bit targets. 

Ah, we're inside an #ifdef CONFIG_ARM64.


Alex
Alison Wang May 17, 2016, 9:24 a.m. UTC | #6
> On 16.05.16 07:28, Huan Wang wrote:
> >> On 13.05.16 10:40, Alison Wang wrote:
> >>> To support loading a 32-bit OS, the execution state will change from
> >>> AArch64 to AArch32 when jumping to kernel.
> >>>
> >>> The architecture information will be got through checking FIT image,
> >>> then U-Boot will load 32-bit OS or 64-bit OS automatically.
> >>>
> >>> Signed-off-by: Ebony Zhu <ebony.zhu@nxp.com>
> >>> Signed-off-by: Alison Wang <alison.wang@nxp.com>
> >>> Signed-off-by: Chenhui Zhao <chenhui.zhao@nxp.com>
> >>> ---
> >>>  arch/arm/cpu/armv8/transition.S | 100
> >> ++++++++++++++++++++++++++++++++++++++++
> >>>  arch/arm/include/asm/system.h   |   2 +
> >>>  arch/arm/lib/bootm.c            |  20 +++++++-
> >>>  common/image-fit.c              |  12 ++++-
> >>>  4 files changed, 131 insertions(+), 3 deletions(-)
> >>>
> >>> diff --git a/arch/arm/cpu/armv8/transition.S
> >>> b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644
> >>> --- a/arch/arm/cpu/armv8/transition.S
> >>> +++ b/arch/arm/cpu/armv8/transition.S
> >>> @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1)
> >>>  0:	ret
> >>>  1:	armv8_switch_to_el1_m x0, x1
> >>>  ENDPROC(armv8_switch_to_el1)
> >>> +
> >>> +/*
> >>> + * x0: kernel entry point
> >>> + * x1: machine nr
> >>> + * x2: fdt address
> >>> + */
> >>> +ENTRY(armv8_switch_to_el2_aarch32)
> >>> +	switch_el x3, 1f, 0f, 0f
> >>> +0:	ret
> >>> +1:
> >>> +	mov	x7, x0
> >>> +	mov	x8, x1
> >>> +	mov	x9, x2
> >>> +
> >>> +	/* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1
> >> */
> >>> +	mov	x1, 0x1b1
> >>> +	msr	scr_el3, x1
> >>> +	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
> >>> +	mov	x1, 0x33ff
> >>> +	msr	cptr_el2, x1	/* Disable coprocessor traps to EL2 */
> >>> +
> >>> +	/* Initialize Generic Timers */
> >>> +	msr	cntvoff_el2, xzr
> >>> +
> >>> +	mov	x1, #0x0830
> >>> +	movk	x1, #0x30c5, lsl #16
> >>> +	msr	sctlr_el2, x1
> >>
> >> Why is this necessary?
> > [Alison Wang] SCTLR_EL2 is architecturally mapped to AArch32 register
> HSCTLR.
> > HSCTLR will provide control of the system operation in Hyp mode.
> 
> It still doesn't explain why you move magical values into a random
> register that is not set in the 64-bit path.
> 
> Please make this code more readable :). Try to #define values for the
> bits that you set. Add comments explaining why you do what you do.
[Alison Wang] This setting is copied from armv8_switch_to_el2_m.
The comment will be added.

	/* Initialize SCTLR_EL2
	 *
	 * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1
	 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) +
	 * EE,WXN,I,SA,C,A,M to 0
	 */
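
Spelled out with an (assumed, illustrative) name, that is exactly the value
being loaded into SCTLR_EL2:

	/* RES1 bits 29,28,23,22,18,16,11,5,4 == 0x30c50830; everything else 0 */
	#define SCTLR_EL2_RES1	((3 << 28) | (3 << 22) | (1 << 18) | \
				 (1 << 16) | (1 << 11) | (3 << 4))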
> 
> >>
> >>> +
> >>> +	/* Return to AArch32 Hypervisor mode */
> >>> +	mov	x1, sp
> >>> +	msr	sp_el2, x1
> >>> +	mrs	x1, vbar_el3
> >>> +	msr	vbar_el2, x1	/* Migrate VBAR */
> >>> +	mov	x1, #0x1da
> >>> +	msr	spsr_el3, x1
> >>> +	msr	elr_el3, x7
> >>> +
> >>> +	mov	x0, #0
> >>> +	mov	x1, x8
> >>> +	mov	x2, x9
> >>> +
> >>> +	eret
> >>> +ENDPROC(armv8_switch_to_el2_aarch32)
> >>
> >> This whole thing looks like a copy of armv8_switch_to_el2_m. Just
> >> parameterize that one and put the few bits that are different in
> >> macro ifs.
> > [Alison Wang] Yes, they are similar because they both switch from EL3
> to EL2.
> > But some bits are different because one switch from AArch64 EL3 to
> > AArch64 EL2 and the other switch from AArch64 EL3 to AArch32 EL2. The
> > parameters need to use too.
> 
> Yes, so I think it makes a lot of sense to combine the
> jump-to-64-bit-el2 and jump-to-32-bit-el2 functions be a single
> implementation. That way there's less chance an accidental difference
> creeps in.
[Alison Wang] OK, I agree that it makes sense. I will try to implement
it in the next version.
> 
> >>
> >>> +
> >>> +/*
> >>> + * x0: kernel entry point
> >>> + * x1: machine nr
> >>> + * x2: fdt address
> >>> + */
> >>> +ENTRY(armv8_switch_to_el1_aarch32)
> >>> +	switch_el x3, 0f, 1f, 0f
> >>> +0:	ret
> >>> +1:
> >>> +	mov	x7, x0
> >>> +	mov	x8, x1
> >>> +	mov	x9, x2
> >>> +
> >>> +	/* Initialize Generic Timers */
> >>> +	mrs	x0, cnthctl_el2
> >>> +	orr	x0, x0, #0x3		/* Enable EL1 access to timers */
> >>> +	msr	cnthctl_el2, x0
> >>> +	msr	cntvoff_el2, xzr
> >>> +
> >>> +        /* Initialize MPID/MPIDR registers */
> >>> +	mrs	x0, midr_el1
> >>> +	mrs	x1, mpidr_el1
> >>> +	msr	vpidr_el2, x0
> >>> +	msr	vmpidr_el2, x1
> >>> +
> >>> +        /* Disable coprocessor traps */
> >>> +	mov	x0, #0x33ff
> >>> +	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */
> >>> +        msr	hstr_el2, xzr		/* Disable coprocessor traps
> >> to EL2 */
> >>> +        mov	x0, #3 << 20
> >>> +        msr	cpacr_el1, x0		/* Enable FP/SIMD at EL1 */
> >>> +
> >>> +	/* Initialize HCR_EL2 */
> >>> +	mov	x0, #(0 << 31)		/* 32bit EL1 */
> >>> +	orr	x0, x0, #(1 << 29)	/* Disable HVC */
> >>> +	msr	hcr_el2, x0
> >>> +
> >>> +	mov	x0, #0x0800
> >>> +	movk	x0, #0x30d0, lsl #16
> >>> +	msr	sctlr_el1, x0
> >>> +
> >>> +	/* Return to AArch32 Supervisor mode */
> >>> +	mov	x0, sp
> >>> +	msr	sp_el1, x0		/* Migrate SP */
> >>> +	mrs	x0, vbar_el2
> >>> +	msr	vbar_el1, x0		/* Migrate VBAR */
> >>> +	mov     x0, #0x1d3
> >>> +	msr	spsr_el2, x0
> >>> +	msr	elr_el2, x7
> >>> +
> >>> +	mov	x0, #0
> >>> +	mov	x1, x8
> >>> +	mov	x2, x9
> >>> +
> >>> +	eret
> >>> +ENDPROC(armv8_switch_to_el1_aarch32)
> >>
> >> Does anybody really care about jumping to el1?
> > [Alison Wang] I am not sure if anybody will jump to el1. Anyway, I
> > provide this support. If anybody want to jump to el1, he can just
> > define CONFIG_ARMV8_SWITCH_TO_EL1.
> 
> Well, yes, I'm actually questioning the existence of the define. Why did
> it get introduced? Is there any case where it's actually sensible?
[Alison Wang] This define was introduced a long time ago, and
armv8_switch_to_el1_m exists for it. I think it makes sense, because some
users want to switch from EL3 to EL1 in U-Boot.
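
A minimal sketch of how a board opts in, assuming the usual config-header
mechanism of that time (the board name is a placeholder):

	/* include/configs/<board>.h: drop to EL1 before starting the OS */
	#define CONFIG_ARMV8_SWITCH_TO_EL1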
> 
> >>
> >>> diff --git a/arch/arm/include/asm/system.h
> >>> b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644
> >>> --- a/arch/arm/include/asm/system.h
> >>> +++ b/arch/arm/include/asm/system.h
> >>> @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
> >>>
> >>>  void armv8_switch_to_el2(void);
> >>>  void armv8_switch_to_el1(void);
> >>> +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64
> >>> +fdt_addr); void armv8_switch_to_el1_aarch32(u64 entry_point, u64
> >>> +mach_nr, u64 fdt_addr);
> >>>  void gic_init(void);
> >>>  void gic_send_sgi(unsigned long sgino);  void
> >>> wait_for_wakeup(void); diff --git a/arch/arm/lib/bootm.c
> >>> b/arch/arm/lib/bootm.c index
> >>> 0838d89..a39c3d2 100644
> >>> --- a/arch/arm/lib/bootm.c
> >>> +++ b/arch/arm/lib/bootm.c
> >>> @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t
> >> *images, int flag)
> >>>  	announce_and_cleanup(fake);
> >>>
> >>>  	if (!fake) {
> >>> -		do_nonsec_virt_switch();
> >>> -		kernel_entry(images->ft_addr, NULL, NULL, NULL);
> >>> +		if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
> >>> +		    (images->os.arch == IH_ARCH_ARM)) {
> >>> +			smp_kick_all_cpus();
> >>> +			dcache_disable();
> >>> +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
> >>> +			armv8_switch_to_el2();
> >>> +			armv8_switch_to_el1_aarch32((u64)images->ep,
> >>> +						    (u64)gd->bd->bi_arch_number,
> >>> +						    (u64)images->ft_addr);
> >>> +#else
> >>> +			armv8_switch_to_el2_aarch32((u64)images->ep,
> >>> +						    (u64)gd->bd->bi_arch_number,
> >>> +						    (u64)images->ft_addr);
> >>> +#endif
> >>
> >> Does this compile on 32bit targets?
> > [Alison Wang] This compile for 64bit target to support 64-bit U-Boot
> > and 32-bit kernel. It will not affect 32bit targets.
> 
> Ah, we're inside an #ifdef CONFIG_ARM64.
[Alison Wang] Yes.


Best Regards,
Alison Wang

Patch

diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S
index 253a39b..9d7a17a 100644
--- a/arch/arm/cpu/armv8/transition.S
+++ b/arch/arm/cpu/armv8/transition.S
@@ -21,3 +21,103 @@  ENTRY(armv8_switch_to_el1)
 0:	ret
 1:	armv8_switch_to_el1_m x0, x1
 ENDPROC(armv8_switch_to_el1)
+
+/*
+ * x0: kernel entry point
+ * x1: machine nr
+ * x2: fdt address
+ */
+ENTRY(armv8_switch_to_el2_aarch32)
+	switch_el x3, 1f, 0f, 0f
+0:	ret
+1:
+	mov	x7, x0
+	mov	x8, x1
+	mov	x9, x2
+
+	/* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
+	mov	x1, 0x1b1
+	msr	scr_el3, x1
+	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
+	mov	x1, 0x33ff
+	msr	cptr_el2, x1	/* Disable coprocessor traps to EL2 */
+
+	/* Initialize Generic Timers */
+	msr	cntvoff_el2, xzr
+
+	mov	x1, #0x0830
+	movk	x1, #0x30c5, lsl #16
+	msr	sctlr_el2, x1
+
+	/* Return to AArch32 Hypervisor mode */
+	mov	x1, sp
+	msr	sp_el2, x1
+	mrs	x1, vbar_el3
+	msr	vbar_el2, x1	/* Migrate VBAR */
+	mov	x1, #0x1da
+	msr	spsr_el3, x1
+	msr	elr_el3, x7
+
+	mov	x0, #0
+	mov	x1, x8
+	mov	x2, x9
+
+	eret
+ENDPROC(armv8_switch_to_el2_aarch32)
+
+/*
+ * x0: kernel entry point
+ * x1: machine nr
+ * x2: fdt address
+ */
+ENTRY(armv8_switch_to_el1_aarch32)
+	switch_el x3, 0f, 1f, 0f
+0:	ret
+1:
+	mov	x7, x0
+	mov	x8, x1
+	mov	x9, x2
+
+	/* Initialize Generic Timers */
+	mrs	x0, cnthctl_el2
+	orr	x0, x0, #0x3		/* Enable EL1 access to timers */
+	msr	cnthctl_el2, x0
+	msr	cntvoff_el2, xzr
+
+        /* Initialize MPID/MPIDR registers */
+	mrs	x0, midr_el1
+	mrs	x1, mpidr_el1
+	msr	vpidr_el2, x0
+	msr	vmpidr_el2, x1
+
+        /* Disable coprocessor traps */
+	mov	x0, #0x33ff
+	msr	cptr_el2, x0		/* Disable coprocessor traps to EL2 */
+        msr	hstr_el2, xzr		/* Disable coprocessor traps to EL2 */
+        mov	x0, #3 << 20
+        msr	cpacr_el1, x0		/* Enable FP/SIMD at EL1 */
+
+	/* Initialize HCR_EL2 */
+	mov	x0, #(0 << 31)		/* 32bit EL1 */
+	orr	x0, x0, #(1 << 29)	/* Disable HVC */
+	msr	hcr_el2, x0
+
+	mov	x0, #0x0800
+	movk	x0, #0x30d0, lsl #16
+	msr	sctlr_el1, x0
+
+	/* Return to AArch32 Supervisor mode */
+	mov	x0, sp
+	msr	sp_el1, x0		/* Migrate SP */
+	mrs	x0, vbar_el2
+	msr	vbar_el1, x0		/* Migrate VBAR */
+	mov     x0, #0x1d3
+	msr	spsr_el2, x0
+	msr	elr_el2, x7
+
+	mov	x0, #0
+	mov	x1, x8
+	mov	x2, x9
+
+	eret
+ENDPROC(armv8_switch_to_el1_aarch32)
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index 9ae890a..bb87cf0 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -102,6 +102,8 @@  void __asm_switch_ttbr(u64 new_ttbr);
 
 void armv8_switch_to_el2(void);
 void armv8_switch_to_el1(void);
+void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr);
+void armv8_switch_to_el1_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr);
 void gic_init(void);
 void gic_send_sgi(unsigned long sgino);
 void wait_for_wakeup(void);
diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c
index 0838d89..a39c3d2 100644
--- a/arch/arm/lib/bootm.c
+++ b/arch/arm/lib/bootm.c
@@ -286,8 +286,24 @@  static void boot_jump_linux(bootm_headers_t *images, int flag)
 	announce_and_cleanup(fake);
 
 	if (!fake) {
-		do_nonsec_virt_switch();
-		kernel_entry(images->ft_addr, NULL, NULL, NULL);
+		if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
+		    (images->os.arch == IH_ARCH_ARM)) {
+			smp_kick_all_cpus();
+			dcache_disable();
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
+			armv8_switch_to_el2();
+			armv8_switch_to_el1_aarch32((u64)images->ep,
+						    (u64)gd->bd->bi_arch_number,
+						    (u64)images->ft_addr);
+#else
+			armv8_switch_to_el2_aarch32((u64)images->ep,
+						    (u64)gd->bd->bi_arch_number,
+						    (u64)images->ft_addr);
+#endif
+		} else {
+			do_nonsec_virt_switch();
+			kernel_entry(images->ft_addr, NULL, NULL, NULL);
+		}
 	}
 #else
 	unsigned long machid = gd->bd->bi_arch_number;
diff --git a/common/image-fit.c b/common/image-fit.c
index 25f8a11..2986469 100644
--- a/common/image-fit.c
+++ b/common/image-fit.c
@@ -1163,7 +1163,8 @@  int fit_image_check_arch(const void *fit, int noffset, uint8_t arch)
 	if (fit_image_get_arch(fit, noffset, &image_arch))
 		return 0;
 	return (arch == image_arch) ||
-		(arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64);
+		(arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64) ||
+		(arch == IH_ARCH_ARM64 && image_arch == IH_ARCH_ARM);
 }
 
 /**
@@ -1586,6 +1587,9 @@  int fit_image_load(bootm_headers_t *images, ulong addr,
 	int type_ok, os_ok;
 	ulong load, data, len;
 	uint8_t os;
+#ifndef USE_HOSTCC
+	uint8_t os_arch;
+#endif
 	const char *prop_name;
 	int ret;
 
@@ -1669,6 +1673,12 @@  int fit_image_load(bootm_headers_t *images, ulong addr,
 		return -ENOEXEC;
 	}
 #endif
+
+#ifndef USE_HOSTCC
+	fit_image_get_arch(fit, noffset, &os_arch);
+	images->os.arch = os_arch;
+#endif
+
 	if (image_type == IH_TYPE_FLATDT &&
 	    !fit_image_check_comp(fit, noffset, IH_COMP_NONE)) {
 		puts("FDT image is compressed");