[U-Boot,v2,22/23] sunxi: introduce RMR switch to enter payloads in 64-bit mode

Message ID 1480902750-839-23-git-send-email-andre.przywara@arm.com
State Superseded
Delegated to: Jagannadha Sutradharudu Teki

Commit Message

Andre Przywara Dec. 5, 2016, 1:52 a.m. UTC
The ARMv8-capable Allwinner A64 SoC comes out of reset in AArch32 mode.
To run AArch64 code, we have to trigger a warm reset via the RMR register,
which resumes execution at the address stored in the RVBAR register.
If the bootable payload in the FIT image uses a different architecture
than the one the SPL has been compiled for, enter it via this RMR switch
mechanism by writing the entry point address into the MMIO-mapped,
writable version of the RVBAR register.
The warm reset is then triggered via a system register write.
If the payload architecture matches that of the SPL, we branch to the
image directly, as before.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
---
 arch/arm/mach-sunxi/Makefile     |  1 +
 arch/arm/mach-sunxi/spl_switch.c | 60 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 61 insertions(+)
 create mode 100644 arch/arm/mach-sunxi/spl_switch.c
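
For readers new to the mechanism, here is a condensed, annotated sketch of
the 32-bit side of the switch described above. The 0x17000a0 RVBAR alias and
the RMR bit assignments are taken from the patch below; the helper name and
the exact includes are illustrative only, not part of the submitted code.

/*
 * Sketch: warm-reset an AArch32 SPL into a 64-bit payload via RMR.
 */
#include <common.h>
#include <asm/io.h>

#define A64_RVBAR_ALIAS		0x17000a0	/* MMIO-mapped, writable RVBAR */

static void __noreturn rmr_switch_to_aarch64(unsigned long entry)
{
	/* Tell the core where to resume execution after the warm reset. */
	writel(entry, A64_RVBAR_ALIAS);

	/*
	 * RMR (CP15 c12, c0, 2): set AA64 (bit 0) to come back in AArch64
	 * and RR (bit 1) to request the warm reset, then wait for it.
	 */
	asm volatile("dsb\n\t"
		     "isb\n\t"
		     "mrc p15, 0, r0, c12, c0, 2\n\t"
		     "orr r0, r0, #3\n\t"
		     "mcr p15, 0, r0, c12, c0, 2\n\t"
		     "isb\n\t"
		     "wfi\n\t"
		     "b   ." ::: "r0", "memory");

	while (1)
		;	/* not reached; silences the __noreturn warning */
}

On the AArch64 side the same register is reached as RMR_EL3; there bit 0 is
cleared instead, so the core comes back in AArch32, which is what the
CONFIG_ARM64 branch of the patch does.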

Comments

Simon Glass Dec. 5, 2016, 6:26 a.m. UTC | #1
Hi Andre,

On 4 December 2016 at 18:52, Andre Przywara <andre.przywara@arm.com> wrote:
> The ARMv8-capable Allwinner A64 SoC comes out of reset in AArch32 mode.
> To run AArch64 code, we have to trigger a warm reset via the RMR register,
> which resumes execution at the address stored in the RVBAR register.
> If the bootable payload in the FIT image uses a different architecture
> than the one the SPL has been compiled for, enter it via this RMR switch
> mechanism by writing the entry point address into the MMIO-mapped,
> writable version of the RVBAR register.
> The warm reset is then triggered via a system register write.
> If the payload architecture matches that of the SPL, we branch to the
> image directly, as before.
>
> Signed-off-by: Andre Przywara <andre.przywara@arm.com>
> ---
>  arch/arm/mach-sunxi/Makefile     |  1 +
>  arch/arm/mach-sunxi/spl_switch.c | 60 ++++++++++++++++++++++++++++++++++++++++
>  2 files changed, 61 insertions(+)
>  create mode 100644 arch/arm/mach-sunxi/spl_switch.c
>
> diff --git a/arch/arm/mach-sunxi/Makefile b/arch/arm/mach-sunxi/Makefile
> index 7daba11..128091e 100644
> --- a/arch/arm/mach-sunxi/Makefile
> +++ b/arch/arm/mach-sunxi/Makefile
> @@ -51,4 +51,5 @@ obj-$(CONFIG_MACH_SUN8I_A83T) += dram_sun8i_a83t.o
>  obj-$(CONFIG_MACH_SUN8I_H3)    += dram_sun8i_h3.o
>  obj-$(CONFIG_MACH_SUN9I)       += dram_sun9i.o
>  obj-$(CONFIG_MACH_SUN50I)      += dram_sun8i_h3.o
> +obj-$(CONFIG_MACH_SUN50I)      += spl_switch.o
>  endif
> diff --git a/arch/arm/mach-sunxi/spl_switch.c b/arch/arm/mach-sunxi/spl_switch.c
> new file mode 100644
> index 0000000..20f21b1
> --- /dev/null
> +++ b/arch/arm/mach-sunxi/spl_switch.c
> @@ -0,0 +1,60 @@
> +/*
> + * (C) Copyright 2016 ARM Ltd.
> + *
> + * SPDX-License-Identifier:     GPL-2.0+
> + */
> +
> +#include <common.h>
> +#include <spl.h>
> +
> +#include <asm/io.h>
> +#include <asm/barriers.h>
> +
> +static void __noreturn jump_to_image_native(struct spl_image_info *spl_image)
> +{
> +       typedef void __noreturn (*image_entry_noargs_t)(void);
> +
> +       image_entry_noargs_t image_entry =
> +                               (image_entry_noargs_t)spl_image->entry_point;
> +
> +       image_entry();
> +}
> +
> +static void __noreturn reset_rmr_switch(void)
> +{
> +#ifdef CONFIG_ARM64
> +       __asm__ volatile ( "mrs  x0, RMR_EL3\n\t"
> +                          "bic  x0, x0, #1\n\t"   /* Clear enter-in-64 bit */
> +                          "orr  x0, x0, #2\n\t"   /* set reset request bit */
> +                          "msr  RMR_EL3, x0\n\t"
> +                          "isb  sy\n\t"
> +                          "nop\n\t"
> +                          "wfi\n\t"
> +                          "b    .\n"
> +                          ::: "x0");
> +#else
> +       __asm__ volatile ( "mrc  15, 0, r0, cr12, cr0, 2\n\t"
> +                          "orr  r0, r0, #3\n\t"   /* request reset in 64 bit */
> +                          "mcr  15, 0, r0, cr12, cr0, 2\n\t"
> +                          "isb\n\t"
> +                          "nop\n\t"
> +                          "wfi\n\t"
> +                          "b    .\n"
> +                          ::: "r0");
> +#endif
> +       while (1);      /* to avoid a compiler warning about __noreturn */
> +}
> +
> +void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
> +{
> +       if (spl_image->arch == IH_ARCH_DEFAULT) {
> +               debug("entering by branch\n");
> +               jump_to_image_native(spl_image);
> +       } else {
> +               debug("entering by RMR switch\n");
> +               writel(spl_image->entry_point, 0x17000a0);
> +               DSB;
> +               ISB;
> +               reset_rmr_switch();
> +       }

I think this could use some comments or a pointer to a README to
explain what is going on.

> +}
> --
> 2.8.2
>

Regards,
Simon

Patch

diff --git a/arch/arm/mach-sunxi/Makefile b/arch/arm/mach-sunxi/Makefile
index 7daba11..128091e 100644
--- a/arch/arm/mach-sunxi/Makefile
+++ b/arch/arm/mach-sunxi/Makefile
@@ -51,4 +51,5 @@  obj-$(CONFIG_MACH_SUN8I_A83T)	+= dram_sun8i_a83t.o
 obj-$(CONFIG_MACH_SUN8I_H3)	+= dram_sun8i_h3.o
 obj-$(CONFIG_MACH_SUN9I)	+= dram_sun9i.o
 obj-$(CONFIG_MACH_SUN50I)	+= dram_sun8i_h3.o
+obj-$(CONFIG_MACH_SUN50I)	+= spl_switch.o
 endif
diff --git a/arch/arm/mach-sunxi/spl_switch.c b/arch/arm/mach-sunxi/spl_switch.c
new file mode 100644
index 0000000..20f21b1
--- /dev/null
+++ b/arch/arm/mach-sunxi/spl_switch.c
@@ -0,0 +1,60 @@ 
+/*
+ * (C) Copyright 2016 ARM Ltd.
+ *
+ * SPDX-License-Identifier:     GPL-2.0+
+ */
+
+#include <common.h>
+#include <spl.h>
+
+#include <asm/io.h>
+#include <asm/barriers.h>
+
+static void __noreturn jump_to_image_native(struct spl_image_info *spl_image)
+{
+	typedef void __noreturn (*image_entry_noargs_t)(void);
+
+	image_entry_noargs_t image_entry =
+				(image_entry_noargs_t)spl_image->entry_point;
+
+	image_entry();
+}
+
+static void __noreturn reset_rmr_switch(void)
+{
+#ifdef CONFIG_ARM64
+	__asm__ volatile ( "mrs	 x0, RMR_EL3\n\t"
+			   "bic  x0, x0, #1\n\t"   /* Clear enter-in-64 bit */
+			   "orr  x0, x0, #2\n\t"   /* set reset request bit */
+			   "msr  RMR_EL3, x0\n\t"
+			   "isb  sy\n\t"
+			   "nop\n\t"
+			   "wfi\n\t"
+			   "b    .\n"
+			   ::: "x0");
+#else
+	__asm__ volatile ( "mrc  15, 0, r0, cr12, cr0, 2\n\t"
+			   "orr  r0, r0, #3\n\t"   /* request reset in 64 bit */
+			   "mcr  15, 0, r0, cr12, cr0, 2\n\t"
+			   "isb\n\t"
+			   "nop\n\t"
+			   "wfi\n\t"
+			   "b    .\n"
+			   ::: "r0");
+#endif
+	while (1);	/* to avoid a compiler warning about __noreturn */
+}
+
+void __noreturn jump_to_image_no_args(struct spl_image_info *spl_image)
+{
+	if (spl_image->arch == IH_ARCH_DEFAULT) {
+		debug("entering by branch\n");
+		jump_to_image_native(spl_image);
+	} else {
+		debug("entering by RMR switch\n");
+		writel(spl_image->entry_point, 0x17000a0);
+		DSB;
+		ISB;
+		reset_rmr_switch();
+	}
+}