diff mbox series

[U-Boot,v3,3/7] MSCC: add support for Ocelot SoCs

Message ID 20181205171054.926-4-gregory.clement@bootlin.com
State Superseded
Delegated to: Daniel Schwierzeck
Headers show
Series Add support for the SoCs found in Microsemi switches | expand

Commit Message

Gregory CLEMENT Dec. 5, 2018, 5:10 p.m. UTC
This family of SoCs is found in the Microsemi switch solutions and is
already supported in the Linux kernel.

Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
---
 MAINTAINERS                                   |   7 +
 arch/mips/Kconfig                             |   6 +
 arch/mips/Makefile                            |   1 +
 arch/mips/mach-mscc/Kconfig                   |  69 ++
 arch/mips/mach-mscc/Makefile                  |   5 +
 arch/mips/mach-mscc/cpu.c                     |  90 +++
 arch/mips/mach-mscc/dram.c                    |  71 ++
 arch/mips/mach-mscc/include/ioremap.h         |  51 ++
 arch/mips/mach-mscc/include/mach/common.h     |  24 +
 arch/mips/mach-mscc/include/mach/ddr.h        | 692 ++++++++++++++++++
 .../mach-mscc/include/mach/ocelot/ocelot.h    |  24 +
 .../include/mach/ocelot/ocelot_devcpu_gcb.h   |  21 +
 .../include/mach/ocelot/ocelot_icpu_cfg.h     | 274 +++++++
 arch/mips/mach-mscc/include/mach/tlb.h        |  55 ++
 arch/mips/mach-mscc/lowlevel_init.S           |  23 +
 arch/mips/mach-mscc/reset.c                   |  36 +
 16 files changed, 1449 insertions(+)
 create mode 100644 arch/mips/mach-mscc/Kconfig
 create mode 100644 arch/mips/mach-mscc/Makefile
 create mode 100644 arch/mips/mach-mscc/cpu.c
 create mode 100644 arch/mips/mach-mscc/dram.c
 create mode 100644 arch/mips/mach-mscc/include/ioremap.h
 create mode 100644 arch/mips/mach-mscc/include/mach/common.h
 create mode 100644 arch/mips/mach-mscc/include/mach/ddr.h
 create mode 100644 arch/mips/mach-mscc/include/mach/ocelot/ocelot.h
 create mode 100644 arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h
 create mode 100644 arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h
 create mode 100644 arch/mips/mach-mscc/include/mach/tlb.h
 create mode 100644 arch/mips/mach-mscc/lowlevel_init.S
 create mode 100644 arch/mips/mach-mscc/reset.c

Comments

Daniel Schwierzeck Dec. 10, 2018, 4:57 p.m. UTC | #1
Am 05.12.18 um 18:10 schrieb Gregory CLEMENT:
> This family of SoCs are found in the Microsemi Switches solution and have
> already a support in the linux kernel.
> 
> Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
> ---
>  MAINTAINERS                                   |   7 +
>  arch/mips/Kconfig                             |   6 +
>  arch/mips/Makefile                            |   1 +
>  arch/mips/mach-mscc/Kconfig                   |  69 ++
>  arch/mips/mach-mscc/Makefile                  |   5 +
>  arch/mips/mach-mscc/cpu.c                     |  90 +++
>  arch/mips/mach-mscc/dram.c                    |  71 ++
>  arch/mips/mach-mscc/include/ioremap.h         |  51 ++
>  arch/mips/mach-mscc/include/mach/common.h     |  24 +
>  arch/mips/mach-mscc/include/mach/ddr.h        | 692 ++++++++++++++++++
>  .../mach-mscc/include/mach/ocelot/ocelot.h    |  24 +
>  .../include/mach/ocelot/ocelot_devcpu_gcb.h   |  21 +
>  .../include/mach/ocelot/ocelot_icpu_cfg.h     | 274 +++++++
>  arch/mips/mach-mscc/include/mach/tlb.h        |  55 ++
>  arch/mips/mach-mscc/lowlevel_init.S           |  23 +
>  arch/mips/mach-mscc/reset.c                   |  36 +
>  16 files changed, 1449 insertions(+)
>  create mode 100644 arch/mips/mach-mscc/Kconfig
>  create mode 100644 arch/mips/mach-mscc/Makefile
>  create mode 100644 arch/mips/mach-mscc/cpu.c
>  create mode 100644 arch/mips/mach-mscc/dram.c
>  create mode 100644 arch/mips/mach-mscc/include/ioremap.h
>  create mode 100644 arch/mips/mach-mscc/include/mach/common.h
>  create mode 100644 arch/mips/mach-mscc/include/mach/ddr.h
>  create mode 100644 arch/mips/mach-mscc/include/mach/ocelot/ocelot.h
>  create mode 100644 arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h
>  create mode 100644 arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h
>  create mode 100644 arch/mips/mach-mscc/include/mach/tlb.h
>  create mode 100644 arch/mips/mach-mscc/lowlevel_init.S
>  create mode 100644 arch/mips/mach-mscc/reset.c
> 
> diff --git a/MAINTAINERS b/MAINTAINERS
> index abdb6dcdb5..53a3c5bec6 100644
> --- a/MAINTAINERS
> +++ b/MAINTAINERS
> @@ -484,6 +484,13 @@ S:	Maintained
>  T:	git git://git.denx.de/u-boot-mips.git
>  F:	arch/mips/
>  
> +MIPS MSCC
> +M:	Gregory CLEMENT <gregory.clement@bootlin.com>
> +M:	Lars Povlsen <lars.povlsen@microchip.com>
> +M:	Horatiu Vultur <horatiu.vultur@microchip.com>
> +S:	Maintained
> +F:	arch/mips/mach-mscc/
> +
>  MMC
>  M:	Jaehoon Chung <jh80.chung@samsung.com>
>  S:	Maintained
> diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
> index 6d646ef999..bfe9c11069 100644
> --- a/arch/mips/Kconfig
> +++ b/arch/mips/Kconfig
> @@ -59,6 +59,11 @@ config ARCH_ATH79
>  	select OF_CONTROL
>  	imply CMD_DM
>  
> +config ARCH_MSCC
> +	bool "Support MSCC VCore-III"
> +	select OF_CONTROL
> +	select DM
> +
>  config ARCH_BMIPS
>  	bool "Support BMIPS SoCs"
>  	select CLK
> @@ -135,6 +140,7 @@ source "board/imgtec/xilfpga/Kconfig"
>  source "board/micronas/vct/Kconfig"
>  source "board/qemu-mips/Kconfig"
>  source "arch/mips/mach-ath79/Kconfig"
> +source "arch/mips/mach-mscc/Kconfig"
>  source "arch/mips/mach-bmips/Kconfig"
>  source "arch/mips/mach-pic32/Kconfig"
>  source "arch/mips/mach-mt7620/Kconfig"
> diff --git a/arch/mips/Makefile b/arch/mips/Makefile
> index 802244a06e..124e93fa26 100644
> --- a/arch/mips/Makefile
> +++ b/arch/mips/Makefile
> @@ -15,6 +15,7 @@ machine-$(CONFIG_ARCH_ATH79) += ath79
>  machine-$(CONFIG_ARCH_BMIPS) += bmips
>  machine-$(CONFIG_MACH_PIC32) += pic32
>  machine-$(CONFIG_ARCH_MT7620) += mt7620
> +machine-$(CONFIG_ARCH_MSCC) += mscc
>  
>  machdirs := $(patsubst %,arch/mips/mach-%/,$(machine-y))
>  libs-y += $(machdirs)
> diff --git a/arch/mips/mach-mscc/Kconfig b/arch/mips/mach-mscc/Kconfig
> new file mode 100644
> index 0000000000..7f1b270207
> --- /dev/null
> +++ b/arch/mips/mach-mscc/Kconfig
> @@ -0,0 +1,69 @@
> +# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
> +
> +menu "MSCC VCore-III platforms"
> +	depends on ARCH_MSCC
> +
> +config SOC_VCOREIII
> +	select MIPS_TUNE_24KC
> +	select ROM_EXCEPTION_VECTORS
> +	select SUPPORTS_BIG_ENDIAN
> +	select SUPPORTS_CPU_MIPS32_R1
> +	select SUPPORTS_CPU_MIPS32_R2
> +	select SUPPORTS_LITTLE_ENDIAN
> +	bool
> +
> +config SYS_SOC
> +	default "mscc"
> +
> +config SOC_OCELOT
> +	bool
> +	select SOC_VCOREIII
> +	help
> +	  This supports MSCC Ocelot family of SOCs.
> +
> +config SYS_CONFIG_NAME
> +	default "vcoreiii"
> +
> +choice
> +	prompt "Board select"
> +
> +config TARGET_OCELOT_PCB120
> +	bool "MSCC PCB120 Reference Board (aka VSC5635EV)"
> +	select SOC_OCELOT
> +	help
> +	  When selected, CONFIG_DEFAULT_DEVICE_TREE should be set to
> +	  ocelot_pcb120
> +
> +config TARGET_OCELOT_PCB123
> +	bool "MSCC PCB123 Reference Board (aka VSC7514EV))"
> +	select SOC_OCELOT
> +	help
> +	  When selected, CONFIG_DEFAULT_DEVICE_TREE should be set to
> +	  ocelot_pcb123
> +
> +endchoice
> +
> +choice
> +	prompt "DDR type"
> +
> +config DDRTYPE_H5TQ4G63MFR
> +	bool "Hynix H5TQ4G63MFR-PBC (4Gbit, DDR3-800, 256Mbitx16)"
> +
> +config DDRTYPE_MT41K256M16
> +	bool "Micron MT41K256M16 (4Gbit, DDR3L-800, 256Mbitx16)"
> +
> +config DDRTYPE_H5TQ1G63BFA
> +	bool "Hynix H5TQ1G63BFA (1Gbit DDR3, x16)"
> +
> +config DDRTYPE_MT41J128M16HA
> +	bool "Micron MT41J128M16HA-15E:D (2Gbit DDR3, x16)"
> +
> +config DDRTYPE_MT41K128M16JT
> +	bool "Micron MT41K128M16JT-125 (2Gbit DDR3L, 128Mbitx16)"
> +
> +config DDRTYPE_MT47H128M8HQ
> +	bool "Micron MT47H128M8-3 (1Gbit, DDR-533@CL4 @ 4.80ns 16Mbisx8x8)"
> +
> +endchoice
> +
> +endmenu
> diff --git a/arch/mips/mach-mscc/Makefile b/arch/mips/mach-mscc/Makefile
> new file mode 100644
> index 0000000000..d14ec33838
> --- /dev/null
> +++ b/arch/mips/mach-mscc/Makefile
> @@ -0,0 +1,5 @@
> +# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
> +
> +CFLAGS_cpu.o += -finline-limit=64000
> +
> +obj-y += cpu.o dram.o reset.o lowlevel_init.o
> diff --git a/arch/mips/mach-mscc/cpu.c b/arch/mips/mach-mscc/cpu.c
> new file mode 100644
> index 0000000000..b503e1407b
> --- /dev/null
> +++ b/arch/mips/mach-mscc/cpu.c
> @@ -0,0 +1,90 @@
> +// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#include <common.h>
> +
> +#include <asm/io.h>
> +#include <asm/types.h>
> +
> +#include <mach/tlb.h>
> +#include <mach/ddr.h>
> +
> +DECLARE_GLOBAL_DATA_PTR;
> +
> +#if CONFIG_SYS_SDRAM_SIZE <= SZ_64M
> +#define MSCC_RAM_TLB_SIZE   SZ_64M
> +#define MSCC_ATTRIB2   MMU_REGIO_INVAL
> +#elif CONFIG_SYS_SDRAM_SIZE <= SZ_128M
> +#define MSCC_RAM_TLB_SIZE   SZ_64M
> +#define MSCC_ATTRIB2   MMU_REGIO_RW
> +#elif CONFIG_SYS_SDRAM_SIZE <= SZ_256M
> +#define MSCC_RAM_TLB_SIZE   SZ_256M
> +#define MSCC_ATTRIB2   MMU_REGIO_INVAL
> +#elif CONFIG_SYS_SDRAM_SIZE <= SZ_512M
> +#define MSCC_RAM_TLB_SIZE   SZ_256M
> +#define MSCC_ATTRIB2   MMU_REGIO_RW
> +#else
> +#define MSCC_RAM_TLB_SIZE   SZ_512M
> +#define MSCC_ATTRIB2   MMU_REGIO_RW
> +#endif
> +
> +/* NOTE: lowlevel_init() function does not have access to the
> + * stack. Thus, all called functions must be inlined, and (any) local
> + * variables must be kept in registers.
> + */
> +void vcoreiii_tlb_init(void)
> +{
> +	register int tlbix = 0;
> +
> +	/*
> +	 * Unlike most of the MIPS based SoCs, the IO register address
> +	 * are not in KSEG0. The mainline linux kernel built in legacy
> +	 * mode needs to access some of the registers very early in
> +	 * the boot and make the assumption that the bootloader has
> +	 * already configured them, so we have to match this
> +	 * expectation.
> +	 */
> +	create_tlb(tlbix++, MSCC_IO_ORIGIN1_OFFSET, SZ_16M, MMU_REGIO_RW,
> +		   MMU_REGIO_RW);
> +
> +#if  CONFIG_SYS_TEXT_BASE == MSCC_FLASH_TO
> +	/*
> +	 * If U-Boot is located in NOR then we want to be able to use
> +	 * the data cache in order to boot in a decent duration
> +	 */
> +	create_tlb(tlbix++, MSCC_FLASH_TO, SZ_16M, MMU_REGIO_RO_C,
> +		   MMU_REGIO_RO_C);
> +	create_tlb(tlbix++, MSCC_FLASH_TO + SZ_32M, SZ_16M, MMU_REGIO_RO_C,
> +		   MMU_REGIO_RO_C);
> +
> +	/*
> +	 * Using cache for RAM also helps to improve boot time. Thanks
> +	 * to this the time to relocate U-Boot in RAM went from 2.092
> +	 * secs to 0.104 secs.
> +	 */
> +	create_tlb(tlbix++, MSCC_DDR_TO, MSCC_RAM_TLB_SIZE, MMU_REGIO_RW,
> +		   MSCC_ATTRIB2);
> +
> +	/* Enable caches by clearing the bit ERL, which is set on reset */
> +	write_c0_status(read_c0_status() & ~BIT(2));
> +#endif /* CONFIG_SYS_TEXT_BASE */
> +}
> +
> +int mach_cpu_init(void)
> +{
> +	/* Speed up NOR flash access */
> +	writel(ICPU_SPI_MST_CFG_CS_DESELECT_TIME(0x19) +
> +	       ICPU_SPI_MST_CFG_CLK_DIV(9), BASE_CFG + ICPU_SPI_MST_CFG);
> +	/*
> +	 * Legacy and mainline linux kernel expect that the
> +	 * interruption map was set as it was done by redboot.
> +	 */
> +	writel(~0, BASE_CFG + ICPU_DST_INTR_MAP(0));
> +	writel(0, BASE_CFG + ICPU_DST_INTR_MAP(1));
> +	writel(0, BASE_CFG + ICPU_DST_INTR_MAP(2));
> +	writel(0, BASE_CFG + ICPU_DST_INTR_MAP(3));
> +
> +	return 0;
> +}
> diff --git a/arch/mips/mach-mscc/dram.c b/arch/mips/mach-mscc/dram.c
> new file mode 100644
> index 0000000000..5acee6f918
> --- /dev/null
> +++ b/arch/mips/mach-mscc/dram.c
> @@ -0,0 +1,71 @@
> +// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#include <common.h>
> +
> +#include <asm/io.h>
> +#include <asm/types.h>
> +
> +#include <mach/tlb.h>
> +#include <mach/ddr.h>
> +
> +DECLARE_GLOBAL_DATA_PTR;
> +
> +static inline int vcoreiii_train_bytelane(void)
> +{
> +	int ret;
> +
> +	ret = hal_vcoreiii_train_bytelane(0);
> +
> +	if (ret)
> +		return ret;
> +	ret = hal_vcoreiii_train_bytelane(1);
> +
> +	return ret;
> +}
> +
> +int vcoreiii_ddr_init(void)
> +{
> +	int res;
> +
> +	if (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT)
> +	      & ICPU_MEMCTRL_STAT_INIT_DONE)) {
> +		hal_vcoreiii_init_memctl();
> +		hal_vcoreiii_wait_memctl();
> +		if (hal_vcoreiii_init_dqs() || vcoreiii_train_bytelane())
> +			hal_vcoreiii_ddr_failed();
> +	}
> +#if (CONFIG_SYS_TEXT_BASE != 0x20000000)
> +	res = dram_check();
> +	if (res == 0)
> +		hal_vcoreiii_ddr_verified();
> +	else
> +		hal_vcoreiii_ddr_failed();
> +
> +	/* Clear boot-mode and read-back to activate/verify */
> +	clrbits_le32(BASE_CFG + ICPU_GENERAL_CTRL,
> +		     ICPU_GENERAL_CTRL_BOOT_MODE_ENA);
> +	readl(BASE_CFG + ICPU_GENERAL_CTRL);
> +#else
> +	res = 0;
> +#endif
> +	return res;
> +}
> +
> +int print_cpuinfo(void)
> +{
> +	printf("MSCC VCore-III MIPS 24Kec\n");
> +
> +	return 0;
> +}
> +
> +int dram_init(void)
> +{
> +	while (vcoreiii_ddr_init())
> +		;
> +
> +	gd->ram_size = CONFIG_SYS_SDRAM_SIZE;
> +	return 0;
> +}
> diff --git a/arch/mips/mach-mscc/include/ioremap.h b/arch/mips/mach-mscc/include/ioremap.h
> new file mode 100644
> index 0000000000..8ea5c65ce3
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/ioremap.h
> @@ -0,0 +1,51 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */

this line should start with a //. There are more files in this patch
which need to be fixed.

> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef __ASM_MACH_MSCC_IOREMAP_H
> +#define __ASM_MACH_MSCC_IOREMAP_H
> +
> +#include <linux/types.h>
> +#include <mach/common.h>
> +
> +/*
> + * Allow physical addresses to be fixed up to help peripherals located
> + * outside the low 32-bit range -- generic pass-through version.
> + */
> +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr,
> +					     phys_addr_t size)
> +{
> +	return phys_addr;
> +}
> +
> +static inline int is_vcoreiii_internal_registers(phys_addr_t offset)
> +{
> +#if defined(CONFIG_ARCH_MSCC)

this define is superfluous because this directory is only added to the
include paths when CONFIG_ARCH_MSCC is selected

> +	if ((offset >= MSCC_IO_ORIGIN1_OFFSET &&
> +	     offset < (MSCC_IO_ORIGIN1_OFFSET + MSCC_IO_ORIGIN1_SIZE)) ||
> +	    (offset >= MSCC_IO_ORIGIN2_OFFSET &&
> +	     offset < (MSCC_IO_ORIGIN2_OFFSET + MSCC_IO_ORIGIN2_SIZE)))
> +		return 1;
> +#endif
> +
> +	return 0;
> +}
> +
> +static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
> +					 unsigned long flags)
> +{
> +	if (is_vcoreiii_internal_registers(offset))
> +		return (void __iomem *)offset;
> +
> +	return NULL;
> +}
> +
> +static inline int plat_iounmap(const volatile void __iomem *addr)
> +{
> +	return is_vcoreiii_internal_registers((unsigned long)addr);
> +}
> +
> +#define _page_cachable_default	_CACHE_CACHABLE_NONCOHERENT
> +
> +#endif				/* __ASM_MACH_MSCC_IOREMAP_H */
> diff --git a/arch/mips/mach-mscc/include/mach/common.h b/arch/mips/mach-mscc/include/mach/common.h
> new file mode 100644
> index 0000000000..842462aeed
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/mach/common.h
> @@ -0,0 +1,24 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef __ASM_MACH_COMMON_H
> +#define __ASM_MACH_COMMON_H
> +
> +#if defined(CONFIG_SOC_OCELOT)
> +#include <mach/ocelot/ocelot.h>
> +#include <mach/ocelot/ocelot_devcpu_gcb.h>
> +#include <mach/ocelot/ocelot_icpu_cfg.h>
> +#else
> +#error Unsupported platform
> +#endif
> +
> +#define MSCC_DDR_TO	0x20000000	/* DDR RAM base offset */
> +#define MSCC_MEMCTL1_TO	0x40000000	/* SPI/PI base offset */
> +#define MSCC_MEMCTL2_TO	0x50000000	/* SPI/PI base offset */
> +#define MSCC_FLASH_TO	MSCC_MEMCTL1_TO	/* Flash base offset */
> +
> +#define VCOREIII_TIMER_DIVIDER 25	/* Clock tick ~ 0.1 us */
> +
> +#endif				/* __ASM_MACH_COMMON_H */
> diff --git a/arch/mips/mach-mscc/include/mach/ddr.h b/arch/mips/mach-mscc/include/mach/ddr.h
> new file mode 100644
> index 0000000000..4bdea90506
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/mach/ddr.h
> @@ -0,0 +1,692 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef __ASM_MACH_DDR_H
> +#define __ASM_MACH_DDR_H
> +
> +#include <asm/cacheops.h>
> +#include <asm/io.h>
> +#include <asm/reboot.h>
> +#include <mach/common.h>
> +
> +#define MIPS_VCOREIII_MEMORY_DDR3
> +#define MIPS_VCOREIII_DDR_SIZE CONFIG_SYS_SDRAM_SIZE
> +
> +#if defined(CONFIG_DDRTYPE_H5TQ1G63BFA)	/* Serval1 Refboard */
> +
> +/* Hynix H5TQ1G63BFA (1Gbit DDR3, x16) @ 3.20ns */
> +#define VC3_MPAR_bank_addr_cnt    3
> +#define VC3_MPAR_row_addr_cnt     13
> +#define VC3_MPAR_col_addr_cnt     10
> +#define VC3_MPAR_tREFI            2437
> +#define VC3_MPAR_tRAS_min         12
> +#define VC3_MPAR_CL               6
> +#define VC3_MPAR_tWTR             4
> +#define VC3_MPAR_tRC              16
> +#define VC3_MPAR_tFAW             16
> +#define VC3_MPAR_tRP              5
> +#define VC3_MPAR_tRRD             4
> +#define VC3_MPAR_tRCD             5
> +#define VC3_MPAR_tMRD             4
> +#define VC3_MPAR_tRFC             35
> +#define VC3_MPAR_CWL              5
> +#define VC3_MPAR_tXPR             38
> +#define VC3_MPAR_tMOD             12
> +#define VC3_MPAR_tDLLK            512
> +#define VC3_MPAR_tWR              5
> +
> +#elif defined(CONFIG_DDRTYPE_MT41J128M16HA)	/* Validation board */
> +
> +/* Micron MT41J128M16HA-15E:D (2Gbit DDR3, x16) @ 3.20ns */
> +#define VC3_MPAR_bank_addr_cnt    3
> +#define VC3_MPAR_row_addr_cnt     14
> +#define VC3_MPAR_col_addr_cnt     10
> +#define VC3_MPAR_tREFI            2437
> +#define VC3_MPAR_tRAS_min         12
> +#define VC3_MPAR_CL               5
> +#define VC3_MPAR_tWTR             4
> +#define VC3_MPAR_tRC              16
> +#define VC3_MPAR_tFAW             16
> +#define VC3_MPAR_tRP              5
> +#define VC3_MPAR_tRRD             4
> +#define VC3_MPAR_tRCD             5
> +#define VC3_MPAR_tMRD             4
> +#define VC3_MPAR_tRFC             50
> +#define VC3_MPAR_CWL              5
> +#define VC3_MPAR_tXPR             54
> +#define VC3_MPAR_tMOD             12
> +#define VC3_MPAR_tDLLK            512
> +#define VC3_MPAR_tWR              5
> +
> +#elif defined(CONFIG_DDRTYPE_MT41K256M16)	/* JR2 Validation board */
> +
> +/* Micron MT41K256M16 (4Gbit, DDR3L-800, 256Mbitx16) @ 3.20ns */
> +#define VC3_MPAR_bank_addr_cnt    3
> +#define VC3_MPAR_row_addr_cnt     15
> +#define VC3_MPAR_col_addr_cnt     10
> +#define VC3_MPAR_tREFI            2437
> +#define VC3_MPAR_tRAS_min         12
> +#define VC3_MPAR_CL               5
> +#define VC3_MPAR_tWTR             4
> +#define VC3_MPAR_tRC              16
> +#define VC3_MPAR_tFAW             16
> +#define VC3_MPAR_tRP              5
> +#define VC3_MPAR_tRRD             4
> +#define VC3_MPAR_tRCD             5
> +#define VC3_MPAR_tMRD             4
> +#define VC3_MPAR_tRFC             82
> +#define VC3_MPAR_CWL              5
> +#define VC3_MPAR_tXPR             85
> +#define VC3_MPAR_tMOD             12
> +#define VC3_MPAR_tDLLK            512
> +#define VC3_MPAR_tWR              5
> +
> +#elif defined(CONFIG_DDRTYPE_H5TQ4G63MFR)	/* JR2 Reference board */
> +
> +/* Hynix H5TQ4G63MFR-PBC (4Gbit, DDR3-800, 256Mbitx16) - 2kb pages @ 3.20ns */
> +#define VC3_MPAR_bank_addr_cnt    3
> +#define VC3_MPAR_row_addr_cnt     15
> +#define VC3_MPAR_col_addr_cnt     10
> +#define VC3_MPAR_tREFI            2437
> +#define VC3_MPAR_tRAS_min         12
> +#define VC3_MPAR_CL               6
> +#define VC3_MPAR_tWTR             4
> +#define VC3_MPAR_tRC              17
> +#define VC3_MPAR_tFAW             16
> +#define VC3_MPAR_tRP              5
> +#define VC3_MPAR_tRRD             4
> +#define VC3_MPAR_tRCD             5
> +#define VC3_MPAR_tMRD             4
> +#define VC3_MPAR_tRFC             82
> +#define VC3_MPAR_CWL              5
> +#define VC3_MPAR_tXPR             85
> +#define VC3_MPAR_tMOD             12
> +#define VC3_MPAR_tDLLK            512
> +#define VC3_MPAR_tWR              5
> +
> +#elif defined(CONFIG_DDRTYPE_MT41K128M16JT)
> +
> +/* Micron Micron MT41K128M16JT-125 (2Gbit DDR3L, 128Mbitx16) @ 3.20ns */
> +#define VC3_MPAR_bank_addr_cnt    3
> +#define VC3_MPAR_row_addr_cnt     14
> +#define VC3_MPAR_col_addr_cnt     10
> +#define VC3_MPAR_tREFI            2437
> +#define VC3_MPAR_tRAS_min         12
> +#define VC3_MPAR_CL               6
> +#define VC3_MPAR_tWTR             4
> +#define VC3_MPAR_tRC              16
> +#define VC3_MPAR_tFAW             16
> +#define VC3_MPAR_tRP              5
> +#define VC3_MPAR_tRRD             4
> +#define VC3_MPAR_tRCD             5
> +#define VC3_MPAR_tMRD             4
> +#define VC3_MPAR_tRFC             82
> +#define VC3_MPAR_CWL              5
> +#define VC3_MPAR_tXPR             85
> +#define VC3_MPAR_tMOD             12
> +#define VC3_MPAR_tDLLK            512
> +#define VC3_MPAR_tWR              5
> +
> +#elif defined(CONFIG_DDRTYPE_MT47H128M8HQ)	/* Luton10/26 Refboards */
> +
> +/* Micron 1Gb MT47H128M8-3 16Meg x 8 x 8 banks, DDR-533@CL4 @ 4.80ns */
> +#define VC3_MPAR_bank_addr_cnt    3
> +#define VC3_MPAR_row_addr_cnt     14
> +#define VC3_MPAR_col_addr_cnt     10
> +#define VC3_MPAR_tREFI            1625
> +#define VC3_MPAR_tRAS_min         9
> +#define VC3_MPAR_CL               4
> +#define VC3_MPAR_tWTR             2
> +#define VC3_MPAR_tRC              12
> +#define VC3_MPAR_tFAW             8
> +#define VC3_MPAR_tRP              4
> +#define VC3_MPAR_tRRD             2
> +#define VC3_MPAR_tRCD             4
> +
> +#define VC3_MPAR_tRPA             4
> +#define VC3_MPAR_tRP              4
> +
> +#define VC3_MPAR_tMRD             2
> +#define VC3_MPAR_tRFC             27
> +
> +#define VC3_MPAR__400_ns_dly      84
> +
> +#define VC3_MPAR_tWR              4
> +#undef MIPS_VCOREIII_MEMORY_DDR3
> +#else
> +
> +#error Unknown DDR system configuration - please add!
> +
> +#endif
> +
> +#ifdef CONFIG_SOC_OCELOT
> +#define MIPS_VCOREIII_MEMORY_16BIT 1
> +#endif
> +
> +#define MIPS_VCOREIII_MEMORY_SSTL_ODT 7
> +#define MIPS_VCOREIII_MEMORY_SSTL_DRIVE 7
> +#define VCOREIII_DDR_DQS_MODE_CALIBRATE
> +
> +#ifdef MIPS_VCOREIII_MEMORY_16BIT
> +#define VC3_MPAR_16BIT       1
> +#else
> +#define VC3_MPAR_16BIT       0
> +#endif
> +
> +#ifdef MIPS_VCOREIII_MEMORY_DDR3
> +#define VC3_MPAR_DDR3_MODE    1	/* DDR3 */
> +#define VC3_MPAR_BURST_LENGTH 8	/* Always 8 (1) for DDR3 */
> +#ifdef MIPS_VCOREIII_MEMORY_16BIT
> +#define VC3_MPAR_BURST_SIZE   1	/* Always 1 for DDR3/16bit */
> +#else
> +#define VC3_MPAR_BURST_SIZE   0
> +#endif
> +#else
> +#define VC3_MPAR_DDR3_MODE    0	/* DDR2 */
> +#ifdef MIPS_VCOREIII_MEMORY_16BIT
> +#define VC3_MPAR_BURST_LENGTH 4	/* in DDR2 16-bit mode, use burstlen 4 */
> +#else
> +#define VC3_MPAR_BURST_LENGTH 8	/* For 8-bit IF we must run burst-8 */
> +#endif
> +#define VC3_MPAR_BURST_SIZE   0	/* Always 0 for DDR2 */
> +#endif
> +
> +#define VC3_MPAR_RL VC3_MPAR_CL
> +#if !defined(MIPS_VCOREIII_MEMORY_DDR3)
> +#define VC3_MPAR_WL (VC3_MPAR_RL - 1)
> +#define VC3_MPAR_MD VC3_MPAR_tMRD
> +#define VC3_MPAR_ID VC3_MPAR__400_ns_dly
> +#define VC3_MPAR_SD VC3_MPAR_tXSRD
> +#define VC3_MPAR_OW (VC3_MPAR_WL - 2)
> +#define VC3_MPAR_OR (VC3_MPAR_WL - 3)
> +#define VC3_MPAR_RP (VC3_MPAR_bank_addr_cnt < 3 ? VC3_MPAR_tRP : VC3_MPAR_tRPA)
> +#define VC3_MPAR_FAW (VC3_MPAR_bank_addr_cnt < 3 ? 1 : VC3_MPAR_tFAW)
> +#define VC3_MPAR_BL (VC3_MPAR_BURST_LENGTH == 4 ? 2 : 4)
> +#define MSCC_MEMPARM_MR0 \
> +	(VC3_MPAR_BURST_LENGTH == 8 ? 3 : 2) | (VC3_MPAR_CL << 4) | \
> +	((VC3_MPAR_tWR - 1) << 9)
> +/* DLL-on, Full-OD, AL=0, RTT=off, nDQS-on, RDQS-off, out-en */
> +#define MSCC_MEMPARM_MR1 0x382
> +#define MSCC_MEMPARM_MR2 0
> +#define MSCC_MEMPARM_MR3 0
> +#else
> +#define VC3_MPAR_WL VC3_MPAR_CWL
> +#define VC3_MPAR_MD VC3_MPAR_tMOD
> +#define VC3_MPAR_ID VC3_MPAR_tXPR
> +#define VC3_MPAR_SD VC3_MPAR_tDLLK
> +#define VC3_MPAR_OW 2
> +#define VC3_MPAR_OR 2
> +#define VC3_MPAR_RP VC3_MPAR_tRP
> +#define VC3_MPAR_FAW VC3_MPAR_tFAW
> +#define VC3_MPAR_BL 4
> +#define MSCC_MEMPARM_MR0 ((VC3_MPAR_RL - 4) << 4) | ((VC3_MPAR_tWR - 4) << 9)
> +/* ODT_RTT: “0x0040” for 120ohm, and “0x0004” for 60ohm. */
> +#define MSCC_MEMPARM_MR1 0x0040
> +#define MSCC_MEMPARM_MR2 ((VC3_MPAR_WL - 5) << 3)
> +#define MSCC_MEMPARM_MR3 0
> +#endif				/* MIPS_VCOREIII_MEMORY_DDR3 */
> +
> +#define MSCC_MEMPARM_MEMCFG                                             \
> +	((MIPS_VCOREIII_DDR_SIZE > SZ_512M) ?				\
> +	 ICPU_MEMCTRL_CFG_DDR_512MBYTE_PLUS : 0) |			\
> +	(VC3_MPAR_16BIT ? ICPU_MEMCTRL_CFG_DDR_WIDTH : 0) |		\
> +	(VC3_MPAR_DDR3_MODE ? ICPU_MEMCTRL_CFG_DDR_MODE : 0) |		\
> +	(VC3_MPAR_BURST_SIZE ? ICPU_MEMCTRL_CFG_BURST_SIZE : 0) |	\
> +	(VC3_MPAR_BURST_LENGTH == 8 ? ICPU_MEMCTRL_CFG_BURST_LEN : 0) | \
> +	(VC3_MPAR_bank_addr_cnt == 3 ? ICPU_MEMCTRL_CFG_BANK_CNT : 0) | \
> +	ICPU_MEMCTRL_CFG_MSB_ROW_ADDR(VC3_MPAR_row_addr_cnt - 1) |	\
> +	ICPU_MEMCTRL_CFG_MSB_COL_ADDR(VC3_MPAR_col_addr_cnt - 1)
> +
> +#ifdef CONFIG_SOC_OCELOT
> +#define MSCC_MEMPARM_PERIOD					\
> +	ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF(8) |		\
> +	ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD(VC3_MPAR_tREFI)
> +
> +#define MSCC_MEMPARM_TIMING0                                            \
> +	ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY(VC3_MPAR_RL + VC3_MPAR_BL + 1 - \
> +					  VC3_MPAR_WL) |		\
> +	ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY(VC3_MPAR_BL - 1) |	\
> +	ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY(VC3_MPAR_BL) |		\
> +	ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY(VC3_MPAR_tRAS_min - 1) |	\
> +	ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY(VC3_MPAR_WL +		\
> +					     VC3_MPAR_BL +		\
> +					     VC3_MPAR_tWR - 1) |	\
> +	ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY(VC3_MPAR_BL - 1) |		\
> +		ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY(VC3_MPAR_WL - 1) |	\
> +	ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY(VC3_MPAR_RL - 3)
> +
> +#define MSCC_MEMPARM_TIMING1                                            \
> +	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY(VC3_MPAR_tRC - 1) | \
> +	ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY(VC3_MPAR_FAW - 1) |		\
> +	ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY(VC3_MPAR_RP - 1) |	\
> +	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY(VC3_MPAR_tRRD - 1) |	\
> +	ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY(VC3_MPAR_tRCD - 1) |	\
> +	ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY(VC3_MPAR_WL +			\
> +					  VC3_MPAR_BL +			\
> +					  VC3_MPAR_tWTR - 1)
> +
> +#define MSCC_MEMPARM_TIMING2					\
> +	ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY(VC3_MPAR_RP - 1) |	\
> +	ICPU_MEMCTRL_TIMING2_MDSET_DLY(VC3_MPAR_MD - 1) |		\
> +	ICPU_MEMCTRL_TIMING2_REF_DLY(VC3_MPAR_tRFC - 1) |		\
> +	ICPU_MEMCTRL_TIMING2_INIT_DLY(VC3_MPAR_ID - 1)
> +
> +#define MSCC_MEMPARM_TIMING3						\
> +	ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY(VC3_MPAR_WL +	\
> +						    VC3_MPAR_tWTR - 1) |\
> +	ICPU_MEMCTRL_TIMING3_ODT_RD_DLY(VC3_MPAR_OR - 1) |		\
> +	ICPU_MEMCTRL_TIMING3_ODT_WR_DLY(VC3_MPAR_OW - 1) |		\
> +	ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY(VC3_MPAR_RL - 3)
> +
> +#else
> +#define MSCC_MEMPARM_PERIOD					\
> +	ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF(1) |		\
> +	ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD(VC3_MPAR_tREFI)
> +
> +#define MSCC_MEMPARM_TIMING0                                            \
> +	ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY(VC3_MPAR_tRAS_min - 1) |	\
> +	ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY(VC3_MPAR_CL +		\
> +					     (VC3_MPAR_BURST_LENGTH == 8 ? 2 : 0) + \
> +					     VC3_MPAR_tWR) |		\
> +	ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY(VC3_MPAR_BURST_LENGTH == 8 ? 3 : 1) | \
> +	ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY(VC3_MPAR_CL - 3) |		\
> +	ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY(VC3_MPAR_CL - 3)
> +
> +#define MSCC_MEMPARM_TIMING1                                            \
> +	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY(VC3_MPAR_tRC - 1) | \
> +	ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY(VC3_MPAR_tFAW - 1) |		\
> +	ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY(VC3_MPAR_tRP - 1) |	\
> +	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY(VC3_MPAR_tRRD - 1) |	\
> +	ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY(VC3_MPAR_tRCD - 1) |	\
> +	ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY(VC3_MPAR_CL +			\
> +					  (VC3_MPAR_BURST_LENGTH == 8 ? 2 : 0) + \
> +					  VC3_MPAR_tWTR)
> +#define MSCC_MEMPARM_TIMING2                                            \
> +	ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY(VC3_MPAR_tRPA - 1) |		\
> +	ICPU_MEMCTRL_TIMING2_MDSET_DLY(VC3_MPAR_tMRD - 1) |		\
> +	ICPU_MEMCTRL_TIMING2_REF_DLY(VC3_MPAR_tRFC - 1) |		\
> +	ICPU_MEMCTRL_TIMING2_FOUR_HUNDRED_NS_DLY(VC3_MPAR__400_ns_dly)
> +
> +#define MSCC_MEMPARM_TIMING3						\
> +	ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY(VC3_MPAR_CL - 1) |	\
> +	ICPU_MEMCTRL_TIMING3_ODT_WR_DLY(VC3_MPAR_CL - 1) |		\
> +	ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY(VC3_MPAR_CL - 1)
> +
> +#endif
> +
> +enum {
> +	DDR_TRAIN_OK,
> +	DDR_TRAIN_CONTINUE,
> +	DDR_TRAIN_ERROR,
> +};
> +
> +/*
> + * We actually have very few 'pause' possibilities apart from
> + * these assembly nops (at this very early stage).
> + */
> +#define PAUSE() asm volatile("nop; nop; nop; nop; nop; nop; nop; nop")
> +
> +/* NB: Assumes inlining as no stack is available! */
> +static inline void set_dly(u32 bytelane, u32 dly)
> +{
> +	register u32 r = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> +
> +	r &= ~ICPU_MEMCTRL_DQS_DLY_DQS_DLY_M;
> +	r |= ICPU_MEMCTRL_DQS_DLY_DQS_DLY(dly);
> +	writel(r, BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> +}
> +
> +static inline bool incr_dly(u32 bytelane)
> +{
> +	register u32 r = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> +
> +	if (ICPU_MEMCTRL_DQS_DLY_DQS_DLY(r) < 31) {
> +		writel(r + 1, BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> +		return true;
> +	}
> +
> +	return false;
> +}
> +
> +static inline bool adjust_dly(int adjust)
> +{
> +	register u32 r = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(0));
> +
> +	if (ICPU_MEMCTRL_DQS_DLY_DQS_DLY(r) < 31) {
> +		writel(r + adjust, BASE_CFG + ICPU_MEMCTRL_DQS_DLY(0));
> +		return true;
> +	}
> +
> +	return false;
> +}
> +
> +/* NB: Assumes inlining as no stack is available! */
> +static inline void center_dly(u32 bytelane, u32 start)
> +{
> +	register u32 r = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane)) - start;
> +
> +	writel(start + (r >> 1), BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> +}
> +
> +static inline void memphy_soft_reset(void)
> +{
> +	setbits_le32(BASE_CFG + ICPU_MEMPHY_CFG, ICPU_MEMPHY_CFG_PHY_FIFO_RST);
> +	PAUSE();
> +	clrbits_le32(BASE_CFG + ICPU_MEMPHY_CFG, ICPU_MEMPHY_CFG_PHY_FIFO_RST);
> +	PAUSE();
> +}
> +
> +#ifdef CONFIG_SOC_OCELOT
> +static u8 training_data[] = { 0xfe, 0x11, 0x33, 0x55, 0x77, 0x99, 0xbb, 0xdd };
> +
> +static inline void sleep_100ns(u32 val)
> +{
> +	/* Set the timer tick generator to 100 ns */
> +	writel(VCOREIII_TIMER_DIVIDER - 1, BASE_CFG + ICPU_TIMER_TICK_DIV);
> +
> +	/* Set the timer value */
> +	writel(val, BASE_CFG + ICPU_TIMER_VALUE(0));
> +
> +	/* Enable timer 0 for one-shot */
> +	writel(ICPU_TIMER_CTRL_ONE_SHOT_ENA | ICPU_TIMER_CTRL_TIMER_ENA,
> +	       BASE_CFG + ICPU_TIMER_CTRL(0));
> +
> +	/* Wait for timer 0 to reach 0 */
> +	while (readl(BASE_CFG + ICPU_TIMER_VALUE(0)) != 0)
> +		;
> +}
> +
> +static inline void hal_vcoreiii_ddr_reset_assert(void)
> +{
> +	/* DDR has reset pin on GPIO 19 toggle Low-High to release */
> +	setbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
> +	writel(BIT(19), BASE_DEVCPU_GCB + PERF_GPIO_OUT_CLR);
> +	sleep_100ns(10000);
> +}
> +
> +static inline void hal_vcoreiii_ddr_reset_release(void)
> +{
> +	/* DDR has reset pin on GPIO 19 toggle Low-High to release */
> +	setbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
> +	writel(BIT(19), BASE_DEVCPU_GCB + PERF_GPIO_OUT_SET);
> +	sleep_100ns(10000);
> +}
> +
> +/*
> + * DDR memory sanity checking failed, tally and do hard reset
> + *
> + * NB: Assumes inlining as no stack is available!
> + */
> +static inline void hal_vcoreiii_ddr_failed(void)
> +{
> +	register u32 reset;
> +
> +	writel(readl(BASE_CFG + ICPU_GPR(6)) + 1, BASE_CFG + ICPU_GPR(6));
> +
> +	clrbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
> +
> +	/* Jump to reset - does not return */
> +	reset = KSEG0ADDR(_machine_restart);
> +	/* Reset while running from cache */
> +	icache_lock((void *)reset, 128);
> +	asm volatile ("jr %0"::"r" (reset));

could you briefly describe the reason for this in a comment? It's not
clear why this code is necessary without knowing the SoC. AFAIU from
your last mail the boot SPI flash is mapped to KUSEG and you need to
establish a TLB mapping in lowlevel_init() to be able to move to KSEG0.

> +
> +	panic("DDR init failed\n");
> +}
> +
> +/*
> + * DDR memory sanity checking done, possibly enable ECC.
> + *
> + * NB: Assumes inlining as no stack is available!
> + */
> +static inline void hal_vcoreiii_ddr_verified(void)
> +{
> +#ifdef MIPS_VCOREIII_MEMORY_ECC
> +	/* Finally, enable ECC */
> +	register u32 val = readl(BASE_CFG + ICPU_MEMCTRL_CFG);
> +
> +	val |= ICPU_MEMCTRL_CFG_DDR_ECC_ERR_ENA;
> +	val &= ~ICPU_MEMCTRL_CFG_BURST_SIZE;
> +
> +	writel(val, BASE_CFG + ICPU_MEMCTRL_CFG);
> +#endif
> +
> +	/* Reset Status register - sticky bits */
> +	writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT), BASE_CFG + ICPU_MEMCTRL_STAT);
> +}
> +
> +/* NB: Assumes inlining as no stack is available! */
> +static inline int look_for(u32 bytelane)
> +{
> +	register u32 i;
> +
> +	/* Reset FIFO in case any previous access failed */
> +	for (i = 0; i < sizeof(training_data); i++) {
> +		register u32 byte;
> +
> +		memphy_soft_reset();
> +		/* Reset sticky bits */
> +		writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
> +		       BASE_CFG + ICPU_MEMCTRL_STAT);
> +		/* Read data */
> +		byte = ((volatile u8 *)MSCC_DDR_TO)[bytelane + (i * 4)];

__raw_readl()?

> +		/*
> +		 * Prevent the compiler reordering the instruction so
> +		 * the read of RAM happens after the check of the
> +		 * errors.
> +		 */
> +		asm volatile("" : : : "memory");

this is available as barrier(). But according to context you could use
rmb(). Anyway, with the current volatile pointer or the suggested
__raw_readl(), the compiler shouldn't reorder at all.

> +		if (readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
> +		    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
> +		     ICPU_MEMCTRL_STAT_RDATA_DUMMY)) {
> +			/* Noise on the line */
> +			goto read_error;
> +		}
> +		/* If mismatch, increment DQS - if possible */
> +		if (byte != training_data[i]) {
> + read_error:
> +			if (!incr_dly(bytelane))
> +				return DDR_TRAIN_ERROR;
> +			return DDR_TRAIN_CONTINUE;
> +		}
> +	}
> +	return DDR_TRAIN_OK;
> +}
> +
> +/* NB: Assumes inlining as no stack is available! */
> +static inline int look_past(u32 bytelane)
> +{
> +	register u32 i;
> +
> +	/* Reset FIFO in case any previous access failed */
> +	for (i = 0; i < sizeof(training_data); i++) {
> +		register u32 byte;
> +
> +		memphy_soft_reset();
> +		/* Ack sticky bits */
> +		writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
> +		       BASE_CFG + ICPU_MEMCTRL_STAT);
> +		byte = ((volatile u8 *)MSCC_DDR_TO)[bytelane + (i * 4)];
> +		/*
> +		 * Prevent the compiler reordering the instruction so
> +		 * the read of RAM happens after the check of the
> +		 * errors.
> +		 */
> +		asm volatile("" : : : "memory");

same as above

> +		if (readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
> +		    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
> +		     ICPU_MEMCTRL_STAT_RDATA_DUMMY)) {
> +			/* Noise on the line */
> +			goto read_error;
> +		}
> +		/* Bail out when we see first mismatch */
> +		if (byte != training_data[i]) {
> + read_error:
> +			return DDR_TRAIN_OK;
> +		}
> +	}
> +	/* All data compares OK, increase DQS and retry */
> +	if (!incr_dly(bytelane))
> +		return DDR_TRAIN_ERROR;
> +
> +	return DDR_TRAIN_CONTINUE;
> +}
> +
> +static inline int hal_vcoreiii_train_bytelane(u32 bytelane)
> +{
> +	register int res;
> +	register u32 dqs_s;
> +
> +	set_dly(bytelane, 0);	// Start training at DQS=0

no C++ style comments

> +	while ((res = look_for(bytelane)) == DDR_TRAIN_CONTINUE)
> +		;
> +	if (res != DDR_TRAIN_OK)
> +		return res;
> +
> +	dqs_s = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> +	while ((res = look_past(bytelane)) == DDR_TRAIN_CONTINUE)
> +		;
> +	if (res != DDR_TRAIN_OK)
> +		return res;
> +	/* Reset FIFO - for good measure */
> +	memphy_soft_reset();
> +	/* Adjust to center [dqs_s;cur] */
> +	center_dly(bytelane, dqs_s);
> +	return DDR_TRAIN_OK;
> +}
> +
> +/* This algorithm is converted from the TCL training algorithm used
> + * during silicon simulation.
> + * NB: Assumes inlining as no stack is available!
> + */
> +static inline int hal_vcoreiii_init_dqs(void)
> +{
> +#define MAX_DQS 32
> +	register u32 i, j;
> +
> +	for (i = 0; i < MAX_DQS; i++) {
> +		set_dly(0, i);	// Byte-lane 0

no C++ style comments

> +		for (j = 0; j < MAX_DQS; j++) {
> +			register u32 __attribute__ ((unused)) byte;

why unused? If you really need it, you could use __maybe_unused

> +			set_dly(1, j);	// Byte-lane 1
> +			/* Reset FIFO in case any previous access failed */
> +			memphy_soft_reset();
> +			writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
> +			       BASE_CFG + ICPU_MEMCTRL_STAT);
> +			byte = ((volatile u8 *)MSCC_DDR_TO)[0];
> +			byte = ((volatile u8 *)MSCC_DDR_TO)[1];
> +			if (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
> +			    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
> +			     ICPU_MEMCTRL_STAT_RDATA_DUMMY)))
> +				return 0;
> +		}
> +	}
> +	return -1;
> +}
> +
> +static inline int dram_check(void)
> +{
> +#define DDR ((volatile u32 *) MSCC_DDR_TO)
> +	register u32 i;
> +
> +	for (i = 0; i < 8; i++) {
> +		DDR[i] = ~i;
> +		if (DDR[i] != ~i)

__raw_readl(), __raw_writel() and drop the explicit volatile?

> +			return 1;
> +	}
> +	return 0;
> +}
> +
> +/*
> + * NB: Called *early* to init memory controller - assumes inlining as
> + * no stack is available!
> + */
> +static inline void hal_vcoreiii_init_memctl(void)
> +{
> +	/* Ensure DDR is in reset */
> +	hal_vcoreiii_ddr_reset_assert();
> +
> +	/* Wait maybe not needed, but ... */
> +	PAUSE();
> +
> +	/* Drop sys ctl memory controller forced reset */
> +	clrbits_le32(BASE_CFG + ICPU_RESET, ICPU_RESET_MEM_RST_FORCE);
> +
> +	PAUSE();
> +
> +	/* Drop Reset, enable SSTL */
> +	writel(ICPU_MEMPHY_CFG_PHY_SSTL_ENA, BASE_CFG + ICPU_MEMPHY_CFG);
> +	PAUSE();
> +
> +	/* Start the automatic SSTL output and ODT drive-strength calibration */
> +	writel(ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT(MIPS_VCOREIII_MEMORY_SSTL_ODT) |
> +	       /* drive strength */
> +	       ICPU_MEMPHY_ZCAL_ZCAL_PROG(MIPS_VCOREIII_MEMORY_SSTL_DRIVE) |
> +	       /* Start calibration process */
> +	       ICPU_MEMPHY_ZCAL_ZCAL_ENA, BASE_CFG + ICPU_MEMPHY_ZCAL);
> +
> +	/* Wait for ZCAL to clear */
> +	while (readl(BASE_CFG + ICPU_MEMPHY_ZCAL) & ICPU_MEMPHY_ZCAL_ZCAL_ENA)
> +		;
> +
> +	/* Check no ZCAL_ERR */
> +	if (readl(BASE_CFG + ICPU_MEMPHY_ZCAL_STAT)
> +	    & ICPU_MEMPHY_ZCAL_STAT_ZCAL_ERR)
> +		hal_vcoreiii_ddr_failed();
> +
> +	/* Drive CL, CK, ODT */
> +	setbits_le32(BASE_CFG + ICPU_MEMPHY_CFG, ICPU_MEMPHY_CFG_PHY_ODT_OE |
> +		     ICPU_MEMPHY_CFG_PHY_CK_OE | ICPU_MEMPHY_CFG_PHY_CL_OE);
> +
> +	/* Initialize memory controller */
> +	writel(MSCC_MEMPARM_MEMCFG, BASE_CFG + ICPU_MEMCTRL_CFG);
> +	writel(MSCC_MEMPARM_PERIOD, BASE_CFG + ICPU_MEMCTRL_REF_PERIOD);
> +
> +	writel(MSCC_MEMPARM_TIMING0, BASE_CFG + ICPU_MEMCTRL_TIMING0);
> +
> +	writel(MSCC_MEMPARM_TIMING1, BASE_CFG + ICPU_MEMCTRL_TIMING1);
> +	writel(MSCC_MEMPARM_TIMING2, BASE_CFG + ICPU_MEMCTRL_TIMING2);
> +	writel(MSCC_MEMPARM_TIMING3, BASE_CFG + ICPU_MEMCTRL_TIMING3);
> +	writel(MSCC_MEMPARM_MR0, BASE_CFG + ICPU_MEMCTRL_MR0_VAL);
> +	writel(MSCC_MEMPARM_MR1, BASE_CFG + ICPU_MEMCTRL_MR1_VAL);
> +	writel(MSCC_MEMPARM_MR2, BASE_CFG + ICPU_MEMCTRL_MR2_VAL);
> +	writel(MSCC_MEMPARM_MR3, BASE_CFG + ICPU_MEMCTRL_MR3_VAL);
> +
> +	/* Termination setup - enable ODT */
> +	writel(ICPU_MEMCTRL_TERMRES_CTRL_LOCAL_ODT_RD_ENA |
> +	       /* Assert ODT0 for any write */
> +	       ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA(3),
> +	       BASE_CFG + ICPU_MEMCTRL_TERMRES_CTRL);
> +
> +	/* Release Reset from DDR */
> +	hal_vcoreiii_ddr_reset_release();
> +
> +	writel(readl(BASE_CFG + ICPU_GPR(7)) + 1, BASE_CFG + ICPU_GPR(7));
> +}
> +
> +static inline void hal_vcoreiii_wait_memctl(void)
> +{
> +	/* Now, rip it! */
> +	writel(ICPU_MEMCTRL_CTRL_INITIALIZE, BASE_CFG + ICPU_MEMCTRL_CTRL);
> +
> +	while (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT)
> +		 & ICPU_MEMCTRL_STAT_INIT_DONE))
> +		;
> +
> +	/* Settle...? */
> +	sleep_100ns(10000);
> +
> +	/* Establish data contents in DDR RAM for training */
> +
> +	__raw_writel(0xcacafefe, ((void __iomem *)MSCC_DDR_TO));
> +	__raw_writel(0x22221111, ((void __iomem *)MSCC_DDR_TO + 0x4));
> +	__raw_writel(0x44443333, ((void __iomem *)MSCC_DDR_TO + 0x8));
> +	__raw_writel(0x66665555, ((void __iomem *)MSCC_DDR_TO + 0xC));
> +	__raw_writel(0x88887777, ((void __iomem *)MSCC_DDR_TO + 0x10));
> +	__raw_writel(0xaaaa9999, ((void __iomem *)MSCC_DDR_TO + 0x14));
> +	__raw_writel(0xccccbbbb, ((void __iomem *)MSCC_DDR_TO + 0x18));
> +	__raw_writel(0xeeeedddd, ((void __iomem *)MSCC_DDR_TO + 0x1C));
> +}
> +#endif				/* __ASM_MACH_DDR_H */
> diff --git a/arch/mips/mach-mscc/include/mach/ocelot/ocelot.h b/arch/mips/mach-mscc/include/mach/ocelot/ocelot.h
> new file mode 100644
> index 0000000000..2cb2135d37
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/mach/ocelot/ocelot.h
> @@ -0,0 +1,24 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Microsemi Ocelot Switch driver
> + *
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef _MSCC_OCELOT_H_
> +#define _MSCC_OCELOT_H_
> +
> +#include <linux/bitops.h>
> +#include <dm.h>
> +
> +/*
> + * Target offset base(s)
> + */
> +#define MSCC_IO_ORIGIN1_OFFSET 0x70000000
> +#define MSCC_IO_ORIGIN1_SIZE   0x00200000
> +#define MSCC_IO_ORIGIN2_OFFSET 0x71000000
> +#define MSCC_IO_ORIGIN2_SIZE   0x01000000
> +#define BASE_CFG        ((void __iomem *)0x70000000)
> +#define BASE_DEVCPU_GCB ((void __iomem *)0x71070000)

Would it be possible on that SoC to define those register offsets as
simple physical addresses and create the mapping when needed?
For example:

void foo()
{
    void __iomem *base_cfg = ioremap(BASE_CFG, ...);
    writel(0, base_cfg + XXX);
}

> +
> +#endif
> diff --git a/arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h
> new file mode 100644
> index 0000000000..f8aa97ba26
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h
> @@ -0,0 +1,21 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef _MSCC_OCELOT_DEVCPU_GCB_H_
> +#define _MSCC_OCELOT_DEVCPU_GCB_H_
> +
> +#define PERF_SOFT_RST                                     0x8
> +
> +#define PERF_SOFT_RST_SOFT_NON_CFG_RST                    BIT(2)
> +#define PERF_SOFT_RST_SOFT_SWC_RST                        BIT(1)
> +#define PERF_SOFT_RST_SOFT_CHIP_RST                       BIT(0)
> +
> +#define PERF_GPIO_OUT_SET                                 0x34
> +
> +#define PERF_GPIO_OUT_CLR                                 0x38
> +
> +#define PERF_GPIO_OE                                      0x44
> +
> +#endif
> diff --git a/arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h
> new file mode 100644
> index 0000000000..04cf70bec3
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h
> @@ -0,0 +1,274 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef _MSCC_OCELOT_ICPU_CFG_H_
> +#define _MSCC_OCELOT_ICPU_CFG_H_
> +
> +#define ICPU_GPR(x) (0x4 * (x))
> +#define ICPU_GPR_RSZ                                      0x4
> +
> +#define ICPU_RESET                                        0x20
> +
> +#define ICPU_RESET_CORE_RST_CPU_ONLY                      BIT(3)
> +#define ICPU_RESET_CORE_RST_PROTECT                       BIT(2)
> +#define ICPU_RESET_CORE_RST_FORCE                         BIT(1)
> +#define ICPU_RESET_MEM_RST_FORCE                          BIT(0)
> +
> +#define ICPU_GENERAL_CTRL                                 0x24
> +
> +#define ICPU_GENERAL_CTRL_CPU_BUSIF_SLEEP_DIS             BIT(14)
> +#define ICPU_GENERAL_CTRL_CPU_BUSIF_WERR_ENA              BIT(13)
> +#define ICPU_GENERAL_CTRL_CPU_8051_IROM_ENA               BIT(12)
> +#define ICPU_GENERAL_CTRL_CPU_MIPS_DIS                    BIT(11)
> +#define ICPU_GENERAL_CTRL_IF_MIIM_SLV_ADDR_SEL            BIT(10)
> +#define ICPU_GENERAL_CTRL_IF_MIIM_SLV_ENA                 BIT(9)
> +#define ICPU_GENERAL_CTRL_IF_PI_SLV_DONEPOL               BIT(8)
> +#define ICPU_GENERAL_CTRL_IF_PI_MST_ENA                   BIT(7)
> +#define ICPU_GENERAL_CTRL_IF_PI_SLV_ENA                   BIT(6)
> +#define ICPU_GENERAL_CTRL_IF_SI_OWNER(x)                  (((x) << 4) & GENMASK(5, 4))
> +#define ICPU_GENERAL_CTRL_IF_SI_OWNER_M                   GENMASK(5, 4)
> +#define ICPU_GENERAL_CTRL_IF_SI_OWNER_X(x)                (((x) & GENMASK(5, 4)) >> 4)
> +#define ICPU_GENERAL_CTRL_SSI_MST_CONTENTION              BIT(3)
> +#define ICPU_GENERAL_CTRL_CPU_BE_ENA                      BIT(2)
> +#define ICPU_GENERAL_CTRL_CPU_DIS                         BIT(1)
> +#define ICPU_GENERAL_CTRL_BOOT_MODE_ENA                   BIT(0)
> +#define ICPU_SPI_MST_CFG                                  0x3c
> +
> +#define ICPU_SPI_MST_CFG_A32B_ENA                         BIT(11)
> +#define ICPU_SPI_MST_CFG_FAST_READ_ENA                    BIT(10)
> +#define ICPU_SPI_MST_CFG_CS_DESELECT_TIME(x)              (((x) << 5) & GENMASK(9, 5))
> +#define ICPU_SPI_MST_CFG_CS_DESELECT_TIME_M               GENMASK(9, 5)
> +#define ICPU_SPI_MST_CFG_CS_DESELECT_TIME_X(x)            (((x) & GENMASK(9, 5)) >> 5)
> +#define ICPU_SPI_MST_CFG_CLK_DIV(x)                       ((x) & GENMASK(4, 0))
> +#define ICPU_SPI_MST_CFG_CLK_DIV_M                        GENMASK(4, 0)
> +
> +#define ICPU_SW_MODE                                      0x50
> +
> +#define ICPU_SW_MODE_SW_PIN_CTRL_MODE                     BIT(13)
> +#define ICPU_SW_MODE_SW_SPI_SCK                           BIT(12)
> +#define ICPU_SW_MODE_SW_SPI_SCK_OE                        BIT(11)
> +#define ICPU_SW_MODE_SW_SPI_SDO                           BIT(10)
> +#define ICPU_SW_MODE_SW_SPI_SDO_OE                        BIT(9)
> +#define ICPU_SW_MODE_SW_SPI_CS(x)                         (((x) << 5) & GENMASK(8, 5))
> +#define ICPU_SW_MODE_SW_SPI_CS_M                          GENMASK(8, 5)
> +#define ICPU_SW_MODE_SW_SPI_CS_X(x)                       (((x) & GENMASK(8, 5)) >> 5)
> +#define ICPU_SW_MODE_SW_SPI_CS_OE(x)                      (((x) << 1) & GENMASK(4, 1))
> +#define ICPU_SW_MODE_SW_SPI_CS_OE_M                       GENMASK(4, 1)
> +#define ICPU_SW_MODE_SW_SPI_CS_OE_X(x)                    (((x) & GENMASK(4, 1)) >> 1)
> +#define ICPU_SW_MODE_SW_SPI_SDI                           BIT(0)
> +
> +#define ICPU_INTR_ENA					  0x88
> +
> +#define ICPU_DST_INTR_MAP(x)  (0x98 + 0x4 * (x))
> +#define ICPU_DST_INTR_MAP_RSZ                             0x4
> +
> +#define ICPU_DST_INTR_IDENT                               0xa8
> +#define ICPU_DST_INTR_IDENT_RSZ                           0x4
> +
> +#define ICPU_TIMER_TICK_DIV                               0xe8
> +#define ICPU_TIMER_VALUE(x) (0xec + 0x4 * (x))
> +
> +#define ICPU_TIMER_CTRL(x) (0x104 + 0x4 * (x))
> +#define ICPU_TIMER_CTRL_MAX_FREQ_ENA			  BIT(3)
> +#define ICPU_TIMER_CTRL_ONE_SHOT_ENA			  BIT(2)
> +#define ICPU_TIMER_CTRL_TIMER_ENA			  BIT(1)
> +#define ICPU_TIMER_CTRL_FORCE_RELOAD			  BIT(0)
> +
> +#define ICPU_MEMCTRL_CTRL                                 0x110
> +#define ICPU_MEMCTRL_CTRL_PWR_DOWN                        BIT(3)
> +#define ICPU_MEMCTRL_CTRL_MDSET                           BIT(2)
> +#define ICPU_MEMCTRL_CTRL_STALL_REF_ENA                   BIT(1)
> +#define ICPU_MEMCTRL_CTRL_INITIALIZE                      BIT(0)
> +
> +#define ICPU_MEMCTRL_CFG                                  0x114
> +
> +#define ICPU_MEMCTRL_CFG_DDR_512MBYTE_PLUS                BIT(16)
> +#define ICPU_MEMCTRL_CFG_DDR_ECC_ERR_ENA                  BIT(15)
> +#define ICPU_MEMCTRL_CFG_DDR_ECC_COR_ENA                  BIT(14)
> +#define ICPU_MEMCTRL_CFG_DDR_ECC_ENA                      BIT(13)
> +#define ICPU_MEMCTRL_CFG_DDR_WIDTH                        BIT(12)
> +#define ICPU_MEMCTRL_CFG_DDR_MODE                         BIT(11)
> +#define ICPU_MEMCTRL_CFG_BURST_SIZE                       BIT(10)
> +#define ICPU_MEMCTRL_CFG_BURST_LEN                        BIT(9)
> +#define ICPU_MEMCTRL_CFG_BANK_CNT                         BIT(8)
> +#define ICPU_MEMCTRL_CFG_MSB_ROW_ADDR(x)                  (((x) << 4) & GENMASK(7, 4))
> +#define ICPU_MEMCTRL_CFG_MSB_ROW_ADDR_M                   GENMASK(7, 4)
> +#define ICPU_MEMCTRL_CFG_MSB_ROW_ADDR_X(x)                (((x) & GENMASK(7, 4)) >> 4)
> +#define ICPU_MEMCTRL_CFG_MSB_COL_ADDR(x)                  ((x) & GENMASK(3, 0))
> +#define ICPU_MEMCTRL_CFG_MSB_COL_ADDR_M                   GENMASK(3, 0)
> +
> +#define ICPU_MEMCTRL_STAT                                 0x118
> +
> +#define ICPU_MEMCTRL_STAT_RDATA_MASKED                    BIT(5)
> +#define ICPU_MEMCTRL_STAT_RDATA_DUMMY                     BIT(4)
> +#define ICPU_MEMCTRL_STAT_RDATA_ECC_ERR                   BIT(3)
> +#define ICPU_MEMCTRL_STAT_RDATA_ECC_COR                   BIT(2)
> +#define ICPU_MEMCTRL_STAT_PWR_DOWN_ACK                    BIT(1)
> +#define ICPU_MEMCTRL_STAT_INIT_DONE                       BIT(0)
> +
> +#define ICPU_MEMCTRL_REF_PERIOD                           0x11c
> +
> +#define ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF(x)           (((x) << 16) & GENMASK(19, 16))
> +#define ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF_M            GENMASK(19, 16)
> +#define ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF_X(x)         (((x) & GENMASK(19, 16)) >> 16)
> +#define ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD(x)             ((x) & GENMASK(15, 0))
> +#define ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD_M              GENMASK(15, 0)
> +
> +#define ICPU_MEMCTRL_TIMING0                              0x124
> +
> +#define ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY(x)              (((x) << 28) & GENMASK(31, 28))
> +#define ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY_M               GENMASK(31, 28)
> +#define ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY_X(x)            (((x) & GENMASK(31, 28)) >> 28)
> +#define ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY(x)          (((x) << 24) & GENMASK(27, 24))
> +#define ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY_M           GENMASK(27, 24)
> +#define ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY_X(x)        (((x) & GENMASK(27, 24)) >> 24)
> +#define ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY(x)          (((x) << 20) & GENMASK(23, 20))
> +#define ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY_M           GENMASK(23, 20)
> +#define ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY_X(x)        (((x) & GENMASK(23, 20)) >> 20)
> +#define ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY(x)          (((x) << 16) & GENMASK(19, 16))
> +#define ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY_M           GENMASK(19, 16)
> +#define ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY_X(x)        (((x) & GENMASK(19, 16)) >> 16)
> +#define ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY(x)           (((x) << 12) & GENMASK(15, 12))
> +#define ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY_M            GENMASK(15, 12)
> +#define ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY_X(x)         (((x) & GENMASK(15, 12)) >> 12)
> +#define ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY(x)           (((x) << 8) & GENMASK(11, 8))
> +#define ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY_M            GENMASK(11, 8)
> +#define ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY_X(x)         (((x) & GENMASK(11, 8)) >> 8)
> +#define ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY(x)           (((x) << 4) & GENMASK(7, 4))
> +#define ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY_M            GENMASK(7, 4)
> +#define ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY_X(x)         (((x) & GENMASK(7, 4)) >> 4)
> +#define ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY(x)           ((x) & GENMASK(3, 0))
> +#define ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY_M            GENMASK(3, 0)
> +
> +#define ICPU_MEMCTRL_TIMING1                              0x128
> +
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY(x)  (((x) << 24) & GENMASK(31, 24))
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY_M   GENMASK(31, 24)
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY_X(x) (((x) & GENMASK(31, 24)) >> 24)
> +#define ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY(x)             (((x) << 16) & GENMASK(23, 16))
> +#define ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY_M              GENMASK(23, 16)
> +#define ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY_X(x)           (((x) & GENMASK(23, 16)) >> 16)
> +#define ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY(x)          (((x) << 12) & GENMASK(15, 12))
> +#define ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY_M           GENMASK(15, 12)
> +#define ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY_X(x)        (((x) & GENMASK(15, 12)) >> 12)
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY(x)            (((x) << 8) & GENMASK(11, 8))
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY_M             GENMASK(11, 8)
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY_X(x)          (((x) & GENMASK(11, 8)) >> 8)
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY(x)            (((x) << 4) & GENMASK(7, 4))
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY_M             GENMASK(7, 4)
> +#define ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY_X(x)          (((x) & GENMASK(7, 4)) >> 4)
> +#define ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY(x)              ((x) & GENMASK(3, 0))
> +#define ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY_M               GENMASK(3, 0)
> +
> +#define ICPU_MEMCTRL_TIMING2                              0x12c
> +
> +#define ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY(x)             (((x) << 28) & GENMASK(31, 28))
> +#define ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY_M              GENMASK(31, 28)
> +#define ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY_X(x)           (((x) & GENMASK(31, 28)) >> 28)
> +#define ICPU_MEMCTRL_TIMING2_MDSET_DLY(x)                 (((x) << 24) & GENMASK(27, 24))
> +#define ICPU_MEMCTRL_TIMING2_MDSET_DLY_M                  GENMASK(27, 24)
> +#define ICPU_MEMCTRL_TIMING2_MDSET_DLY_X(x)               (((x) & GENMASK(27, 24)) >> 24)
> +#define ICPU_MEMCTRL_TIMING2_REF_DLY(x)                   (((x) << 16) & GENMASK(23, 16))
> +#define ICPU_MEMCTRL_TIMING2_REF_DLY_M                    GENMASK(23, 16)
> +#define ICPU_MEMCTRL_TIMING2_REF_DLY_X(x)                 (((x) & GENMASK(23, 16)) >> 16)
> +#define ICPU_MEMCTRL_TIMING2_INIT_DLY(x)                  ((x) & GENMASK(15, 0))
> +#define ICPU_MEMCTRL_TIMING2_INIT_DLY_M                   GENMASK(15, 0)
> +
> +#define ICPU_MEMCTRL_TIMING3                              0x130
> +
> +#define ICPU_MEMCTRL_TIMING3_RMW_DLY(x)                   (((x) << 16) & GENMASK(19, 16))
> +#define ICPU_MEMCTRL_TIMING3_RMW_DLY_M                    GENMASK(19, 16)
> +#define ICPU_MEMCTRL_TIMING3_RMW_DLY_X(x)                 (((x) & GENMASK(19, 16)) >> 16)
> +#define ICPU_MEMCTRL_TIMING3_ODT_RD_DLY(x)                (((x) << 12) & GENMASK(15, 12))
> +#define ICPU_MEMCTRL_TIMING3_ODT_RD_DLY_M                 GENMASK(15, 12)
> +#define ICPU_MEMCTRL_TIMING3_ODT_RD_DLY_X(x)              (((x) & GENMASK(15, 12)) >> 12)
> +#define ICPU_MEMCTRL_TIMING3_ODT_WR_DLY(x)                (((x) << 8) & GENMASK(11, 8))
> +#define ICPU_MEMCTRL_TIMING3_ODT_WR_DLY_M                 GENMASK(11, 8)
> +#define ICPU_MEMCTRL_TIMING3_ODT_WR_DLY_X(x)              (((x) & GENMASK(11, 8)) >> 8)
> +#define ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY(x)          (((x) << 4) & GENMASK(7, 4))
> +#define ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY_M           GENMASK(7, 4)
> +#define ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY_X(x)        (((x) & GENMASK(7, 4)) >> 4)
> +#define ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY(x)    ((x) & GENMASK(3, 0))
> +#define ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY_M     GENMASK(3, 0)
> +
> +#define ICPU_MEMCTRL_MR0_VAL                              0x138
> +
> +#define ICPU_MEMCTRL_MR1_VAL                              0x13c
> +
> +#define ICPU_MEMCTRL_MR2_VAL                              0x140
> +
> +#define ICPU_MEMCTRL_MR3_VAL                              0x144
> +
> +#define ICPU_MEMCTRL_TERMRES_CTRL                         0x148
> +
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_EXT              BIT(11)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_ENA(x)           (((x) << 7) & GENMASK(10, 7))
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_ENA_M            GENMASK(10, 7)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_ENA_X(x)         (((x) & GENMASK(10, 7)) >> 7)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_EXT              BIT(6)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA(x)           (((x) << 2) & GENMASK(5, 2))
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA_M            GENMASK(5, 2)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA_X(x)         (((x) & GENMASK(5, 2)) >> 2)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_LOCAL_ODT_RD_EXT        BIT(1)
> +#define ICPU_MEMCTRL_TERMRES_CTRL_LOCAL_ODT_RD_ENA        BIT(0)
> +
> +#define ICPU_MEMCTRL_DQS_DLY(x) (0x150 + 0x4 * (x))
> +#define ICPU_MEMCTRL_DQS_DLY_RSZ                          0x4
> +
> +#define ICPU_MEMCTRL_DQS_DLY_TRAIN_DQ_ENA                 BIT(11)
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM1(x)              (((x) << 8) & GENMASK(10, 8))
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM1_M               GENMASK(10, 8)
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM1_X(x)            (((x) & GENMASK(10, 8)) >> 8)
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM0(x)              (((x) << 5) & GENMASK(7, 5))
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM0_M               GENMASK(7, 5)
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM0_X(x)            (((x) & GENMASK(7, 5)) >> 5)
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY(x)                   ((x) & GENMASK(4, 0))
> +#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_M                    GENMASK(4, 0)
> +
> +#define ICPU_MEMPHY_CFG                                   0x160
> +
> +#define ICPU_MEMPHY_CFG_PHY_FLUSH_DIS                     BIT(10)
> +#define ICPU_MEMPHY_CFG_PHY_RD_ADJ_DIS                    BIT(9)
> +#define ICPU_MEMPHY_CFG_PHY_DQS_EXT                       BIT(8)
> +#define ICPU_MEMPHY_CFG_PHY_FIFO_RST                      BIT(7)
> +#define ICPU_MEMPHY_CFG_PHY_DLL_BL_RST                    BIT(6)
> +#define ICPU_MEMPHY_CFG_PHY_DLL_CL_RST                    BIT(5)
> +#define ICPU_MEMPHY_CFG_PHY_ODT_OE                        BIT(4)
> +#define ICPU_MEMPHY_CFG_PHY_CK_OE                         BIT(3)
> +#define ICPU_MEMPHY_CFG_PHY_CL_OE                         BIT(2)
> +#define ICPU_MEMPHY_CFG_PHY_SSTL_ENA                      BIT(1)
> +#define ICPU_MEMPHY_CFG_PHY_RST                           BIT(0)
> +
> +#define ICPU_MEMPHY_ZCAL                                  0x188
> +
> +#define ICPU_MEMPHY_ZCAL_ZCAL_CLK_SEL                     BIT(9)
> +#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT(x)                 (((x) << 5) & GENMASK(8, 5))
> +#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT_M                  GENMASK(8, 5)
> +#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT_X(x)               (((x) & GENMASK(8, 5)) >> 5)
> +#define ICPU_MEMPHY_ZCAL_ZCAL_PROG(x)                     (((x) << 1) & GENMASK(4, 1))
> +#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_M                      GENMASK(4, 1)
> +#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_X(x)                   (((x) & GENMASK(4, 1)) >> 1)
> +#define ICPU_MEMPHY_ZCAL_ZCAL_ENA                         BIT(0)
> +
> +#define ICPU_MEMPHY_ZCAL_STAT                             0x18c
> +
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ZCTRL(x)               (((x) << 12) & GENMASK(31, 12))
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ZCTRL_M                GENMASK(31, 12)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ZCTRL_X(x)             (((x) & GENMASK(31, 12)) >> 12)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPU(x)          (((x) << 8) & GENMASK(9, 8))
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPU_M           GENMASK(9, 8)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPU_X(x)        (((x) & GENMASK(9, 8)) >> 8)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPD(x)          (((x) << 6) & GENMASK(7, 6))
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPD_M           GENMASK(7, 6)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPD_X(x)        (((x) & GENMASK(7, 6)) >> 6)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PU(x)             (((x) << 4) & GENMASK(5, 4))
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PU_M              GENMASK(5, 4)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PU_X(x)           (((x) & GENMASK(5, 4)) >> 4)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PD(x)             (((x) << 2) & GENMASK(3, 2))
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PD_M              GENMASK(3, 2)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PD_X(x)           (((x) & GENMASK(3, 2)) >> 2)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ERR                    BIT(1)
> +#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_DONE                   BIT(0)
> +#endif
> diff --git a/arch/mips/mach-mscc/include/mach/tlb.h b/arch/mips/mach-mscc/include/mach/tlb.h
> new file mode 100644
> index 0000000000..fdb554f551
> --- /dev/null
> +++ b/arch/mips/mach-mscc/include/mach/tlb.h
> @@ -0,0 +1,55 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#ifndef __ASM_MACH_TLB_H
> +#define __ASM_MACH_TLB_H
> +
> +#include <asm/mipsregs.h>
> +#include <mach/common.h>
> +#include <linux/sizes.h>
> +
> +#define TLB_HI_MASK      0xffffe000
> +#define TLB_LO_MASK      0x3fffffff	/* Masks off Fill bits */
> +#define TLB_LO_SHIFT     6	/* PFN Start bit */
> +
> +#define PAGEMASK_SHIFT   13
> +
> +#define MMU_PAGE_CACHED   (3 << 3)	/* C(5:3) Cache Coherency Attributes */
> +#define MMU_PAGE_UNCACHED (2 << 3)	/* C(5:3) Cache Coherency Attributes */
> +#define MMU_PAGE_DIRTY    BIT(2)	/* = Writeable */
> +#define MMU_PAGE_VALID    BIT(1)
> +#define MMU_PAGE_GLOBAL   BIT(0)
> +#define MMU_REGIO_RO_C    (MMU_PAGE_CACHED | MMU_PAGE_VALID | MMU_PAGE_GLOBAL)
> +#define MMU_REGIO_RO      (MMU_PAGE_UNCACHED | MMU_PAGE_VALID | MMU_PAGE_GLOBAL)
> +#define MMU_REGIO_RW      (MMU_PAGE_DIRTY | MMU_REGIO_RO)
> +#define MMU_REGIO_INVAL   (MMU_PAGE_GLOBAL)
> +
> +#define TLB_COUNT_MASK	  GENMASK(5, 0)
> +#define TLB_COUNT_OFF	  25
> +
> +static inline u32 get_tlb_count(void)
> +{
> +	register u32 config1;
> +
> +	config1 = read_c0_config1();
> +	config1 >>= TLB_COUNT_OFF;
> +	config1 &= TLB_COUNT_MASK;
> +
> +	return 1 + config1;
> +}
> +
> +static inline void create_tlb(int index, u32 offset, u32 size, u32 tlb_attrib1,
> +			      u32 tlb_attrib2)
> +{
> +	register u32 tlb_mask, tlb_lo0, tlb_lo1;
> +
> +	tlb_mask = ((size >> 12) - 1) << PAGEMASK_SHIFT;
> +	tlb_lo0 = tlb_attrib1 | (offset >> TLB_LO_SHIFT);
> +	tlb_lo1 = tlb_attrib2 | ((offset + size) >> TLB_LO_SHIFT);
> +
> +	write_one_tlb(index, tlb_mask, offset & TLB_HI_MASK,
> +		      tlb_lo0 & TLB_LO_MASK, tlb_lo1 & TLB_LO_MASK);
> +}
> +#endif				/* __ASM_MACH_TLB_H */
> diff --git a/arch/mips/mach-mscc/lowlevel_init.S b/arch/mips/mach-mscc/lowlevel_init.S
> new file mode 100644
> index 0000000000..8e4f0d02c8
> --- /dev/null
> +++ b/arch/mips/mach-mscc/lowlevel_init.S
> @@ -0,0 +1,23 @@
> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#include <asm/asm.h>
> +#include <asm/regdef.h>
> +
> +    .set noreorder
> +    .extern     vcoreiii_tlb_init
> +
> +LEAF(lowlevel_init)
> +	/*
> +	 * As we have no stack yet, we can assume the restricted
> +	 * luxury of the sX-registers without saving them
> +	 */
> +	move	s0,ra
> +
> +	jal	vcoreiii_tlb_init
> +	nop

we use the same style as Linux MIPS where instructions in the delay slot
should be indented by an extra space.

> +	jr	s0
> +	nop
> +	END(lowlevel_init)
> diff --git a/arch/mips/mach-mscc/reset.c b/arch/mips/mach-mscc/reset.c
> new file mode 100644
> index 0000000000..cbc1fd2285
> --- /dev/null
> +++ b/arch/mips/mach-mscc/reset.c
> @@ -0,0 +1,36 @@
> +// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
> +/*
> + * Copyright (c) 2018 Microsemi Corporation
> + */
> +
> +#include <common.h>
> +
> +#include <asm/sections.h>
> +#include <asm/io.h>
> +
> +#include <asm/reboot.h>
> +
> +void _machine_restart(void)
> +{
> +	register u32 resetbits = PERF_SOFT_RST_SOFT_CHIP_RST;
> +	(void)readl(BASE_DEVCPU_GCB + PERF_SOFT_RST);
> +
> +	/*
> +	 * Make sure VCore is NOT protected from reset
> +	 */
> +	clrbits_le32(BASE_CFG + ICPU_RESET, ICPU_RESET_CORE_RST_PROTECT);
> +
> +	/*
> +	 * Change to SPI bitbang for SPI reset workaround...
> +	 */
> +	writel(ICPU_SW_MODE_SW_SPI_CS_OE(1) | ICPU_SW_MODE_SW_SPI_CS(1) |
> +	       ICPU_SW_MODE_SW_PIN_CTRL_MODE, BASE_CFG + ICPU_SW_MODE);
> +
> +	/*
> +	 * Do the global reset
> +	 */
> +	writel(resetbits, BASE_DEVCPU_GCB + PERF_SOFT_RST);
> +
> +	while (1)
> +		; /* NOP */
> +}
>
Gregory CLEMENT Dec. 13, 2018, 2:05 p.m. UTC | #2
Hi Daniel,
 
 On lun., déc. 10 2018, Daniel Schwierzeck <daniel.schwierzeck@gmail.com> wrote:

>> diff --git a/arch/mips/mach-mscc/include/ioremap.h b/arch/mips/mach-mscc/include/ioremap.h
>> new file mode 100644
>> index 0000000000..8ea5c65ce3
>> --- /dev/null
>> +++ b/arch/mips/mach-mscc/include/ioremap.h
>> @@ -0,0 +1,51 @@
>> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
>
> this line should start with a //. There are more files in this patch
> which need to be fixed.

Actually, according to the documentation (Licenses/README):
  The SPDX license identifier is added in form of a comment.  The comment
   style depends on the file type::

      C source:	// SPDX-License-Identifier: <SPDX License Expression>
      C header:	/* SPDX-License-Identifier: <SPDX License Expression> */

So for a C header file, /* comment */ is correct.

>
>> +/*
>> + * Copyright (c) 2018 Microsemi Corporation
>> + */
>> +
>> +#ifndef __ASM_MACH_MSCC_IOREMAP_H
>> +#define __ASM_MACH_MSCC_IOREMAP_H
>> +
>> +#include <linux/types.h>
>> +#include <mach/common.h>
>> +
>> +/*
>> + * Allow physical addresses to be fixed up to help peripherals located
>> + * outside the low 32-bit range -- generic pass-through version.
>> + */
>> +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr,
>> +					     phys_addr_t size)
>> +{
>> +	return phys_addr;
>> +}
>> +
>> +static inline int is_vcoreiii_internal_registers(phys_addr_t offset)
>> +{
>> +#if defined(CONFIG_ARCH_MSCC)
>
> this define is superfluous because this directory is only added to the
> include paths when CONFIG_ARCH_MSCC is selected

OK

>
>> +	if ((offset >= MSCC_IO_ORIGIN1_OFFSET &&
>> +	     offset < (MSCC_IO_ORIGIN1_OFFSET + MSCC_IO_ORIGIN1_SIZE)) ||
>> +	    (offset >= MSCC_IO_ORIGIN2_OFFSET &&
>> +	     offset < (MSCC_IO_ORIGIN2_OFFSET + MSCC_IO_ORIGIN2_SIZE)))
>> +		return 1;
>> +#endif
>> +
>> +	return 0;
>> +}

[...]

>> +/*
>> + * DDR memory sanity checking failed, tally and do hard reset
>> + *
>> + * NB: Assumes inlining as no stack is available!
>> + */
>> +static inline void hal_vcoreiii_ddr_failed(void)
>> +{
>> +	register u32 reset;
>> +
>> +	writel(readl(BASE_CFG + ICPU_GPR(6)) + 1, BASE_CFG + ICPU_GPR(6));
>> +
>> +	clrbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
>> +
>> +	/* Jump to reset - does not return */
>> +	reset = KSEG0ADDR(_machine_restart);
>> +	/* Reset while running from cache */
>> +	icache_lock((void *)reset, 128);
>> +	asm volatile ("jr %0"::"r" (reset));
>
> could you briefly describe the reason for this in a comment? It's not
> clear why this code is necessary without knowing the SoC. AFAIU from
> your last mail the boot SPI flash is mapped to KUSEG and you need to
> establish a TLB mapping in lowlevel_init() to be able to move to
> KSEG0.

The reboot workaround in _machine_restart() will change the SPI NOR
into SW bitbang.

This will render the CPU unable to execute directly from the NOR, which
is why the reset instructions are prefetched into the I-cache.

When failing the DDR initialization we are executing from NOR.

The last instruction in _machine_restart() will reset the MIPS CPU
(and the cache), and the CPU will start executing from the reset vector.

I will add this explanation as comment.

>
>> +
>> +	panic("DDR init failed\n");
>> +}
>> +
>> +/*
>> + * DDR memory sanity checking done, possibly enable ECC.
>> + *
>> + * NB: Assumes inlining as no stack is available!
>> + */
>> +static inline void hal_vcoreiii_ddr_verified(void)
>> +{
>> +#ifdef MIPS_VCOREIII_MEMORY_ECC
>> +	/* Finally, enable ECC */
>> +	register u32 val = readl(BASE_CFG + ICPU_MEMCTRL_CFG);
>> +
>> +	val |= ICPU_MEMCTRL_CFG_DDR_ECC_ERR_ENA;
>> +	val &= ~ICPU_MEMCTRL_CFG_BURST_SIZE;
>> +
>> +	writel(val, BASE_CFG + ICPU_MEMCTRL_CFG);
>> +#endif
>> +
>> +	/* Reset Status register - sticky bits */
>> +	writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT), BASE_CFG + ICPU_MEMCTRL_STAT);
>> +}
>> +
>> +/* NB: Assumes inlining as no stack is available! */
>> +static inline int look_for(u32 bytelane)
>> +{
>> +	register u32 i;
>> +
>> +	/* Reset FIFO in case any previous access failed */
>> +	for (i = 0; i < sizeof(training_data); i++) {
>> +		register u32 byte;
>> +
>> +		memphy_soft_reset();
>> +		/* Reset sticky bits */
>> +		writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
>> +		       BASE_CFG + ICPU_MEMCTRL_STAT);
>> +		/* Read data */
>> +		byte = ((volatile u8 *)MSCC_DDR_TO)[bytelane + (i * 4)];
>
> __raw_readl()?

I had tried it before but without luck, but after trying harder this
time I managed to use read(b|l)/write(b|l) everywhere and get rid of
the volatile variable.

>
>> +		/*
>> +		 * Prevent the compiler reordering the instruction so
>> +		 * the read of RAM happens after the check of the
>> +		 * errors.
>> +		 */
>> +		asm volatile("" : : : "memory");
>
> this is available as barrier(). But according to context you could use
> rmb(). Anyway with the current volatile pointer or the suggested
> __raw_readl() the compiler shouldn't reorder at all

I had a close look at the code generating the __raw_readl and there is
nothing there to guarantee the ordering. Actually in our case (32 bits)
__raw_readl is just:
static inline u32 __raw_readl(const volatile void __iomem *mem)
{
	u32 __val;

	__val = *mem;
	return __val;
}

initial code is here:
https://elixir.bootlin.com/u-boot/v2018.11-rc3/source/arch/mips/include/asm/io.h#L265
but __swizzle_addr_l() did nothing
https://elixir.bootlin.com/u-boot/v2018.11-rc3/source/arch/mips/include/asm/mach-generic/mangle-port.h#L10
same for __raw_ioswabl():
https://elixir.bootlin.com/u-boot/v2018.11-rc3/source/arch/mips/include/asm/io.h#L35

So the code is the same as what we have written. I agree it is cleaner
to use __raw_readl but it doesn't add anything about the ordering.

It is the same for the use of the volatile, it ensures that the compiler
will always produce an operation to read the data in memory, but it is
not about ordering.

As you suggested I will use rmb();

>> +static inline int hal_vcoreiii_train_bytelane(u32 bytelane)
>> +{
>> +	register int res;
>> +	register u32 dqs_s;
>> +
>> +	set_dly(bytelane, 0);	// Start training at DQS=0
>
> no C++ style comments

OK

>
>> +	while ((res = look_for(bytelane)) == DDR_TRAIN_CONTINUE)
>> +		;
>> +	if (res != DDR_TRAIN_OK)
>> +		return res;
>> +
>> +	dqs_s = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
>> +	while ((res = look_past(bytelane)) == DDR_TRAIN_CONTINUE)
>> +		;
>> +	if (res != DDR_TRAIN_OK)
>> +		return res;
>> +	/* Reset FIFO - for good measure */
>> +	memphy_soft_reset();
>> +	/* Adjust to center [dqs_s;cur] */
>> +	center_dly(bytelane, dqs_s);
>> +	return DDR_TRAIN_OK;
>> +}
>> +
>> +/* This algorithm is converted from the TCL training algorithm used
>> + * during silicon simulation.
>> + * NB: Assumes inlining as no stack is available!
>> + */
>> +static inline int hal_vcoreiii_init_dqs(void)
>> +{
>> +#define MAX_DQS 32
>> +	register u32 i, j;
>> +
>> +	for (i = 0; i < MAX_DQS; i++) {
>> +		set_dly(0, i);	// Byte-lane 0
>
> no C++ style comments

OK

>
>> +		for (j = 0; j < MAX_DQS; j++) {
>> +			register u32 __attribute__ ((unused)) byte;
>
> why unused? If you really need it, you could use __maybe_unused

Because the purpose of this variable is just to access the memory, we
don't do anything with the value read, and gcc complains about it. But as
you suggest I will use __maybe_unused.

>
>> +			set_dly(1, j);	// Byte-lane 1
>> +			/* Reset FIFO in case any previous access failed */
>> +			memphy_soft_reset();
>> +			writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
>> +			       BASE_CFG + ICPU_MEMCTRL_STAT);
>> +			byte = ((volatile u8 *)MSCC_DDR_TO)[0];
>> +			byte = ((volatile u8 *)MSCC_DDR_TO)[1];
>> +			if (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
>> +			    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
>> +			     ICPU_MEMCTRL_STAT_RDATA_DUMMY)))
>> +				return 0;
>> +		}
>> +	}
>> +	return -1;
>> +}
>> +
>> +static inline int dram_check(void)
>> +{
>> +#define DDR ((volatile u32 *) MSCC_DDR_TO)
>> +	register u32 i;
>> +
>> +	for (i = 0; i < 8; i++) {
>> +		DDR[i] = ~i;
>> +		if (DDR[i] != ~i)
>
> __raw_readl(), __raw_writel() and drop the explicit volatile?

Yes, as explained above, it's done now.

>> +
>> +/*
>> + * Target offset base(s)
>> + */
>> +#define MSCC_IO_ORIGIN1_OFFSET 0x70000000
>> +#define MSCC_IO_ORIGIN1_SIZE   0x00200000
>> +#define MSCC_IO_ORIGIN2_OFFSET 0x71000000
>> +#define MSCC_IO_ORIGIN2_SIZE   0x01000000
>> +#define BASE_CFG        ((void __iomem *)0x70000000)
>> +#define BASE_DEVCPU_GCB ((void __iomem *)0x71070000)
>
> Would it be possible on that SoC to define those register offsets as
> simple physical address and create the mapping when needed?
> For example:
>
> void foo()
> {
>     void __iomem *base_cfg = ioremap(BASE_CFG, ...);
>     writel(base_cfg + XXX, 0);
> }

Actually creating the mapping is just casting the physical address to a
(void __iomem *), see our plat_ioremap.

Calling ioremap in every function will just grow them with little
benefit.

If you really want it, what I could do is share void __iomem *base_cfg
and void __iomem *base_devcpu_gcb at platform level, and initialize them
only once, very early during boot.


>> +LEAF(lowlevel_init)
>> +	/*
>> +	 * As we have no stack yet, we can assume the restricted
>> +	 * luxury of the sX-registers without saving them
>> +	 */
>> +	move	s0,ra
>> +
>> +	jal	vcoreiii_tlb_init
>> +	nop
>
> we use the same style as Linux MIPS where instructions in the delay slot
> should be indented by an extra space.

OK

Thanks,

Gregory
Daniel Schwierzeck Dec. 13, 2018, 2:55 p.m. UTC | #3
Am Do., 13. Dez. 2018 um 15:05 Uhr schrieb Gregory CLEMENT
<gregory.clement@bootlin.com>:
>
> Hi Daniel,
>
>  On lun., déc. 10 2018, Daniel Schwierzeck <daniel.schwierzeck@gmail.com> wrote:
>
> >> diff --git a/arch/mips/mach-mscc/include/ioremap.h b/arch/mips/mach-mscc/include/ioremap.h
> >> new file mode 100644
> >> index 0000000000..8ea5c65ce3
> >> --- /dev/null
> >> +++ b/arch/mips/mach-mscc/include/ioremap.h
> >> @@ -0,0 +1,51 @@
> >> +/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
> >
> > this line should start with a //. There are more files in this patch
> > which need to be fixed.
>
> Actually, according to the documentation (Licenses/README):
>   The SPDX license identifier is added in form of a comment.  The comment
>    style depends on the file type::
>
>       C source: // SPDX-License-Identifier: <SPDX License Expression>
>       C header: /* SPDX-License-Identifier: <SPDX License Expression> */
>
> So for a C header file, /* comment */ is correct.

oh sorry, I missed that there is a difference

>
> >
> >> +/*
> >> + * Copyright (c) 2018 Microsemi Corporation
> >> + */
> >> +
> >> +#ifndef __ASM_MACH_MSCC_IOREMAP_H
> >> +#define __ASM_MACH_MSCC_IOREMAP_H
> >> +
> >> +#include <linux/types.h>
> >> +#include <mach/common.h>
> >> +
> >> +/*
> >> + * Allow physical addresses to be fixed up to help peripherals located
> >> + * outside the low 32-bit range -- generic pass-through version.
> >> + */
> >> +static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr,
> >> +                                         phys_addr_t size)
> >> +{
> >> +    return phys_addr;
> >> +}
> >> +
> >> +static inline int is_vcoreiii_internal_registers(phys_addr_t offset)
> >> +{
> >> +#if defined(CONFIG_ARCH_MSCC)
> >
> > this define is superfluous because this directory is only added to the
> > include paths when CONFIG_ARCH_MSCC is selected
>
> OK
>
> >
> >> +    if ((offset >= MSCC_IO_ORIGIN1_OFFSET &&
> >> +         offset < (MSCC_IO_ORIGIN1_OFFSET + MSCC_IO_ORIGIN1_SIZE)) ||
> >> +        (offset >= MSCC_IO_ORIGIN2_OFFSET &&
> >> +         offset < (MSCC_IO_ORIGIN2_OFFSET + MSCC_IO_ORIGIN2_SIZE)))
> >> +            return 1;
> >> +#endif
> >> +
> >> +    return 0;
> >> +}
>
> [...]
>
> >> +/*
> >> + * DDR memory sanity checking failed, tally and do hard reset
> >> + *
> >> + * NB: Assumes inlining as no stack is available!
> >> + */
> >> +static inline void hal_vcoreiii_ddr_failed(void)
> >> +{
> >> +    register u32 reset;
> >> +
> >> +    writel(readl(BASE_CFG + ICPU_GPR(6)) + 1, BASE_CFG + ICPU_GPR(6));
> >> +
> >> +    clrbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
> >> +
> >> +    /* Jump to reset - does not return */
> >> +    reset = KSEG0ADDR(_machine_restart);
> >> +    /* Reset while running from cache */
> >> +    icache_lock((void *)reset, 128);
> >> +    asm volatile ("jr %0"::"r" (reset));
> >
> > could you briefly describe the reason for this in a comment? It's not
> > clear why this code is necessary without knowing the SoC. AFAIU from
> > your last mail the boot SPI flash is mapped to KUSEG and you need to
> > establish a TLB mapping in lowlevel_init() to be able to move to
> > KSEG0.
>
> The reboot workaround in _machine_restart() will change the SPI NOR
> into SW bitbang.
>
> This will render the CPU unable to execute directly from the NOR, which
> is why the reset instructions are prefetched into the I-cache.
>
> When failing the DDR initialization we are executing from NOR.
>
> The last instruction in _machine_restart() will reset the MIPS CPU
> (and the cache), and the CPU will start executing from the reset vactor.
>
> I will add this explanation as comment.

thanks

>
> >
> >> +
> >> +    panic("DDR init failed\n");
> >> +}
> >> +
> >> +/*
> >> + * DDR memory sanity checking done, possibly enable ECC.
> >> + *
> >> + * NB: Assumes inlining as no stack is available!
> >> + */
> >> +static inline void hal_vcoreiii_ddr_verified(void)
> >> +{
> >> +#ifdef MIPS_VCOREIII_MEMORY_ECC
> >> +    /* Finally, enable ECC */
> >> +    register u32 val = readl(BASE_CFG + ICPU_MEMCTRL_CFG);
> >> +
> >> +    val |= ICPU_MEMCTRL_CFG_DDR_ECC_ERR_ENA;
> >> +    val &= ~ICPU_MEMCTRL_CFG_BURST_SIZE;
> >> +
> >> +    writel(val, BASE_CFG + ICPU_MEMCTRL_CFG);
> >> +#endif
> >> +
> >> +    /* Reset Status register - sticky bits */
> >> +    writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT), BASE_CFG + ICPU_MEMCTRL_STAT);
> >> +}
> >> +
> >> +/* NB: Assumes inlining as no stack is available! */
> >> +static inline int look_for(u32 bytelane)
> >> +{
> >> +    register u32 i;
> >> +
> >> +    /* Reset FIFO in case any previous access failed */
> >> +    for (i = 0; i < sizeof(training_data); i++) {
> >> +            register u32 byte;
> >> +
> >> +            memphy_soft_reset();
> >> +            /* Reset sticky bits */
> >> +            writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
> >> +                   BASE_CFG + ICPU_MEMCTRL_STAT);
> >> +            /* Read data */
> >> +            byte = ((volatile u8 *)MSCC_DDR_TO)[bytelane + (i * 4)];
> >
> > __raw_readl()?
>
> I had tried it before but without luck, but after trying harder this
> time I managed to use read(b|l)/write(b|l) everywhere and get ride of
> the volatile variable.
>
> >
> >> +            /*
> >> +             * Prevent the compiler reordering the instruction so
> >> +             * the read of RAM happens after the check of the
> >> +             * errors.
> >> +             */
> >> +            asm volatile("" : : : "memory");
> >
> > this is available as barrier(). But according to context you could use
> > rmb(). Anyway with the current volatile pointer or the suggested
> > __raw_readl() the compiler shouldn't reorder at all
>
> I had a close look on the code generating the __raw_readl and there is
> nothing there to guaranty the ordering. Actually in our case (32 bits)
> __read_readl is just:
> static inline u32 __raw_readl(const volatile void __iomem *mem)
> {
>         u32 __val;
>
>         __val = *mem;
>         return __val;
> }
>
> initial code is here:
> https://elixir.bootlin.com/u-boot/v2018.11-rc3/source/arch/mips/include/asm/io.h#L265
> but __swizzle_addr_l() did nothing
> https://elixir.bootlin.com/u-boot/v2018.11-rc3/source/arch/mips/include/asm/mach-generic/mangle-port.h#L10
> same for __raw_ioswabl():
> https://elixir.bootlin.com/u-boot/v2018.11-rc3/source/arch/mips/include/asm/io.h#L35
>
> So the code is the same that we have written. I agree it is cleaner
> to use __raw_readl but it doesn't add anything about the ordering.
>
> It is the same for the use of the volatile, it ensures that the compiler
> will always produce a operation to read the data in memory, but it is
> not about ordering.
>
> As you suggested I will use rmb();
>
> >> +static inline int hal_vcoreiii_train_bytelane(u32 bytelane)
> >> +{
> >> +    register int res;
> >> +    register u32 dqs_s;
> >> +
> >> +    set_dly(bytelane, 0);   // Start training at DQS=0
> >
> > no C++ style comments
>
> OK
>
> >
> >> +    while ((res = look_for(bytelane)) == DDR_TRAIN_CONTINUE)
> >> +            ;
> >> +    if (res != DDR_TRAIN_OK)
> >> +            return res;
> >> +
> >> +    dqs_s = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
> >> +    while ((res = look_past(bytelane)) == DDR_TRAIN_CONTINUE)
> >> +            ;
> >> +    if (res != DDR_TRAIN_OK)
> >> +            return res;
> >> +    /* Reset FIFO - for good measure */
> >> +    memphy_soft_reset();
> >> +    /* Adjust to center [dqs_s;cur] */
> >> +    center_dly(bytelane, dqs_s);
> >> +    return DDR_TRAIN_OK;
> >> +}
> >> +
> >> +/* This algorithm is converted from the TCL training algorithm used
> >> + * during silicon simulation.
> >> + * NB: Assumes inlining as no stack is available!
> >> + */
> >> +static inline int hal_vcoreiii_init_dqs(void)
> >> +{
> >> +#define MAX_DQS 32
> >> +    register u32 i, j;
> >> +
> >> +    for (i = 0; i < MAX_DQS; i++) {
> >> +            set_dly(0, i);  // Byte-lane 0
> >
> > no C++ style comments
>
> OK
>
> >
> >> +            for (j = 0; j < MAX_DQS; j++) {
> >> +                    register u32 __attribute__ ((unused)) byte;
> >
> > why unused? If you really need it, you could use __maybe_unused
>
> Because the purpose of this variable is just to access the memory, we
> don't do nothing of the value read, and gcc complain about it. But as
> you suggest I will use __maybe_unused.
>
> >
> >> +                    set_dly(1, j);  // Byte-lane 1
> >> +                    /* Reset FIFO in case any previous access failed */
> >> +                    memphy_soft_reset();
> >> +                    writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
> >> +                           BASE_CFG + ICPU_MEMCTRL_STAT);
> >> +                    byte = ((volatile u8 *)MSCC_DDR_TO)[0];
> >> +                    byte = ((volatile u8 *)MSCC_DDR_TO)[1];
> >> +                    if (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
> >> +                        (ICPU_MEMCTRL_STAT_RDATA_MASKED |
> >> +                         ICPU_MEMCTRL_STAT_RDATA_DUMMY)))
> >> +                            return 0;
> >> +            }
> >> +    }
> >> +    return -1;
> >> +}
> >> +
> >> +static inline int dram_check(void)
> >> +{
> >> +#define DDR ((volatile u32 *) MSCC_DDR_TO)
> >> +    register u32 i;
> >> +
> >> +    for (i = 0; i < 8; i++) {
> >> +            DDR[i] = ~i;
> >> +            if (DDR[i] != ~i)
> >
> > __raw_readl(), __raw_writel() and drop the explicit volatile?
>
> Yes, as explain above, it s done now.
>
> >> +
> >> +/*
> >> + * Target offset base(s)
> >> + */
> >> +#define MSCC_IO_ORIGIN1_OFFSET 0x70000000
> >> +#define MSCC_IO_ORIGIN1_SIZE   0x00200000
> >> +#define MSCC_IO_ORIGIN2_OFFSET 0x71000000
> >> +#define MSCC_IO_ORIGIN2_SIZE   0x01000000
> >> +#define BASE_CFG        ((void __iomem *)0x70000000)
> >> +#define BASE_DEVCPU_GCB ((void __iomem *)0x71070000)
> >
> > Would it be possible on that SoC to define those register offsets as
> > simple physical address and create the mapping when needed?
> > For example:
> >
> > void foo()
> > {
> >     void __iomem *base_cfg = ioremap(BASE_CFG, ...);
> >     writel(base_cfg + XXX, 0);
> > }
>
> Actually creating the mapping is just casting the physical address in an
> (void __iomem *), see our plat_ioremap.
>
> Calling ioremap in every function will just grow them with little
> benefit.
>
> If you really want it, what I could is sharing void __iomem *base_cfg
> and void __iomem *base_devcpu_gcb at platform level, and initialize them
> only once very early during the boot.

ok, it was only a question. Normally ioremap should be completely optimised
away by the compiler.

>
>
> >> +LEAF(lowlevel_init)
> >> +    /*
> >> +     * As we have no stack yet, we can assume the restricted
> >> +     * luxury of the sX-registers without saving them
> >> +     */
> >> +    move    s0,ra
> >> +
> >> +    jal     vcoreiii_tlb_init
> >> +    nop
> >
> > we use the same style as Linux MIPS where instructions in the delay slot
> > should be indented by an extra space.
>
> OK
>
> Thanks,
>
> Gregory
>
> --
> Gregory Clement, Bootlin
> Embedded Linux and Kernel engineering
> http://bootlin.com
diff mbox series

Patch

diff --git a/MAINTAINERS b/MAINTAINERS
index abdb6dcdb5..53a3c5bec6 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -484,6 +484,13 @@  S:	Maintained
 T:	git git://git.denx.de/u-boot-mips.git
 F:	arch/mips/
 
+MIPS MSCC
+M:	Gregory CLEMENT <gregory.clement@bootlin.com>
+M:	Lars Povlsen <lars.povlsen@microchip.com>
+M:	Horatiu Vultur <horatiu.vultur@microchip.com>
+S:	Maintained
+F:	arch/mips/mach-mscc/
+
 MMC
 M:	Jaehoon Chung <jh80.chung@samsung.com>
 S:	Maintained
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 6d646ef999..bfe9c11069 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -59,6 +59,11 @@  config ARCH_ATH79
 	select OF_CONTROL
 	imply CMD_DM
 
+config ARCH_MSCC
+	bool "Support MSCC VCore-III"
+	select OF_CONTROL
+	select DM
+
 config ARCH_BMIPS
 	bool "Support BMIPS SoCs"
 	select CLK
@@ -135,6 +140,7 @@  source "board/imgtec/xilfpga/Kconfig"
 source "board/micronas/vct/Kconfig"
 source "board/qemu-mips/Kconfig"
 source "arch/mips/mach-ath79/Kconfig"
+source "arch/mips/mach-mscc/Kconfig"
 source "arch/mips/mach-bmips/Kconfig"
 source "arch/mips/mach-pic32/Kconfig"
 source "arch/mips/mach-mt7620/Kconfig"
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 802244a06e..124e93fa26 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -15,6 +15,7 @@  machine-$(CONFIG_ARCH_ATH79) += ath79
 machine-$(CONFIG_ARCH_BMIPS) += bmips
 machine-$(CONFIG_MACH_PIC32) += pic32
 machine-$(CONFIG_ARCH_MT7620) += mt7620
+machine-$(CONFIG_ARCH_MSCC) += mscc
 
 machdirs := $(patsubst %,arch/mips/mach-%/,$(machine-y))
 libs-y += $(machdirs)
diff --git a/arch/mips/mach-mscc/Kconfig b/arch/mips/mach-mscc/Kconfig
new file mode 100644
index 0000000000..7f1b270207
--- /dev/null
+++ b/arch/mips/mach-mscc/Kconfig
@@ -0,0 +1,69 @@ 
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+menu "MSCC VCore-III platforms"
+	depends on ARCH_MSCC
+
+config SOC_VCOREIII
+	select MIPS_TUNE_24KC
+	select ROM_EXCEPTION_VECTORS
+	select SUPPORTS_BIG_ENDIAN
+	select SUPPORTS_CPU_MIPS32_R1
+	select SUPPORTS_CPU_MIPS32_R2
+	select SUPPORTS_LITTLE_ENDIAN
+	bool
+
+config SYS_SOC
+	default "mscc"
+
+config SOC_OCELOT
+	bool
+	select SOC_VCOREIII
+	help
+	  This supports MSCC Ocelot family of SOCs.
+
+config SYS_CONFIG_NAME
+	default "vcoreiii"
+
+choice
+	prompt "Board select"
+
+config TARGET_OCELOT_PCB120
+	bool "MSCC PCB120 Reference Board (aka VSC5635EV)"
+	select SOC_OCELOT
+	help
+	  When selected, CONFIG_DEFAULT_DEVICE_TREE should be set to
+	  ocelot_pcb120
+
+config TARGET_OCELOT_PCB123
+	bool "MSCC PCB123 Reference Board (aka VSC7514EV))"
+	select SOC_OCELOT
+	help
+	  When selected, CONFIG_DEFAULT_DEVICE_TREE should be set to
+	  ocelot_pcb123
+
+endchoice
+
+choice
+	prompt "DDR type"
+
+config DDRTYPE_H5TQ4G63MFR
+	bool "Hynix H5TQ4G63MFR-PBC (4Gbit, DDR3-800, 256Mbitx16)"
+
+config DDRTYPE_MT41K256M16
+	bool "Micron MT41K256M16 (4Gbit, DDR3L-800, 256Mbitx16)"
+
+config DDRTYPE_H5TQ1G63BFA
+	bool "Hynix H5TQ1G63BFA (1Gbit DDR3, x16)"
+
+config DDRTYPE_MT41J128M16HA
+	bool "Micron MT41J128M16HA-15E:D (2Gbit DDR3, x16)"
+
+config DDRTYPE_MT41K128M16JT
+	bool "Micron MT41K128M16JT-125 (2Gbit DDR3L, 128Mbitx16)"
+
+config DDRTYPE_MT47H128M8HQ
+	bool "Micron MT47H128M8-3 (1Gbit, DDR-533@CL4 @ 4.80ns 16Mbisx8x8)"
+
+endchoice
+
+endmenu
diff --git a/arch/mips/mach-mscc/Makefile b/arch/mips/mach-mscc/Makefile
new file mode 100644
index 0000000000..d14ec33838
--- /dev/null
+++ b/arch/mips/mach-mscc/Makefile
@@ -0,0 +1,5 @@ 
+# SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+
+CFLAGS_cpu.o += -finline-limit=64000
+
+obj-y += cpu.o dram.o reset.o lowlevel_init.o
diff --git a/arch/mips/mach-mscc/cpu.c b/arch/mips/mach-mscc/cpu.c
new file mode 100644
index 0000000000..b503e1407b
--- /dev/null
+++ b/arch/mips/mach-mscc/cpu.c
@@ -0,0 +1,90 @@ 
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#include <common.h>
+
+#include <asm/io.h>
+#include <asm/types.h>
+
+#include <mach/tlb.h>
+#include <mach/ddr.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+#if CONFIG_SYS_SDRAM_SIZE <= SZ_64M
+#define MSCC_RAM_TLB_SIZE   SZ_64M
+#define MSCC_ATTRIB2   MMU_REGIO_INVAL
+#elif CONFIG_SYS_SDRAM_SIZE <= SZ_128M
+#define MSCC_RAM_TLB_SIZE   SZ_64M
+#define MSCC_ATTRIB2   MMU_REGIO_RW
+#elif CONFIG_SYS_SDRAM_SIZE <= SZ_256M
+#define MSCC_RAM_TLB_SIZE   SZ_256M
+#define MSCC_ATTRIB2   MMU_REGIO_INVAL
+#elif CONFIG_SYS_SDRAM_SIZE <= SZ_512M
+#define MSCC_RAM_TLB_SIZE   SZ_256M
+#define MSCC_ATTRIB2   MMU_REGIO_RW
+#else
+#define MSCC_RAM_TLB_SIZE   SZ_512M
+#define MSCC_ATTRIB2   MMU_REGIO_RW
+#endif
+
+/* NOTE: lowlevel_init() function does not have access to the
+ * stack. Thus, all called functions must be inlined, and (any) local
+ * variables must be kept in registers.
+ */
+void vcoreiii_tlb_init(void)
+{
+	register int tlbix = 0;
+
+	/*
+	 * Unlike most of the MIPS based SoCs, the IO register address
+	 * are not in KSEG0. The mainline linux kernel built in legacy
+	 * mode needs to access some of the registers very early in
+	 * the boot and make the assumption that the bootloader has
+	 * already configured them, so we have to match this
+	 * expectation.
+	 */
+	create_tlb(tlbix++, MSCC_IO_ORIGIN1_OFFSET, SZ_16M, MMU_REGIO_RW,
+		   MMU_REGIO_RW);
+
+#if  CONFIG_SYS_TEXT_BASE == MSCC_FLASH_TO
+	/*
+	 * If U-Boot is located in NOR then we want to be able to use
+	 * the data cache in order to boot in a decent duration
+	 */
+	create_tlb(tlbix++, MSCC_FLASH_TO, SZ_16M, MMU_REGIO_RO_C,
+		   MMU_REGIO_RO_C);
+	create_tlb(tlbix++, MSCC_FLASH_TO + SZ_32M, SZ_16M, MMU_REGIO_RO_C,
+		   MMU_REGIO_RO_C);
+
+	/*
+	 * Using cache for RAM also helps to improve boot time. Thanks
+	 * to this the time to relocate U-Boot in RAM went from 2.092
+	 * secs to 0.104 secs.
+	 */
+	create_tlb(tlbix++, MSCC_DDR_TO, MSCC_RAM_TLB_SIZE, MMU_REGIO_RW,
+		   MSCC_ATTRIB2);
+
+	/* Enable caches by clearing the bit ERL, which is set on reset */
+	write_c0_status(read_c0_status() & ~BIT(2));
+#endif /* CONFIG_SYS_TEXT_BASE */
+}
+
+int mach_cpu_init(void)
+{
+	/* Speed up NOR flash access */
+	writel(ICPU_SPI_MST_CFG_CS_DESELECT_TIME(0x19) +
+	       ICPU_SPI_MST_CFG_CLK_DIV(9), BASE_CFG + ICPU_SPI_MST_CFG);
+	/*
+	 * Legacy and mainline linux kernel expect that the
+	 * interruption map was set as it was done by redboot.
+	 */
+	writel(~0, BASE_CFG + ICPU_DST_INTR_MAP(0));
+	writel(0, BASE_CFG + ICPU_DST_INTR_MAP(1));
+	writel(0, BASE_CFG + ICPU_DST_INTR_MAP(2));
+	writel(0, BASE_CFG + ICPU_DST_INTR_MAP(3));
+
+	return 0;
+}
diff --git a/arch/mips/mach-mscc/dram.c b/arch/mips/mach-mscc/dram.c
new file mode 100644
index 0000000000..5acee6f918
--- /dev/null
+++ b/arch/mips/mach-mscc/dram.c
@@ -0,0 +1,71 @@ 
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#include <common.h>
+
+#include <asm/io.h>
+#include <asm/types.h>
+
+#include <mach/tlb.h>
+#include <mach/ddr.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+static inline int vcoreiii_train_bytelane(void)
+{
+	int ret;
+
+	ret = hal_vcoreiii_train_bytelane(0);
+
+	if (ret)
+		return ret;
+	ret = hal_vcoreiii_train_bytelane(1);
+
+	return ret;
+}
+
+int vcoreiii_ddr_init(void)
+{
+	int res;
+
+	if (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT)
+	      & ICPU_MEMCTRL_STAT_INIT_DONE)) {
+		hal_vcoreiii_init_memctl();
+		hal_vcoreiii_wait_memctl();
+		if (hal_vcoreiii_init_dqs() || vcoreiii_train_bytelane())
+			hal_vcoreiii_ddr_failed();
+	}
+#if (CONFIG_SYS_TEXT_BASE != 0x20000000)
+	res = dram_check();
+	if (res == 0)
+		hal_vcoreiii_ddr_verified();
+	else
+		hal_vcoreiii_ddr_failed();
+
+	/* Clear boot-mode and read-back to activate/verify */
+	clrbits_le32(BASE_CFG + ICPU_GENERAL_CTRL,
+		     ICPU_GENERAL_CTRL_BOOT_MODE_ENA);
+	readl(BASE_CFG + ICPU_GENERAL_CTRL);
+#else
+	res = 0;
+#endif
+	return res;
+}
+
+int print_cpuinfo(void)
+{
+	printf("MSCC VCore-III MIPS 24Kec\n");
+
+	return 0;
+}
+
+int dram_init(void)
+{
+	while (vcoreiii_ddr_init())
+		;
+
+	gd->ram_size = CONFIG_SYS_SDRAM_SIZE;
+	return 0;
+}
diff --git a/arch/mips/mach-mscc/include/ioremap.h b/arch/mips/mach-mscc/include/ioremap.h
new file mode 100644
index 0000000000..8ea5c65ce3
--- /dev/null
+++ b/arch/mips/mach-mscc/include/ioremap.h
@@ -0,0 +1,51 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef __ASM_MACH_MSCC_IOREMAP_H
+#define __ASM_MACH_MSCC_IOREMAP_H
+
+#include <linux/types.h>
+#include <mach/common.h>
+
+/*
+ * Allow physical addresses to be fixed up to help peripherals located
+ * outside the low 32-bit range -- generic pass-through version.
+ */
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr,
+					     phys_addr_t size)
+{
+	return phys_addr;
+}
+
+static inline int is_vcoreiii_internal_registers(phys_addr_t offset)
+{
+#if defined(CONFIG_ARCH_MSCC)
+	if ((offset >= MSCC_IO_ORIGIN1_OFFSET &&
+	     offset < (MSCC_IO_ORIGIN1_OFFSET + MSCC_IO_ORIGIN1_SIZE)) ||
+	    (offset >= MSCC_IO_ORIGIN2_OFFSET &&
+	     offset < (MSCC_IO_ORIGIN2_OFFSET + MSCC_IO_ORIGIN2_SIZE)))
+		return 1;
+#endif
+
+	return 0;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+					 unsigned long flags)
+{
+	if (is_vcoreiii_internal_registers(offset))
+		return (void __iomem *)offset;
+
+	return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+	return is_vcoreiii_internal_registers((unsigned long)addr);
+}
+
+#define _page_cachable_default	_CACHE_CACHABLE_NONCOHERENT
+
+#endif				/* __ASM_MACH_MSCC_IOREMAP_H */
diff --git a/arch/mips/mach-mscc/include/mach/common.h b/arch/mips/mach-mscc/include/mach/common.h
new file mode 100644
index 0000000000..842462aeed
--- /dev/null
+++ b/arch/mips/mach-mscc/include/mach/common.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef __ASM_MACH_COMMON_H
+#define __ASM_MACH_COMMON_H
+
+#if defined(CONFIG_SOC_OCELOT)
+#include <mach/ocelot/ocelot.h>
+#include <mach/ocelot/ocelot_devcpu_gcb.h>
+#include <mach/ocelot/ocelot_icpu_cfg.h>
+#else
+#error Unsupported platform
+#endif
+
+#define MSCC_DDR_TO	0x20000000	/* DDR RAM base offset */
+#define MSCC_MEMCTL1_TO	0x40000000	/* SPI/PI base offset */
+#define MSCC_MEMCTL2_TO	0x50000000	/* SPI/PI base offset */
+#define MSCC_FLASH_TO	MSCC_MEMCTL1_TO	/* Flash base offset */
+
+#define VCOREIII_TIMER_DIVIDER 25	/* Clock tick ~ 0.1 us */
+
+#endif				/* __ASM_MACH_COMMON_H */
diff --git a/arch/mips/mach-mscc/include/mach/ddr.h b/arch/mips/mach-mscc/include/mach/ddr.h
new file mode 100644
index 0000000000..4bdea90506
--- /dev/null
+++ b/arch/mips/mach-mscc/include/mach/ddr.h
@@ -0,0 +1,692 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef __ASM_MACH_DDR_H
+#define __ASM_MACH_DDR_H
+
+#include <asm/cacheops.h>
+#include <asm/io.h>
+#include <asm/reboot.h>
+#include <mach/common.h>
+
+#define MIPS_VCOREIII_MEMORY_DDR3
+#define MIPS_VCOREIII_DDR_SIZE CONFIG_SYS_SDRAM_SIZE
+
+#if defined(CONFIG_DDRTYPE_H5TQ1G63BFA)	/* Serval1 Refboard */
+
+/* Hynix H5TQ1G63BFA (1Gbit DDR3, x16) @ 3.20ns */
+#define VC3_MPAR_bank_addr_cnt    3
+#define VC3_MPAR_row_addr_cnt     13
+#define VC3_MPAR_col_addr_cnt     10
+#define VC3_MPAR_tREFI            2437
+#define VC3_MPAR_tRAS_min         12
+#define VC3_MPAR_CL               6
+#define VC3_MPAR_tWTR             4
+#define VC3_MPAR_tRC              16
+#define VC3_MPAR_tFAW             16
+#define VC3_MPAR_tRP              5
+#define VC3_MPAR_tRRD             4
+#define VC3_MPAR_tRCD             5
+#define VC3_MPAR_tMRD             4
+#define VC3_MPAR_tRFC             35
+#define VC3_MPAR_CWL              5
+#define VC3_MPAR_tXPR             38
+#define VC3_MPAR_tMOD             12
+#define VC3_MPAR_tDLLK            512
+#define VC3_MPAR_tWR              5
+
+#elif defined(CONFIG_DDRTYPE_MT41J128M16HA)	/* Validation board */
+
+/* Micron MT41J128M16HA-15E:D (2Gbit DDR3, x16) @ 3.20ns */
+#define VC3_MPAR_bank_addr_cnt    3
+#define VC3_MPAR_row_addr_cnt     14
+#define VC3_MPAR_col_addr_cnt     10
+#define VC3_MPAR_tREFI            2437
+#define VC3_MPAR_tRAS_min         12
+#define VC3_MPAR_CL               5
+#define VC3_MPAR_tWTR             4
+#define VC3_MPAR_tRC              16
+#define VC3_MPAR_tFAW             16
+#define VC3_MPAR_tRP              5
+#define VC3_MPAR_tRRD             4
+#define VC3_MPAR_tRCD             5
+#define VC3_MPAR_tMRD             4
+#define VC3_MPAR_tRFC             50
+#define VC3_MPAR_CWL              5
+#define VC3_MPAR_tXPR             54
+#define VC3_MPAR_tMOD             12
+#define VC3_MPAR_tDLLK            512
+#define VC3_MPAR_tWR              5
+
+#elif defined(CONFIG_DDRTYPE_MT41K256M16)	/* JR2 Validation board */
+
+/* Micron MT41K256M16 (4Gbit, DDR3L-800, 256Mbitx16) @ 3.20ns */
+#define VC3_MPAR_bank_addr_cnt    3
+#define VC3_MPAR_row_addr_cnt     15
+#define VC3_MPAR_col_addr_cnt     10
+#define VC3_MPAR_tREFI            2437
+#define VC3_MPAR_tRAS_min         12
+#define VC3_MPAR_CL               5
+#define VC3_MPAR_tWTR             4
+#define VC3_MPAR_tRC              16
+#define VC3_MPAR_tFAW             16
+#define VC3_MPAR_tRP              5
+#define VC3_MPAR_tRRD             4
+#define VC3_MPAR_tRCD             5
+#define VC3_MPAR_tMRD             4
+#define VC3_MPAR_tRFC             82
+#define VC3_MPAR_CWL              5
+#define VC3_MPAR_tXPR             85
+#define VC3_MPAR_tMOD             12
+#define VC3_MPAR_tDLLK            512
+#define VC3_MPAR_tWR              5
+
+#elif defined(CONFIG_DDRTYPE_H5TQ4G63MFR)	/* JR2 Reference board */
+
+/* Hynix H5TQ4G63MFR-PBC (4Gbit, DDR3-800, 256Mbitx16) - 2kb pages @ 3.20ns */
+#define VC3_MPAR_bank_addr_cnt    3
+#define VC3_MPAR_row_addr_cnt     15
+#define VC3_MPAR_col_addr_cnt     10
+#define VC3_MPAR_tREFI            2437
+#define VC3_MPAR_tRAS_min         12
+#define VC3_MPAR_CL               6
+#define VC3_MPAR_tWTR             4
+#define VC3_MPAR_tRC              17
+#define VC3_MPAR_tFAW             16
+#define VC3_MPAR_tRP              5
+#define VC3_MPAR_tRRD             4
+#define VC3_MPAR_tRCD             5
+#define VC3_MPAR_tMRD             4
+#define VC3_MPAR_tRFC             82
+#define VC3_MPAR_CWL              5
+#define VC3_MPAR_tXPR             85
+#define VC3_MPAR_tMOD             12
+#define VC3_MPAR_tDLLK            512
+#define VC3_MPAR_tWR              5
+
+#elif defined(CONFIG_DDRTYPE_MT41K128M16JT)
+
+/* Micron MT41K128M16JT-125 (2Gbit DDR3L, 128Mbitx16) @ 3.20ns */
+#define VC3_MPAR_bank_addr_cnt    3
+#define VC3_MPAR_row_addr_cnt     14
+#define VC3_MPAR_col_addr_cnt     10
+#define VC3_MPAR_tREFI            2437
+#define VC3_MPAR_tRAS_min         12
+#define VC3_MPAR_CL               6
+#define VC3_MPAR_tWTR             4
+#define VC3_MPAR_tRC              16
+#define VC3_MPAR_tFAW             16
+#define VC3_MPAR_tRP              5
+#define VC3_MPAR_tRRD             4
+#define VC3_MPAR_tRCD             5
+#define VC3_MPAR_tMRD             4
+#define VC3_MPAR_tRFC             82
+#define VC3_MPAR_CWL              5
+#define VC3_MPAR_tXPR             85
+#define VC3_MPAR_tMOD             12
+#define VC3_MPAR_tDLLK            512
+#define VC3_MPAR_tWR              5
+
+#elif defined(CONFIG_DDRTYPE_MT47H128M8HQ)	/* Luton10/26 Refboards */
+
+/* Micron 1Gb MT47H128M8-3 16Meg x 8 x 8 banks, DDR-533@CL4 @ 4.80ns */
+#define VC3_MPAR_bank_addr_cnt    3
+#define VC3_MPAR_row_addr_cnt     14
+#define VC3_MPAR_col_addr_cnt     10
+#define VC3_MPAR_tREFI            1625
+#define VC3_MPAR_tRAS_min         9
+#define VC3_MPAR_CL               4
+#define VC3_MPAR_tWTR             2
+#define VC3_MPAR_tRC              12
+#define VC3_MPAR_tFAW             8
+#define VC3_MPAR_tRP              4
+#define VC3_MPAR_tRRD             2
+#define VC3_MPAR_tRCD             4
+
+#define VC3_MPAR_tRPA             4
+#define VC3_MPAR_tRP              4
+
+#define VC3_MPAR_tMRD             2
+#define VC3_MPAR_tRFC             27
+
+#define VC3_MPAR__400_ns_dly      84
+
+#define VC3_MPAR_tWR              4
+#undef MIPS_VCOREIII_MEMORY_DDR3
+#else
+
+#error Unknown DDR system configuration - please add!
+
+#endif
+
+#ifdef CONFIG_SOC_OCELOT
+#define MIPS_VCOREIII_MEMORY_16BIT 1
+#endif
+
+#define MIPS_VCOREIII_MEMORY_SSTL_ODT 7
+#define MIPS_VCOREIII_MEMORY_SSTL_DRIVE 7
+#define VCOREIII_DDR_DQS_MODE_CALIBRATE
+
+#ifdef MIPS_VCOREIII_MEMORY_16BIT
+#define VC3_MPAR_16BIT       1
+#else
+#define VC3_MPAR_16BIT       0
+#endif
+
+#ifdef MIPS_VCOREIII_MEMORY_DDR3
+#define VC3_MPAR_DDR3_MODE    1	/* DDR3 */
+#define VC3_MPAR_BURST_LENGTH 8	/* Always 8 (1) for DDR3 */
+#ifdef MIPS_VCOREIII_MEMORY_16BIT
+#define VC3_MPAR_BURST_SIZE   1	/* Always 1 for DDR3/16bit */
+#else
+#define VC3_MPAR_BURST_SIZE   0
+#endif
+#else
+#define VC3_MPAR_DDR3_MODE    0	/* DDR2 */
+#ifdef MIPS_VCOREIII_MEMORY_16BIT
+#define VC3_MPAR_BURST_LENGTH 4	/* in DDR2 16-bit mode, use burstlen 4 */
+#else
+#define VC3_MPAR_BURST_LENGTH 8	/* For 8-bit IF we must run burst-8 */
+#endif
+#define VC3_MPAR_BURST_SIZE   0	/* Always 0 for DDR2 */
+#endif
+
+#define VC3_MPAR_RL VC3_MPAR_CL
+#if !defined(MIPS_VCOREIII_MEMORY_DDR3)
+#define VC3_MPAR_WL (VC3_MPAR_RL - 1)
+#define VC3_MPAR_MD VC3_MPAR_tMRD
+#define VC3_MPAR_ID VC3_MPAR__400_ns_dly
+#define VC3_MPAR_SD VC3_MPAR_tXSRD
+#define VC3_MPAR_OW (VC3_MPAR_WL - 2)
+#define VC3_MPAR_OR (VC3_MPAR_WL - 3)
+#define VC3_MPAR_RP (VC3_MPAR_bank_addr_cnt < 3 ? VC3_MPAR_tRP : VC3_MPAR_tRPA)
+#define VC3_MPAR_FAW (VC3_MPAR_bank_addr_cnt < 3 ? 1 : VC3_MPAR_tFAW)
+#define VC3_MPAR_BL (VC3_MPAR_BURST_LENGTH == 4 ? 2 : 4)
+#define MSCC_MEMPARM_MR0 \
+	(VC3_MPAR_BURST_LENGTH == 8 ? 3 : 2) | (VC3_MPAR_CL << 4) | \
+	((VC3_MPAR_tWR - 1) << 9)
+/* DLL-on, Full-OD, AL=0, RTT=off, nDQS-on, RDQS-off, out-en */
+#define MSCC_MEMPARM_MR1 0x382
+#define MSCC_MEMPARM_MR2 0
+#define MSCC_MEMPARM_MR3 0
+#else
+#define VC3_MPAR_WL VC3_MPAR_CWL
+#define VC3_MPAR_MD VC3_MPAR_tMOD
+#define VC3_MPAR_ID VC3_MPAR_tXPR
+#define VC3_MPAR_SD VC3_MPAR_tDLLK
+#define VC3_MPAR_OW 2
+#define VC3_MPAR_OR 2
+#define VC3_MPAR_RP VC3_MPAR_tRP
+#define VC3_MPAR_FAW VC3_MPAR_tFAW
+#define VC3_MPAR_BL 4
+#define MSCC_MEMPARM_MR0 ((VC3_MPAR_RL - 4) << 4) | ((VC3_MPAR_tWR - 4) << 9)
+/* ODT_RTT: "0x0040" for 120ohm, and "0x0004" for 60ohm. */
+#define MSCC_MEMPARM_MR1 0x0040
+#define MSCC_MEMPARM_MR2 ((VC3_MPAR_WL - 5) << 3)
+#define MSCC_MEMPARM_MR3 0
+#endif				/* MIPS_VCOREIII_MEMORY_DDR3 */
+
+#define MSCC_MEMPARM_MEMCFG                                             \
+	((MIPS_VCOREIII_DDR_SIZE > SZ_512M) ?				\
+	 ICPU_MEMCTRL_CFG_DDR_512MBYTE_PLUS : 0) |			\
+	(VC3_MPAR_16BIT ? ICPU_MEMCTRL_CFG_DDR_WIDTH : 0) |		\
+	(VC3_MPAR_DDR3_MODE ? ICPU_MEMCTRL_CFG_DDR_MODE : 0) |		\
+	(VC3_MPAR_BURST_SIZE ? ICPU_MEMCTRL_CFG_BURST_SIZE : 0) |	\
+	(VC3_MPAR_BURST_LENGTH == 8 ? ICPU_MEMCTRL_CFG_BURST_LEN : 0) | \
+	(VC3_MPAR_bank_addr_cnt == 3 ? ICPU_MEMCTRL_CFG_BANK_CNT : 0) | \
+	ICPU_MEMCTRL_CFG_MSB_ROW_ADDR(VC3_MPAR_row_addr_cnt - 1) |	\
+	ICPU_MEMCTRL_CFG_MSB_COL_ADDR(VC3_MPAR_col_addr_cnt - 1)
+
+#ifdef CONFIG_SOC_OCELOT
+#define MSCC_MEMPARM_PERIOD					\
+	ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF(8) |		\
+	ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD(VC3_MPAR_tREFI)
+
+#define MSCC_MEMPARM_TIMING0                                            \
+	ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY(VC3_MPAR_RL + VC3_MPAR_BL + 1 - \
+					  VC3_MPAR_WL) |		\
+	ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY(VC3_MPAR_BL - 1) |	\
+	ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY(VC3_MPAR_BL) |		\
+	ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY(VC3_MPAR_tRAS_min - 1) |	\
+	ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY(VC3_MPAR_WL +		\
+					     VC3_MPAR_BL +		\
+					     VC3_MPAR_tWR - 1) |	\
+	ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY(VC3_MPAR_BL - 1) |		\
+		ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY(VC3_MPAR_WL - 1) |	\
+	ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY(VC3_MPAR_RL - 3)
+
+#define MSCC_MEMPARM_TIMING1                                            \
+	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY(VC3_MPAR_tRC - 1) | \
+	ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY(VC3_MPAR_FAW - 1) |		\
+	ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY(VC3_MPAR_RP - 1) |	\
+	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY(VC3_MPAR_tRRD - 1) |	\
+	ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY(VC3_MPAR_tRCD - 1) |	\
+	ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY(VC3_MPAR_WL +			\
+					  VC3_MPAR_BL +			\
+					  VC3_MPAR_tWTR - 1)
+
+#define MSCC_MEMPARM_TIMING2					\
+	ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY(VC3_MPAR_RP - 1) |	\
+	ICPU_MEMCTRL_TIMING2_MDSET_DLY(VC3_MPAR_MD - 1) |		\
+	ICPU_MEMCTRL_TIMING2_REF_DLY(VC3_MPAR_tRFC - 1) |		\
+	ICPU_MEMCTRL_TIMING2_INIT_DLY(VC3_MPAR_ID - 1)
+
+#define MSCC_MEMPARM_TIMING3						\
+	ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY(VC3_MPAR_WL +	\
+						    VC3_MPAR_tWTR - 1) |\
+	ICPU_MEMCTRL_TIMING3_ODT_RD_DLY(VC3_MPAR_OR - 1) |		\
+	ICPU_MEMCTRL_TIMING3_ODT_WR_DLY(VC3_MPAR_OW - 1) |		\
+	ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY(VC3_MPAR_RL - 3)
+
+#else
+#define MSCC_MEMPARM_PERIOD					\
+	ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF(1) |		\
+	ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD(VC3_MPAR_tREFI)
+
+#define MSCC_MEMPARM_TIMING0                                            \
+	ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY(VC3_MPAR_tRAS_min - 1) |	\
+	ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY(VC3_MPAR_CL +		\
+					     (VC3_MPAR_BURST_LENGTH == 8 ? 2 : 0) + \
+					     VC3_MPAR_tWR) |		\
+	ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY(VC3_MPAR_BURST_LENGTH == 8 ? 3 : 1) | \
+	ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY(VC3_MPAR_CL - 3) |		\
+	ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY(VC3_MPAR_CL - 3)
+
+#define MSCC_MEMPARM_TIMING1                                            \
+	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY(VC3_MPAR_tRC - 1) | \
+	ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY(VC3_MPAR_tFAW - 1) |		\
+	ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY(VC3_MPAR_tRP - 1) |	\
+	ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY(VC3_MPAR_tRRD - 1) |	\
+	ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY(VC3_MPAR_tRCD - 1) |	\
+	ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY(VC3_MPAR_CL +			\
+					  (VC3_MPAR_BURST_LENGTH == 8 ? 2 : 0) + \
+					  VC3_MPAR_tWTR)
+#define MSCC_MEMPARM_TIMING2                                            \
+	ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY(VC3_MPAR_tRPA - 1) |		\
+	ICPU_MEMCTRL_TIMING2_MDSET_DLY(VC3_MPAR_tMRD - 1) |		\
+	ICPU_MEMCTRL_TIMING2_REF_DLY(VC3_MPAR_tRFC - 1) |		\
+	ICPU_MEMCTRL_TIMING2_FOUR_HUNDRED_NS_DLY(VC3_MPAR__400_ns_dly)
+
+#define MSCC_MEMPARM_TIMING3						\
+	ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY(VC3_MPAR_CL - 1) |	\
+	ICPU_MEMCTRL_TIMING3_ODT_WR_DLY(VC3_MPAR_CL - 1) |		\
+	ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY(VC3_MPAR_CL - 1)
+
+#endif
+
+enum {
+	DDR_TRAIN_OK,
+	DDR_TRAIN_CONTINUE,
+	DDR_TRAIN_ERROR,
+};
+
+/*
+ * We actually have very few 'pause' possibilities apart from
+ * these assembly nops (at this very early stage).
+ */
+#define PAUSE() asm volatile("nop; nop; nop; nop; nop; nop; nop; nop")
+
+/* NB: Assumes inlining as no stack is available! */
+/* Program the DQS input-delay tap for @bytelane to @dly. */
+static inline void set_dly(u32 bytelane, u32 dly)
+{
+	register u32 val = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
+
+	val = (val & ~ICPU_MEMCTRL_DQS_DLY_DQS_DLY_M) |
+	      ICPU_MEMCTRL_DQS_DLY_DQS_DLY(dly);
+	writel(val, BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
+}
+
+/*
+ * Advance the DQS delay for @bytelane by one tap.
+ * Returns false when the tap is already at its maximum (31).
+ */
+static inline bool incr_dly(u32 bytelane)
+{
+	register u32 val = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
+
+	if (ICPU_MEMCTRL_DQS_DLY_DQS_DLY(val) >= 31)
+		return false;
+
+	writel(val + 1, BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
+	return true;
+}
+
+/*
+ * Shift byte-lane 0's DQS delay by @adjust taps (may be negative).
+ *
+ * NOTE(review): only the pre-adjust value is range-checked (< 31); a
+ * large or negative @adjust could push the tap outside 0..31 — confirm
+ * callers only step by small amounts.
+ */
+static inline bool adjust_dly(int adjust)
+{
+	register u32 r = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(0));
+
+	if (ICPU_MEMCTRL_DQS_DLY_DQS_DLY(r) < 31) {
+		writel(r + adjust, BASE_CFG + ICPU_MEMCTRL_DQS_DLY(0));
+		return true;
+	}
+
+	return false;
+}
+
+/* NB: Assumes inlining as no stack is available! */
+/* Move @bytelane's DQS delay to the midpoint of the window [start; current]. */
+static inline void center_dly(u32 bytelane, u32 start)
+{
+	register u32 width = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane)) - start;
+
+	writel(start + (width / 2), BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
+}
+
+/* Pulse the memory-PHY FIFO reset bit to resynchronize the read path. */
+static inline void memphy_soft_reset(void)
+{
+	setbits_le32(BASE_CFG + ICPU_MEMPHY_CFG, ICPU_MEMPHY_CFG_PHY_FIFO_RST);
+	PAUSE();
+	clrbits_le32(BASE_CFG + ICPU_MEMPHY_CFG, ICPU_MEMPHY_CFG_PHY_FIFO_RST);
+	PAUSE();
+}
+
+#ifdef CONFIG_SOC_OCELOT
+static u8 training_data[] = { 0xfe, 0x11, 0x33, 0x55, 0x77, 0x99, 0xbb, 0xdd };
+
+/*
+ * Busy-wait for @val ticks of 100 ns each using hardware timer 0 in
+ * one-shot mode (no interrupts/scheduler available this early in boot).
+ */
+static inline void sleep_100ns(u32 val)
+{
+	/* Set the timer tick generator to 100 ns */
+	writel(VCOREIII_TIMER_DIVIDER - 1, BASE_CFG + ICPU_TIMER_TICK_DIV);
+
+	/* Set the timer value */
+	writel(val, BASE_CFG + ICPU_TIMER_VALUE(0));
+
+	/* Enable timer 0 for one-shot */
+	writel(ICPU_TIMER_CTRL_ONE_SHOT_ENA | ICPU_TIMER_CTRL_TIMER_ENA,
+	       BASE_CFG + ICPU_TIMER_CTRL(0));
+
+	/* Wait for timer 0 to reach 0 */
+	while (readl(BASE_CFG + ICPU_TIMER_VALUE(0)) != 0)
+		;
+}
+
+/* Drive the DDR reset line (GPIO 19) low and hold it for 1 ms. */
+static inline void hal_vcoreiii_ddr_reset_assert(void)
+{
+	/* DDR has reset pin on GPIO 19 toggle Low-High to release */
+	setbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
+	writel(BIT(19), BASE_DEVCPU_GCB + PERF_GPIO_OUT_CLR);
+	sleep_100ns(10000);
+}
+
+/* Drive the DDR reset line (GPIO 19) high and let it settle for 1 ms. */
+static inline void hal_vcoreiii_ddr_reset_release(void)
+{
+	/* DDR has reset pin on GPIO 19 toggle Low-High to release */
+	setbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
+	writel(BIT(19), BASE_DEVCPU_GCB + PERF_GPIO_OUT_SET);
+	sleep_100ns(10000);
+}
+
+/*
+ * DDR memory sanity checking failed, tally and do hard reset
+ *
+ * NB: Assumes inlining as no stack is available!
+ */
+static inline void hal_vcoreiii_ddr_failed(void)
+{
+	register u32 reset;
+
+	/* Bump the failure tally kept across resets in GPR(6) */
+	writel(readl(BASE_CFG + ICPU_GPR(6)) + 1, BASE_CFG + ICPU_GPR(6));
+
+	/* Tri-state GPIO 19 (DDR reset pin) before restarting */
+	clrbits_le32(BASE_DEVCPU_GCB + PERF_GPIO_OE, BIT(19));
+
+	/* Jump to reset - does not return */
+	reset = KSEG0ADDR(_machine_restart);
+	/*
+	 * Reset while running from cache: lock the restart code into the
+	 * I-cache so it executes even though DDR is unusable.
+	 */
+	icache_lock((void *)reset, 128);
+	asm volatile ("jr %0"::"r" (reset));
+
+	panic("DDR init failed\n");
+}
+
+/*
+ * DDR memory sanity checking done, possibly enable ECC.
+ *
+ * NB: Assumes inlining as no stack is available!
+ */
+static inline void hal_vcoreiii_ddr_verified(void)
+{
+#ifdef MIPS_VCOREIII_MEMORY_ECC
+	/* Finally, enable ECC */
+	register u32 val = readl(BASE_CFG + ICPU_MEMCTRL_CFG);
+
+	val |= ICPU_MEMCTRL_CFG_DDR_ECC_ERR_ENA;
+	val &= ~ICPU_MEMCTRL_CFG_BURST_SIZE;
+
+	writel(val, BASE_CFG + ICPU_MEMCTRL_CFG);
+#endif
+
+	/* Reset Status register - sticky bits (cleared by writing them back) */
+	writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT), BASE_CFG + ICPU_MEMCTRL_STAT);
+}
+
+/* NB: Assumes inlining as no stack is available! */
+/*
+ * Scan the training pattern for @bytelane at the current DQS delay.
+ * On a byte mismatch or a flagged read error the delay is bumped and
+ * DDR_TRAIN_CONTINUE is returned; DDR_TRAIN_ERROR when no taps remain;
+ * DDR_TRAIN_OK when the whole pattern read back correctly.
+ */
+static inline int look_for(u32 bytelane)
+{
+	register u32 i;
+
+	/* Reset FIFO in case any previous access failed */
+	for (i = 0; i < sizeof(training_data); i++) {
+		register u32 byte;
+
+		memphy_soft_reset();
+		/* Reset sticky bits */
+		writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
+		       BASE_CFG + ICPU_MEMCTRL_STAT);
+		/* Read data: byte @bytelane of the i'th 32-bit pattern word */
+		byte = ((volatile u8 *)MSCC_DDR_TO)[bytelane + (i * 4)];
+		/*
+		 * Prevent the compiler reordering the instruction so
+		 * the read of RAM happens after the check of the
+		 * errors.
+		 */
+		asm volatile("" : : : "memory");
+		if (readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
+		    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
+		     ICPU_MEMCTRL_STAT_RDATA_DUMMY)) {
+			/* Noise on the line */
+			goto read_error;
+		}
+		/* If mismatch, increment DQS - if possible */
+		if (byte != training_data[i]) {
+ read_error:
+			if (!incr_dly(bytelane))
+				return DDR_TRAIN_ERROR;
+			return DDR_TRAIN_CONTINUE;
+		}
+	}
+	return DDR_TRAIN_OK;
+}
+
+/* NB: Assumes inlining as no stack is available! */
+/*
+ * Probe past the working DQS window for @bytelane: while the pattern
+ * still reads back cleanly, keep incrementing the delay and return
+ * DDR_TRAIN_CONTINUE; the first mismatch/read error marks the window
+ * end (DDR_TRAIN_OK); DDR_TRAIN_ERROR when no taps remain.
+ */
+static inline int look_past(u32 bytelane)
+{
+	register u32 i;
+
+	/* Reset FIFO in case any previous access failed */
+	for (i = 0; i < sizeof(training_data); i++) {
+		register u32 byte;
+
+		memphy_soft_reset();
+		/* Ack sticky bits */
+		writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
+		       BASE_CFG + ICPU_MEMCTRL_STAT);
+		byte = ((volatile u8 *)MSCC_DDR_TO)[bytelane + (i * 4)];
+		/*
+		 * Prevent the compiler reordering the instruction so
+		 * the read of RAM happens after the check of the
+		 * errors.
+		 */
+		asm volatile("" : : : "memory");
+		if (readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
+		    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
+		     ICPU_MEMCTRL_STAT_RDATA_DUMMY)) {
+			/* Noise on the line */
+			goto read_error;
+		}
+		/* Bail out when we see first mismatch */
+		if (byte != training_data[i]) {
+ read_error:
+			return DDR_TRAIN_OK;
+		}
+	}
+	/* All data compares OK, increase DQS and retry */
+	if (!incr_dly(bytelane))
+		return DDR_TRAIN_ERROR;
+
+	return DDR_TRAIN_CONTINUE;
+}
+
+/*
+ * Train @bytelane: scan the DQS delay upward until the pattern first
+ * reads back correctly (window start), continue until it fails again
+ * (window end), then centre the delay within that window.
+ * Returns DDR_TRAIN_OK on success, DDR_TRAIN_ERROR otherwise.
+ */
+static inline int hal_vcoreiii_train_bytelane(u32 bytelane)
+{
+	register int res;
+	register u32 dqs_s;
+
+	set_dly(bytelane, 0);	// Start training at DQS=0
+	while ((res = look_for(bytelane)) == DDR_TRAIN_CONTINUE)
+		;
+	if (res != DDR_TRAIN_OK)
+		return res;
+
+	dqs_s = readl(BASE_CFG + ICPU_MEMCTRL_DQS_DLY(bytelane));
+	while ((res = look_past(bytelane)) == DDR_TRAIN_CONTINUE)
+		;
+	if (res != DDR_TRAIN_OK)
+		return res;
+	/* Reset FIFO - for good measure */
+	memphy_soft_reset();
+	/* Adjust to center [dqs_s;cur] */
+	center_dly(bytelane, dqs_s);
+	return DDR_TRAIN_OK;
+}
+
+/* This algorithm is converted from the TCL training algorithm used
+ * during silicon simulation.
+ * NB: Assumes inlining as no stack is available!
+ *
+ * Scans all 32x32 DQS delay combinations for byte-lanes 0 and 1 until
+ * a read of the first two DDR bytes completes without the controller
+ * flagging a masked/dummy read.  Returns 0 on success, -1 if no
+ * combination works.
+ */
+static inline int hal_vcoreiii_init_dqs(void)
+{
+#define MAX_DQS 32
+	register u32 i, j;
+
+	for (i = 0; i < MAX_DQS; i++) {
+		set_dly(0, i);	// Byte-lane 0
+		for (j = 0; j < MAX_DQS; j++) {
+			register u32 __attribute__ ((unused)) byte;
+			set_dly(1, j);	// Byte-lane 1
+			/* Reset FIFO in case any previous access failed */
+			memphy_soft_reset();
+			writel(readl(BASE_CFG + ICPU_MEMCTRL_STAT),
+			       BASE_CFG + ICPU_MEMCTRL_STAT);
+			byte = ((volatile u8 *)MSCC_DDR_TO)[0];
+			byte = ((volatile u8 *)MSCC_DDR_TO)[1];
+			if (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT) &
+			    (ICPU_MEMCTRL_STAT_RDATA_MASKED |
+			     ICPU_MEMCTRL_STAT_RDATA_DUMMY)))
+				return 0;
+		}
+	}
+	return -1;
+}
+
+/*
+ * Quick sanity check: write and read back eight distinct words at the
+ * DDR base.  Returns 0 on success, 1 on the first mismatch.
+ */
+static inline int dram_check(void)
+{
+#define DDR ((volatile u32 *) MSCC_DDR_TO)
+	register u32 off;
+
+	for (off = 0; off < 8; off++) {
+		DDR[off] = ~off;
+		if (DDR[off] != ~off)
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * NB: Called *early* to init memory controller - assumes inlining as
+ * no stack is available!
+ *
+ * Sequence: hold DDR in reset, release the controller's forced reset,
+ * enable and calibrate the SSTL pads, program the timing/mode
+ * parameters derived from the VC3_MPAR_* table, enable ODT, then
+ * release the DDR reset pin.
+ */
+static inline void hal_vcoreiii_init_memctl(void)
+{
+	/* Ensure DDR is in reset */
+	hal_vcoreiii_ddr_reset_assert();
+
+	/* Wait maybe not needed, but ... */
+	PAUSE();
+
+	/* Drop sys ctl memory controller forced reset */
+	clrbits_le32(BASE_CFG + ICPU_RESET, ICPU_RESET_MEM_RST_FORCE);
+
+	PAUSE();
+
+	/* Drop Reset, enable SSTL */
+	writel(ICPU_MEMPHY_CFG_PHY_SSTL_ENA, BASE_CFG + ICPU_MEMPHY_CFG);
+	PAUSE();
+
+	/* Start the automatic SSTL output and ODT drive-strength calibration */
+	writel(ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT(MIPS_VCOREIII_MEMORY_SSTL_ODT) |
+	       /* drive strength */
+	       ICPU_MEMPHY_ZCAL_ZCAL_PROG(MIPS_VCOREIII_MEMORY_SSTL_DRIVE) |
+	       /* Start calibration process */
+	       ICPU_MEMPHY_ZCAL_ZCAL_ENA, BASE_CFG + ICPU_MEMPHY_ZCAL);
+
+	/* Wait for ZCAL to clear */
+	while (readl(BASE_CFG + ICPU_MEMPHY_ZCAL) & ICPU_MEMPHY_ZCAL_ZCAL_ENA)
+		;
+
+	/* Check no ZCAL_ERR */
+	if (readl(BASE_CFG + ICPU_MEMPHY_ZCAL_STAT)
+	    & ICPU_MEMPHY_ZCAL_STAT_ZCAL_ERR)
+		hal_vcoreiii_ddr_failed();
+
+	/* Drive CL, CK, ODT */
+	setbits_le32(BASE_CFG + ICPU_MEMPHY_CFG, ICPU_MEMPHY_CFG_PHY_ODT_OE |
+		     ICPU_MEMPHY_CFG_PHY_CK_OE | ICPU_MEMPHY_CFG_PHY_CL_OE);
+
+	/* Initialize memory controller */
+	writel(MSCC_MEMPARM_MEMCFG, BASE_CFG + ICPU_MEMCTRL_CFG);
+	writel(MSCC_MEMPARM_PERIOD, BASE_CFG + ICPU_MEMCTRL_REF_PERIOD);
+
+	writel(MSCC_MEMPARM_TIMING0, BASE_CFG + ICPU_MEMCTRL_TIMING0);
+
+	writel(MSCC_MEMPARM_TIMING1, BASE_CFG + ICPU_MEMCTRL_TIMING1);
+	writel(MSCC_MEMPARM_TIMING2, BASE_CFG + ICPU_MEMCTRL_TIMING2);
+	writel(MSCC_MEMPARM_TIMING3, BASE_CFG + ICPU_MEMCTRL_TIMING3);
+	writel(MSCC_MEMPARM_MR0, BASE_CFG + ICPU_MEMCTRL_MR0_VAL);
+	writel(MSCC_MEMPARM_MR1, BASE_CFG + ICPU_MEMCTRL_MR1_VAL);
+	writel(MSCC_MEMPARM_MR2, BASE_CFG + ICPU_MEMCTRL_MR2_VAL);
+	writel(MSCC_MEMPARM_MR3, BASE_CFG + ICPU_MEMCTRL_MR3_VAL);
+
+	/* Termination setup - enable ODT */
+	writel(ICPU_MEMCTRL_TERMRES_CTRL_LOCAL_ODT_RD_ENA |
+	       /* Assert ODT0 for any write */
+	       ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA(3),
+	       BASE_CFG + ICPU_MEMCTRL_TERMRES_CTRL);
+
+	/* Release Reset from DDR */
+	hal_vcoreiii_ddr_reset_release();
+
+	/* Bump GPR(7) - appears to tally inits; cf. GPR(6) failure tally */
+	writel(readl(BASE_CFG + ICPU_GPR(7)) + 1, BASE_CFG + ICPU_GPR(7));
+}
+
+/*
+ * Kick controller initialization, busy-wait for INIT_DONE, then seed
+ * the first 32 bytes of DDR with the pattern that the DQS training in
+ * look_for()/look_past() expects (low byte of word i == training_data[i]).
+ */
+static inline void hal_vcoreiii_wait_memctl(void)
+{
+	/* Now, rip it! */
+	writel(ICPU_MEMCTRL_CTRL_INITIALIZE, BASE_CFG + ICPU_MEMCTRL_CTRL);
+
+	while (!(readl(BASE_CFG + ICPU_MEMCTRL_STAT)
+		 & ICPU_MEMCTRL_STAT_INIT_DONE))
+		;
+
+	/* Settle...? */
+	sleep_100ns(10000);
+
+	/* Establish data contents in DDR RAM for training */
+
+	__raw_writel(0xcacafefe, ((void __iomem *)MSCC_DDR_TO));
+	__raw_writel(0x22221111, ((void __iomem *)MSCC_DDR_TO + 0x4));
+	__raw_writel(0x44443333, ((void __iomem *)MSCC_DDR_TO + 0x8));
+	__raw_writel(0x66665555, ((void __iomem *)MSCC_DDR_TO + 0xC));
+	__raw_writel(0x88887777, ((void __iomem *)MSCC_DDR_TO + 0x10));
+	__raw_writel(0xaaaa9999, ((void __iomem *)MSCC_DDR_TO + 0x14));
+	__raw_writel(0xccccbbbb, ((void __iomem *)MSCC_DDR_TO + 0x18));
+	__raw_writel(0xeeeedddd, ((void __iomem *)MSCC_DDR_TO + 0x1C));
+}
+#endif				/* __ASM_MACH_DDR_H */
diff --git a/arch/mips/mach-mscc/include/mach/ocelot/ocelot.h b/arch/mips/mach-mscc/include/mach/ocelot/ocelot.h
new file mode 100644
index 0000000000..2cb2135d37
--- /dev/null
+++ b/arch/mips/mach-mscc/include/mach/ocelot/ocelot.h
@@ -0,0 +1,24 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_H_
+#define _MSCC_OCELOT_H_
+
+#include <linux/bitops.h>
+#include <dm.h>
+
+/*
+ * Target offset base(s)
+ */
+#define MSCC_IO_ORIGIN1_OFFSET 0x70000000
+#define MSCC_IO_ORIGIN1_SIZE   0x00200000
+#define MSCC_IO_ORIGIN2_OFFSET 0x71000000
+#define MSCC_IO_ORIGIN2_SIZE   0x01000000
+#define BASE_CFG        ((void __iomem *)0x70000000)
+#define BASE_DEVCPU_GCB ((void __iomem *)0x71070000)
+
+#endif
diff --git a/arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h
new file mode 100644
index 0000000000..f8aa97ba26
--- /dev/null
+++ b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_devcpu_gcb.h
@@ -0,0 +1,21 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEVCPU_GCB_H_
+#define _MSCC_OCELOT_DEVCPU_GCB_H_
+
+#define PERF_SOFT_RST                                     0x8
+
+#define PERF_SOFT_RST_SOFT_NON_CFG_RST                    BIT(2)
+#define PERF_SOFT_RST_SOFT_SWC_RST                        BIT(1)
+#define PERF_SOFT_RST_SOFT_CHIP_RST                       BIT(0)
+
+#define PERF_GPIO_OUT_SET                                 0x34
+
+#define PERF_GPIO_OUT_CLR                                 0x38
+
+#define PERF_GPIO_OE                                      0x44
+
+#endif
diff --git a/arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h
new file mode 100644
index 0000000000..04cf70bec3
--- /dev/null
+++ b/arch/mips/mach-mscc/include/mach/ocelot/ocelot_icpu_cfg.h
@@ -0,0 +1,274 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ICPU_CFG_H_
+#define _MSCC_OCELOT_ICPU_CFG_H_
+
+/*
+ * Register offsets below are relative to BASE_CFG (the ICPU_CFG target,
+ * see ocelot.h).  For each multi-bit field three macros are provided:
+ * FIELD(x) inserts a value into the field, FIELD_M is the field mask
+ * and FIELD_X(x) extracts the field value from a register word.
+ */
+
+#define ICPU_GPR(x) (0x4 * (x))
+#define ICPU_GPR_RSZ                                      0x4
+
+#define ICPU_RESET                                        0x20
+
+#define ICPU_RESET_CORE_RST_CPU_ONLY                      BIT(3)
+#define ICPU_RESET_CORE_RST_PROTECT                       BIT(2)
+#define ICPU_RESET_CORE_RST_FORCE                         BIT(1)
+#define ICPU_RESET_MEM_RST_FORCE                          BIT(0)
+
+#define ICPU_GENERAL_CTRL                                 0x24
+
+#define ICPU_GENERAL_CTRL_CPU_BUSIF_SLEEP_DIS             BIT(14)
+#define ICPU_GENERAL_CTRL_CPU_BUSIF_WERR_ENA              BIT(13)
+#define ICPU_GENERAL_CTRL_CPU_8051_IROM_ENA               BIT(12)
+#define ICPU_GENERAL_CTRL_CPU_MIPS_DIS                    BIT(11)
+#define ICPU_GENERAL_CTRL_IF_MIIM_SLV_ADDR_SEL            BIT(10)
+#define ICPU_GENERAL_CTRL_IF_MIIM_SLV_ENA                 BIT(9)
+#define ICPU_GENERAL_CTRL_IF_PI_SLV_DONEPOL               BIT(8)
+#define ICPU_GENERAL_CTRL_IF_PI_MST_ENA                   BIT(7)
+#define ICPU_GENERAL_CTRL_IF_PI_SLV_ENA                   BIT(6)
+#define ICPU_GENERAL_CTRL_IF_SI_OWNER(x)                  (((x) << 4) & GENMASK(5, 4))
+#define ICPU_GENERAL_CTRL_IF_SI_OWNER_M                   GENMASK(5, 4)
+#define ICPU_GENERAL_CTRL_IF_SI_OWNER_X(x)                (((x) & GENMASK(5, 4)) >> 4)
+#define ICPU_GENERAL_CTRL_SSI_MST_CONTENTION              BIT(3)
+#define ICPU_GENERAL_CTRL_CPU_BE_ENA                      BIT(2)
+#define ICPU_GENERAL_CTRL_CPU_DIS                         BIT(1)
+#define ICPU_GENERAL_CTRL_BOOT_MODE_ENA                   BIT(0)
+#define ICPU_SPI_MST_CFG                                  0x3c
+
+#define ICPU_SPI_MST_CFG_A32B_ENA                         BIT(11)
+#define ICPU_SPI_MST_CFG_FAST_READ_ENA                    BIT(10)
+#define ICPU_SPI_MST_CFG_CS_DESELECT_TIME(x)              (((x) << 5) & GENMASK(9, 5))
+#define ICPU_SPI_MST_CFG_CS_DESELECT_TIME_M               GENMASK(9, 5)
+#define ICPU_SPI_MST_CFG_CS_DESELECT_TIME_X(x)            (((x) & GENMASK(9, 5)) >> 5)
+#define ICPU_SPI_MST_CFG_CLK_DIV(x)                       ((x) & GENMASK(4, 0))
+#define ICPU_SPI_MST_CFG_CLK_DIV_M                        GENMASK(4, 0)
+
+/* SPI bit-bang (software) mode control */
+#define ICPU_SW_MODE                                      0x50
+
+#define ICPU_SW_MODE_SW_PIN_CTRL_MODE                     BIT(13)
+#define ICPU_SW_MODE_SW_SPI_SCK                           BIT(12)
+#define ICPU_SW_MODE_SW_SPI_SCK_OE                        BIT(11)
+#define ICPU_SW_MODE_SW_SPI_SDO                           BIT(10)
+#define ICPU_SW_MODE_SW_SPI_SDO_OE                        BIT(9)
+#define ICPU_SW_MODE_SW_SPI_CS(x)                         (((x) << 5) & GENMASK(8, 5))
+#define ICPU_SW_MODE_SW_SPI_CS_M                          GENMASK(8, 5)
+#define ICPU_SW_MODE_SW_SPI_CS_X(x)                       (((x) & GENMASK(8, 5)) >> 5)
+#define ICPU_SW_MODE_SW_SPI_CS_OE(x)                      (((x) << 1) & GENMASK(4, 1))
+#define ICPU_SW_MODE_SW_SPI_CS_OE_M                       GENMASK(4, 1)
+#define ICPU_SW_MODE_SW_SPI_CS_OE_X(x)                    (((x) & GENMASK(4, 1)) >> 1)
+#define ICPU_SW_MODE_SW_SPI_SDI                           BIT(0)
+
+#define ICPU_INTR_ENA					  0x88
+
+#define ICPU_DST_INTR_MAP(x)  (0x98 + 0x4 * (x))
+#define ICPU_DST_INTR_MAP_RSZ                             0x4
+
+#define ICPU_DST_INTR_IDENT                               0xa8
+#define ICPU_DST_INTR_IDENT_RSZ                           0x4
+
+#define ICPU_TIMER_TICK_DIV                               0xe8
+#define ICPU_TIMER_VALUE(x) (0xec + 0x4 * (x))
+
+#define ICPU_TIMER_CTRL(x) (0x104 + 0x4 * (x))
+#define ICPU_TIMER_CTRL_MAX_FREQ_ENA			  BIT(3)
+#define ICPU_TIMER_CTRL_ONE_SHOT_ENA			  BIT(2)
+#define ICPU_TIMER_CTRL_TIMER_ENA			  BIT(1)
+#define ICPU_TIMER_CTRL_FORCE_RELOAD			  BIT(0)
+
+/* DDR memory controller registers */
+#define ICPU_MEMCTRL_CTRL                                 0x110
+#define ICPU_MEMCTRL_CTRL_PWR_DOWN                        BIT(3)
+#define ICPU_MEMCTRL_CTRL_MDSET                           BIT(2)
+#define ICPU_MEMCTRL_CTRL_STALL_REF_ENA                   BIT(1)
+#define ICPU_MEMCTRL_CTRL_INITIALIZE                      BIT(0)
+
+#define ICPU_MEMCTRL_CFG                                  0x114
+
+#define ICPU_MEMCTRL_CFG_DDR_512MBYTE_PLUS                BIT(16)
+#define ICPU_MEMCTRL_CFG_DDR_ECC_ERR_ENA                  BIT(15)
+#define ICPU_MEMCTRL_CFG_DDR_ECC_COR_ENA                  BIT(14)
+#define ICPU_MEMCTRL_CFG_DDR_ECC_ENA                      BIT(13)
+#define ICPU_MEMCTRL_CFG_DDR_WIDTH                        BIT(12)
+#define ICPU_MEMCTRL_CFG_DDR_MODE                         BIT(11)
+#define ICPU_MEMCTRL_CFG_BURST_SIZE                       BIT(10)
+#define ICPU_MEMCTRL_CFG_BURST_LEN                        BIT(9)
+#define ICPU_MEMCTRL_CFG_BANK_CNT                         BIT(8)
+#define ICPU_MEMCTRL_CFG_MSB_ROW_ADDR(x)                  (((x) << 4) & GENMASK(7, 4))
+#define ICPU_MEMCTRL_CFG_MSB_ROW_ADDR_M                   GENMASK(7, 4)
+#define ICPU_MEMCTRL_CFG_MSB_ROW_ADDR_X(x)                (((x) & GENMASK(7, 4)) >> 4)
+#define ICPU_MEMCTRL_CFG_MSB_COL_ADDR(x)                  ((x) & GENMASK(3, 0))
+#define ICPU_MEMCTRL_CFG_MSB_COL_ADDR_M                   GENMASK(3, 0)
+
+#define ICPU_MEMCTRL_STAT                                 0x118
+
+#define ICPU_MEMCTRL_STAT_RDATA_MASKED                    BIT(5)
+#define ICPU_MEMCTRL_STAT_RDATA_DUMMY                     BIT(4)
+#define ICPU_MEMCTRL_STAT_RDATA_ECC_ERR                   BIT(3)
+#define ICPU_MEMCTRL_STAT_RDATA_ECC_COR                   BIT(2)
+#define ICPU_MEMCTRL_STAT_PWR_DOWN_ACK                    BIT(1)
+#define ICPU_MEMCTRL_STAT_INIT_DONE                       BIT(0)
+
+#define ICPU_MEMCTRL_REF_PERIOD                           0x11c
+
+#define ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF(x)           (((x) << 16) & GENMASK(19, 16))
+#define ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF_M            GENMASK(19, 16)
+#define ICPU_MEMCTRL_REF_PERIOD_MAX_PEND_REF_X(x)         (((x) & GENMASK(19, 16)) >> 16)
+#define ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD(x)             ((x) & GENMASK(15, 0))
+#define ICPU_MEMCTRL_REF_PERIOD_REF_PERIOD_M              GENMASK(15, 0)
+
+#define ICPU_MEMCTRL_TIMING0                              0x124
+
+#define ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY(x)              (((x) << 28) & GENMASK(31, 28))
+#define ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY_M               GENMASK(31, 28)
+#define ICPU_MEMCTRL_TIMING0_RD_TO_WR_DLY_X(x)            (((x) & GENMASK(31, 28)) >> 28)
+#define ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY(x)          (((x) << 24) & GENMASK(27, 24))
+#define ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY_M           GENMASK(27, 24)
+#define ICPU_MEMCTRL_TIMING0_WR_CS_CHANGE_DLY_X(x)        (((x) & GENMASK(27, 24)) >> 24)
+#define ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY(x)          (((x) << 20) & GENMASK(23, 20))
+#define ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY_M           GENMASK(23, 20)
+#define ICPU_MEMCTRL_TIMING0_RD_CS_CHANGE_DLY_X(x)        (((x) & GENMASK(23, 20)) >> 20)
+#define ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY(x)          (((x) << 16) & GENMASK(19, 16))
+#define ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY_M           GENMASK(19, 16)
+#define ICPU_MEMCTRL_TIMING0_RAS_TO_PRECH_DLY_X(x)        (((x) & GENMASK(19, 16)) >> 16)
+#define ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY(x)           (((x) << 12) & GENMASK(15, 12))
+#define ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY_M            GENMASK(15, 12)
+#define ICPU_MEMCTRL_TIMING0_WR_TO_PRECH_DLY_X(x)         (((x) & GENMASK(15, 12)) >> 12)
+#define ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY(x)           (((x) << 8) & GENMASK(11, 8))
+#define ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY_M            GENMASK(11, 8)
+#define ICPU_MEMCTRL_TIMING0_RD_TO_PRECH_DLY_X(x)         (((x) & GENMASK(11, 8)) >> 8)
+#define ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY(x)           (((x) << 4) & GENMASK(7, 4))
+#define ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY_M            GENMASK(7, 4)
+#define ICPU_MEMCTRL_TIMING0_WR_DATA_XFR_DLY_X(x)         (((x) & GENMASK(7, 4)) >> 4)
+#define ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY(x)           ((x) & GENMASK(3, 0))
+#define ICPU_MEMCTRL_TIMING0_RD_DATA_XFR_DLY_M            GENMASK(3, 0)
+
+#define ICPU_MEMCTRL_TIMING1                              0x128
+
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY(x)  (((x) << 24) & GENMASK(31, 24))
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY_M   GENMASK(31, 24)
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_SAME_BANK_DLY_X(x) (((x) & GENMASK(31, 24)) >> 24)
+#define ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY(x)             (((x) << 16) & GENMASK(23, 16))
+#define ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY_M              GENMASK(23, 16)
+#define ICPU_MEMCTRL_TIMING1_BANK8_FAW_DLY_X(x)           (((x) & GENMASK(23, 16)) >> 16)
+#define ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY(x)          (((x) << 12) & GENMASK(15, 12))
+#define ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY_M           GENMASK(15, 12)
+#define ICPU_MEMCTRL_TIMING1_PRECH_TO_RAS_DLY_X(x)        (((x) & GENMASK(15, 12)) >> 12)
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY(x)            (((x) << 8) & GENMASK(11, 8))
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY_M             GENMASK(11, 8)
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_RAS_DLY_X(x)          (((x) & GENMASK(11, 8)) >> 8)
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY(x)            (((x) << 4) & GENMASK(7, 4))
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY_M             GENMASK(7, 4)
+#define ICPU_MEMCTRL_TIMING1_RAS_TO_CAS_DLY_X(x)          (((x) & GENMASK(7, 4)) >> 4)
+#define ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY(x)              ((x) & GENMASK(3, 0))
+#define ICPU_MEMCTRL_TIMING1_WR_TO_RD_DLY_M               GENMASK(3, 0)
+
+#define ICPU_MEMCTRL_TIMING2                              0x12c
+
+#define ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY(x)             (((x) << 28) & GENMASK(31, 28))
+#define ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY_M              GENMASK(31, 28)
+#define ICPU_MEMCTRL_TIMING2_PRECH_ALL_DLY_X(x)           (((x) & GENMASK(31, 28)) >> 28)
+#define ICPU_MEMCTRL_TIMING2_MDSET_DLY(x)                 (((x) << 24) & GENMASK(27, 24))
+#define ICPU_MEMCTRL_TIMING2_MDSET_DLY_M                  GENMASK(27, 24)
+#define ICPU_MEMCTRL_TIMING2_MDSET_DLY_X(x)               (((x) & GENMASK(27, 24)) >> 24)
+#define ICPU_MEMCTRL_TIMING2_REF_DLY(x)                   (((x) << 16) & GENMASK(23, 16))
+#define ICPU_MEMCTRL_TIMING2_REF_DLY_M                    GENMASK(23, 16)
+#define ICPU_MEMCTRL_TIMING2_REF_DLY_X(x)                 (((x) & GENMASK(23, 16)) >> 16)
+#define ICPU_MEMCTRL_TIMING2_INIT_DLY(x)                  ((x) & GENMASK(15, 0))
+#define ICPU_MEMCTRL_TIMING2_INIT_DLY_M                   GENMASK(15, 0)
+
+#define ICPU_MEMCTRL_TIMING3                              0x130
+
+#define ICPU_MEMCTRL_TIMING3_RMW_DLY(x)                   (((x) << 16) & GENMASK(19, 16))
+#define ICPU_MEMCTRL_TIMING3_RMW_DLY_M                    GENMASK(19, 16)
+#define ICPU_MEMCTRL_TIMING3_RMW_DLY_X(x)                 (((x) & GENMASK(19, 16)) >> 16)
+#define ICPU_MEMCTRL_TIMING3_ODT_RD_DLY(x)                (((x) << 12) & GENMASK(15, 12))
+#define ICPU_MEMCTRL_TIMING3_ODT_RD_DLY_M                 GENMASK(15, 12)
+#define ICPU_MEMCTRL_TIMING3_ODT_RD_DLY_X(x)              (((x) & GENMASK(15, 12)) >> 12)
+#define ICPU_MEMCTRL_TIMING3_ODT_WR_DLY(x)                (((x) << 8) & GENMASK(11, 8))
+#define ICPU_MEMCTRL_TIMING3_ODT_WR_DLY_M                 GENMASK(11, 8)
+#define ICPU_MEMCTRL_TIMING3_ODT_WR_DLY_X(x)              (((x) & GENMASK(11, 8)) >> 8)
+#define ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY(x)          (((x) << 4) & GENMASK(7, 4))
+#define ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY_M           GENMASK(7, 4)
+#define ICPU_MEMCTRL_TIMING3_LOCAL_ODT_RD_DLY_X(x)        (((x) & GENMASK(7, 4)) >> 4)
+#define ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY(x)    ((x) & GENMASK(3, 0))
+#define ICPU_MEMCTRL_TIMING3_WR_TO_RD_CS_CHANGE_DLY_M     GENMASK(3, 0)
+
+#define ICPU_MEMCTRL_MR0_VAL                              0x138
+
+#define ICPU_MEMCTRL_MR1_VAL                              0x13c
+
+#define ICPU_MEMCTRL_MR2_VAL                              0x140
+
+#define ICPU_MEMCTRL_MR3_VAL                              0x144
+
+#define ICPU_MEMCTRL_TERMRES_CTRL                         0x148
+
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_EXT              BIT(11)
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_ENA(x)           (((x) << 7) & GENMASK(10, 7))
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_ENA_M            GENMASK(10, 7)
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_RD_ENA_X(x)         (((x) & GENMASK(10, 7)) >> 7)
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_EXT              BIT(6)
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA(x)           (((x) << 2) & GENMASK(5, 2))
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA_M            GENMASK(5, 2)
+#define ICPU_MEMCTRL_TERMRES_CTRL_ODT_WR_ENA_X(x)         (((x) & GENMASK(5, 2)) >> 2)
+#define ICPU_MEMCTRL_TERMRES_CTRL_LOCAL_ODT_RD_EXT        BIT(1)
+#define ICPU_MEMCTRL_TERMRES_CTRL_LOCAL_ODT_RD_ENA        BIT(0)
+
+#define ICPU_MEMCTRL_DQS_DLY(x) (0x150 + 0x4 * (x))
+#define ICPU_MEMCTRL_DQS_DLY_RSZ                          0x4
+
+#define ICPU_MEMCTRL_DQS_DLY_TRAIN_DQ_ENA                 BIT(11)
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM1(x)              (((x) << 8) & GENMASK(10, 8))
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM1_M               GENMASK(10, 8)
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM1_X(x)            (((x) & GENMASK(10, 8)) >> 8)
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM0(x)              (((x) << 5) & GENMASK(7, 5))
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM0_M               GENMASK(7, 5)
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_TRM0_X(x)            (((x) & GENMASK(7, 5)) >> 5)
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY(x)                   ((x) & GENMASK(4, 0))
+#define ICPU_MEMCTRL_DQS_DLY_DQS_DLY_M                    GENMASK(4, 0)
+
+/* DDR PHY registers */
+#define ICPU_MEMPHY_CFG                                   0x160
+
+#define ICPU_MEMPHY_CFG_PHY_FLUSH_DIS                     BIT(10)
+#define ICPU_MEMPHY_CFG_PHY_RD_ADJ_DIS                    BIT(9)
+#define ICPU_MEMPHY_CFG_PHY_DQS_EXT                       BIT(8)
+#define ICPU_MEMPHY_CFG_PHY_FIFO_RST                      BIT(7)
+#define ICPU_MEMPHY_CFG_PHY_DLL_BL_RST                    BIT(6)
+#define ICPU_MEMPHY_CFG_PHY_DLL_CL_RST                    BIT(5)
+#define ICPU_MEMPHY_CFG_PHY_ODT_OE                        BIT(4)
+#define ICPU_MEMPHY_CFG_PHY_CK_OE                         BIT(3)
+#define ICPU_MEMPHY_CFG_PHY_CL_OE                         BIT(2)
+#define ICPU_MEMPHY_CFG_PHY_SSTL_ENA                      BIT(1)
+#define ICPU_MEMPHY_CFG_PHY_RST                           BIT(0)
+
+#define ICPU_MEMPHY_ZCAL                                  0x188
+
+#define ICPU_MEMPHY_ZCAL_ZCAL_CLK_SEL                     BIT(9)
+#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT(x)                 (((x) << 5) & GENMASK(8, 5))
+#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT_M                  GENMASK(8, 5)
+#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_ODT_X(x)               (((x) & GENMASK(8, 5)) >> 5)
+#define ICPU_MEMPHY_ZCAL_ZCAL_PROG(x)                     (((x) << 1) & GENMASK(4, 1))
+#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_M                      GENMASK(4, 1)
+#define ICPU_MEMPHY_ZCAL_ZCAL_PROG_X(x)                   (((x) & GENMASK(4, 1)) >> 1)
+#define ICPU_MEMPHY_ZCAL_ZCAL_ENA                         BIT(0)
+
+#define ICPU_MEMPHY_ZCAL_STAT                             0x18c
+
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ZCTRL(x)               (((x) << 12) & GENMASK(31, 12))
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ZCTRL_M                GENMASK(31, 12)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ZCTRL_X(x)             (((x) & GENMASK(31, 12)) >> 12)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPU(x)          (((x) << 8) & GENMASK(9, 8))
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPU_M           GENMASK(9, 8)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPU_X(x)        (((x) & GENMASK(9, 8)) >> 8)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPD(x)          (((x) << 6) & GENMASK(7, 6))
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPD_M           GENMASK(7, 6)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_ODTPD_X(x)        (((x) & GENMASK(7, 6)) >> 6)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PU(x)             (((x) << 4) & GENMASK(5, 4))
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PU_M              GENMASK(5, 4)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PU_X(x)           (((x) & GENMASK(5, 4)) >> 4)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PD(x)             (((x) << 2) & GENMASK(3, 2))
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PD_M              GENMASK(3, 2)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_STAT_PD_X(x)           (((x) & GENMASK(3, 2)) >> 2)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_ERR                    BIT(1)
+#define ICPU_MEMPHY_ZCAL_STAT_ZCAL_DONE                   BIT(0)
+#endif /* _MSCC_OCELOT_ICPU_CFG_H_ */
diff --git a/arch/mips/mach-mscc/include/mach/tlb.h b/arch/mips/mach-mscc/include/mach/tlb.h
new file mode 100644
index 0000000000..fdb554f551
--- /dev/null
+++ b/arch/mips/mach-mscc/include/mach/tlb.h
@@ -0,0 +1,55 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#ifndef __ASM_MACH_TLB_H
+#define __ASM_MACH_TLB_H
+
+#include <asm/mipsregs.h>
+#include <mach/common.h>
+#include <linux/sizes.h>
+
+#define TLB_HI_MASK      0xffffe000	/* EntryHi VPN2 field (bits 31:13) */
+#define TLB_LO_MASK      0x3fffffff	/* Masks off Fill bits */
+#define TLB_LO_SHIFT     6	/* PFN Start bit */
+
+#define PAGEMASK_SHIFT   13	/* PageMask Mask field start bit */
+
+#define MMU_PAGE_CACHED   (3 << 3)	/* C(5:3) Cache Coherency Attributes */
+#define MMU_PAGE_UNCACHED (2 << 3)	/* C(5:3) Cache Coherency Attributes */
+#define MMU_PAGE_DIRTY    BIT(2)	/* = Writeable */
+#define MMU_PAGE_VALID    BIT(1)
+#define MMU_PAGE_GLOBAL   BIT(0)
+#define MMU_REGIO_RO_C    (MMU_PAGE_CACHED | MMU_PAGE_VALID | MMU_PAGE_GLOBAL)
+#define MMU_REGIO_RO      (MMU_PAGE_UNCACHED | MMU_PAGE_VALID | MMU_PAGE_GLOBAL)
+#define MMU_REGIO_RW      (MMU_PAGE_DIRTY | MMU_REGIO_RO)
+#define MMU_REGIO_INVAL   (MMU_PAGE_GLOBAL)
+
+/* CP0 Config1 MMUSize field: number of TLB entries minus one */
+#define TLB_COUNT_MASK	  GENMASK(5, 0)
+#define TLB_COUNT_OFF	  25
+
+/*
+ * get_tlb_count() - number of TLB entries implemented by this CPU
+ *
+ * Reads the MMUSize field of the CP0 Config1 register, which encodes
+ * the number of TLB entries minus one - hence the "1 +" on return.
+ */
+static inline u32 get_tlb_count(void)
+{
+	register u32 config1;
+
+	config1 = read_c0_config1();
+	config1 >>= TLB_COUNT_OFF;	/* MMUSize field starts at bit 25 */
+	config1 &= TLB_COUNT_MASK;	/* 6-bit field */
+
+	return 1 + config1;
+}
+
+/*
+ * create_tlb() - program one TLB entry (a page pair)
+ *
+ * Maps 2 * @size bytes of identity-translated address space starting at
+ * @offset with a single TLB entry at @index: EntryLo0 covers
+ * [offset, offset + size) and EntryLo1 covers the following @size bytes.
+ * @size must be a supported page size (power of two, 4 KiB or larger) so
+ * that ((size >> 12) - 1) forms a valid PageMask value.
+ * @tlb_attrib1 / @tlb_attrib2 carry the MMU_* attribute bits for the two
+ * halves of the pair.
+ *
+ * The EntryLo PFN is phys >> 12 placed at bit 6, which collapses to a
+ * single shift right by TLB_LO_SHIFT (6).
+ */
+static inline void create_tlb(int index, u32 offset, u32 size, u32 tlb_attrib1,
+			      u32 tlb_attrib2)
+{
+	register u32 tlb_mask, tlb_lo0, tlb_lo1;
+
+	tlb_mask = ((size >> 12) - 1) << PAGEMASK_SHIFT;
+	tlb_lo0 = tlb_attrib1 | (offset >> TLB_LO_SHIFT);
+	tlb_lo1 = tlb_attrib2 | ((offset + size) >> TLB_LO_SHIFT);
+
+	write_one_tlb(index, tlb_mask, offset & TLB_HI_MASK,
+		      tlb_lo0 & TLB_LO_MASK, tlb_lo1 & TLB_LO_MASK);
+}
+#endif				/* __ASM_MACH_TLB_H */
diff --git a/arch/mips/mach-mscc/lowlevel_init.S b/arch/mips/mach-mscc/lowlevel_init.S
new file mode 100644
index 0000000000..8e4f0d02c8
--- /dev/null
+++ b/arch/mips/mach-mscc/lowlevel_init.S
@@ -0,0 +1,23 @@ 
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#include <asm/asm.h>
+#include <asm/regdef.h>
+
+    .set noreorder
+    .extern     vcoreiii_tlb_init
+
+/*
+ * lowlevel_init() - early init hook, runs before any RAM/stack exists.
+ * Only sets up the fixed TLB mappings via vcoreiii_tlb_init().
+ */
+LEAF(lowlevel_init)
+	/*
+	 * As we have no stack yet, we can assume the restricted
+	 * luxury of the sX-registers without saving them
+	 */
+	move	s0,ra		/* jal below clobbers ra; keep it in s0 */
+
+	jal	vcoreiii_tlb_init
+	nop			/* branch delay slot (.set noreorder) */
+	jr	s0		/* return to saved caller */
+	nop			/* branch delay slot */
+	END(lowlevel_init)
diff --git a/arch/mips/mach-mscc/reset.c b/arch/mips/mach-mscc/reset.c
new file mode 100644
index 0000000000..cbc1fd2285
--- /dev/null
+++ b/arch/mips/mach-mscc/reset.c
@@ -0,0 +1,36 @@ 
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+/*
+ * Copyright (c) 2018 Microsemi Corporation
+ */
+
+#include <common.h>
+
+#include <asm/sections.h>
+#include <asm/io.h>
+
+#include <asm/reboot.h>
+
+/*
+ * _machine_restart() - perform a full chip reset; never returns.
+ */
+void _machine_restart(void)
+{
+	register u32 resetbits = PERF_SOFT_RST_SOFT_CHIP_RST;
+	/*
+	 * NOTE(review): dummy read of the soft-reset register before the
+	 * sequence starts; value is discarded - presumably a bus-posting /
+	 * wake-up access, confirm against the datasheet.
+	 */
+	(void)readl(BASE_DEVCPU_GCB + PERF_SOFT_RST);
+
+	/*
+	 * Make sure VCore is NOT protected from reset
+	 */
+	clrbits_le32(BASE_CFG + ICPU_RESET, ICPU_RESET_CORE_RST_PROTECT);
+
+	/*
+	 * Change to SPI bitbang for SPI reset workaround...
+	 */
+	writel(ICPU_SW_MODE_SW_SPI_CS_OE(1) | ICPU_SW_MODE_SW_SPI_CS(1) |
+	       ICPU_SW_MODE_SW_PIN_CTRL_MODE, BASE_CFG + ICPU_SW_MODE);
+
+	/*
+	 * Do the global reset
+	 */
+	writel(resetbits, BASE_DEVCPU_GCB + PERF_SOFT_RST);
+
+	/* Spin until the chip reset takes effect */
+	while (1)
+		; /* NOP */
+}