[U-Boot,RFC,1/6] mtd: spi: Port SPI NOR framework from Linux

Message ID 20181128172607.10179-2-vigneshr@ti.com
State RFC
Delegated to: Jagannadha Sutradharudu Teki
Series SF: Migrate to Linux SPI NOR framework

Commit Message

Raghavendra, Vignesh Nov. 28, 2018, 5:26 p.m. UTC
This ports the SPI NOR framework from Linux to support newer SPI NOR
features such as SFDP parsing and 4-byte addressing, and to make it
easier to port upcoming features from the Linux SPI NOR framework. This
should also make it easier to completely migrate to the U-Boot MTD
framework in the future.

Signed-off-by: Vignesh R <vigneshr@ti.com>
---
 drivers/mtd/spi/spi-nor.c   | 2914 +++++++++++++++++++++++++++++++++++
 include/linux/mtd/cfi.h     |  385 +++++
 include/linux/mtd/mtd.h     |    2 +-
 include/linux/mtd/spi-nor.h |  421 +++++
 4 files changed, 3721 insertions(+), 1 deletion(-)
 create mode 100644 drivers/mtd/spi/spi-nor.c
 create mode 100644 include/linux/mtd/cfi.h
 create mode 100644 include/linux/mtd/spi-nor.h

Comments

Boris Brezillon Nov. 29, 2018, 4:04 p.m. UTC | #1
On Wed, 28 Nov 2018 22:56:02 +0530
Vignesh R <vigneshr@ti.com> wrote:

> +#if defined(CONFIG_DM_SPI) && defined(CONFIG_SPI_MEM)
> +#define spi_nor_mem_exec_op spi_mem_exec_op
> +#else
> +/*
> + * This function is to support transition to DM_SPI. Will be removed
> + * once all boards are converted to DM_SPI
> + */
> +
> +static int spi_nor_mem_exec_op(struct spi_slave *slave,
> +			       const struct spi_mem_op *op)
> +{
> +	unsigned int pos = 0;
> +	const u8 *tx_buf = NULL;
> +	u8 *rx_buf = NULL;
> +	u8 *op_buf;
> +	int op_len;
> +	u32 flag;
> +	int ret;
> +	int i;
> +
> +	if (op->data.nbytes) {
> +		if (op->data.dir == SPI_MEM_DATA_IN)
> +			rx_buf = op->data.buf.in;
> +		else
> +			tx_buf = op->data.buf.out;
> +	}
> +
> +	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
> +	op_buf = calloc(1, op_len);
> +
> +	ret = spi_claim_bus(slave);
> +	if (ret < 0)
> +		return ret;
> +
> +	op_buf[pos++] = op->cmd.opcode;
> +
> +	if (op->addr.nbytes) {
> +		for (i = 0; i < op->addr.nbytes; i++)
> +			op_buf[pos + i] = op->addr.val >>
> +				(8 * (op->addr.nbytes - i - 1));
> +
> +		pos += op->addr.nbytes;
> +	}
> +
> +	if (op->dummy.nbytes)
> +		memset(op_buf + pos, 0xff, op->dummy.nbytes);
> +
> +	/* 1st transfer: opcode + address + dummy cycles */
> +	flag = SPI_XFER_BEGIN;
> +	/* Make sure to set END bit if no tx or rx data messages follow */
> +	if (!tx_buf && !rx_buf)
> +		flag |= SPI_XFER_END;
> +
> +	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
> +	if (ret)
> +		return ret;
> +
> +	/* 2nd transfer: rx or tx data path */
> +	if (tx_buf || rx_buf) {
> +		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
> +			       rx_buf, SPI_XFER_END);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	spi_release_bus(slave);
> +
> +	for (i = 0; i < pos; i++)
> +		debug("%02x ", op_buf[i]);
> +	debug("| [%dB %s] ",
> +	      tx_buf || rx_buf ? op->data.nbytes : 0,
> +	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
> +	for (i = 0; i < op->data.nbytes; i++)
> +		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
> +	debug("[ret %d]\n", ret);
> +
> +	free(op_buf);
> +
> +	if (ret < 0)
> +		return ret;
> +
> +	return 0;
> +}
> +#endif

I'd recommend putting this alternate spi_mem_exec_op() implementation
in drivers/spi/. Either you create a drivers/spi/spi-mem-no-dm.c file
or you add #ifdef CONFIG_DM_SPI #else #endif sections directly in
drivers/spi/spi-mem.c.
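
As a rough, untested sketch of the first option (the includes, the calloc()
failure/early-return handling and the dropped debug tracing are editorial
assumptions; the body is otherwise the fallback quoted above, relocated),
drivers/spi/spi-mem-no-dm.c could look like:

/* drivers/spi/spi-mem-no-dm.c - non-DM implementation of spi_mem_exec_op() */
#include <common.h>
#include <malloc.h>
#include <spi.h>
#include <spi-mem.h>
#include <linux/errno.h>

int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	u8 *op_buf;
	int op_len, flags, ret, i;

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	/* opcode + big-endian address bytes + dummy bytes */
	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
	op_buf = calloc(1, op_len);
	if (!op_buf)
		return -ENOMEM;

	op_buf[pos++] = op->cmd.opcode;
	for (i = 0; i < op->addr.nbytes; i++)
		op_buf[pos++] = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
	memset(op_buf + pos, 0xff, op->dummy.nbytes);

	ret = spi_claim_bus(slave);
	if (ret < 0)
		goto out;

	/* 1st transfer: opcode + address + dummy cycles */
	flags = SPI_XFER_BEGIN;
	if (!tx_buf && !rx_buf)
		flags |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flags);

	/* 2nd transfer: rx or tx data path */
	if (!ret && (tx_buf || rx_buf))
		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf, rx_buf,
			       SPI_XFER_END);

	spi_release_bus(slave);
out:
	free(op_buf);
	return ret;
}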

> +const struct flash_info spi_nor_ids[] = {
> +#ifdef CONFIG_SPI_FLASH_ATMEL		/* ATMEL */
> +	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
> +	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
> +	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
> +
> +	{ "at45db011d",	INFO(0x1f2200, 0, 64 * 1024,   4, SECT_4K) },
> +	{ "at45db021d",	INFO(0x1f2300, 0, 64 * 1024,   8, SECT_4K) },
> +	{ "at45db041d",	INFO(0x1f2400, 0, 64 * 1024,   8, SECT_4K) },
> +	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024,  16, SECT_4K) },
> +	{ "at45db161d",	INFO(0x1f2600, 0, 64 * 1024,  32, SECT_4K) },
> +	{ "at45db321d",	INFO(0x1f2700, 0, 64 * 1024,  64, SECT_4K) },
> +	{ "at45db641d",	INFO(0x1f2800, 0, 64 * 1024, 128, SECT_4K) },
> +	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024,  16, SECT_4K) },
> +#endif

If you're really short on space (for the SPL build), you might want to
consider fine-grained selection of the chips you want to support. One
more reason to do that is that board manufacturers usually source SPI
NOR parts from different vendors for the same design. With
per-manufacturer selection logic, you'll have to enable several of them
to have an SPL that works on all variants.

I didn't try, but you might be able to place each NOR chip in its own
section and decide which one to keep at link time (will require some
macros to define flash_info entries + a linker script to decide which
sections you want to discard/keep at link time).

Note that I'd be okay to convert Linux flash_info table to the new
macro usage if you choose to do that.

Just a quick example:

#define SPI_FLASH_INFO(_name, ...)					\
	ll_entry_declare(struct flash_info, _name, spi_nor_ids) = {	\
		.name = #_name,						\
		__VA_ARGS__						\
	};
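
For completeness, a minimal sketch of the matching lookup inside spi-nor.c,
assuming the table is converted to such linker-list entries
(spi_nor_match_id() is an illustrative name; ll_entry_start() and
ll_entry_count() are the existing helpers from include/linker_lists.h):

#include <linker_lists.h>

static const struct flash_info *spi_nor_match_id(const u8 *id)
{
	const struct flash_info *info =
		ll_entry_start(struct flash_info, spi_nor_ids);
	const int n = ll_entry_count(struct flash_info, spi_nor_ids);
	int i;

	/* Only the entries the board actually selected end up in the list */
	for (i = 0; i < n; i++, info++) {
		if (info->id_len && !memcmp(info->id, id, info->id_len))
			return info;
	}

	return NULL;
}

spi_nor_read_id() would then walk this list instead of a fixed
spi_nor_ids[] array, and per-chip Kconfig symbols (or a generated linker
script) would decide which SPI_FLASH_INFO() entries survive the final link.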
Raghavendra, Vignesh Nov. 29, 2018, 5:26 p.m. UTC | #2
Hi Boris,

On 29/11/18 9:34 PM, Boris Brezillon wrote:
> On Wed, 28 Nov 2018 22:56:02 +0530
> Vignesh R <vigneshr@ti.com> wrote:
> 
>> +#if defined(CONFIG_DM_SPI) && defined(CONFIG_SPI_MEM)
>> +#define spi_nor_mem_exec_op spi_mem_exec_op
>> +#else
>> +/*
>> + * This function is to support transition to DM_SPI. Will be removed
>> + * once all boards are converted to DM_SPI
>> + */
>> +
>> +static int spi_nor_mem_exec_op(struct spi_slave *slave,
>> +			       const struct spi_mem_op *op)
>> +{
>> +	unsigned int pos = 0;
>> +	const u8 *tx_buf = NULL;
>> +	u8 *rx_buf = NULL;
>> +	u8 *op_buf;
>> +	int op_len;
>> +	u32 flag;
>> +	int ret;
>> +	int i;
>> +
>> +	if (op->data.nbytes) {
>> +		if (op->data.dir == SPI_MEM_DATA_IN)
>> +			rx_buf = op->data.buf.in;
>> +		else
>> +			tx_buf = op->data.buf.out;
>> +	}
>> +
>> +	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
>> +	op_buf = calloc(1, op_len);
>> +
>> +	ret = spi_claim_bus(slave);
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	op_buf[pos++] = op->cmd.opcode;
>> +
>> +	if (op->addr.nbytes) {
>> +		for (i = 0; i < op->addr.nbytes; i++)
>> +			op_buf[pos + i] = op->addr.val >>
>> +				(8 * (op->addr.nbytes - i - 1));
>> +
>> +		pos += op->addr.nbytes;
>> +	}
>> +
>> +	if (op->dummy.nbytes)
>> +		memset(op_buf + pos, 0xff, op->dummy.nbytes);
>> +
>> +	/* 1st transfer: opcode + address + dummy cycles */
>> +	flag = SPI_XFER_BEGIN;
>> +	/* Make sure to set END bit if no tx or rx data messages follow */
>> +	if (!tx_buf && !rx_buf)
>> +		flag |= SPI_XFER_END;
>> +
>> +	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
>> +	if (ret)
>> +		return ret;
>> +
>> +	/* 2nd transfer: rx or tx data path */
>> +	if (tx_buf || rx_buf) {
>> +		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
>> +			       rx_buf, SPI_XFER_END);
>> +		if (ret)
>> +			return ret;
>> +	}
>> +
>> +	spi_release_bus(slave);
>> +
>> +	for (i = 0; i < pos; i++)
>> +		debug("%02x ", op_buf[i]);
>> +	debug("| [%dB %s] ",
>> +	      tx_buf || rx_buf ? op->data.nbytes : 0,
>> +	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
>> +	for (i = 0; i < op->data.nbytes; i++)
>> +		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
>> +	debug("[ret %d]\n", ret);
>> +
>> +	free(op_buf);
>> +
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	return 0;
>> +}
>> +#endif
> 
> I'd recommend putting this alternate spi_mem_exec_op() implementation
> in drivers/spi/. Either you create a drivers/spi/spi-mem-no-dm.c file
> or you add #ifdef CONFIG_DM_SPI #else #endif sections directly in
> drivers/spi/spi-mem.c.
> 

I thought about that, but then I saw that spi-mem.c is under DM_SPI and,
given the DM_SPI migration series by Jagan, thought that this code
wouldn't need it at all. Anyway, I will move this to drivers/spi next
time around.

>> +const struct flash_info spi_nor_ids[] = {
>> +#ifdef CONFIG_SPI_FLASH_ATMEL		/* ATMEL */
>> +	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
>> +	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
>> +	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
>> +
>> +	{ "at45db011d",	INFO(0x1f2200, 0, 64 * 1024,   4, SECT_4K) },
>> +	{ "at45db021d",	INFO(0x1f2300, 0, 64 * 1024,   8, SECT_4K) },
>> +	{ "at45db041d",	INFO(0x1f2400, 0, 64 * 1024,   8, SECT_4K) },
>> +	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024,  16, SECT_4K) },
>> +	{ "at45db161d",	INFO(0x1f2600, 0, 64 * 1024,  32, SECT_4K) },
>> +	{ "at45db321d",	INFO(0x1f2700, 0, 64 * 1024,  64, SECT_4K) },
>> +	{ "at45db641d",	INFO(0x1f2800, 0, 64 * 1024, 128, SECT_4K) },
>> +	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024,  16, SECT_4K) },
>> +#endif
> 
> If you're really short on space (for the SPL build), you might want to
> consider fine-grained selection of the chips you want to support. One
> more reason to do that is that board manufacturers usually source SPI
> NOR parts from different vendors for the same design. With
> per-manufacturer selection logic, you'll have to enable several of them
> to have an SPL that works on all variants.
> 
> I didn't try, but you might be able to place each NOR chip in its own
> section and decide which one to keep at link time (will require some
> macros to define flash_info entries + a linker script to decide which
> sections you want to discard/keep at link time).
> 

IIUC, you are proposing a per-board linker script that will select/choose
which flash parts would be supported by that board's SPL?

> Note that I'd be okay to convert Linux flash_info table to the new
> macro usage if you choose to do that.
>
> Just a quick example:
> 
> #define SPI_FLASH_INFO(_name, ...)					\
> 	ll_entry_declare(struct flash_info, _name, spi_nor_ids) = {	\
> 		.name = #_name,						\
> 		__VA_ARGS__						\
> 	};
>
Boris Brezillon Nov. 29, 2018, 6:52 p.m. UTC | #3
On Thu, 29 Nov 2018 22:56:04 +0530
Vignesh R <vigneshr@ti.com> wrote:

> 
> >> +const struct flash_info spi_nor_ids[] = {
> >> +#ifdef CONFIG_SPI_FLASH_ATMEL		/* ATMEL */
> >> +	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
> >> +	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
> >> +	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
> >> +
> >> +	{ "at45db011d",	INFO(0x1f2200, 0, 64 * 1024,   4, SECT_4K) },
> >> +	{ "at45db021d",	INFO(0x1f2300, 0, 64 * 1024,   8, SECT_4K) },
> >> +	{ "at45db041d",	INFO(0x1f2400, 0, 64 * 1024,   8, SECT_4K) },
> >> +	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024,  16, SECT_4K) },
> >> +	{ "at45db161d",	INFO(0x1f2600, 0, 64 * 1024,  32, SECT_4K) },
> >> +	{ "at45db321d",	INFO(0x1f2700, 0, 64 * 1024,  64, SECT_4K) },
> >> +	{ "at45db641d",	INFO(0x1f2800, 0, 64 * 1024, 128, SECT_4K) },
> >> +	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024,  16, SECT_4K) },
> >> +#endif  
> > 
> > If you're really short on space (for the SPL build), you might want to
> > consider fine-grained selection of the chips you want to support. One
> > more reason to do that is that board manufacturers usually source SPI
> > NOR parts from different vendors for the same design. With
> > per-manufacturer selection logic, you'll have to enable several of them
> > to have an SPL that works on all variants.
> > 
> > I didn't try, but you might be able to place each NOR chip in its own
> > section and decide which one to keep at link time (will require some
> > macros to define flash_info entries + a linker script to decide which
> > sections you want to discard/keep at link time).
> >   
> 
> IIUC, you are proposing a per-board linker script that will select/choose
> which flash parts would be supported by that board's SPL?

Not exactly a per-board linker script. More something that would be
automatically generated based on a Kconfig option where you'd list all the
NORs you want to support. Of course, the default would be to have
everything enabled, but that would allow people who have hard size
constraints to shrink the spi-nor object a bit more.
Simon Goldschmidt Nov. 29, 2018, 7:11 p.m. UTC | #4
On 29.11.2018 19:52, Boris Brezillon wrote:
> On Thu, 29 Nov 2018 22:56:04 +0530
> Vignesh R <vigneshr@ti.com> wrote:
>
>>>> +const struct flash_info spi_nor_ids[] = {
>>>> +#ifdef CONFIG_SPI_FLASH_ATMEL		/* ATMEL */
>>>> +	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
>>>> +	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
>>>> +	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
>>>> +
>>>> +	{ "at45db011d",	INFO(0x1f2200, 0, 64 * 1024,   4, SECT_4K) },
>>>> +	{ "at45db021d",	INFO(0x1f2300, 0, 64 * 1024,   8, SECT_4K) },
>>>> +	{ "at45db041d",	INFO(0x1f2400, 0, 64 * 1024,   8, SECT_4K) },
>>>> +	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024,  16, SECT_4K) },
>>>> +	{ "at45db161d",	INFO(0x1f2600, 0, 64 * 1024,  32, SECT_4K) },
>>>> +	{ "at45db321d",	INFO(0x1f2700, 0, 64 * 1024,  64, SECT_4K) },
>>>> +	{ "at45db641d",	INFO(0x1f2800, 0, 64 * 1024, 128, SECT_4K) },
>>>> +	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024,  16, SECT_4K) },
>>>> +#endif
>>> If you're really short on space (for the SPL build), you might want to
>>> consider fine-grained selection of the chips you want to support. One
>>> more reason to do that is that board manufacturers usually source SPI
>>> NOR parts from different vendors for the same design. With
>>> per-manufacturer selection logic, you'll have to enable several of them
>>> to have an SPL that works on all variants.
>>>
>>> I didn't try, but you might be able to place each NOR chip in its own
>>> section and decide which one to keep at link time (will require some
>>> macros to define flash_info entries + a linker script to decide which
>>> sections you want to discard/keep at link time).
>>>    
>> IIUC, you are proposing a per-board linker script that will select/choose
>> which flash parts would be supported by that board's SPL?
> Not exactly a per-board linker script. More something that would be
> automatically generated based on a Kconfig option where you'd list all the
> NORs you want to support. Of course, the default would be to have
> everything enabled, but that would allow people who have hard size
> constraints to shrink the spi-nor object a bit more.

Just dropping the names (which are never used in SPL) gave me nearly 1K 
of code size reduction, but this was rejected:

https://patchwork.ozlabs.org/patch/980023/

Maybe we can do this from the beginning when adding this new code?
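
As a hedged sketch of what that could look like in the new table (the
SPI_NOR_ID() wrapper and the CONFIG_SPL_BUILD guard are illustrative
assumptions, not something already in this series):

#ifdef CONFIG_SPL_BUILD
#define SPI_NOR_ID(_name, _info)	{ _info }	/* no name strings in SPL */
#else
#define SPI_NOR_ID(_name, _info)	{ _name, _info }
#endif

const struct flash_info spi_nor_ids[] = {
	SPI_NOR_ID("at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K)),
	SPI_NOR_ID("at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K)),
	/* ... */
	{ },
};

The struct keeps its name pointer (left NULL in SPL), but the string
literals are no longer emitted, which is where the ~1K saving comes from.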

Simon

Patch

diff --git a/drivers/mtd/spi/spi-nor.c b/drivers/mtd/spi/spi-nor.c
new file mode 100644
index 000000000000..f5e9512ee07b
--- /dev/null
+++ b/drivers/mtd/spi/spi-nor.c
@@ -0,0 +1,2914 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on m25p80.c, by Mike Lavender (mike@steroidmicros.com), with
+ * influence from lart.c (Abraham Van Der Merwe) and mtd_dataflash.c
+ *
+ * Copyright (C) 2005, Intec Automation Inc.
+ * Copyright (C) 2014, Freescale Semiconductor, Inc.
+ *
+ * Synced from Linux v4.19
+ */
+
+#include <common.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/log2.h>
+#include <linux/math64.h>
+#include <linux/sizes.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/spi-nor.h>
+#include <spi-mem.h>
+#include <spi.h>
+
+/* Define max times to check status register before we give up. */
+
+/*
+ * For everything but full-chip erase; probably could be much smaller, but kept
+ * around for safety for now
+ */
+
+#define HZ					CONFIG_SYS_HZ
+
+#define DEFAULT_READY_WAIT_JIFFIES		(40UL * HZ)
+
+/*
+ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up
+ * for larger flash
+ */
+#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES	(40UL * HZ)
+
+#define SPI_NOR_MAX_ID_LEN	6
+#define SPI_NOR_MAX_ADDR_WIDTH	4
+
+struct flash_info {
+	char		*name;
+
+	/*
+	 * This array stores the ID bytes.
+	 * The first three bytes are the JEDEC ID.
+	 * JEDEC ID zero means "no ID" (mostly older chips).
+	 */
+	u8		id[SPI_NOR_MAX_ID_LEN];
+	u8		id_len;
+
+	/* The size listed here is what works with SPINOR_OP_SE, which isn't
+	 * necessarily called a "sector" by the vendor.
+	 */
+	unsigned	sector_size;
+	u16		n_sectors;
+
+	u16		page_size;
+	u16		addr_width;
+
+	u16		flags;
+#define SECT_4K			BIT(0)	/* SPINOR_OP_BE_4K works uniformly */
+#define SPI_NOR_NO_ERASE	BIT(1)	/* No erase command needed */
+#define SST_WRITE		BIT(2)	/* use SST byte programming */
+#define SPI_NOR_NO_FR		BIT(3)	/* Can't do fastread */
+#define SECT_4K_PMC		BIT(4)	/* SPINOR_OP_BE_4K_PMC works uniformly */
+#define SPI_NOR_DUAL_READ	BIT(5)	/* Flash supports Dual Read */
+#define SPI_NOR_QUAD_READ	BIT(6)	/* Flash supports Quad Read */
+#define USE_FSR			BIT(7)	/* use flag status register */
+#define SPI_NOR_HAS_LOCK	BIT(8)	/* Flash supports lock/unlock via SR */
+#define SPI_NOR_HAS_TB		BIT(9)	/*
+					 * Flash SR has Top/Bottom (TB) protect
+					 * bit. Must be used with
+					 * SPI_NOR_HAS_LOCK.
+					 */
+#define	SPI_S3AN		BIT(10)	/*
+					 * Xilinx Spartan 3AN In-System Flash
+					 * (MFR cannot be used for probing
+					 * because it has the same value as
+					 * ATMEL flashes)
+					 */
+#define SPI_NOR_4B_OPCODES	BIT(11)	/*
+					 * Use dedicated 4byte address op codes
+					 * to support memory size above 128Mib.
+					 */
+#define NO_CHIP_ERASE		BIT(12) /* Chip does not support chip erase */
+#define SPI_NOR_SKIP_SFDP	BIT(13)	/* Skip parsing of SFDP tables */
+#define USE_CLSR		BIT(14)	/* use CLSR command */
+
+	int	(*quad_enable)(struct spi_nor *nor);
+};
+
+#define JEDEC_MFR(info)	((info)->id[0])
+
+#if defined(CONFIG_DM_SPI) && defined(CONFIG_SPI_MEM)
+#define spi_nor_mem_exec_op spi_mem_exec_op
+#else
+/*
+ * This function is to support transition to DM_SPI. Will be removed
+ * once all boards are converted to DM_SPI
+ */
+
+static int spi_nor_mem_exec_op(struct spi_slave *slave,
+			       const struct spi_mem_op *op)
+{
+	unsigned int pos = 0;
+	const u8 *tx_buf = NULL;
+	u8 *rx_buf = NULL;
+	u8 *op_buf;
+	int op_len;
+	u32 flag;
+	int ret;
+	int i;
+
+	if (op->data.nbytes) {
+		if (op->data.dir == SPI_MEM_DATA_IN)
+			rx_buf = op->data.buf.in;
+		else
+			tx_buf = op->data.buf.out;
+	}
+
+	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
+	op_buf = calloc(1, op_len);
+
+	ret = spi_claim_bus(slave);
+	if (ret < 0)
+		return ret;
+
+	op_buf[pos++] = op->cmd.opcode;
+
+	if (op->addr.nbytes) {
+		for (i = 0; i < op->addr.nbytes; i++)
+			op_buf[pos + i] = op->addr.val >>
+				(8 * (op->addr.nbytes - i - 1));
+
+		pos += op->addr.nbytes;
+	}
+
+	if (op->dummy.nbytes)
+		memset(op_buf + pos, 0xff, op->dummy.nbytes);
+
+	/* 1st transfer: opcode + address + dummy cycles */
+	flag = SPI_XFER_BEGIN;
+	/* Make sure to set END bit if no tx or rx data messages follow */
+	if (!tx_buf && !rx_buf)
+		flag |= SPI_XFER_END;
+
+	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
+	if (ret)
+		return ret;
+
+	/* 2nd transfer: rx or tx data path */
+	if (tx_buf || rx_buf) {
+		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
+			       rx_buf, SPI_XFER_END);
+		if (ret)
+			return ret;
+	}
+
+	spi_release_bus(slave);
+
+	for (i = 0; i < pos; i++)
+		debug("%02x ", op_buf[i]);
+	debug("| [%dB %s] ",
+	      tx_buf || rx_buf ? op->data.nbytes : 0,
+	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
+	for (i = 0; i < op->data.nbytes; i++)
+		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
+	debug("[ret %d]\n", ret);
+
+	free(op_buf);
+
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+#endif
+
+static int spi_nor_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len)
+{
+	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(code, 1),
+					  SPI_MEM_OP_NO_ADDR,
+					  SPI_MEM_OP_NO_DUMMY,
+					  SPI_MEM_OP_DATA_IN(len, NULL, 1));
+	int ret;
+
+	op.data.buf.in = val;
+	ret = spi_nor_mem_exec_op(nor->spi, &op);
+	if (ret < 0)
+		dev_dbg(nor->dev, "error %d reading %x\n", ret,
+			code);
+
+	return ret;
+}
+
+static int spi_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len)
+{
+	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(opcode, 1),
+					  SPI_MEM_OP_NO_ADDR,
+					  SPI_MEM_OP_NO_DUMMY,
+					  SPI_MEM_OP_DATA_OUT(len, NULL, 1));
+	op.data.buf.out = buf;
+	return spi_nor_mem_exec_op(nor->spi, &op);
+
+}
+
+static ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len,
+			   u_char *buf)
+{
+	struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->read_opcode, 1),
+				   SPI_MEM_OP_ADDR(nor->addr_width, from, 1),
+				   SPI_MEM_OP_DUMMY(nor->read_dummy, 1),
+				   SPI_MEM_OP_DATA_IN(len, buf, 1));
+	size_t remaining = len;
+	int ret;
+
+	/* get transfer protocols. */
+	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->read_proto);
+	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->read_proto);
+	op.dummy.buswidth = op.addr.buswidth;
+	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->read_proto);
+
+	/* convert the dummy cycles to the number of bytes */
+	op.dummy.nbytes = (nor->read_dummy * op.dummy.buswidth) / 8;
+
+	while (remaining) {
+		op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
+#if defined(CONFIG_DM_SPI) && defined(CONFIG_SPI_MEM)
+		ret = spi_mem_adjust_op_size(nor->spi, &op);
+		if (ret)
+			return ret;
+#endif
+
+		ret = spi_nor_mem_exec_op(nor->spi, &op);
+		if (ret)
+			return ret;
+
+		op.addr.val += op.data.nbytes;
+		remaining -= op.data.nbytes;
+		op.data.buf.in += op.data.nbytes;
+	}
+
+	return len;
+}
+
+static ssize_t spi_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
+			    const u_char *buf)
+{
+	struct spi_mem_op op =
+			SPI_MEM_OP(SPI_MEM_OP_CMD(nor->program_opcode, 1),
+				   SPI_MEM_OP_ADDR(nor->addr_width, to, 1),
+				   SPI_MEM_OP_NO_DUMMY,
+				   SPI_MEM_OP_DATA_OUT(len, buf, 1));
+	size_t remaining = len;
+	int ret;
+
+	/* get transfer protocols. */
+	op.cmd.buswidth = spi_nor_get_protocol_inst_nbits(nor->write_proto);
+	op.addr.buswidth = spi_nor_get_protocol_addr_nbits(nor->write_proto);
+	op.data.buswidth = spi_nor_get_protocol_data_nbits(nor->write_proto);
+
+	if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second)
+		op.addr.nbytes = 0;
+
+	while (remaining) {
+		op.data.nbytes = remaining < UINT_MAX ? remaining : UINT_MAX;
+#if defined(CONFIG_DM_SPI) && defined(CONFIG_SPI_MEM)
+		ret = spi_mem_adjust_op_size(nor->spi, &op);
+		if (ret)
+			return ret;
+#endif
+
+		ret = spi_nor_mem_exec_op(nor->spi, &op);
+		if (ret)
+			return ret;
+
+		op.addr.val += op.data.nbytes;
+		remaining -= op.data.nbytes;
+		op.data.buf.out += op.data.nbytes;
+	}
+
+	return len;
+}
+
+/*
+ * Read the status register.
+ * Return the status register value, or a negative value
+ * if an error occurred.
+ */
+static int read_sr(struct spi_nor *nor)
+{
+	int ret;
+	u8 val;
+
+	ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1);
+	if (ret < 0) {
+		pr_debug("error %d reading SR\n", (int) ret);
+		return ret;
+	}
+
+	return val;
+}
+
+/*
+ * Read the flag status register.
+ * Return the flag status register value, or a negative
+ * value if an error occurred.
+ */
+static int read_fsr(struct spi_nor *nor)
+{
+	int ret;
+	u8 val;
+
+	ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1);
+	if (ret < 0) {
+		pr_debug("error %d reading FSR\n", ret);
+		return ret;
+	}
+
+	return val;
+}
+
+/*
+ * Read the configuration register.
+ * Return the configuration register value, or a negative
+ * value if an error occurred.
+ */
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+static int read_cr(struct spi_nor *nor)
+{
+	int ret;
+	u8 val;
+
+	ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1);
+	if (ret < 0) {
+		dev_dbg(nor->dev, "error %d reading CR\n", ret);
+		return ret;
+	}
+
+	return val;
+}
+#endif
+
+/*
+ * Write status register 1 byte
+ * Returns negative if error occurred.
+ */
+static inline int write_sr(struct spi_nor *nor, u8 val)
+{
+	nor->cmd_buf[0] = val;
+	return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1);
+}
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct spi_nor *nor)
+{
+	return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0);
+}
+
+/*
+ * Send write disable instruction to the chip.
+ */
+static inline int write_disable(struct spi_nor *nor)
+{
+	return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0);
+}
+
+static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd)
+{
+	return mtd->priv;
+}
+
+
+static u8 spi_nor_convert_opcode(u8 opcode, const u8 table[][2], size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; i++)
+		if (table[i][0] == opcode)
+			return table[i][1];
+
+	/* No conversion found, keep input op code. */
+	return opcode;
+}
+
+static inline u8 spi_nor_convert_3to4_read(u8 opcode)
+{
+	static const u8 spi_nor_3to4_read[][2] = {
+		{ SPINOR_OP_READ,	SPINOR_OP_READ_4B },
+		{ SPINOR_OP_READ_FAST,	SPINOR_OP_READ_FAST_4B },
+		{ SPINOR_OP_READ_1_1_2,	SPINOR_OP_READ_1_1_2_4B },
+		{ SPINOR_OP_READ_1_2_2,	SPINOR_OP_READ_1_2_2_4B },
+		{ SPINOR_OP_READ_1_1_4,	SPINOR_OP_READ_1_1_4_4B },
+		{ SPINOR_OP_READ_1_4_4,	SPINOR_OP_READ_1_4_4_4B },
+	};
+
+	return spi_nor_convert_opcode(opcode, spi_nor_3to4_read,
+				      ARRAY_SIZE(spi_nor_3to4_read));
+}
+
+static inline u8 spi_nor_convert_3to4_program(u8 opcode)
+{
+	static const u8 spi_nor_3to4_program[][2] = {
+		{ SPINOR_OP_PP,		SPINOR_OP_PP_4B },
+		{ SPINOR_OP_PP_1_1_4,	SPINOR_OP_PP_1_1_4_4B },
+		{ SPINOR_OP_PP_1_4_4,	SPINOR_OP_PP_1_4_4_4B },
+	};
+
+	return spi_nor_convert_opcode(opcode, spi_nor_3to4_program,
+				      ARRAY_SIZE(spi_nor_3to4_program));
+}
+
+static inline u8 spi_nor_convert_3to4_erase(u8 opcode)
+{
+	static const u8 spi_nor_3to4_erase[][2] = {
+		{ SPINOR_OP_BE_4K,	SPINOR_OP_BE_4K_4B },
+		{ SPINOR_OP_BE_32K,	SPINOR_OP_BE_32K_4B },
+		{ SPINOR_OP_SE,		SPINOR_OP_SE_4B },
+	};
+
+	return spi_nor_convert_opcode(opcode, spi_nor_3to4_erase,
+				      ARRAY_SIZE(spi_nor_3to4_erase));
+}
+
+static void spi_nor_set_4byte_opcodes(struct spi_nor *nor,
+				      const struct flash_info *info)
+{
+	/* Do some manufacturer fixups first */
+	switch (JEDEC_MFR(info)) {
+	case SNOR_MFR_SPANSION:
+		/* No small sector erase for 4-byte command set */
+		nor->erase_opcode = SPINOR_OP_SE;
+		nor->mtd.erasesize = info->sector_size;
+		break;
+
+	default:
+		break;
+	}
+
+	nor->read_opcode = spi_nor_convert_3to4_read(nor->read_opcode);
+	nor->program_opcode = spi_nor_convert_3to4_program(nor->program_opcode);
+	nor->erase_opcode = spi_nor_convert_3to4_erase(nor->erase_opcode);
+}
+
+/* Enable/disable 4-byte addressing mode. */
+static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info,
+			    int enable)
+{
+	int status;
+	bool need_wren = false;
+	u8 cmd;
+
+	switch (JEDEC_MFR(info)) {
+	case SNOR_MFR_MICRON:
+		/* Some Micron need WREN command; all will accept it */
+		need_wren = true;
+	case SNOR_MFR_MACRONIX:
+	case SNOR_MFR_WINBOND:
+		if (need_wren)
+			write_enable(nor);
+
+		cmd = enable ? SPINOR_OP_EN4B : SPINOR_OP_EX4B;
+		status = nor->write_reg(nor, cmd, NULL, 0);
+		if (need_wren)
+			write_disable(nor);
+
+		if (!status && !enable &&
+		    JEDEC_MFR(info) == SNOR_MFR_WINBOND) {
+			/*
+			 * On Winbond W25Q256FV, leaving 4byte mode causes
+			 * the Extended Address Register to be set to 1, so all
+			 * 3-byte-address reads come from the second 16M.
+			 * We must clear the register to enable normal behavior.
+			 */
+			write_enable(nor);
+			nor->cmd_buf[0] = 0;
+			nor->write_reg(nor, SPINOR_OP_WREAR, nor->cmd_buf, 1);
+			write_disable(nor);
+		}
+
+		return status;
+	default:
+		/* Spansion style */
+		nor->cmd_buf[0] = enable << 7;
+		return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1);
+	}
+}
+
+static inline int spi_nor_sr_ready(struct spi_nor *nor)
+{
+	int sr = read_sr(nor);
+	if (sr < 0)
+		return sr;
+
+	if (nor->flags & SNOR_F_USE_CLSR && sr & (SR_E_ERR | SR_P_ERR)) {
+		if (sr & SR_E_ERR)
+			dev_dbg(nor->dev, "Erase Error occurred\n");
+		else
+			dev_dbg(nor->dev, "Programming Error occurred\n");
+
+		nor->write_reg(nor, SPINOR_OP_CLSR, NULL, 0);
+		return -EIO;
+	}
+
+	return !(sr & SR_WIP);
+}
+
+static inline int spi_nor_fsr_ready(struct spi_nor *nor)
+{
+	int fsr = read_fsr(nor);
+	if (fsr < 0)
+		return fsr;
+
+	if (fsr & (FSR_E_ERR | FSR_P_ERR)) {
+		if (fsr & FSR_E_ERR)
+			dev_dbg(nor->dev, "Erase operation failed.\n");
+		else
+			dev_dbg(nor->dev, "Program operation failed.\n");
+
+		if (fsr & FSR_PT_ERR)
+			dev_dbg(nor->dev,
+			"Attempted to modify a protected sector.\n");
+
+		nor->write_reg(nor, SPINOR_OP_CLFSR, NULL, 0);
+		return -EIO;
+	}
+
+	return fsr & FSR_READY;
+}
+
+static int spi_nor_ready(struct spi_nor *nor)
+{
+	int sr, fsr;
+
+	sr = spi_nor_sr_ready(nor);
+	if (sr < 0)
+		return sr;
+	fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1;
+	if (fsr < 0)
+		return fsr;
+	return sr && fsr;
+}
+
+/*
+ * Service routine to read status register until ready, or timeout occurs.
+ * Returns non-zero if error.
+ */
+static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor,
+						unsigned long timeout)
+{
+	unsigned long timebase;
+	int ret;
+
+	timebase = get_timer(0);
+
+	while (get_timer(timebase) < timeout) {
+		ret = spi_nor_ready(nor);
+		if (ret < 0)
+			return ret;
+		if (ret)
+			return 0;
+	}
+
+	dev_err(nor->dev, "flash operation timed out\n");
+
+	return -ETIMEDOUT;
+}
+
+static int spi_nor_wait_till_ready(struct spi_nor *nor)
+{
+	return spi_nor_wait_till_ready_with_timeout(nor,
+						    DEFAULT_READY_WAIT_JIFFIES);
+}
+
+/*
+ * Erase the whole flash memory
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_chip(struct spi_nor *nor)
+{
+	dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10));
+
+	return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0);
+}
+
+#ifdef __UBOOT__
+static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+	int ret = 0;
+
+	if (nor->prepare) {
+		ret = nor->prepare(nor, ops);
+		if (ret) {
+			dev_dbg(nor->dev, "failed in the preparation.\n");
+			return ret;
+		}
+	}
+	return ret;
+}
+
+static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops)
+{
+	if (nor->unprepare)
+		nor->unprepare(nor, ops);
+}
+#endif
+
+/*
+ * Initiate the erasure of a single sector
+ */
+static int spi_nor_erase_sector(struct spi_nor *nor, u32 addr)
+{
+	u8 buf[SPI_NOR_MAX_ADDR_WIDTH];
+	int i;
+
+	if (nor->erase)
+		return nor->erase(nor, addr);
+
+	/*
+	 * Default implementation, if driver doesn't have a specialized HW
+	 * control
+	 */
+	for (i = nor->addr_width - 1; i >= 0; i--) {
+		buf[i] = addr & 0xff;
+		addr >>= 8;
+	}
+
+	return nor->write_reg(nor, nor->erase_opcode, buf, nor->addr_width);
+}
+
+/*
+ * Erase an address range on the nor chip.  The address range may span one or
+ * more erase sectors.  Return an error if there is a problem erasing.
+ */
+static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	u32 addr, len;
+	uint32_t rem;
+	int ret;
+
+	dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr,
+			(long long)instr->len);
+
+	div_u64_rem(instr->len, mtd->erasesize, &rem);
+	if (rem)
+		return -EINVAL;
+
+	addr = instr->addr;
+	len = instr->len;
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE);
+	if (ret)
+		return ret;
+#endif
+
+	/* whole-chip erase? */
+	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
+		unsigned long timeout;
+
+		write_enable(nor);
+
+		if (erase_chip(nor)) {
+			ret = -EIO;
+			goto erase_err;
+		}
+
+		/*
+		 * Scale the timeout linearly with the size of the flash, with
+		 * a minimum calibrated to an old 2MB flash. We could try to
+		 * pull these from CFI/SFDP, but these values should be good
+		 * enough for now.
+		 */
+		timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES,
+			      CHIP_ERASE_2MB_READY_WAIT_JIFFIES *
+			      (unsigned long)(mtd->size / SZ_2M));
+		ret = spi_nor_wait_till_ready_with_timeout(nor, timeout);
+		if (ret)
+			goto erase_err;
+
+	/* REVISIT in some cases we could speed up erasing large regions
+	 * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K.  We may have set up
+	 * to use "small sector erase", but that's not always optimal.
+	 */
+
+	/* "sector"-at-a-time erase */
+	} else {
+		while (len) {
+			write_enable(nor);
+
+			ret = spi_nor_erase_sector(nor, addr);
+			if (ret)
+				goto erase_err;
+
+			addr += mtd->erasesize;
+			len -= mtd->erasesize;
+
+			ret = spi_nor_wait_till_ready(nor);
+			if (ret)
+				goto erase_err;
+		}
+	}
+
+	write_disable(nor);
+
+erase_err:
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE);
+#endif
+	return ret;
+}
+
+#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
+/* Write status register and ensure bits in mask match written values */
+static int write_sr_and_check(struct spi_nor *nor, u8 status_new, u8 mask)
+{
+	int ret;
+
+	write_enable(nor);
+	ret = write_sr(nor, status_new);
+	if (ret)
+		return ret;
+
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret)
+		return ret;
+
+	ret = read_sr(nor);
+	if (ret < 0)
+		return ret;
+
+	return ((ret & mask) != (status_new & mask)) ? -EIO : 0;
+}
+
+static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
+				 uint64_t *len)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+	int shift = ffs(mask) - 1;
+	int pow;
+
+	if (!(sr & mask)) {
+		/* No protection */
+		*ofs = 0;
+		*len = 0;
+	} else {
+		pow = ((sr & mask) ^ mask) >> shift;
+		*len = mtd->size >> pow;
+		if (nor->flags & SNOR_F_HAS_SR_TB && sr & SR_TB)
+			*ofs = 0;
+		else
+			*ofs = mtd->size - *len;
+	}
+}
+
+/*
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
+ */
+static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+				    u8 sr, bool locked)
+{
+	loff_t lock_offs;
+	uint64_t lock_len;
+
+	if (!len)
+		return 1;
+
+	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
+
+	if (locked)
+		/* Requested range is a sub-range of locked range */
+		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+	else
+		/* Requested range does not overlap with locked range */
+		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+			    u8 sr)
+{
+	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+			      u8 sr)
+{
+	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
+}
+
+/*
+ * Lock a region of the flash. Compatible with ST Micro and similar flash.
+ * Supports the block protection bits BP{0,1,2} in the status register
+ * (SR). Does not support these features found in newer SR bitfields:
+ *   - SEC: sector/block protect - only handle SEC=0 (block protect)
+ *   - CMP: complement protect - only support CMP=0 (range is not complemented)
+ *
+ * Support for the following is provided conditionally for some flash:
+ *   - TB: top/bottom protect
+ *
+ * Sample table portion for 8MB flash (Winbond w25q64fw):
+ *
+ *   SEC  |  TB   |  BP2  |  BP1  |  BP0  |  Prot Length  | Protected Portion
+ *  --------------------------------------------------------------------------
+ *    X   |   X   |   0   |   0   |   0   |  NONE         | NONE
+ *    0   |   0   |   0   |   0   |   1   |  128 KB       | Upper 1/64
+ *    0   |   0   |   0   |   1   |   0   |  256 KB       | Upper 1/32
+ *    0   |   0   |   0   |   1   |   1   |  512 KB       | Upper 1/16
+ *    0   |   0   |   1   |   0   |   0   |  1 MB         | Upper 1/8
+ *    0   |   0   |   1   |   0   |   1   |  2 MB         | Upper 1/4
+ *    0   |   0   |   1   |   1   |   0   |  4 MB         | Upper 1/2
+ *    X   |   X   |   1   |   1   |   1   |  8 MB         | ALL
+ *  ------|-------|-------|-------|-------|---------------|-------------------
+ *    0   |   1   |   0   |   0   |   1   |  128 KB       | Lower 1/64
+ *    0   |   1   |   0   |   1   |   0   |  256 KB       | Lower 1/32
+ *    0   |   1   |   0   |   1   |   1   |  512 KB       | Lower 1/16
+ *    0   |   1   |   1   |   0   |   0   |  1 MB         | Lower 1/8
+ *    0   |   1   |   1   |   0   |   1   |  2 MB         | Lower 1/4
+ *    0   |   1   |   1   |   1   |   0   |  4 MB         | Lower 1/2
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	int status_old, status_new;
+	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+	u8 shift = ffs(mask) - 1, pow, val;
+	loff_t lock_len;
+	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+	bool use_top;
+
+	status_old = read_sr(nor);
+	if (status_old < 0)
+		return status_old;
+
+	/* If nothing in our range is unlocked, we don't need to do anything */
+	if (stm_is_locked_sr(nor, ofs, len, status_old))
+		return 0;
+
+	/* If anything below us is unlocked, we can't use 'bottom' protection */
+	if (!stm_is_locked_sr(nor, 0, ofs, status_old))
+		can_be_bottom = false;
+
+	/* If anything above us is unlocked, we can't use 'top' protection */
+	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+				status_old))
+		can_be_top = false;
+
+	if (!can_be_bottom && !can_be_top)
+		return -EINVAL;
+
+	/* Prefer top, if both are valid */
+	use_top = can_be_top;
+
+	/* lock_len: length of region that should end up locked */
+	if (use_top)
+		lock_len = mtd->size - ofs;
+	else
+		lock_len = ofs + len;
+
+	/*
+	 * Need smallest pow such that:
+	 *
+	 *   1 / (2^pow) <= (len / size)
+	 *
+	 * so (assuming power-of-2 size) we do:
+	 *
+	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
+	 */
+	pow = ilog2(mtd->size) - ilog2(lock_len);
+	val = mask - (pow << shift);
+	if (val & ~mask)
+		return -EINVAL;
+	/* Don't "lock" with no region! */
+	if (!(val & mask))
+		return -EINVAL;
+
+	status_new = (status_old & ~mask & ~SR_TB) | val;
+
+	/* Disallow further writes if WP pin is asserted */
+	status_new |= SR_SRWD;
+
+	if (!use_top)
+		status_new |= SR_TB;
+
+	/* Don't bother if they're the same */
+	if (status_new == status_old)
+		return 0;
+
+	/* Only modify protection if it will not unlock other areas */
+	if ((status_new & mask) < (status_old & mask))
+		return -EINVAL;
+
+	return write_sr_and_check(nor, status_new, mask);
+}
+
+/*
+ * Unlock a region of the flash. See stm_lock() for more info
+ *
+ * Returns negative on errors, 0 on success.
+ */
+static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	int status_old, status_new;
+	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
+	u8 shift = ffs(mask) - 1, pow, val;
+	loff_t lock_len;
+	bool can_be_top = true, can_be_bottom = nor->flags & SNOR_F_HAS_SR_TB;
+	bool use_top;
+
+	status_old = read_sr(nor);
+	if (status_old < 0)
+		return status_old;
+
+	/* If nothing in our range is locked, we don't need to do anything */
+	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
+		return 0;
+
+	/* If anything below us is locked, we can't use 'top' protection */
+	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
+		can_be_top = false;
+
+	/* If anything above us is locked, we can't use 'bottom' protection */
+	if (!stm_is_unlocked_sr(nor, ofs + len, mtd->size - (ofs + len),
+				status_old))
+		can_be_bottom = false;
+
+	if (!can_be_bottom && !can_be_top)
+		return -EINVAL;
+
+	/* Prefer top, if both are valid */
+	use_top = can_be_top;
+
+	/* lock_len: length of region that should remain locked */
+	if (use_top)
+		lock_len = mtd->size - (ofs + len);
+	else
+		lock_len = ofs;
+
+	/*
+	 * Need largest pow such that:
+	 *
+	 *   1 / (2^pow) >= (len / size)
+	 *
+	 * so (assuming power-of-2 size) we do:
+	 *
+	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
+	 */
+	pow = ilog2(mtd->size) - order_base_2(lock_len);
+	if (lock_len == 0) {
+		val = 0; /* fully unlocked */
+	} else {
+		val = mask - (pow << shift);
+		/* Some power-of-two sizes are not supported */
+		if (val & ~mask)
+			return -EINVAL;
+	}
+
+	status_new = (status_old & ~mask & ~SR_TB) | val;
+
+	/* Don't protect status register if we're fully unlocked */
+	if (lock_len == 0)
+		status_new &= ~SR_SRWD;
+
+	if (!use_top)
+		status_new |= SR_TB;
+
+	/* Don't bother if they're the same */
+	if (status_new == status_old)
+		return 0;
+
+	/* Only modify protection if it will not lock other areas */
+	if ((status_new & mask) > (status_old & mask))
+		return -EINVAL;
+
+	return write_sr_and_check(nor, status_new, mask);
+}
+
+/*
+ * Check if a region of the flash is (completely) locked. See stm_lock() for
+ * more info.
+ *
+ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and
+ * negative on errors.
+ */
+static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len)
+{
+	int status;
+
+	status = read_sr(nor);
+	if (status < 0)
+		return status;
+
+	return stm_is_locked_sr(nor, ofs, len, status);
+}
+#endif /* CONFIG_SPI_FLASH_STMICRO || CONFIG_SPI_FLASH_SST */
+
+static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK);
+	if (ret)
+		return ret;
+#endif
+	ret = nor->flash_lock(nor, ofs, len);
+
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK);
+#endif
+	return ret;
+}
+
+static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+	if (ret)
+		return ret;
+#endif
+
+	ret = nor->flash_unlock(nor, ofs, len);
+
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+#endif
+	return ret;
+}
+
+static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK);
+	if (ret)
+		return ret;
+#endif
+
+	ret = nor->flash_is_locked(nor, ofs, len);
+
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK);
+#endif
+	return ret;
+}
+
+
+/* Used when the "_ext_id" is two bytes at most */
+#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
+		.id = {							\
+			((_jedec_id) >> 16) & 0xff,			\
+			((_jedec_id) >> 8) & 0xff,			\
+			(_jedec_id) & 0xff,				\
+			((_ext_id) >> 8) & 0xff,			\
+			(_ext_id) & 0xff,				\
+			},						\
+		.id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 2 : 0))),	\
+		.sector_size = (_sector_size),				\
+		.n_sectors = (_n_sectors),				\
+		.page_size = 256,					\
+		.flags = (_flags),
+
+#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags)	\
+		.id = {							\
+			((_jedec_id) >> 16) & 0xff,			\
+			((_jedec_id) >> 8) & 0xff,			\
+			(_jedec_id) & 0xff,				\
+			((_ext_id) >> 16) & 0xff,			\
+			((_ext_id) >> 8) & 0xff,			\
+			(_ext_id) & 0xff,				\
+			},						\
+		.id_len = 6,						\
+		.sector_size = (_sector_size),				\
+		.n_sectors = (_n_sectors),				\
+		.page_size = 256,					\
+		.flags = (_flags),
+
+/* NOTE: double check command sets and memory organization when you add
+ * more nor chips.  This current list focuses on newer chips, which
+ * have been converging on command sets which include JEDEC ID.
+ *
+ * All newly added entries should describe *hardware* and should use SECT_4K
+ * (or SECT_4K_PMC) if the hardware supports erasing 4 KiB sectors. For usage
+ * scenarios excluding small sectors there is a config option that can be
+ * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS.
+ * For historical (and compatibility) reasons (before we got the above config)
+ * some old entries may be missing the 4K flag.
+ */
+const struct flash_info spi_nor_ids[] = {
+#ifdef CONFIG_SPI_FLASH_ATMEL		/* ATMEL */
+	/* Atmel -- some are (confusingly) marketed as "DataFlash" */
+	{ "at26df321",  INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) },
+	{ "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) },
+
+	{ "at45db011d",	INFO(0x1f2200, 0, 64 * 1024,   4, SECT_4K) },
+	{ "at45db021d",	INFO(0x1f2300, 0, 64 * 1024,   8, SECT_4K) },
+	{ "at45db041d",	INFO(0x1f2400, 0, 64 * 1024,   8, SECT_4K) },
+	{ "at45db081d", INFO(0x1f2500, 0, 64 * 1024,  16, SECT_4K) },
+	{ "at45db161d",	INFO(0x1f2600, 0, 64 * 1024,  32, SECT_4K) },
+	{ "at45db321d",	INFO(0x1f2700, 0, 64 * 1024,  64, SECT_4K) },
+	{ "at45db641d",	INFO(0x1f2800, 0, 64 * 1024, 128, SECT_4K) },
+	{ "at26df081a", INFO(0x1f4501, 0, 64 * 1024,  16, SECT_4K) },
+#endif
+#ifdef CONFIG_SPI_FLASH_EON		/* EON */
+	/* EON -- en25xxx */
+	{ "en25q32b",   INFO(0x1c3016, 0, 64 * 1024,   64, 0) },
+	{ "en25q64",    INFO(0x1c3017, 0, 64 * 1024,  128, SECT_4K) },
+	{ "en25qh128",  INFO(0x1c7018, 0, 64 * 1024,  256, 0) },
+	{ "en25s64",	INFO(0x1c3817, 0, 64 * 1024,  128, SECT_4K) },
+#endif
+#ifdef CONFIG_SPI_FLASH_GIGADEVICE	/* GIGADEVICE */
+	/* GigaDevice */
+	{
+		"gd25q16", INFO(0xc84015, 0, 64 * 1024,  32,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
+		"gd25q32", INFO(0xc84016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
+		"gd25lq32", INFO(0xc86016, 0, 64 * 1024, 64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
+		"gd25q64", INFO(0xc84017, 0, 64 * 1024, 128,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+#endif
+#ifdef CONFIG_SPI_FLASH_ISSI		/* ISSI */
+	/* ISSI */
+	{ "is25lq040b", INFO(0x9d4013, 0, 64 * 1024,   8,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25lp032",	INFO(0x9d6016, 0, 64 * 1024,  64, 0) },
+	{ "is25lp064",	INFO(0x9d6017, 0, 64 * 1024, 128, 0) },
+	{ "is25lp128",  INFO(0x9d6018, 0, 64 * 1024, 256,
+			SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25lp256",  INFO(0x9d6019, 0, 64 * 1024, 512,
+			SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "is25wp032",  INFO(0x9d7016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp064",  INFO(0x9d7017, 0, 64 * 1024, 128,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "is25wp128",  INFO(0x9d7018, 0, 64 * 1024, 256,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+#endif
+#ifdef CONFIG_SPI_FLASH_MACRONIX	/* MACRONIX */
+	/* Macronix */
+	{ "mx25l2005a",  INFO(0xc22012, 0, 64 * 1024,   4, SECT_4K) },
+	{ "mx25l4005a",  INFO(0xc22013, 0, 64 * 1024,   8, SECT_4K) },
+	{ "mx25l8005",   INFO(0xc22014, 0, 64 * 1024,  16, 0) },
+	{ "mx25l1606e",  INFO(0xc22015, 0, 64 * 1024,  32, SECT_4K) },
+	{ "mx25l3205d",  INFO(0xc22016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "mx25l6405d",  INFO(0xc22017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "mx25u2033e",  INFO(0xc22532, 0, 64 * 1024,   4, SECT_4K) },
+	{ "mx25u1635e",  INFO(0xc22535, 0, 64 * 1024,  32, SECT_4K) },
+	{ "mx25u6435f",  INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) },
+	{ "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) },
+	{ "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) },
+	{ "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) },
+	{ "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) },
+	{ "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+	{ "mx66l1g45g",  INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "mx25l1633e",	 INFO(0xc22415, 0, 64 * 1024,   32, SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES | SECT_4K) },
+#endif
+
+#ifdef CONFIG_SPI_FLASH_STMICRO		/* STMICRO */
+	/* Micron */
+	{ "n25q016a",	 INFO(0x20bb15, 0, 64 * 1024,   32, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q032",	 INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
+	{ "n25q032a",	 INFO(0x20bb16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
+	{ "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "n25q256ax1",  INFO(0x20bb19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
+	{ "n25q512a",    INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "n25q512ax3",  INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) },
+	{ "n25q00",      INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+	{ "n25q00a",     INFO(0x20bb21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+	{ "mt25qu02g",   INFO(0x20bb22, 0, 64 * 1024, 4096, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) },
+#endif
+#ifdef CONFIG_SPI_FLASH_SPANSION	/* SPANSION */
+	/* Spansion/Cypress -- single (large) sector size only, at least
+	 * for the chips listed here (without boot sectors).
+	 */
+	{ "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, USE_CLSR) },
+	{ "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25fl512s",  INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25fl512s_256k",  INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25fl512s_64k",  INFO(0x010220, 0x4d01, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25fl512s_512k",  INFO(0x010220, 0x4f00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
+	{ "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
+	{ "s25fl128s",  INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) },
+	{ "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
+	{ "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
+	{ "s25sl032a",  INFO(0x010215,      0,  64 * 1024,  64, 0) },
+	{ "s25sl064a",  INFO(0x010216,      0,  64 * 1024, 128, 0) },
+	{ "s25fl116k",  INFO(0x014015,      0,  64 * 1024,  32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "s25fl164k",  INFO(0x014017,      0,  64 * 1024, 128, SECT_4K) },
+	{ "s25fl208k",  INFO(0x014014,      0,  64 * 1024,  16, SECT_4K | SPI_NOR_DUAL_READ) },
+	{ "s25fl128l",  INFO(0x016018,      0,  64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) },
+#endif
+#ifdef CONFIG_SPI_FLASH_SST		/* SST */
+	/* SST -- large erase sizes are "overlays", "sectors" are 4K */
+	{ "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
+	{ "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+	{ "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) },
+	{ "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) },
+	{ "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) },
+	{ "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
+	{ "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
+	{ "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
+	{ "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
+	{ "sst25wf040b", INFO(0x621613, 0, 64 * 1024,  8, SECT_4K) },
+	{ "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
+	{ "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
+	{ "sst26vf064b", INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "sst26wf016",  INFO(0xbf2651, 0, 64 * 1024,  32, SECT_4K) },
+	{ "sst26wf032",	 INFO(0xbf2622, 0, 64 * 1024,  64, SECT_4K) },
+	{ "sst26wf064",	 INFO(0xbf2643, 0, 64 * 1024, 128, SECT_4K) },
+#endif
+#ifdef CONFIG_SPI_FLASH_STMICRO		/* STMICRO */
+	/* ST Microelectronics -- newer production may have feature updates */
+	{ "m25p10",  INFO(0x202011,  0,  32 * 1024,   4, 0) },
+	{ "m25p20",  INFO(0x202012,  0,  64 * 1024,   4, 0) },
+	{ "m25p40",  INFO(0x202013,  0,  64 * 1024,   8, 0) },
+	{ "m25p80",  INFO(0x202014,  0,  64 * 1024,  16, 0) },
+	{ "m25p16",  INFO(0x202015,  0,  64 * 1024,  32, 0) },
+	{ "m25p32",  INFO(0x202016,  0,  64 * 1024,  64, 0) },
+	{ "m25p64",  INFO(0x202017,  0,  64 * 1024, 128, 0) },
+	{ "m25p128", INFO(0x202018,  0, 256 * 1024,  64, 0) },
+	{ "m25pe16", INFO(0x208015,  0, 64 * 1024, 32, SECT_4K) },
+	{ "m25px16",    INFO(0x207115,  0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "m25px64",    INFO(0x207117,  0, 64 * 1024, 128, 0) },
+#endif
+#ifdef CONFIG_SPI_FLASH_WINBOND		/* WINBOND */
+	/* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */
+	{ "w25x05", INFO(0xef3010, 0, 64 * 1024,  1,  SECT_4K) },
+	{ "w25x10", INFO(0xef3011, 0, 64 * 1024,  2,  SECT_4K) },
+	{ "w25x20", INFO(0xef3012, 0, 64 * 1024,  4,  SECT_4K) },
+	{ "w25x40", INFO(0xef3013, 0, 64 * 1024,  8,  SECT_4K) },
+	{ "w25x80", INFO(0xef3014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25x16", INFO(0xef3015, 0, 64 * 1024,  32, SECT_4K) },
+	{
+		"w25q16dw", INFO(0xef6015, 0, 64 * 1024,  32,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{ "w25x32", INFO(0xef3016, 0, 64 * 1024,  64, SECT_4K) },
+	{ "w25q20cl", INFO(0xef4012, 0, 64 * 1024,  4, SECT_4K) },
+	{ "w25q20bw", INFO(0xef5012, 0, 64 * 1024,  4, SECT_4K) },
+	{ "w25q20ew", INFO(0xef6012, 0, 64 * 1024,  4, SECT_4K) },
+	{ "w25q32", INFO(0xef4016, 0, 64 * 1024,  64, SECT_4K) },
+	{
+		"w25q32dw", INFO(0xef6016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
+		"w25q32jv", INFO(0xef7016, 0, 64 * 1024,  64,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{ "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
+	{ "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
+	{
+		"w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{
+		"w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256,
+			SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ |
+			SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB)
+	},
+	{ "w25q80", INFO(0xef5014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25q80bl", INFO(0xef4014, 0, 64 * 1024,  16, SECT_4K) },
+	{ "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) },
+	{ "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024,
+			SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) },
+#endif
+#ifdef CONFIG_SPI_FLASH_XMC
+	/* XMC (Wuhan Xinxin Semiconductor Manufacturing Corp.) */
+	{ "XM25QH64A", INFO(0x207017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+	{ "XM25QH128A", INFO(0x207018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+#endif
+	{ },
+};
+
+static const struct flash_info *spi_nor_read_id(struct spi_nor *nor)
+{
+	int			tmp;
+	u8			id[SPI_NOR_MAX_ID_LEN];
+	const struct flash_info	*info;
+
+	if (!ARRAY_SIZE(spi_nor_ids))
+		return ERR_PTR(-ENODEV);
+
+	tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN);
+	if (tmp < 0) {
+		dev_dbg(nor->dev, "error %d reading JEDEC ID\n", tmp);
+		return ERR_PTR(tmp);
+	}
+
+	for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) {
+		info = &spi_nor_ids[tmp];
+		if (info->id_len) {
+			if (!memcmp(info->id, id, info->id_len))
+				return &spi_nor_ids[tmp];
+		}
+	}
+	dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %02x, %02x\n",
+		id[0], id[1], id[2]);
+	return ERR_PTR(-ENODEV);
+}
+
+static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
+			size_t *retlen, u_char *buf)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	int ret;
+
+	dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len);
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ);
+	if (ret)
+		return ret;
+#endif
+
+	while (len) {
+		loff_t addr = from;
+
+		ret = nor->read(nor, addr, len, buf);
+		if (ret == 0) {
+			/* We shouldn't see 0-length reads */
+			ret = -EIO;
+			goto read_err;
+		}
+		if (ret < 0)
+			goto read_err;
+
+		*retlen += ret;
+		buf += ret;
+		from += ret;
+		len -= ret;
+	}
+	ret = 0;
+
+read_err:
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ);
+#endif
+	return ret;
+}
+
+#ifdef CONFIG_SPI_FLASH_SST
+static int sst_write(struct mtd_info *mtd, loff_t to, size_t len,
+		size_t *retlen, const u_char *buf)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	size_t actual;
+	int ret;
+
+	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+	if (ret)
+		return ret;
+#endif
+
+	write_enable(nor);
+
+	nor->sst_write_second = false;
+
+	actual = to % 2;
+	/* Start write from odd address. */
+	if (actual) {
+		nor->program_opcode = SPINOR_OP_BP;
+
+		/* write one byte. */
+		ret = nor->write(nor, to, 1, buf);
+		if (ret < 0)
+			goto sst_write_err;
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto sst_write_err;
+	}
+	to += actual;
+
+	/* Write out most of the data here. */
+	for (; actual < len - 1; actual += 2) {
+		nor->program_opcode = SPINOR_OP_AAI_WP;
+
+		/* write two bytes. */
+		ret = nor->write(nor, to, 2, buf + actual);
+		if (ret < 0)
+			goto sst_write_err;
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto sst_write_err;
+		to += 2;
+		nor->sst_write_second = true;
+	}
+	nor->sst_write_second = false;
+
+	write_disable(nor);
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret)
+		goto sst_write_err;
+
+	/* Write out trailing byte if it exists. */
+	if (actual != len) {
+		write_enable(nor);
+
+		nor->program_opcode = SPINOR_OP_BP;
+		ret = nor->write(nor, to, 1, buf + actual);
+		if (ret < 0)
+			goto sst_write_err;
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto sst_write_err;
+		write_disable(nor);
+		actual += 1;
+	}
+sst_write_err:
+	*retlen += actual;
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+#endif
+	return ret;
+}
+#endif
+
+/*
+ * Write an address range to the nor chip.  Data must be written in
+ * FLASH_PAGESIZE chunks.  The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+	size_t *retlen, const u_char *buf)
+{
+	struct spi_nor *nor = mtd_to_spi_nor(mtd);
+	size_t page_offset, page_remain, i;
+	ssize_t ret;
+
+	dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len);
+
+#ifdef __UBOOT__
+	ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE);
+	if (ret)
+		return ret;
+#endif
+
+	for (i = 0; i < len; ) {
+		ssize_t written;
+		loff_t addr = to + i;
+
+		/*
+		 * If page_size is a power of two, the offset can be quickly
+		 * calculated with an AND operation. On the other cases we
+		 * need to do a modulus operation (more expensive).
+		 * Power of two numbers have only one bit set and we can use
+		 * the instruction hweight32 to detect if we need to do a
+		 * modulus (do_div()) or not.
+		 */
+		if (hweight32(nor->page_size) == 1) {
+			page_offset = addr & (nor->page_size - 1);
+		} else {
+			uint64_t aux = addr;
+
+			page_offset = do_div(aux, nor->page_size);
+		}
+		/* the size of data remaining on the first page */
+		page_remain = min_t(size_t,
+				    nor->page_size - page_offset, len - i);
+
+		write_enable(nor);
+		ret = nor->write(nor, addr, page_remain, buf + i);
+		if (ret < 0)
+			goto write_err;
+		written = ret;
+
+		ret = spi_nor_wait_till_ready(nor);
+		if (ret)
+			goto write_err;
+		*retlen += written;
+		i += written;
+		if (written != page_remain) {
+			dev_err(nor->dev,
+				"While writing %zu bytes written %zd bytes\n",
+				page_remain, written);
+			ret = -EIO;
+			goto write_err;
+		}
+	}
+
+write_err:
+#ifdef __UBOOT__
+	spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE);
+#endif
+	return ret;
+}
+
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+/**
+ * macronix_quad_enable() - set QE bit in Status Register.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Status Register.
+ *
+ * bit 6 of the Status Register is the QE bit for Macronix like QSPI memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int macronix_quad_enable(struct spi_nor *nor)
+{
+	int ret, val;
+
+	val = read_sr(nor);
+	if (val < 0)
+		return val;
+	if (val & SR_QUAD_EN_MX)
+		return 0;
+
+	write_enable(nor);
+
+	write_sr(nor, val | SR_QUAD_EN_MX);
+
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret)
+		return ret;
+
+	ret = read_sr(nor);
+	if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) {
+		dev_err(nor->dev, "Macronix Quad bit not set\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif
+
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+/*
+ * Write status Register and configuration register with 2 bytes
+ * The first byte will be written to the status register, while the
+ * second byte will be written to the configuration register.
+ * Return negative if error occurred.
+ */
+static int write_sr_cr(struct spi_nor *nor, u8 *sr_cr)
+{
+	int ret;
+
+	write_enable(nor);
+
+	ret = nor->write_reg(nor, SPINOR_OP_WRSR, sr_cr, 2);
+	if (ret < 0) {
+		dev_dbg(nor->dev,
+			"error while writing configuration register\n");
+		return -EINVAL;
+	}
+
+	ret = spi_nor_wait_till_ready(nor);
+	if (ret) {
+		dev_dbg(nor->dev,
+			"timeout while writing configuration register\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * spansion_quad_enable() - set QE bit in Configuration Register.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function is kept for legacy purpose because it has been used for a
+ * long time without anybody complaining but it should be considered as
+ * deprecated and maybe buggy.
+ * First, this function doesn't care about the previous values of the Status
+ * and Configuration Registers when it sets the QE bit (bit 1) in the
+ * Configuration Register: all other bits are cleared, which may have unwanted
+ * side effects like removing some block protections.
+ * Secondly, it uses the Read Configuration Register (35h) instruction, though
+ * a few very old memories don't support it. If a pull-up
+ * resistor is present on the MISO/IO1 line, we might still be able to pass the
+ * "read back" test because the QSPI memory doesn't recognize the command,
+ * so leaves the MISO/IO1 line state unchanged, hence read_cr() returns 0xFF.
+ *
+ * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_quad_enable(struct spi_nor *nor)
+{
+	u8 sr_cr[2] = {0, CR_QUAD_EN_SPAN};
+	int ret;
+
+	ret = write_sr_cr(nor, sr_cr);
+	if (ret)
+		return ret;
+
+	/* read back and check it */
+	ret = read_cr(nor);
+	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+		dev_err(nor->dev, "Spansion Quad bit not set\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#if CONFIG_IS_ENABLED(SPI_FLASH_SFDP_SUPPORT)
+/**
+ * spansion_no_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories not supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_no_read_cr_quad_enable(struct spi_nor *nor)
+{
+	u8 sr_cr[2];
+	int ret;
+
+	/* Keep the current value of the Status Register. */
+	ret = read_sr(nor);
+	if (ret < 0) {
+		dev_dbg(nor->dev, "error while reading status register\n");
+		return -EINVAL;
+	}
+	sr_cr[0] = ret;
+	sr_cr[1] = CR_QUAD_EN_SPAN;
+
+	return write_sr_cr(nor, sr_cr);
+}
+
+/**
+ * spansion_read_cr_quad_enable() - set QE bit in Configuration Register.
+ * @nor:	pointer to a 'struct spi_nor'
+ *
+ * Set the Quad Enable (QE) bit in the Configuration Register.
+ * This function should be used with QSPI memories supporting the Read
+ * Configuration Register (35h) instruction.
+ *
+ * bit 1 of the Configuration Register is the QE bit for Spansion like QSPI
+ * memories.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spansion_read_cr_quad_enable(struct spi_nor *nor)
+{
+	u8 sr_cr[2];
+	int ret;
+
+	/* Check current Quad Enable bit value. */
+	ret = read_cr(nor);
+	if (ret < 0) {
+		dev_dbg(nor->dev, "error while reading configuration register\n");
+		return -EINVAL;
+	}
+
+	if (ret & CR_QUAD_EN_SPAN)
+		return 0;
+
+	sr_cr[1] = ret | CR_QUAD_EN_SPAN;
+
+	/* Keep the current value of the Status Register. */
+	ret = read_sr(nor);
+	if (ret < 0) {
+		dev_dbg(nor->dev, "error while reading status register\n");
+		return -EINVAL;
+	}
+	sr_cr[0] = ret;
+
+	ret = write_sr_cr(nor, sr_cr);
+	if (ret)
+		return ret;
+
+	/* Read back and check it. */
+	ret = read_cr(nor);
+	if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
+		dev_dbg(nor->dev, "Spansion Quad bit not set\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
+#endif /* CONFIG_SPI_FLASH_SPANSION || CONFIG_SPI_FLASH_WINBOND */
+
+struct spi_nor_read_command {
+	u8			num_mode_clocks;
+	u8			num_wait_states;
+	u8			opcode;
+	enum spi_nor_protocol	proto;
+};
+
+struct spi_nor_pp_command {
+	u8			opcode;
+	enum spi_nor_protocol	proto;
+};
+
+enum spi_nor_read_command_index {
+	SNOR_CMD_READ,
+	SNOR_CMD_READ_FAST,
+	SNOR_CMD_READ_1_1_1_DTR,
+
+	/* Dual SPI */
+	SNOR_CMD_READ_1_1_2,
+	SNOR_CMD_READ_1_2_2,
+	SNOR_CMD_READ_2_2_2,
+	SNOR_CMD_READ_1_2_2_DTR,
+
+	/* Quad SPI */
+	SNOR_CMD_READ_1_1_4,
+	SNOR_CMD_READ_1_4_4,
+	SNOR_CMD_READ_4_4_4,
+	SNOR_CMD_READ_1_4_4_DTR,
+
+	/* Octo SPI */
+	SNOR_CMD_READ_1_1_8,
+	SNOR_CMD_READ_1_8_8,
+	SNOR_CMD_READ_8_8_8,
+	SNOR_CMD_READ_1_8_8_DTR,
+
+	SNOR_CMD_READ_MAX
+};
+
+enum spi_nor_pp_command_index {
+	SNOR_CMD_PP,
+
+	/* Quad SPI */
+	SNOR_CMD_PP_1_1_4,
+	SNOR_CMD_PP_1_4_4,
+	SNOR_CMD_PP_4_4_4,
+
+	/* Octo SPI */
+	SNOR_CMD_PP_1_1_8,
+	SNOR_CMD_PP_1_8_8,
+	SNOR_CMD_PP_8_8_8,
+
+	SNOR_CMD_PP_MAX
+};
+
+struct spi_nor_flash_parameter {
+	u64				size;
+	u32				page_size;
+
+	struct spi_nor_hwcaps		hwcaps;
+	struct spi_nor_read_command	reads[SNOR_CMD_READ_MAX];
+	struct spi_nor_pp_command	page_programs[SNOR_CMD_PP_MAX];
+
+	int (*quad_enable)(struct spi_nor *nor);
+};
+
+static void
+spi_nor_set_read_settings(struct spi_nor_read_command *read,
+			  u8 num_mode_clocks,
+			  u8 num_wait_states,
+			  u8 opcode,
+			  enum spi_nor_protocol proto)
+{
+	read->num_mode_clocks = num_mode_clocks;
+	read->num_wait_states = num_wait_states;
+	read->opcode = opcode;
+	read->proto = proto;
+}
+
+static void
+spi_nor_set_pp_settings(struct spi_nor_pp_command *pp,
+			u8 opcode,
+			enum spi_nor_protocol proto)
+{
+	pp->opcode = opcode;
+	pp->proto = proto;
+}
+
+#ifdef CONFIG_SPI_FLASH_SFDP_SUPPORT
+/*
+ * Serial Flash Discoverable Parameters (SFDP) parsing.
+ */
+
+/**
+ * spi_nor_read_sfdp() - read Serial Flash Discoverable Parameters.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @addr:	offset in the SFDP area to start reading data from
+ * @len:	number of bytes to read
+ * @buf:	buffer where the SFDP data are copied into (dma-safe memory)
+ *
+ * Whatever the actual numbers of bytes for address and dummy cycles are
+ * for (Fast) Read commands, the Read SFDP (5Ah) instruction is always
+ * followed by a 3-byte address and 8 dummy clock cycles.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_read_sfdp(struct spi_nor *nor, u32 addr,
+			     size_t len, void *buf)
+{
+	u8 addr_width, read_opcode, read_dummy;
+	int ret;
+
+	read_opcode = nor->read_opcode;
+	addr_width = nor->addr_width;
+	read_dummy = nor->read_dummy;
+
+	nor->read_opcode = SPINOR_OP_RDSFDP;
+	nor->addr_width = 3;
+	nor->read_dummy = 8;
+
+	while (len) {
+		ret = nor->read(nor, addr, len, (u8 *)buf);
+		if (!ret || ret > len) {
+			ret = -EIO;
+			goto read_err;
+		}
+		if (ret < 0)
+			goto read_err;
+
+		buf += ret;
+		addr += ret;
+		len -= ret;
+	}
+	ret = 0;
+
+read_err:
+	nor->read_opcode = read_opcode;
+	nor->addr_width = addr_width;
+	nor->read_dummy = read_dummy;
+
+	return ret;
+}
+
+#ifndef __UBOOT__
+/**
+ * spi_nor_read_sfdp_dma_unsafe() - read Serial Flash Discoverable Parameters.
+ * @nor:	pointer to a 'struct spi_nor'
+ * @addr:	offset in the SFDP area to start reading data from
+ * @len:	number of bytes to read
+ * @buf:	buffer where the SFDP data are copied into
+ *
+ * Wrap spi_nor_read_sfdp() using a kmalloc'ed bounce buffer as @buf is now not
+ * guaranteed to be dma-safe.
+ *
+ * Return: -ENOMEM if kmalloc() fails, the return code of spi_nor_read_sfdp()
+ *          otherwise.
+ */
+static int spi_nor_read_sfdp_dma_unsafe(struct spi_nor *nor, u32 addr,
+					size_t len, void *buf)
+{
+	void *dma_safe_buf;
+	int ret;
+
+	dma_safe_buf = kmalloc(len, GFP_KERNEL);
+	if (!dma_safe_buf)
+		return -ENOMEM;
+
+	ret = spi_nor_read_sfdp(nor, addr, len, dma_safe_buf);
+	memcpy(buf, dma_safe_buf, len);
+	kfree(dma_safe_buf);
+
+	return ret;
+}
+#endif /* __UBOOT__ */
+
+struct sfdp_parameter_header {
+	u8		id_lsb;
+	u8		minor;
+	u8		major;
+	u8		length; /* in double words */
+	u8		parameter_table_pointer[3]; /* byte address */
+	u8		id_msb;
+};
+
+#define SFDP_PARAM_HEADER_ID(p)	(((p)->id_msb << 8) | (p)->id_lsb)
+#define SFDP_PARAM_HEADER_PTP(p) \
+	(((p)->parameter_table_pointer[2] << 16) | \
+	 ((p)->parameter_table_pointer[1] <<  8) | \
+	 ((p)->parameter_table_pointer[0] <<  0))
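+
+/*
+ * Illustrative example (not from the JESD216 text): a header whose
+ * parameter_table_pointer[] bytes read {0x30, 0x00, 0x00} points at byte
+ * address 0x000030, i.e. byte 0 holds the least significant byte.
+ */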
+
+#define SFDP_BFPT_ID		0xff00	/* Basic Flash Parameter Table */
+#define SFDP_SECTOR_MAP_ID	0xff81	/* Sector Map Table */
+
+#define SFDP_SIGNATURE		0x50444653U
+#define SFDP_JESD216_MAJOR	1
+#define SFDP_JESD216_MINOR	0
+#define SFDP_JESD216A_MINOR	5
+#define SFDP_JESD216B_MINOR	6
+
+struct sfdp_header {
+	u32		signature; /* 0x50444653U <=> "SFDP" */
+	u8		minor;
+	u8		major;
+	u8		nph; /* 0-based number of parameter headers */
+	u8		unused;
+
+	/* Basic Flash Parameter Table. */
+	struct sfdp_parameter_header	bfpt_header;
+};
+
+/* Basic Flash Parameter Table */
+
+/*
+ * JESD216 rev B defines a Basic Flash Parameter Table of 16 DWORDs.
+ * They are indexed from 1 but C arrays are indexed from 0.
+ */
+#define BFPT_DWORD(i)		((i) - 1)
+#define BFPT_DWORD_MAX		16
+
+/* The first version of JESD216 defined only 9 DWORDs. */
+#define BFPT_DWORD_MAX_JESD216			9
+
+/* 1st DWORD. */
+#define BFPT_DWORD1_FAST_READ_1_1_2		BIT(16)
+#define BFPT_DWORD1_ADDRESS_BYTES_MASK		GENMASK(18, 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_ONLY	(0x0UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_3_OR_4	(0x1UL << 17)
+#define BFPT_DWORD1_ADDRESS_BYTES_4_ONLY	(0x2UL << 17)
+#define BFPT_DWORD1_DTR				BIT(19)
+#define BFPT_DWORD1_FAST_READ_1_2_2		BIT(20)
+#define BFPT_DWORD1_FAST_READ_1_4_4		BIT(21)
+#define BFPT_DWORD1_FAST_READ_1_1_4		BIT(22)
+
+/* 5th DWORD. */
+#define BFPT_DWORD5_FAST_READ_2_2_2		BIT(0)
+#define BFPT_DWORD5_FAST_READ_4_4_4		BIT(4)
+
+/* 11th DWORD. */
+#define BFPT_DWORD11_PAGE_SIZE_SHIFT		4
+#define BFPT_DWORD11_PAGE_SIZE_MASK		GENMASK(7, 4)
+
+/* 15th DWORD. */
+
+/*
+ * (from JESD216 rev B)
+ * Quad Enable Requirements (QER):
+ * - 000b: Device does not have a QE bit. Device detects 1-1-4 and 1-4-4
+ *         reads based on instruction. DQ3/HOLD# functions are held during
+ *         the instruction phase.
+ * - 001b: QE is bit 1 of status register 2. It is set via Write Status with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ *         Writing only one byte to the status register has the side-effect of
+ *         clearing status register 2, including the QE bit. The 100b code is
+ *         used if writing one byte to the status register does not modify
+ *         status register 2.
+ * - 010b: QE is bit 6 of status register 1. It is set via Write Status with
+ *         one data byte where bit 6 is one.
+ *         [...]
+ * - 011b: QE is bit 7 of status register 2. It is set via Write status
+ *         register 2 instruction 3Eh with one data byte where bit 7 is one.
+ *         [...]
+ *         The status register 2 is read using instruction 3Fh.
+ * - 100b: QE is bit 1 of status register 2. It is set via Write Status with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ *         In contrast to the 001b code, writing one byte to the status
+ *         register does not modify status register 2.
+ * - 101b: QE is bit 1 of status register 2. Status register 1 is read using
+ *         Read Status instruction 05h. Status register2 is read using
+ *         instruction 35h. QE is set via Write Status instruction 01h with
+ *         two data bytes where bit 1 of the second byte is one.
+ *         [...]
+ */
+#define BFPT_DWORD15_QER_MASK			GENMASK(22, 20)
+#define BFPT_DWORD15_QER_NONE			(0x0UL << 20) /* Micron */
+#define BFPT_DWORD15_QER_SR2_BIT1_BUGGY		(0x1UL << 20)
+#define BFPT_DWORD15_QER_SR1_BIT6		(0x2UL << 20) /* Macronix */
+#define BFPT_DWORD15_QER_SR2_BIT7		(0x3UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1_NO_RD		(0x4UL << 20)
+#define BFPT_DWORD15_QER_SR2_BIT1		(0x5UL << 20) /* Spansion */
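+
+/*
+ * Worked example (illustrative): a DWORD15 value of 0x00500000 masks to
+ * BFPT_DWORD15_QER_SR2_BIT1 (101b), i.e. the QE bit is bit 1 of status
+ * register 2, read with 35h and written together with SR1 via 01h.
+ */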
+
+struct sfdp_bfpt {
+	u32	dwords[BFPT_DWORD_MAX];
+};
+
+/* Fast Read settings. */
+
+static inline void
+spi_nor_set_read_settings_from_bfpt(struct spi_nor_read_command *read,
+				    u16 half,
+				    enum spi_nor_protocol proto)
+{
+	read->num_mode_clocks = (half >> 5) & 0x07;
+	read->num_wait_states = (half >> 0) & 0x1f;
+	read->opcode = (half >> 8) & 0xff;
+	read->proto = proto;
+}
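+
+/*
+ * Example (illustrative): a settings half-word of 0x6b08 decodes to opcode
+ * 0x6b, 0 mode clocks ((0x08 >> 5) & 0x07) and 8 wait states (0x08 & 0x1f).
+ */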
+
+struct sfdp_bfpt_read {
+	/* The Fast Read x-y-z hardware capability in params->hwcaps.mask. */
+	u32			hwcaps;
+
+	/*
+	 * The <supported_bit> bit in <supported_dword> BFPT DWORD tells us
+	 * whether the Fast Read x-y-z command is supported.
+	 */
+	u32			supported_dword;
+	u32			supported_bit;
+
+	/*
+	 * The half-word at offset <settings_shift> in <settings_dword> BFPT
+	 * DWORD encodes the op code, the number of mode clocks and the number
+	 * of wait states to be used by Fast Read x-y-z command.
+	 */
+	u32			settings_dword;
+	u32			settings_shift;
+
+	/* The SPI protocol for this Fast Read x-y-z command. */
+	enum spi_nor_protocol	proto;
+};
+
+static const struct sfdp_bfpt_read sfdp_bfpt_reads[] = {
+	/* Fast Read 1-1-2 */
+	{
+		SNOR_HWCAPS_READ_1_1_2,
+		BFPT_DWORD(1), BIT(16),	/* Supported bit */
+		BFPT_DWORD(4), 0,	/* Settings */
+		SNOR_PROTO_1_1_2,
+	},
+
+	/* Fast Read 1-2-2 */
+	{
+		SNOR_HWCAPS_READ_1_2_2,
+		BFPT_DWORD(1), BIT(20),	/* Supported bit */
+		BFPT_DWORD(4), 16,	/* Settings */
+		SNOR_PROTO_1_2_2,
+	},
+
+	/* Fast Read 2-2-2 */
+	{
+		SNOR_HWCAPS_READ_2_2_2,
+		BFPT_DWORD(5),  BIT(0),	/* Supported bit */
+		BFPT_DWORD(6), 16,	/* Settings */
+		SNOR_PROTO_2_2_2,
+	},
+
+	/* Fast Read 1-1-4 */
+	{
+		SNOR_HWCAPS_READ_1_1_4,
+		BFPT_DWORD(1), BIT(22),	/* Supported bit */
+		BFPT_DWORD(3), 16,	/* Settings */
+		SNOR_PROTO_1_1_4,
+	},
+
+	/* Fast Read 1-4-4 */
+	{
+		SNOR_HWCAPS_READ_1_4_4,
+		BFPT_DWORD(1), BIT(21),	/* Supported bit */
+		BFPT_DWORD(3), 0,	/* Settings */
+		SNOR_PROTO_1_4_4,
+	},
+
+	/* Fast Read 4-4-4 */
+	{
+		SNOR_HWCAPS_READ_4_4_4,
+		BFPT_DWORD(5), BIT(4),	/* Supported bit */
+		BFPT_DWORD(7), 16,	/* Settings */
+		SNOR_PROTO_4_4_4,
+	},
+};
+
+struct sfdp_bfpt_erase {
+	/*
+	 * The half-word at offset <shift> in DWORD <dword> encodes the
+	 * op code and erase sector size to be used by Sector Erase commands.
+	 */
+	u32			dword;
+	u32			shift;
+};
+
+static const struct sfdp_bfpt_erase sfdp_bfpt_erases[] = {
+	/* Erase Type 1 in DWORD8 bits[15:0] */
+	{BFPT_DWORD(8), 0},
+
+	/* Erase Type 2 in DWORD8 bits[31:16] */
+	{BFPT_DWORD(8), 16},
+
+	/* Erase Type 3 in DWORD9 bits[15:0] */
+	{BFPT_DWORD(9), 0},
+
+	/* Erase Type 4 in DWORD9 bits[31:16] */
+	{BFPT_DWORD(9), 16},
+};
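+
+/*
+ * Example (illustrative): if DWORD8 bits[15:0] read 0x200c, Erase Type 1
+ * uses opcode 0x20 with a sector size of 2^0x0c = 4 KiB; a half-word of
+ * 0x0000 means that erase type is not supported.
+ */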
+
+static int spi_nor_hwcaps_read2cmd(u32 hwcaps);
+
+/**
+ * spi_nor_parse_bfpt() - read and parse the Basic Flash Parameter Table.
+ * @nor:		pointer to a 'struct spi_nor'
+ * @bfpt_header:	pointer to the 'struct sfdp_parameter_header' describing
+ *			the Basic Flash Parameter Table length and version
+ * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
+ *			filled
+ *
+ * The Basic Flash Parameter Table is the main and only mandatory table as
+ * defined by the SFDP (JESD216) specification.
+ * It provides us with the total size (memory density) of the data array and
+ * the number of address bytes for Fast Read, Page Program and Sector Erase
+ * commands.
+ * For Fast READ commands, it also gives the number of mode clock cycles and
+ * wait states (regrouped in the number of dummy clock cycles) for each
+ * supported instruction op code.
+ * For Page Program, the page size is now available since JESD216 rev A, however
+ * the supported instruction op codes are still not provided.
+ * For Sector Erase commands, this table stores the supported instruction op
+ * codes and the associated sector sizes.
+ * Finally, the Quad Enable Requirements (QER) are also available since JESD216
+ * rev A. The QER bits encode the manufacturer dependent procedure to be
+ * executed to set the Quad Enable (QE) bit in some internal register of the
+ * Quad SPI memory. Indeed the QE bit, when it exists, must be set before
+ * sending any Quad SPI command to the memory. Actually, setting the QE bit
+ * tells the memory to reassign its WP# and HOLD#/RESET# pins to functions IO2
+ * and IO3 hence enabling 4 (Quad) I/O lines.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_bfpt(struct spi_nor *nor,
+			      const struct sfdp_parameter_header *bfpt_header,
+			      struct spi_nor_flash_parameter *params)
+{
+	struct mtd_info *mtd = &nor->mtd;
+	struct sfdp_bfpt bfpt;
+	size_t len;
+	int i, cmd, err;
+	u32 addr;
+	u16 half;
+
+	/* JESD216 Basic Flash Parameter Table length is at least 9 DWORDs. */
+	if (bfpt_header->length < BFPT_DWORD_MAX_JESD216)
+		return -EINVAL;
+
+	/* Read the Basic Flash Parameter Table. */
+	len = min_t(size_t, sizeof(bfpt),
+		    bfpt_header->length * sizeof(u32));
+	addr = SFDP_PARAM_HEADER_PTP(bfpt_header);
+	memset(&bfpt, 0, sizeof(bfpt));
+	err = spi_nor_read_sfdp(nor,  addr, len, &bfpt);
+	if (err < 0)
+		return err;
+
+	/* Fix endianness of the BFPT DWORDs. */
+	for (i = 0; i < BFPT_DWORD_MAX; i++)
+		bfpt.dwords[i] = le32_to_cpu(bfpt.dwords[i]);
+
+	/* Number of address bytes. */
+	switch (bfpt.dwords[BFPT_DWORD(1)] & BFPT_DWORD1_ADDRESS_BYTES_MASK) {
+	case BFPT_DWORD1_ADDRESS_BYTES_3_ONLY:
+		nor->addr_width = 3;
+		break;
+
+	case BFPT_DWORD1_ADDRESS_BYTES_4_ONLY:
+		nor->addr_width = 4;
+		break;
+
+	default:
+		break;
+	}
+
+	/* Flash Memory Density (in bits). */
+	params->size = bfpt.dwords[BFPT_DWORD(2)];
+	if (params->size & BIT(31)) {
+		params->size &= ~BIT(31);
+
+		/*
+		 * Prevent overflows on params->size. Anyway, a NOR of 2^64
+		 * bits is unlikely to exist so this error probably means
+		 * the BFPT we are reading is corrupted/wrong.
+		 */
+		if (params->size > 63)
+			return -EINVAL;
+
+		params->size = 1ULL << params->size;
+	} else {
+		params->size++;
+	}
+	params->size >>= 3; /* Convert to bytes. */
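+	/*
+	 * Example (illustrative): DWORD2 = 0x00ffffff gives 0x00ffffff + 1 =
+	 * 16 Mibits, i.e. 2 MiB; DWORD2 = 0x80000021 uses the 2^N encoding
+	 * and gives 2^33 bits, i.e. 1 GiB.
+	 */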
+
+	/* Fast Read settings. */
+	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_reads); i++) {
+		const struct sfdp_bfpt_read *rd = &sfdp_bfpt_reads[i];
+		struct spi_nor_read_command *read;
+
+		if (!(bfpt.dwords[rd->supported_dword] & rd->supported_bit)) {
+			params->hwcaps.mask &= ~rd->hwcaps;
+			continue;
+		}
+
+		params->hwcaps.mask |= rd->hwcaps;
+		cmd = spi_nor_hwcaps_read2cmd(rd->hwcaps);
+		read = &params->reads[cmd];
+		half = bfpt.dwords[rd->settings_dword] >> rd->settings_shift;
+		spi_nor_set_read_settings_from_bfpt(read, half, rd->proto);
+	}
+
+	/* Sector Erase settings. */
+	for (i = 0; i < ARRAY_SIZE(sfdp_bfpt_erases); i++) {
+		const struct sfdp_bfpt_erase *er = &sfdp_bfpt_erases[i];
+		u32 erasesize;
+		u8 opcode;
+
+		half = bfpt.dwords[er->dword] >> er->shift;
+		erasesize = half & 0xff;
+
+		/* erasesize == 0 means this Erase Type is not supported. */
+		if (!erasesize)
+			continue;
+
+		erasesize = 1U << erasesize;
+		opcode = (half >> 8) & 0xff;
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+		if (erasesize == SZ_4K) {
+			nor->erase_opcode = opcode;
+			mtd->erasesize = erasesize;
+			break;
+		}
+#endif
+		if (!mtd->erasesize || mtd->erasesize < erasesize) {
+			nor->erase_opcode = opcode;
+			mtd->erasesize = erasesize;
+		}
+	}
+
+	/* Stop here if not JESD216 rev A or later. */
+	if (bfpt_header->length < BFPT_DWORD_MAX)
+		return 0;
+
+	/* Page size: this field specifies 'N' so the page size = 2^N bytes. */
+	params->page_size = bfpt.dwords[BFPT_DWORD(11)];
+	params->page_size &= BFPT_DWORD11_PAGE_SIZE_MASK;
+	params->page_size >>= BFPT_DWORD11_PAGE_SIZE_SHIFT;
+	params->page_size = 1U << params->page_size;
+
+	/* Quad Enable Requirements. */
+	switch (bfpt.dwords[BFPT_DWORD(15)] & BFPT_DWORD15_QER_MASK) {
+	case BFPT_DWORD15_QER_NONE:
+		params->quad_enable = NULL;
+		break;
+
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+	case BFPT_DWORD15_QER_SR2_BIT1_BUGGY:
+	case BFPT_DWORD15_QER_SR2_BIT1_NO_RD:
+		params->quad_enable = spansion_no_read_cr_quad_enable;
+		break;
+#endif
+
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+	case BFPT_DWORD15_QER_SR1_BIT6:
+		params->quad_enable = macronix_quad_enable;
+		break;
+#endif
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+	case BFPT_DWORD15_QER_SR2_BIT1:
+		params->quad_enable = spansion_read_cr_quad_enable;
+		break;
+#endif
+
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * spi_nor_parse_sfdp() - parse the Serial Flash Discoverable Parameters.
+ * @nor:		pointer to a 'struct spi_nor'
+ * @params:		pointer to the 'struct spi_nor_flash_parameter' to be
+ *			filled
+ *
+ * The Serial Flash Discoverable Parameters are described by the JEDEC JESD216
+ * specification. This is a standard which tends to be supported by almost all
+ * (Q)SPI memory manufacturers. The tables stored in the flash allow us to
+ * learn at runtime the main parameters needed to perform basic SPI flash
+ * operations such as Fast Read, Page Program or Sector Erase commands.
+ *
+ * Return: 0 on success, -errno otherwise.
+ */
+static int spi_nor_parse_sfdp(struct spi_nor *nor,
+			      struct spi_nor_flash_parameter *params)
+{
+	const struct sfdp_parameter_header *param_header, *bfpt_header;
+	struct sfdp_parameter_header *param_headers = NULL;
+	struct sfdp_header header;
+	size_t psize;
+	int i, err;
+
+	/* Get the SFDP header. */
+	err = spi_nor_read_sfdp(nor, 0, sizeof(header), &header);
+	if (err < 0)
+		return err;
+
+	/* Check the SFDP header version. */
+	if (le32_to_cpu(header.signature) != SFDP_SIGNATURE ||
+	    header.major != SFDP_JESD216_MAJOR)
+		return -EINVAL;
+
+	/*
+	 * Verify that the first and only mandatory parameter header is a
+	 * Basic Flash Parameter Table header as specified in JESD216.
+	 */
+	bfpt_header = &header.bfpt_header;
+	if (SFDP_PARAM_HEADER_ID(bfpt_header) != SFDP_BFPT_ID ||
+	    bfpt_header->major != SFDP_JESD216_MAJOR)
+		return -EINVAL;
+
+	/*
+	 * Allocate memory then read all parameter headers with a single
+	 * Read SFDP command. These parameter headers will actually be parsed
+	 * twice: a first time to get the latest revision of the basic flash
+	 * parameter table, then a second time to handle the supported optional
+	 * tables.
+	 * Hence we read the parameter headers once for all to reduce the
+	 * processing time. Also we use kmalloc() instead of devm_kmalloc()
+	 * because we don't need to keep these parameter headers: the allocated
+	 * memory is always released with kfree() before exiting this function.
+	 */
+	if (header.nph) {
+		psize = header.nph * sizeof(*param_headers);
+
+		param_headers = kmalloc(psize, GFP_KERNEL);
+		if (!param_headers)
+			return -ENOMEM;
+
+		err = spi_nor_read_sfdp(nor, sizeof(header),
+					psize, param_headers);
+		if (err < 0) {
+			dev_err(nor->dev,
+				"failed to read SFDP parameter headers\n");
+			goto exit;
+		}
+	}
+
+	/*
+	 * Check other parameter headers to get the latest revision of
+	 * the basic flash parameter table.
+	 */
+	for (i = 0; i < header.nph; i++) {
+		param_header = &param_headers[i];
+
+		if (SFDP_PARAM_HEADER_ID(param_header) == SFDP_BFPT_ID &&
+		    param_header->major == SFDP_JESD216_MAJOR &&
+		    (param_header->minor > bfpt_header->minor ||
+		     (param_header->minor == bfpt_header->minor &&
+		      param_header->length > bfpt_header->length)))
+			bfpt_header = param_header;
+	}
+
+	err = spi_nor_parse_bfpt(nor, bfpt_header, params);
+	if (err)
+		goto exit;
+
+	/* Parse other parameter headers. */
+	for (i = 0; i < header.nph; i++) {
+		param_header = &param_headers[i];
+
+		switch (SFDP_PARAM_HEADER_ID(param_header)) {
+		case SFDP_SECTOR_MAP_ID:
+			dev_info(nor->dev, "non-uniform erase sector maps are not supported yet.\n");
+			break;
+
+		default:
+			break;
+		}
+
+		if (err)
+			goto exit;
+	}
+
+exit:
+	kfree(param_headers);
+	return err;
+}
+#else
+static int spi_nor_parse_sfdp(struct spi_nor *nor,
+			      struct spi_nor_flash_parameter *params)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_SPI_FLASH_SFDP_SUPPORT */
+
+static int spi_nor_init_params(struct spi_nor *nor,
+			       const struct flash_info *info,
+			       struct spi_nor_flash_parameter *params)
+{
+	/* Set legacy flash parameters as default. */
+	memset(params, 0, sizeof(*params));
+
+	/* Set SPI NOR sizes. */
+	params->size = info->sector_size * info->n_sectors;
+	params->page_size = info->page_size;
+
+	/* (Fast) Read settings. */
+	params->hwcaps.mask |= SNOR_HWCAPS_READ;
+	spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ],
+				  0, 0, SPINOR_OP_READ,
+				  SNOR_PROTO_1_1_1);
+
+	if (!(info->flags & SPI_NOR_NO_FR)) {
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_FAST],
+					  0, 8, SPINOR_OP_READ_FAST,
+					  SNOR_PROTO_1_1_1);
+	}
+
+	if (info->flags & SPI_NOR_DUAL_READ) {
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_2],
+					  0, 8, SPINOR_OP_READ_1_1_2,
+					  SNOR_PROTO_1_1_2);
+	}
+
+	if (info->flags & SPI_NOR_QUAD_READ) {
+		params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+		spi_nor_set_read_settings(&params->reads[SNOR_CMD_READ_1_1_4],
+					  0, 8, SPINOR_OP_READ_1_1_4,
+					  SNOR_PROTO_1_1_4);
+	}
+
+	/* Page Program settings. */
+	params->hwcaps.mask |= SNOR_HWCAPS_PP;
+	spi_nor_set_pp_settings(&params->page_programs[SNOR_CMD_PP],
+				SPINOR_OP_PP, SNOR_PROTO_1_1_1);
+
+	/* Select the procedure to set the Quad Enable bit. */
+	if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD |
+				   SNOR_HWCAPS_PP_QUAD)) {
+		switch (JEDEC_MFR(info)) {
+#ifdef CONFIG_SPI_FLASH_MACRONIX
+		case SNOR_MFR_MACRONIX:
+			params->quad_enable = macronix_quad_enable;
+			break;
+#endif
+		case SNOR_MFR_MICRON:
+			break;
+
+		default:
+#if defined(CONFIG_SPI_FLASH_SPANSION) || defined(CONFIG_SPI_FLASH_WINBOND)
+			/* Kept only for backward compatibility purpose. */
+			params->quad_enable = spansion_quad_enable;
+#endif
+			break;
+		}
+
+		/*
+		 * Some manufacturers, like GigaDevice, may use a different
+		 * bit to set QE on different memories, so the MFR alone
+		 * can't indicate the quad_enable method for this case; we
+		 * need to set it in the flash info list.
+		 */
+		if (info->quad_enable)
+			params->quad_enable = info->quad_enable;
+	}
+
+	/* Override the parameters with data read from SFDP tables. */
+	nor->addr_width = 0;
+	nor->mtd.erasesize = 0;
+	if ((info->flags & (SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ)) &&
+	    !(info->flags & SPI_NOR_SKIP_SFDP)) {
+		struct spi_nor_flash_parameter sfdp_params;
+
+		memcpy(&sfdp_params, params, sizeof(sfdp_params));
+		if (spi_nor_parse_sfdp(nor, &sfdp_params)) {
+			nor->addr_width = 0;
+			nor->mtd.erasesize = 0;
+		} else {
+			memcpy(params, &sfdp_params, sizeof(*params));
+		}
+	}
+
+	return 0;
+}
+
+static int spi_nor_select_read(struct spi_nor *nor,
+			       const struct spi_nor_flash_parameter *params,
+			       u32 shared_hwcaps)
+{
+	int best_match = shared_hwcaps & SNOR_HWCAPS_READ_MASK;
+	int cmd;
+	const struct spi_nor_read_command *read;
+
+	if (best_match < 0)
+		return -EINVAL;
+
+	if (best_match & SNOR_HWCAPS_READ_4_4_4)
+		cmd = SNOR_CMD_READ_4_4_4;
+	else if (best_match & SNOR_HWCAPS_READ_1_1_4)
+		cmd = SNOR_CMD_READ_1_1_4;
+	else
+		cmd = SNOR_CMD_READ;
+
+	read = &params->reads[cmd];
+	nor->read_opcode = read->opcode;
+	nor->read_proto = read->proto;
+
+	/*
+	 * In the spi-nor framework, we don't need to distinguish between mode
+	 * clock cycles and wait state clock cycles.
+	 * Indeed, the value of the mode clock cycles is used by a QSPI
+	 * flash memory to know whether it should enter or leave its 0-4-4
+	 * (Continuous Read / XIP) mode.
+	 * eXecution In Place is out of the scope of the mtd sub-system.
+	 * Hence we choose to merge both mode and wait state clock cycles
+	 * into the so called dummy clock cycles.
+	 */
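+	/*
+	 * Example (illustrative): a Quad I/O read advertised with 2 mode
+	 * clocks and 4 wait states ends up with read_dummy = 6.
+	 */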
+	nor->read_dummy = read->num_mode_clocks + read->num_wait_states;
+	return 0;
+}
+
+static int spi_nor_select_pp(struct spi_nor *nor,
+			     const struct spi_nor_flash_parameter *params,
+			     u32 shared_hwcaps)
+{
+	int best_match = shared_hwcaps & SNOR_HWCAPS_PP_MASK;
+	int cmd;
+	const struct spi_nor_pp_command *pp;
+
+	if (best_match < 0)
+		return -EINVAL;
+
+	if (best_match & SNOR_HWCAPS_PP_4_4_4)
+		cmd = SNOR_CMD_PP_4_4_4;
+	else if (best_match & SNOR_HWCAPS_PP_1_1_4)
+		cmd = SNOR_CMD_PP_1_1_4;
+	else
+		cmd = SNOR_CMD_PP;
+
+	pp = &params->page_programs[cmd];
+	nor->program_opcode = pp->opcode;
+	nor->write_proto = pp->proto;
+	return 0;
+}
+
+static int spi_nor_select_erase(struct spi_nor *nor,
+				const struct flash_info *info)
+{
+	struct mtd_info *mtd = &nor->mtd;
+
+	/* Do nothing if already configured from SFDP. */
+	if (mtd->erasesize)
+		return 0;
+
+#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS
+	/* prefer "small sector" erase if possible */
+	if (info->flags & SECT_4K) {
+		nor->erase_opcode = SPINOR_OP_BE_4K;
+		mtd->erasesize = 4096;
+	} else if (info->flags & SECT_4K_PMC) {
+		nor->erase_opcode = SPINOR_OP_BE_4K_PMC;
+		mtd->erasesize = 4096;
+	} else
+#endif
+	{
+		nor->erase_opcode = SPINOR_OP_SE;
+		mtd->erasesize = info->sector_size;
+	}
+	return 0;
+}
+
+static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info,
+			 const struct spi_nor_flash_parameter *params,
+			 const struct spi_nor_hwcaps *hwcaps)
+{
+	u32 ignored_mask, shared_mask;
+	bool enable_quad_io;
+	int err;
+
+	/*
+	 * Keep only the hardware capabilities supported by both the SPI
+	 * controller and the SPI flash memory.
+	 */
+	shared_mask = hwcaps->mask & params->hwcaps.mask;
+
+	/* SPI n-n-n protocols are not supported yet. */
+	ignored_mask = (SNOR_HWCAPS_READ_2_2_2 |
+			SNOR_HWCAPS_READ_4_4_4 |
+			SNOR_HWCAPS_READ_8_8_8 |
+			SNOR_HWCAPS_PP_4_4_4 |
+			SNOR_HWCAPS_PP_8_8_8);
+	if (shared_mask & ignored_mask) {
+		dev_dbg(nor->dev,
+			"SPI n-n-n protocols are not supported yet.\n");
+		shared_mask &= ~ignored_mask;
+	}
+
+	/* Select the (Fast) Read command. */
+	err = spi_nor_select_read(nor, params, shared_mask);
+	if (err) {
+		dev_dbg(nor->dev,
+			"can't select read settings supported by both the SPI controller and memory.\n");
+		return err;
+	}
+
+	/* Select the Page Program command. */
+	err = spi_nor_select_pp(nor, params, shared_mask);
+	if (err) {
+		dev_dbg(nor->dev,
+			"can't select write settings supported by both the SPI controller and memory.\n");
+		return err;
+	}
+
+	/* Select the Sector Erase command. */
+	err = spi_nor_select_erase(nor, info);
+	if (err) {
+		dev_dbg(nor->dev,
+			"can't select erase settings supported by both the SPI controller and memory.\n");
+		return err;
+	}
+
+	/* Enable Quad I/O if needed. */
+	enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 ||
+			  spi_nor_get_protocol_width(nor->write_proto) == 4);
+	if (enable_quad_io && params->quad_enable)
+		nor->quad_enable = params->quad_enable;
+	else
+		nor->quad_enable = NULL;
+
+	return 0;
+}
+
+static int spi_nor_init(struct spi_nor *nor)
+{
+	int err;
+
+	/*
+	 * Atmel, SST, Intel/Numonyx and other serial NOR chips tend to power
+	 * up with the software protection bits set.
+	 */
+	if (JEDEC_MFR(nor->info) == SNOR_MFR_ATMEL ||
+	    JEDEC_MFR(nor->info) == SNOR_MFR_INTEL ||
+	    JEDEC_MFR(nor->info) == SNOR_MFR_SST ||
+	    nor->info->flags & SPI_NOR_HAS_LOCK) {
+		write_enable(nor);
+		write_sr(nor, 0);
+		spi_nor_wait_till_ready(nor);
+	}
+
+	if (nor->quad_enable) {
+		err = nor->quad_enable(nor);
+		if (err) {
+			dev_dbg(nor->dev, "quad mode not supported\n");
+			return err;
+		}
+	}
+
+	if ((nor->addr_width == 4) &&
+	    (JEDEC_MFR(nor->info) != SNOR_MFR_SPANSION) &&
+	    !(nor->info->flags & SPI_NOR_4B_OPCODES)) {
+		/*
+		 * If the RESET# pin isn't hooked up properly, or the system
+		 * otherwise doesn't perform a reset command in the boot
+		 * sequence, it's impossible to 100% protect against unexpected
+		 * reboots (e.g., crashes). Warn the user (or hopefully, system
+		 * designer) that this is bad.
+		 */
+		if (nor->flags & SNOR_F_BROKEN_RESET)
+			printf("enabling reset hack; may not recover from unexpected reboots\n");
+		set_4byte(nor, nor->info, 1);
+	}
+
+	return 0;
+}
+
+int spi_nor_scan(struct spi_nor *nor)
+{
+	struct spi_nor_flash_parameter params;
+	const struct flash_info *info = NULL;
+	struct mtd_info *mtd = &nor->mtd;
+	struct spi_nor_hwcaps hwcaps = {
+		.mask = SNOR_HWCAPS_READ |
+			SNOR_HWCAPS_READ_FAST |
+			SNOR_HWCAPS_PP,
+	};
+	struct spi_slave *spi = nor->spi;
+	int ret;
+
+	/* Reset SPI protocol for all commands. */
+	nor->reg_proto = SNOR_PROTO_1_1_1;
+	nor->read_proto = SNOR_PROTO_1_1_1;
+	nor->write_proto = SNOR_PROTO_1_1_1;
+	nor->read = spi_nor_read_data;
+	nor->write = spi_nor_write_data;
+	nor->read_reg = spi_nor_read_reg;
+	nor->write_reg = spi_nor_write_reg;
+
+	if (spi->mode & SPI_RX_QUAD) {
+		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4;
+
+		if (spi->mode & SPI_TX_QUAD)
+			hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 |
+					SNOR_HWCAPS_PP_1_1_4 |
+					SNOR_HWCAPS_PP_1_4_4);
+	} else if (spi->mode & SPI_RX_DUAL) {
+		hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2;
+
+		if (spi->mode & SPI_TX_DUAL)
+			hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2;
+	}
+
+	info = spi_nor_read_id(nor);
+	if (IS_ERR_OR_NULL(info))
+		return -ENOENT;
+	/* Parse the Serial Flash Discoverable Parameters table. */
+	ret = spi_nor_init_params(nor, info, &params);
+	if (ret)
+		return ret;
+
+	if (!mtd->name)
+		mtd->name = info->name;
+	mtd->priv = nor;
+	mtd->type = MTD_NORFLASH;
+	mtd->writesize = 1;
+	mtd->flags = MTD_CAP_NORFLASH;
+	mtd->size = params.size;
+	mtd->_erase = spi_nor_erase;
+	mtd->_read = spi_nor_read;
+
+#if defined(CONFIG_SPI_FLASH_STMICRO) || defined(CONFIG_SPI_FLASH_SST)
+	/* NOR protection support for STmicro/Micron chips and similar */
+	if (JEDEC_MFR(info) == SNOR_MFR_ST ||
+	    JEDEC_MFR(info) == SNOR_MFR_MICRON ||
+	    JEDEC_MFR(info) == SNOR_MFR_SST ||
+	    info->flags & SPI_NOR_HAS_LOCK) {
+		nor->flash_lock = stm_lock;
+		nor->flash_unlock = stm_unlock;
+		nor->flash_is_locked = stm_is_locked;
+	}
+#endif
+
+	if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) {
+		mtd->_lock = spi_nor_lock;
+		mtd->_unlock = spi_nor_unlock;
+		mtd->_is_locked = spi_nor_is_locked;
+	}
+
+#ifdef CONFIG_SPI_FLASH_SST
+	/* sst nor chips use AAI word program */
+	if (info->flags & SST_WRITE)
+		mtd->_write = sst_write;
+	else
+#endif
+		mtd->_write = spi_nor_write;
+
+	if (info->flags & USE_FSR)
+		nor->flags |= SNOR_F_USE_FSR;
+	if (info->flags & SPI_NOR_HAS_TB)
+		nor->flags |= SNOR_F_HAS_SR_TB;
+	if (info->flags & NO_CHIP_ERASE)
+		nor->flags |= SNOR_F_NO_OP_CHIP_ERASE;
+	if (info->flags & USE_CLSR)
+		nor->flags |= SNOR_F_USE_CLSR;
+
+	if (info->flags & SPI_NOR_NO_ERASE)
+		mtd->flags |= MTD_NO_ERASE;
+
+#ifndef __UBOOT__
+	mtd->dev.parent = dev;
+#endif
+	nor->page_size = params.page_size;
+	mtd->writebufsize = nor->page_size;
+
+#ifndef __UBOOT__
+	if (np) {
+		/* If we were instantiated by DT, use it */
+		if (of_property_read_bool(np, "m25p,fast-read"))
+			params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+		else
+			params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+	} else {
+		/* If we weren't instantiated by DT, default to fast-read */
+		params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST;
+	}
+
+	if (of_property_read_bool(np, "broken-flash-reset"))
+		nor->flags |= SNOR_F_BROKEN_RESET;
+#endif
+	/* Some devices cannot do fast-read, no matter what DT tells us */
+	if (info->flags & SPI_NOR_NO_FR)
+		params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST;
+
+	/*
+	 * Configure the SPI memory:
+	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
+	 * - set the number of dummy cycles (mode cycles + wait states).
+	 * - set the SPI protocols for register and memory accesses.
+	 * - set the Quad Enable bit if needed (required by SPI x-y-4 protos).
+	 */
+	ret = spi_nor_setup(nor, info, &params, &hwcaps);
+	if (ret)
+		return ret;
+
+	if (nor->addr_width) {
+		/* already configured from SFDP */
+	} else if (info->addr_width) {
+		nor->addr_width = info->addr_width;
+	} else if (mtd->size > 0x1000000) {
+		/* enable 4-byte addressing if the device exceeds 16MiB */
+		nor->addr_width = 4;
+		if (JEDEC_MFR(info) == SNOR_MFR_SPANSION ||
+		    info->flags & SPI_NOR_4B_OPCODES)
+			spi_nor_set_4byte_opcodes(nor, info);
+	} else {
+		nor->addr_width = 3;
+	}
+
+	if (nor->addr_width > SPI_NOR_MAX_ADDR_WIDTH) {
+		dev_dbg(dev, "address width is too large: %u\n",
+			nor->addr_width);
+		return -EINVAL;
+	}
+
+	/* Send all the required SPI flash commands to initialize device */
+	nor->info = info;
+	ret = spi_nor_init(nor);
+	if (ret)
+		return ret;
+
+	nor->name = mtd->name;
+	nor->size = mtd->size;
+	nor->erase_size = mtd->erasesize;
+	nor->sector_size = mtd->erasesize;
+
+#ifndef CONFIG_SPL_BUILD
+	printf("SF: Detected %s with page size ", nor->name);
+	print_size(nor->page_size, ", erase size ");
+	print_size(nor->erase_size, ", total ");
+	print_size(nor->size, "");
+	puts("\n");
+#endif
+
+#ifndef __UBOOT__
+	dev_info(dev, "%s (%lld Kbytes)\n", info->name,
+			(long long)mtd->size >> 10);
+
+	dev_dbg(dev,
+		"mtd .name = %s, .size = 0x%llx (%lldMiB), "
+		".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n",
+		mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20),
+		mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions);
+
+	if (mtd->numeraseregions)
+		for (i = 0; i < mtd->numeraseregions; i++)
+			dev_dbg(dev,
+				"mtd.eraseregions[%d] = { .offset = 0x%llx, "
+				".erasesize = 0x%.8x (%uKiB), "
+				".numblocks = %d }\n",
+				i, (long long)mtd->eraseregions[i].offset,
+				mtd->eraseregions[i].erasesize,
+				mtd->eraseregions[i].erasesize / 1024,
+				mtd->eraseregions[i].numblocks);
+#endif
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_nor_scan);
+
+/* U-Boot specific functions, need to extend MTD to support these */
+int spi_flash_cmd_get_sw_write_prot(struct spi_nor *nor)
+{
+	int sr = read_sr(nor);
+
+	if (sr < 0)
+		return sr;
+
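+	/*
+	 * BP0-BP2 occupy status register bits 2..4; e.g. SR = 0x1c (all three
+	 * set) returns 7 here, while SR = 0x00 returns 0.
+	 */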
+	return (sr >> 2) & 7;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("framework for SPI NOR");
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
new file mode 100644
index 000000000000..317f182e3692
--- /dev/null
+++ b/include/linux/mtd/cfi.h
@@ -0,0 +1,385 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2@infradead.org> et al.
+ *
+ */
+
+#ifndef __MTD_CFI_H__
+#define __MTD_CFI_H__
+
+#ifndef __UBOOT__
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/map.h>
+#include <linux/mtd/cfi_endian.h>
+#include <linux/mtd/xip.h>
+
+#ifdef CONFIG_MTD_CFI_I1
+#define cfi_interleave(cfi) 1
+#define cfi_interleave_is_1(cfi) (cfi_interleave(cfi) == 1)
+#else
+#define cfi_interleave_is_1(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I2
+# ifdef cfi_interleave
+#  undef cfi_interleave
+#  define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+#  define cfi_interleave(cfi) 2
+# endif
+#define cfi_interleave_is_2(cfi) (cfi_interleave(cfi) == 2)
+#else
+#define cfi_interleave_is_2(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I4
+# ifdef cfi_interleave
+#  undef cfi_interleave
+#  define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+#  define cfi_interleave(cfi) 4
+# endif
+#define cfi_interleave_is_4(cfi) (cfi_interleave(cfi) == 4)
+#else
+#define cfi_interleave_is_4(cfi) (0)
+#endif
+
+#ifdef CONFIG_MTD_CFI_I8
+# ifdef cfi_interleave
+#  undef cfi_interleave
+#  define cfi_interleave(cfi) ((cfi)->interleave)
+# else
+#  define cfi_interleave(cfi) 8
+# endif
+#define cfi_interleave_is_8(cfi) (cfi_interleave(cfi) == 8)
+#else
+#define cfi_interleave_is_8(cfi) (0)
+#endif
+
+#ifndef cfi_interleave
+#warning No CONFIG_MTD_CFI_Ix selected. No NOR chip support can work.
+static inline int cfi_interleave(void *cfi)
+{
+	BUG();
+	return 0;
+}
+#endif
+
+static inline int cfi_interleave_supported(int i)
+{
+	switch (i) {
+#ifdef CONFIG_MTD_CFI_I1
+	case 1:
+#endif
+#ifdef CONFIG_MTD_CFI_I2
+	case 2:
+#endif
+#ifdef CONFIG_MTD_CFI_I4
+	case 4:
+#endif
+#ifdef CONFIG_MTD_CFI_I8
+	case 8:
+#endif
+		return 1;
+
+	default:
+		return 0;
+	}
+}
+
+
+/* NB: these values must represent the number of bytes needed to meet the
+ *     device type (x8, x16, x32). E.g. a 32-bit device is 4 x 8 bits = 4 bytes.
+ *     These numbers are used in calculations.
+ */
+#define CFI_DEVICETYPE_X8  (8 / 8)
+#define CFI_DEVICETYPE_X16 (16 / 8)
+#define CFI_DEVICETYPE_X32 (32 / 8)
+#define CFI_DEVICETYPE_X64 (64 / 8)
+
+
+/* Device Interface Code Assignments from the "Common Flash Memory Interface
+ * Publication 100" dated December 1, 2001.
+ */
+#define CFI_INTERFACE_X8_ASYNC		0x0000
+#define CFI_INTERFACE_X16_ASYNC		0x0001
+#define CFI_INTERFACE_X8_BY_X16_ASYNC	0x0002
+#define CFI_INTERFACE_X32_ASYNC		0x0003
+#define CFI_INTERFACE_X16_BY_X32_ASYNC	0x0005
+#define CFI_INTERFACE_NOT_ALLOWED	0xffff
+
+
+/* NB: We keep these structures in memory in HOST byteorder, except
+ * where individually noted.
+ */
+
+/* Basic Query Structure */
+struct cfi_ident {
+	uint8_t  qry[3];
+	uint16_t P_ID;
+	uint16_t P_ADR;
+	uint16_t A_ID;
+	uint16_t A_ADR;
+	uint8_t  VccMin;
+	uint8_t  VccMax;
+	uint8_t  VppMin;
+	uint8_t  VppMax;
+	uint8_t  WordWriteTimeoutTyp;
+	uint8_t  BufWriteTimeoutTyp;
+	uint8_t  BlockEraseTimeoutTyp;
+	uint8_t  ChipEraseTimeoutTyp;
+	uint8_t  WordWriteTimeoutMax;
+	uint8_t  BufWriteTimeoutMax;
+	uint8_t  BlockEraseTimeoutMax;
+	uint8_t  ChipEraseTimeoutMax;
+	uint8_t  DevSize;
+	uint16_t InterfaceDesc;
+	uint16_t MaxBufWriteSize;
+	uint8_t  NumEraseRegions;
+	uint32_t EraseRegionInfo[0]; /* Not host ordered */
+} __packed;
+
+/* Extended Query Structure for both PRI and ALT */
+
+struct cfi_extquery {
+	uint8_t  pri[3];
+	uint8_t  MajorVersion;
+	uint8_t  MinorVersion;
+} __packed;
+
+/* Vendor-Specific PRI for Intel/Sharp Extended Command Set (0x0001) */
+
+struct cfi_pri_intelext {
+	uint8_t  pri[3];
+	uint8_t  MajorVersion;
+	uint8_t  MinorVersion;
+	uint32_t FeatureSupport; /* if bit 31 is set then an additional uint32_t feature
+				    block follows - FIXME - not currently supported */
+	uint8_t  SuspendCmdSupport;
+	uint16_t BlkStatusRegMask;
+	uint8_t  VccOptimal;
+	uint8_t  VppOptimal;
+	uint8_t  NumProtectionFields;
+	uint16_t ProtRegAddr;
+	uint8_t  FactProtRegSize;
+	uint8_t  UserProtRegSize;
+	uint8_t  extra[0];
+} __packed;
+
+struct cfi_intelext_otpinfo {
+	uint32_t ProtRegAddr;
+	uint16_t FactGroups;
+	uint8_t  FactProtRegSize;
+	uint16_t UserGroups;
+	uint8_t  UserProtRegSize;
+} __packed;
+
+struct cfi_intelext_blockinfo {
+	uint16_t NumIdentBlocks;
+	uint16_t BlockSize;
+	uint16_t MinBlockEraseCycles;
+	uint8_t  BitsPerCell;
+	uint8_t  BlockCap;
+} __packed;
+
+struct cfi_intelext_regioninfo {
+	uint16_t NumIdentPartitions;
+	uint8_t  NumOpAllowed;
+	uint8_t  NumOpAllowedSimProgMode;
+	uint8_t  NumOpAllowedSimEraMode;
+	uint8_t  NumBlockTypes;
+	struct cfi_intelext_blockinfo BlockTypes[1];
+} __packed;
+
+struct cfi_intelext_programming_regioninfo {
+	uint8_t  ProgRegShift;
+	uint8_t  Reserved1;
+	uint8_t  ControlValid;
+	uint8_t  Reserved2;
+	uint8_t  ControlInvalid;
+	uint8_t  Reserved3;
+} __packed;
+
+/* Vendor-Specific PRI for AMD/Fujitsu Extended Command Set (0x0002) */
+
+struct cfi_pri_amdstd {
+	uint8_t  pri[3];
+	uint8_t  MajorVersion;
+	uint8_t  MinorVersion;
+	uint8_t  SiliconRevision; /* bits 1-0: Address Sensitive Unlock */
+	uint8_t  EraseSuspend;
+	uint8_t  BlkProt;
+	uint8_t  TmpBlkUnprotect;
+	uint8_t  BlkProtUnprot;
+	uint8_t  SimultaneousOps;
+	uint8_t  BurstMode;
+	uint8_t  PageMode;
+	uint8_t  VppMin;
+	uint8_t  VppMax;
+	uint8_t  TopBottom;
+} __packed;
+
+/* Vendor-Specific PRI for Atmel chips (command set 0x0002) */
+
+struct cfi_pri_atmel {
+	uint8_t pri[3];
+	uint8_t MajorVersion;
+	uint8_t MinorVersion;
+	uint8_t Features;
+	uint8_t BottomBoot;
+	uint8_t BurstMode;
+	uint8_t PageMode;
+} __packed;
+
+struct cfi_pri_query {
+	uint8_t  NumFields;
+	uint32_t ProtField[1]; /* Not host ordered */
+} __packed;
+
+struct cfi_bri_query {
+	uint8_t  PageModeReadCap;
+	uint8_t  NumFields;
+	uint32_t ConfField[1]; /* Not host ordered */
+} __packed;
+
+#define P_ID_NONE               0x0000
+#define P_ID_INTEL_EXT          0x0001
+#define P_ID_AMD_STD            0x0002
+#define P_ID_INTEL_STD          0x0003
+#define P_ID_AMD_EXT            0x0004
+#define P_ID_WINBOND            0x0006
+#define P_ID_ST_ADV             0x0020
+#define P_ID_MITSUBISHI_STD     0x0100
+#define P_ID_MITSUBISHI_EXT     0x0101
+#define P_ID_SST_PAGE           0x0102
+#define P_ID_SST_OLD            0x0701
+#define P_ID_INTEL_PERFORMANCE  0x0200
+#define P_ID_INTEL_DATA         0x0210
+#define P_ID_RESERVED           0xffff
+
+
+#define CFI_MODE_CFI	1
+#define CFI_MODE_JEDEC	0
+
+struct cfi_private {
+	uint16_t cmdset;
+	void *cmdset_priv;
+	int interleave;
+	int device_type;
+	int cfi_mode;		/* Are we a JEDEC device pretending to be CFI? */
+	int addr_unlock1;
+	int addr_unlock2;
+	struct mtd_info *(*cmdset_setup)(struct map_info *);
+	struct cfi_ident *cfiq; /* For now only one. We insist that all devs
+				  must be of the same type. */
+	int mfr, id;
+	int numchips;
+	map_word sector_erase_cmd;
+	unsigned long chipshift; /* Because they're of the same type */
+	const char *im_name;	 /* inter_module name for cmdset_setup */
+	struct flchip chips[0];  /* per-chip data structure for each chip */
+};
+
+uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
+				struct map_info *map, struct cfi_private *cfi);
+
+map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi);
+#define CMD(x)  cfi_build_cmd((x), map, cfi)
+
+unsigned long cfi_merge_status(map_word val, struct map_info *map,
+					   struct cfi_private *cfi);
+#define MERGESTATUS(x) cfi_merge_status((x), map, cfi)
+
+uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t base,
+				struct map_info *map, struct cfi_private *cfi,
+				int type, map_word *prev_val);
+
+static inline uint8_t cfi_read_query(struct map_info *map, uint32_t addr)
+{
+	map_word val = map_read(map, addr);
+
+	if (map_bankwidth_is_1(map)) {
+		return val.x[0];
+	} else if (map_bankwidth_is_2(map)) {
+		return cfi16_to_cpu(map, val.x[0]);
+	} else {
+		/* No point in a 64-bit byteswap since that would just be
+		   swapping the responses from different chips, and we are
+		   only interested in one chip (a representative sample) */
+		return cfi32_to_cpu(map, val.x[0]);
+	}
+}
+
+static inline uint16_t cfi_read_query16(struct map_info *map, uint32_t addr)
+{
+	map_word val = map_read(map, addr);
+
+	if (map_bankwidth_is_1(map)) {
+		return val.x[0] & 0xff;
+	} else if (map_bankwidth_is_2(map)) {
+		return cfi16_to_cpu(map, val.x[0]);
+	} else {
+		/* No point in a 64-bit byteswap since that would just be
+		   swapping the responses from different chips, and we are
+		   only interested in one chip (a representative sample) */
+		return cfi32_to_cpu(map, val.x[0]);
+	}
+}
+
+void cfi_udelay(int us);
+
+int __xipram cfi_qry_present(struct map_info *map, __u32 base,
+			     struct cfi_private *cfi);
+int __xipram cfi_qry_mode_on(uint32_t base, struct map_info *map,
+			     struct cfi_private *cfi);
+void __xipram cfi_qry_mode_off(uint32_t base, struct map_info *map,
+			       struct cfi_private *cfi);
+
+struct cfi_extquery *cfi_read_pri(struct map_info *map, uint16_t adr, uint16_t size,
+			     const char* name);
+struct cfi_fixup {
+	uint16_t mfr;
+	uint16_t id;
+	void (*fixup)(struct mtd_info *mtd);
+};
+
+#endif /* __UBOOT__ */
+
+#define CFI_MFR_ANY		0xFFFF
+#define CFI_ID_ANY		0xFFFF
+#define CFI_MFR_CONTINUATION	0x007F
+
+#define CFI_MFR_AMD		0x0001
+#define CFI_MFR_AMIC		0x0037
+#define CFI_MFR_ATMEL		0x001F
+#define CFI_MFR_EON		0x001C
+#define CFI_MFR_FUJITSU		0x0004
+#define CFI_MFR_HYUNDAI		0x00AD
+#define CFI_MFR_INTEL		0x0089
+#define CFI_MFR_MACRONIX	0x00C2
+#define CFI_MFR_NEC		0x0010
+#define CFI_MFR_PMC		0x009D
+#define CFI_MFR_SAMSUNG		0x00EC
+#define CFI_MFR_SHARP		0x00B0
+#define CFI_MFR_SST		0x00BF
+#define CFI_MFR_ST		0x0020 /* STMicroelectronics */
+#define CFI_MFR_MICRON		0x002C	/* Micron */
+#define CFI_MFR_TOSHIBA		0x0098
+#define CFI_MFR_WINBOND		0x00DA
+
+#ifndef __UBOOT__
+void cfi_fixup(struct mtd_info *mtd, struct cfi_fixup* fixups);
+
+typedef int (*varsize_frob_t)(struct map_info *map, struct flchip *chip,
+			      unsigned long adr, int len, void *thunk);
+
+int cfi_varsize_frob(struct mtd_info *mtd, varsize_frob_t frob,
+	loff_t ofs, size_t len, void *thunk);
+
+
+#endif /* __UBOOT__ */
+#endif /* __MTD_CFI_H__ */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 68e591532492..95b68a2db1d0 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -1,4 +1,4 @@ 
-/* SPDX-License-Identifier: GPL-2.0+ */
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org> et al.
  *
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
new file mode 100644
index 000000000000..0f22f063f0d4
--- /dev/null
+++ b/include/linux/mtd/spi-nor.h
@@ -0,0 +1,421 @@ 
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2014 Freescale Semiconductor, Inc.
+ * Synced from Linux v4.19
+ */
+
+#ifndef __LINUX_MTD_SPI_NOR_H
+#define __LINUX_MTD_SPI_NOR_H
+
+#include <linux/bitops.h>
+#include <linux/mtd/cfi.h>
+#include <linux/mtd/mtd.h>
+
+/*
+ * Manufacturer IDs
+ *
+ * The first byte returned from the flash after sending opcode SPINOR_OP_RDID.
+ * Sometimes these are the same as CFI IDs, but sometimes they aren't.
+ */
+#define SNOR_MFR_ATMEL		CFI_MFR_ATMEL
+#define SNOR_MFR_GIGADEVICE	0xc8
+#define SNOR_MFR_INTEL		CFI_MFR_INTEL
+#define SNOR_MFR_ST		CFI_MFR_ST /* ST Micro <--> Micron */
+#define SNOR_MFR_MICRON		CFI_MFR_MICRON /* ST Micro <--> Micron */
+#define SNOR_MFR_MACRONIX	CFI_MFR_MACRONIX
+#define SNOR_MFR_SPANSION	CFI_MFR_AMD
+#define SNOR_MFR_SST		CFI_MFR_SST
+#define SNOR_MFR_WINBOND	0xef /* Also used by some Spansion */
+
+/*
+ * Note on opcode nomenclature: some opcodes have a format like
+ * SPINOR_OP_FUNCTION{4,}_x_y_z. The numbers x, y, and z stand for the number
+ * of I/O lines used for the opcode, address, and data (respectively). The
+ * FUNCTION has an optional suffix of '4', to represent an opcode which
+ * requires a 4-byte (32-bit) address.
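+ *
+ * For example, SPINOR_OP_READ_1_4_4_4B (0xec) sends the opcode on one I/O
+ * line, the address on four lines and transfers data on four lines, and
+ * requires a 4-byte address.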
+ */
+
+/* Flash opcodes. */
+#define SPINOR_OP_WREN		0x06	/* Write enable */
+#define SPINOR_OP_RDSR		0x05	/* Read status register */
+#define SPINOR_OP_WRSR		0x01	/* Write status register 1 byte */
+#define SPINOR_OP_RDSR2		0x3f	/* Read status register 2 */
+#define SPINOR_OP_WRSR2		0x3e	/* Write status register 2 */
+#define SPINOR_OP_READ		0x03	/* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST	0x0b	/* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2	0x3b	/* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2	0xbb	/* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4	0x6b	/* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4	0xeb	/* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_PP		0x02	/* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4	0x32	/* Quad page program */
+#define SPINOR_OP_PP_1_4_4	0x38	/* Quad page program */
+#define SPINOR_OP_BE_4K		0x20	/* Erase 4KiB block */
+#define SPINOR_OP_BE_4K_PMC	0xd7	/* Erase 4KiB block on PMC chips */
+#define SPINOR_OP_BE_32K	0x52	/* Erase 32KiB block */
+#define SPINOR_OP_CHIP_ERASE	0xc7	/* Erase whole flash chip */
+#define SPINOR_OP_SE		0xd8	/* Sector erase (usually 64KiB) */
+#define SPINOR_OP_RDID		0x9f	/* Read JEDEC ID */
+#define SPINOR_OP_RDSFDP	0x5a	/* Read SFDP */
+#define SPINOR_OP_RDCR		0x35	/* Read configuration register */
+#define SPINOR_OP_RDFSR		0x70	/* Read flag status register */
+#define SPINOR_OP_CLFSR		0x50	/* Clear flag status register */
+#define SPINOR_OP_RDEAR		0xc8	/* Read Extended Address Register */
+#define SPINOR_OP_WREAR		0xc5	/* Write Extended Address Register */
+
+/* 4-byte address opcodes - used on Spansion and some Macronix flashes. */
+#define SPINOR_OP_READ_4B	0x13	/* Read data bytes (low frequency) */
+#define SPINOR_OP_READ_FAST_4B	0x0c	/* Read data bytes (high frequency) */
+#define SPINOR_OP_READ_1_1_2_4B	0x3c	/* Read data bytes (Dual Output SPI) */
+#define SPINOR_OP_READ_1_2_2_4B	0xbc	/* Read data bytes (Dual I/O SPI) */
+#define SPINOR_OP_READ_1_1_4_4B	0x6c	/* Read data bytes (Quad Output SPI) */
+#define SPINOR_OP_READ_1_4_4_4B	0xec	/* Read data bytes (Quad I/O SPI) */
+#define SPINOR_OP_PP_4B		0x12	/* Page program (up to 256 bytes) */
+#define SPINOR_OP_PP_1_1_4_4B	0x34	/* Quad page program */
+#define SPINOR_OP_PP_1_4_4_4B	0x3e	/* Quad page program */
+#define SPINOR_OP_BE_4K_4B	0x21	/* Erase 4KiB block */
+#define SPINOR_OP_BE_32K_4B	0x5c	/* Erase 32KiB block */
+#define SPINOR_OP_SE_4B		0xdc	/* Sector erase (usually 64KiB) */
+
+/* Double Transfer Rate opcodes - defined in JEDEC JESD216B. */
+#define SPINOR_OP_READ_1_1_1_DTR	0x0d
+#define SPINOR_OP_READ_1_2_2_DTR	0xbd
+#define SPINOR_OP_READ_1_4_4_DTR	0xed
+
+#define SPINOR_OP_READ_1_1_1_DTR_4B	0x0e
+#define SPINOR_OP_READ_1_2_2_DTR_4B	0xbe
+#define SPINOR_OP_READ_1_4_4_DTR_4B	0xee
+
+/* Used for SST flashes only. */
+#define SPINOR_OP_BP		0x02	/* Byte program */
+#define SPINOR_OP_WRDI		0x04	/* Write disable */
+#define SPINOR_OP_AAI_WP	0xad	/* Auto address increment word program */
+
+/* Used for S3AN flashes only */
+#define SPINOR_OP_XSE		0x50	/* Sector erase */
+#define SPINOR_OP_XPP		0x82	/* Page program */
+#define SPINOR_OP_XRDSR		0xd7	/* Read status register */
+
+#define XSR_PAGESIZE		BIT(0)	/* Page size in Po2 or Linear */
+#define XSR_RDY			BIT(7)	/* Ready */
+
+/* Used for Macronix and Winbond flashes. */
+#define SPINOR_OP_EN4B		0xb7	/* Enter 4-byte mode */
+#define SPINOR_OP_EX4B		0xe9	/* Exit 4-byte mode */
+
+/* Used for Spansion flashes only. */
+#define SPINOR_OP_BRWR		0x17	/* Bank register write */
+#define SPINOR_OP_CLSR		0x30	/* Clear status register 1 */
+
+/* Used for Micron flashes only. */
+#define SPINOR_OP_RD_EVCR      0x65    /* Read EVCR register */
+#define SPINOR_OP_WD_EVCR      0x61    /* Write EVCR register */
+
+/* Status Register bits. */
+#define SR_WIP			BIT(0)	/* Write in progress */
+#define SR_WEL			BIT(1)	/* Write enable latch */
+/* meaning of other SR_* bits may differ between vendors */
+#define SR_BP0			BIT(2)	/* Block protect 0 */
+#define SR_BP1			BIT(3)	/* Block protect 1 */
+#define SR_BP2			BIT(4)	/* Block protect 2 */
+#define SR_TB			BIT(5)	/* Top/Bottom protect */
+#define SR_SRWD			BIT(7)	/* SR write protect */
+/* Spansion/Cypress specific status bits */
+#define SR_E_ERR		BIT(5)
+#define SR_P_ERR		BIT(6)
+
+#define SR_QUAD_EN_MX		BIT(6)	/* Macronix Quad I/O */
+
+/* Enhanced Volatile Configuration Register bits */
+#define EVCR_QUAD_EN_MICRON	BIT(7)	/* Micron Quad I/O */
+
+/* Flag Status Register bits */
+#define FSR_READY		BIT(7)	/* Device status, 0 = Busy, 1 = Ready */
+#define FSR_E_ERR		BIT(5)	/* Erase operation status */
+#define FSR_P_ERR		BIT(4)	/* Program operation status */
+#define FSR_PT_ERR		BIT(1)	/* Protection error bit */
+
+/* Configuration Register bits. */
+#define CR_QUAD_EN_SPAN		BIT(1)	/* Spansion Quad I/O */
+
+/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT7	BIT(7)
+
+/* Supported SPI protocols */
+#define SNOR_PROTO_INST_MASK	GENMASK(23, 16)
+#define SNOR_PROTO_INST_SHIFT	16
+#define SNOR_PROTO_INST(_nbits)	\
+	((((unsigned long)(_nbits)) << SNOR_PROTO_INST_SHIFT) & \
+	 SNOR_PROTO_INST_MASK)
+
+#define SNOR_PROTO_ADDR_MASK	GENMASK(15, 8)
+#define SNOR_PROTO_ADDR_SHIFT	8
+#define SNOR_PROTO_ADDR(_nbits)	\
+	((((unsigned long)(_nbits)) << SNOR_PROTO_ADDR_SHIFT) & \
+	 SNOR_PROTO_ADDR_MASK)
+
+#define SNOR_PROTO_DATA_MASK	GENMASK(7, 0)
+#define SNOR_PROTO_DATA_SHIFT	0
+#define SNOR_PROTO_DATA(_nbits)	\
+	((((unsigned long)(_nbits)) << SNOR_PROTO_DATA_SHIFT) & \
+	 SNOR_PROTO_DATA_MASK)
+
+#define SNOR_PROTO_IS_DTR	BIT(24)	/* Double Transfer Rate */
+
+#define SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits)	\
+	(SNOR_PROTO_INST(_inst_nbits) |				\
+	 SNOR_PROTO_ADDR(_addr_nbits) |				\
+	 SNOR_PROTO_DATA(_data_nbits))
+#define SNOR_PROTO_DTR(_inst_nbits, _addr_nbits, _data_nbits)	\
+	(SNOR_PROTO_IS_DTR |					\
+	 SNOR_PROTO_STR(_inst_nbits, _addr_nbits, _data_nbits))
+
+enum spi_nor_protocol {
+	SNOR_PROTO_1_1_1 = SNOR_PROTO_STR(1, 1, 1),
+	SNOR_PROTO_1_1_2 = SNOR_PROTO_STR(1, 1, 2),
+	SNOR_PROTO_1_1_4 = SNOR_PROTO_STR(1, 1, 4),
+	SNOR_PROTO_1_1_8 = SNOR_PROTO_STR(1, 1, 8),
+	SNOR_PROTO_1_2_2 = SNOR_PROTO_STR(1, 2, 2),
+	SNOR_PROTO_1_4_4 = SNOR_PROTO_STR(1, 4, 4),
+	SNOR_PROTO_1_8_8 = SNOR_PROTO_STR(1, 8, 8),
+	SNOR_PROTO_2_2_2 = SNOR_PROTO_STR(2, 2, 2),
+	SNOR_PROTO_4_4_4 = SNOR_PROTO_STR(4, 4, 4),
+	SNOR_PROTO_8_8_8 = SNOR_PROTO_STR(8, 8, 8),
+
+	SNOR_PROTO_1_1_1_DTR = SNOR_PROTO_DTR(1, 1, 1),
+	SNOR_PROTO_1_2_2_DTR = SNOR_PROTO_DTR(1, 2, 2),
+	SNOR_PROTO_1_4_4_DTR = SNOR_PROTO_DTR(1, 4, 4),
+	SNOR_PROTO_1_8_8_DTR = SNOR_PROTO_DTR(1, 8, 8),
+};
+
+static inline bool spi_nor_protocol_is_dtr(enum spi_nor_protocol proto)
+{
+	return !!(proto & SNOR_PROTO_IS_DTR);
+}
+
+static inline u8 spi_nor_get_protocol_inst_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_INST_MASK)) >>
+		SNOR_PROTO_INST_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_addr_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_ADDR_MASK)) >>
+		SNOR_PROTO_ADDR_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_data_nbits(enum spi_nor_protocol proto)
+{
+	return ((unsigned long)(proto & SNOR_PROTO_DATA_MASK)) >>
+		SNOR_PROTO_DATA_SHIFT;
+}
+
+static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
+{
+	return spi_nor_get_protocol_data_nbits(proto);
+}
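+
+/*
+ * A short illustration of the encoding above: SNOR_PROTO_1_4_4 expands to
+ * 0x010404, so the helpers decode it as
+ *
+ *	spi_nor_get_protocol_inst_nbits(SNOR_PROTO_1_4_4)	-> 1
+ *	spi_nor_get_protocol_addr_nbits(SNOR_PROTO_1_4_4)	-> 4
+ *	spi_nor_get_protocol_data_nbits(SNOR_PROTO_1_4_4)	-> 4
+ *	spi_nor_protocol_is_dtr(SNOR_PROTO_1_4_4_DTR)		-> true
+ */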
+
+#define SPI_NOR_MAX_CMD_SIZE	8
+enum spi_nor_ops {
+	SPI_NOR_OPS_READ = 0,
+	SPI_NOR_OPS_WRITE,
+	SPI_NOR_OPS_ERASE,
+	SPI_NOR_OPS_LOCK,
+	SPI_NOR_OPS_UNLOCK,
+};
+
+enum spi_nor_option_flags {
+	SNOR_F_USE_FSR		= BIT(0),
+	SNOR_F_HAS_SR_TB	= BIT(1),
+	SNOR_F_NO_OP_CHIP_ERASE	= BIT(2),
+	SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
+	SNOR_F_READY_XSR_RDY	= BIT(4),
+	SNOR_F_USE_CLSR		= BIT(5),
+	SNOR_F_BROKEN_RESET	= BIT(6),
+};
+
+/**
+ * struct flash_info - Forward declaration of a structure used internally by
+ *		       spi_nor_scan()
+ */
+struct flash_info;
+
+/* TODO: Remove, once all users of spi_flash interface are moved to MTD */
+#define spi_flash spi_nor
+
+/**
+ * struct spi_nor - Structure for defining the SPI NOR layer
+ * @mtd:		embedded mtd_info structure
+ * @lock:		the lock for the read/write/erase/lock/unlock operations
+ * @dev:		pointer to an SPI device or an SPI NOR controller device
+ * @spi:		pointer to the spi_slave used to access the flash (U-Boot only)
+ * @info:		SPI NOR part JEDEC MFR ID and other info
+ * @page_size:		the page size of the SPI NOR
+ * @addr_width:		number of address bytes
+ * @erase_opcode:	the opcode for erasing a sector
+ * @read_opcode:	the read opcode
+ * @read_dummy:		the dummy needed by the read operation
+ * @program_opcode:	the program opcode
+ * @sst_write_second:	used by the SST write operation
+ * @flags:		flag options for the current SPI-NOR (SNOR_F_*)
+ * @read_proto:		the SPI protocol for read operations
+ * @write_proto:	the SPI protocol for write operations
+ * @reg_proto:		the SPI protocol for read_reg/write_reg/erase operations
+ * @cmd_buf:		used by the write_reg
+ * @prepare:		[OPTIONAL] do some preparations for the
+ *			read/write/erase/lock/unlock operations
+ * @unprepare:		[OPTIONAL] do some post work after the
+ *			read/write/erase/lock/unlock operations
+ * @read_reg:		[DRIVER-SPECIFIC] read out the register
+ * @write_reg:		[DRIVER-SPECIFIC] write data to the register
+ * @read:		[DRIVER-SPECIFIC] read data from the SPI NOR
+ * @write:		[DRIVER-SPECIFIC] write data to the SPI NOR
+ * @erase:		[DRIVER-SPECIFIC] erase a sector of the SPI NOR
+ *			at the offset @offs; if not provided by the driver,
+ *			spi-nor will send the erase opcode via write_reg()
+ * @flash_lock:		[FLASH-SPECIFIC] lock a region of the SPI NOR
+ * @flash_unlock:	[FLASH-SPECIFIC] unlock a region of the SPI NOR
+ * @flash_is_locked:	[FLASH-SPECIFIC] check if a region of the SPI NOR is
+ *			completely locked
+ * @quad_enable:	[FLASH-SPECIFIC] enables SPI NOR quad mode
+ * @priv:		the private data
+ */
+struct spi_nor {
+	struct mtd_info		mtd;
+#ifndef __UBOOT__
+	struct mutex		lock;
+	struct device		*dev;
+#else
+	struct udevice		*dev;
+	struct spi_slave	*spi;
+#endif
+	const struct flash_info	*info;
+	u32			page_size;
+	u8			addr_width;
+	u8			erase_opcode;
+	u8			read_opcode;
+	u8			read_dummy;
+	u8			program_opcode;
+	enum spi_nor_protocol	read_proto;
+	enum spi_nor_protocol	write_proto;
+	enum spi_nor_protocol	reg_proto;
+	bool			sst_write_second;
+	u32			flags;
+	u8			cmd_buf[SPI_NOR_MAX_CMD_SIZE];
+
+	int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
+	int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+	int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
+
+	ssize_t (*read)(struct spi_nor *nor, loff_t from,
+			size_t len, u_char *read_buf);
+	ssize_t (*write)(struct spi_nor *nor, loff_t to,
+			size_t len, const u_char *write_buf);
+	int (*erase)(struct spi_nor *nor, loff_t offs);
+
+	int (*flash_lock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*flash_unlock)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len);
+	int (*quad_enable)(struct spi_nor *nor);
+
+	void *priv;
+/* Compatibility for spi_flash, remove once sf layer is merged with mtd */
+	const char *name;
+	u32 size;
+	u32 sector_size;
+	u32 erase_size;
+};
+
+static inline void spi_nor_set_flash_node(struct spi_nor *nor,
+					  const struct device_node *np)
+{
+	mtd_set_of_node(&nor->mtd, np);
+}
+
+static inline const struct device_node *spi_nor_get_flash_node(struct spi_nor *nor)
+{
+	return mtd_get_of_node(&nor->mtd);
+}
+
+/**
+ * struct spi_nor_hwcaps - Structure for describing the hardware capabilities
+ * supported by the SPI controller (bus master).
+ * @mask:		the bitmask listing all the supported hw capabilities
+ */
+struct spi_nor_hwcaps {
+	u32	mask;
+};
+
+/*
+ * (Fast) Read capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * As a matter of performance, it is preferable to use Octo SPI protocols first,
+ * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly
+ * (Slow) Read.
+ */
+#define SNOR_HWCAPS_READ_MASK		GENMASK(14, 0)
+#define SNOR_HWCAPS_READ		BIT(0)
+#define SNOR_HWCAPS_READ_FAST		BIT(1)
+#define SNOR_HWCAPS_READ_1_1_1_DTR	BIT(2)
+
+#define SNOR_HWCAPS_READ_DUAL		GENMASK(6, 3)
+#define SNOR_HWCAPS_READ_1_1_2		BIT(3)
+#define SNOR_HWCAPS_READ_1_2_2		BIT(4)
+#define SNOR_HWCAPS_READ_2_2_2		BIT(5)
+#define SNOR_HWCAPS_READ_1_2_2_DTR	BIT(6)
+
+#define SNOR_HWCAPS_READ_QUAD		GENMASK(10, 7)
+#define SNOR_HWCAPS_READ_1_1_4		BIT(7)
+#define SNOR_HWCAPS_READ_1_4_4		BIT(8)
+#define SNOR_HWCAPS_READ_4_4_4		BIT(9)
+#define SNOR_HWCAPS_READ_1_4_4_DTR	BIT(10)
+
+#define SNOR_HWCAPS_READ_OCTO		GENMASK(14, 11)
+#define SNOR_HWCAPS_READ_1_1_8		BIT(11)
+#define SNOR_HWCAPS_READ_1_8_8		BIT(12)
+#define SNOR_HWCAPS_READ_8_8_8		BIT(13)
+#define SNOR_HWCAPS_READ_1_8_8_DTR	BIT(14)
+
+/*
+ * Page Program capabilities.
+ * MUST be ordered by priority: the higher bit position, the higher priority.
+ * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the
+ * legacy SPI 1-1-1 protocol.
+ * Note that Dual Page Programs are not supported because there is no existing
+ * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory
+ * implements such commands.
+ */
+#define SNOR_HWCAPS_PP_MASK	GENMASK(22, 16)
+#define SNOR_HWCAPS_PP		BIT(16)
+
+#define SNOR_HWCAPS_PP_QUAD	GENMASK(19, 17)
+#define SNOR_HWCAPS_PP_1_1_4	BIT(17)
+#define SNOR_HWCAPS_PP_1_4_4	BIT(18)
+#define SNOR_HWCAPS_PP_4_4_4	BIT(19)
+
+#define SNOR_HWCAPS_PP_OCTO	GENMASK(22, 20)
+#define SNOR_HWCAPS_PP_1_1_8	BIT(20)
+#define SNOR_HWCAPS_PP_1_8_8	BIT(21)
+#define SNOR_HWCAPS_PP_8_8_8	BIT(22)
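+
+/*
+ * A minimal sketch of how a caller could describe a controller that supports
+ * plain reads, fast reads, quad-output reads and single-line page program
+ * (how the mask is consumed is up to the user of this framework):
+ *
+ *	const struct spi_nor_hwcaps hwcaps = {
+ *		.mask = SNOR_HWCAPS_READ |
+ *			SNOR_HWCAPS_READ_FAST |
+ *			SNOR_HWCAPS_READ_1_1_4 |
+ *			SNOR_HWCAPS_PP,
+ *	};
+ */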
+
+/**
+ * spi_nor_scan() - scan the SPI NOR
+ * @nor:	the spi_nor structure
+ *
+ * Drivers can use this function to scan the SPI NOR.
+ * During the scan, it tries to gather all the information needed to
+ * fill the mtd_info{} and the spi_nor{}.
+ *
+ * Return: 0 for success, others for failure.
+ */
+int spi_nor_scan(struct spi_nor *nor);
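+
+/*
+ * A minimal usage sketch (hypothetical my_* hooks, error handling omitted):
+ * a controller driver fills in the transfer callbacks defined above and then
+ * lets spi_nor_scan() identify the flash and populate nor->mtd:
+ *
+ *	static int my_flash_probe(struct spi_nor *nor)
+ *	{
+ *		nor->read_reg  = my_read_reg;	// send opcode, read registers
+ *		nor->write_reg = my_write_reg;	// send opcode and payload
+ *		nor->read      = my_read;	// read flash data
+ *		nor->write     = my_write;	// program flash data
+ *		nor->erase     = my_erase;	// optional sector erase
+ *
+ *		return spi_nor_scan(nor);
+ *	}
+ */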
+
+/**
+ * spi_nor_restore() - restore the addressing mode of the SPI NOR flash
+ * @nor:	the spi_nor structure
+ */
+void spi_nor_restore(struct spi_nor *nor);
+
+#endif