
mtd: rawnand: cache read suquential

Message ID 20220106081043.17275-1-jaimeliao.tw@gmail.com
State Changes Requested
Delegated to: Miquel Raynal
Series mtd: rawnand: cache read suquential

Commit Message

liao jaime Jan. 6, 2022, 8:10 a.m. UTC
Reduce the time needed to read pages whose addresses are consecutive by
adding a sequential cache read feature to the existing raw NAND core.
Add nand_exec_continue_read_page_op() to issue the extended read
commands, add command selection to nand_lp_exec_read_page_op(), and use
readlen to detect the last page of the read.

The sequential cache read command sequence can be split into three
parts. The first part, 00-30-31, enables sequential cache read: the
next page is loaded into the cache while the current page is read out.
The second part, 31, is repeated for each intermediate page and tells
the flash to load the next page while the current one is read out,
reducing the overall read time. The last part, 3F, performs the final
cache read; the flash does not need to load another page.
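
To make the split concrete, the per-page command flow for an aligned
read of N consecutive pages looks roughly like this (opcodes as used
below):

  first page:    00h + address, 30h, wait, 31h, wait, read out page 0
  middle pages:  31h, wait, read out page i (page i+1 loads meanwhile)
  last page:     3Fh, wait, read out page N-1 (no further page loaded)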

Signed-off-by: JaimeLiao <jaimeliao.tw@gmail.com>
---
 drivers/mtd/nand/raw/nand_base.c | 95 ++++++++++++++++++++++++++++++--
 include/linux/mtd/rawnand.h      |  5 ++
 2 files changed, 95 insertions(+), 5 deletions(-)

Comments

Miquel Raynal Jan. 26, 2022, 10:35 a.m. UTC | #1
Hi Jaime,

Typo in the title, sequential

jaimeliao.tw@gmail.com wrote on Thu,  6 Jan 2022 16:10:43 +0800:

> Reduce the time needed to read pages whose addresses are consecutive by
> adding a sequential cache read feature to the existing raw NAND core.
> Add nand_exec_continue_read_page_op() to issue the extended read
> commands, add command selection to nand_lp_exec_read_page_op(), and use
> readlen to detect the last page of the read.
> 
> The sequential cache read command sequence can be split into three
> parts. The first part, 00-30-31, enables sequential cache read: the
> next page is loaded into the cache while the current page is read out.
> The second part, 31, is repeated for each intermediate page and tells
> the flash to load the next page while the current one is read out,
> reducing the overall read time. The last part, 3F, performs the final
> cache read; the flash does not need to load another page.

Can you provide a benchmarking result?

> Signed-off-by: JaimeLiao <jaimeliao.tw@gmail.com>
> ---
>  drivers/mtd/nand/raw/nand_base.c | 95 ++++++++++++++++++++++++++++++--
>  include/linux/mtd/rawnand.h      |  5 ++
>  2 files changed, 95 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
> index 3d6c6e880520..ddaba40b58a7 100644
> --- a/drivers/mtd/nand/raw/nand_base.c
> +++ b/drivers/mtd/nand/raw/nand_base.c
> @@ -1176,6 +1176,39 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
>  	const struct nand_interface_config *conf =
>  		nand_get_interface_config(chip);
>  	u8 addrs[5];
> +
> +	/* Use the 00-30-31 sequence when sequential cache read is enabled */
> +	if (chip->ops.conti_read_require) {
> +		struct nand_op_instr instrs[] = {
> +			NAND_OP_CMD(NAND_CMD_READ0, 0),
> +			NAND_OP_ADDR(4, addrs, 0),
> +			NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
> +			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
> +			NAND_OP_CMD(NAND_CMD_READCONTI, NAND_COMMON_TIMING_NS(conf, tWB_max)),
> +			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
> +					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
> +			NAND_OP_DATA_IN(len, buf, 0),
> +		};
> +		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
> +		int ret;
> +
> +		if (!len)
> +			op.ninstrs--;
> +
> +		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
> +		if (ret < 0)
> +			return ret;
> +
> +		addrs[2] = page;
> +		addrs[3] = page >> 8;
> +
> +		if (chip->options & NAND_ROW_ADDR_3) {
> +			addrs[4] = page >> 16;
> +			instrs[1].ctx.addr.naddrs++;
> +		}
> +
> +		return nand_exec_op(chip, &op);
> +	}

I don't like the idea of depending on this new feature.

Perhaps we should have dedicated helpers. Either we support it and we
use it, or we don't and continue with the historical path.

Non exec_op enabled drivers should not try this feature.

I'm still not sure how to make that big check. If we start checking
what a driver supports, we need to cover a lot of different cases.
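
To make that concrete, here is a rough sketch of what a dedicated
helper could look like (function name and placement are illustrative
only, not part of this patch; the op sequence is copied from the hunk
above):

static int nand_lp_exec_cont_read_page_op(struct nand_chip *chip,
					  unsigned int page,
					  unsigned int offset_in_page,
					  void *buf, unsigned int len)
{
	const struct nand_interface_config *conf =
		nand_get_interface_config(chip);
	u8 addrs[5];
	/* Same 00-30-31 sequence as above, kept out of the historical path */
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
		NAND_OP_CMD(NAND_CMD_READCONTI, NAND_COMMON_TIMING_NS(conf, tWB_max)),
		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0 */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}

nand_read_page_op() would then call it only when nand_has_exec_op(chip)
is true and the feature has been requested, so non exec_op drivers and
the historical nand_lp_exec_read_page_op() path are left untouched.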

>  	struct nand_op_instr instrs[] = {
>  		NAND_OP_CMD(NAND_CMD_READ0, 0),
>  		NAND_OP_ADDR(4, addrs, 0),
> @@ -1206,6 +1239,31 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
>  	return nand_exec_op(chip, &op);
>  }
>  
> +static int nand_exec_continue_read_page_op(struct nand_chip *chip,
> +					    void *buf, unsigned int len)
> +{
> +	bool conti_read_last = false;
> +	const struct nand_interface_config *conf =
> +		nand_get_interface_config(chip);
> +	/* Set conti_read_last before reading the last aligned page */
> +	if (chip->ops.conti_readlen >= len && chip->ops.conti_readlen < 2 * len)
> +		conti_read_last = true;
> +	struct nand_op_instr instrs[] = {
> +		NAND_OP_CMD(conti_read_last ? NAND_CMD_READLAST : NAND_CMD_READCONTI,
> +			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
> +		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
> +				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
> +		NAND_OP_DATA_IN(len, buf, 0),
> +	};
> +	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
> +
> +	/* Drop the DATA_IN instruction if len is set to 0. */
> +	if (!len)
> +		op.ninstrs--;
> +
> +	return nand_exec_op(chip, &op);
> +}
> +
>  /**
>   * nand_read_page_op - Do a READ PAGE operation
>   * @chip: The NAND chip
> @@ -1231,10 +1289,15 @@ int nand_read_page_op(struct nand_chip *chip, unsigned int page,
>  		return -EINVAL;
>  
>  	if (nand_has_exec_op(chip)) {
> -		if (mtd->writesize > 512)
> -			return nand_lp_exec_read_page_op(chip, page,
> -							 offset_in_page, buf,
> -							 len);
> +		if (mtd->writesize > 512) {
> +			/* Select the read command sequence */
> +			if (!chip->ops.conti_read_first_page && chip->ops.conti_read_require)
> +				return nand_exec_continue_read_page_op(chip, buf, mtd->writesize);
> +			else
> +				return nand_lp_exec_read_page_op(chip, page,
> +								 offset_in_page, buf,
> +								 len);
> +		}
>  
>  		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
>  						 buf, len);
> @@ -3362,7 +3425,6 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
>  
>  		bytes = min(mtd->writesize - col, readlen);
>  		aligned = (bytes == mtd->writesize);
> -
>  		if (!aligned)
>  			use_bounce_buf = 1;
>  		else if (chip->options & NAND_USES_DMA)
> @@ -3381,6 +3443,25 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
>  						 __func__, buf);
>  
>  read_retry:
> +			/*
> +			 * A sequential cache read is issued in three parts.
> +			 * The core sets conti_read_first_page to flag the
> +			 * first page of the sequence, sets conti_read_require
> +			 * to select the extended command sequence, and copies
> +			 * readlen into conti_readlen so that the last page of
> +			 * the read can be detected.
> +			 */
> +			if (!col && readlen >= (2 * mtd->writesize)) {
> +				if (chip->ops.conti_read_require == 0)
> +					chip->ops.conti_read_first_page = 1;
> +				else
> +					chip->ops.conti_read_first_page = 0;
> +				chip->ops.conti_read_require = 1;
> +				chip->ops.conti_readlen = readlen;
> +			} else if (chip->ops.conti_read_require) {
> +				chip->ops.conti_read_first_page = 0;
> +				chip->ops.conti_readlen = readlen;
> +			}
>  			/*
>  			 * Now read the page into the buffer.  Absent an error,
>  			 * the read methods return max bitflips per ecc step.
> @@ -3468,6 +3549,10 @@ static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
>  			retry_mode = 0;
>  		}
>  
> +		/* Clear conti_read_require after the last aligned page read */
> +		if (readlen < mtd->writesize)
> +			chip->ops.conti_read_require = 0;
> +
>  		if (!readlen)
>  			break;
>  
> diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
> index b2f9dd3cbd69..a5dccccdf65e 100644
> --- a/include/linux/mtd/rawnand.h
> +++ b/include/linux/mtd/rawnand.h
> @@ -67,6 +67,8 @@ struct gpio_desc;
>  
>  /* Extended commands for large page devices */
>  #define NAND_CMD_READSTART	0x30
> +#define NAND_CMD_READCONTI     0x31
> +#define NAND_CMD_READLAST      0x3f
>  #define NAND_CMD_RNDOUTSTART	0xE0
>  #define NAND_CMD_CACHEDPROG	0x15
>  
> @@ -1173,6 +1175,9 @@ struct nand_chip_ops {
>  	int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
>  	int (*choose_interface_config)(struct nand_chip *chip,
>  				       struct nand_interface_config *iface);
> +	bool conti_read_require;
> +	bool conti_read_first_page;
> +	int conti_readlen;
>  };
>  
>  /**


Thanks,
Miquèl

Patch

diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 3d6c6e880520..ddaba40b58a7 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -1176,6 +1176,39 @@  static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
 	const struct nand_interface_config *conf =
 		nand_get_interface_config(chip);
 	u8 addrs[5];
+
+	/* Use the 00-30-31 sequence when sequential cache read is enabled */
+	if (chip->ops.conti_read_require) {
+		struct nand_op_instr instrs[] = {
+			NAND_OP_CMD(NAND_CMD_READ0, 0),
+			NAND_OP_ADDR(4, addrs, 0),
+			NAND_OP_CMD(NAND_CMD_READSTART, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max), 0),
+			NAND_OP_CMD(NAND_CMD_READCONTI, NAND_COMMON_TIMING_NS(conf, tWB_max)),
+			NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+					 NAND_COMMON_TIMING_NS(conf, tRR_min)),
+			NAND_OP_DATA_IN(len, buf, 0),
+		};
+		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+		int ret;
+
+		if (!len)
+			op.ninstrs--;
+
+		ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+		if (ret < 0)
+			return ret;
+
+		addrs[2] = page;
+		addrs[3] = page >> 8;
+
+		if (chip->options & NAND_ROW_ADDR_3) {
+			addrs[4] = page >> 16;
+			instrs[1].ctx.addr.naddrs++;
+		}
+
+		return nand_exec_op(chip, &op);
+	}
 	struct nand_op_instr instrs[] = {
 		NAND_OP_CMD(NAND_CMD_READ0, 0),
 		NAND_OP_ADDR(4, addrs, 0),
@@ -1206,6 +1239,31 @@  static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
 	return nand_exec_op(chip, &op);
 }
 
+static int nand_exec_continue_read_page_op(struct nand_chip *chip,
+					    void *buf, unsigned int len)
+{
+	bool conti_read_last = false;
+	const struct nand_interface_config *conf =
+		nand_get_interface_config(chip);
+	/* Set conti_read_last before reading the last aligned page */
+	if (chip->ops.conti_readlen >= len && chip->ops.conti_readlen < 2 * len)
+		conti_read_last = true;
+	struct nand_op_instr instrs[] = {
+		NAND_OP_CMD(conti_read_last ? NAND_CMD_READLAST : NAND_CMD_READCONTI,
+			    NAND_COMMON_TIMING_NS(conf, tWB_max)),
+		NAND_OP_WAIT_RDY(NAND_COMMON_TIMING_MS(conf, tR_max),
+				 NAND_COMMON_TIMING_NS(conf, tRR_min)),
+		NAND_OP_DATA_IN(len, buf, 0),
+	};
+	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+	/* Drop the DATA_IN instruction if len is set to 0. */
+	if (!len)
+		op.ninstrs--;
+
+	return nand_exec_op(chip, &op);
+}
+
 /**
  * nand_read_page_op - Do a READ PAGE operation
  * @chip: The NAND chip
@@ -1231,10 +1289,15 @@  int nand_read_page_op(struct nand_chip *chip, unsigned int page,
 		return -EINVAL;
 
 	if (nand_has_exec_op(chip)) {
-		if (mtd->writesize > 512)
-			return nand_lp_exec_read_page_op(chip, page,
-							 offset_in_page, buf,
-							 len);
+		if (mtd->writesize > 512) {
+			/* Select the read command sequence */
+			if (!chip->ops.conti_read_first_page && chip->ops.conti_read_require)
+				return nand_exec_continue_read_page_op(chip, buf, mtd->writesize);
+			else
+				return nand_lp_exec_read_page_op(chip, page,
+								 offset_in_page, buf,
+								 len);
+		}
 
 		return nand_sp_exec_read_page_op(chip, page, offset_in_page,
 						 buf, len);
@@ -3362,7 +3425,6 @@  static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
 
 		bytes = min(mtd->writesize - col, readlen);
 		aligned = (bytes == mtd->writesize);
-
 		if (!aligned)
 			use_bounce_buf = 1;
 		else if (chip->options & NAND_USES_DMA)
@@ -3381,6 +3443,25 @@  static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
 						 __func__, buf);
 
 read_retry:
+			/*
+			 * A sequential cache read is issued in three parts.
+			 * The core sets conti_read_first_page to flag the
+			 * first page of the sequence, sets conti_read_require
+			 * to select the extended command sequence, and copies
+			 * readlen into conti_readlen so that the last page of
+			 * the read can be detected.
+			 */
+			if (!col && readlen >= (2 * mtd->writesize)) {
+				if (chip->ops.conti_read_require == 0)
+					chip->ops.conti_read_first_page = 1;
+				else
+					chip->ops.conti_read_first_page = 0;
+				chip->ops.conti_read_require = 1;
+				chip->ops.conti_readlen = readlen;
+			} else if (chip->ops.conti_read_require) {
+				chip->ops.conti_read_first_page = 0;
+				chip->ops.conti_readlen = readlen;
+			}
 			/*
 			 * Now read the page into the buffer.  Absent an error,
 			 * the read methods return max bitflips per ecc step.
@@ -3468,6 +3549,10 @@  static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
 			retry_mode = 0;
 		}
 
+		/* Clear conti_read_require after the last aligned page read */
+		if (readlen < mtd->writesize)
+			chip->ops.conti_read_require = 0;
+
 		if (!readlen)
 			break;
 
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index b2f9dd3cbd69..a5dccccdf65e 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -67,6 +67,8 @@  struct gpio_desc;
 
 /* Extended commands for large page devices */
 #define NAND_CMD_READSTART	0x30
+#define NAND_CMD_READCONTI     0x31
+#define NAND_CMD_READLAST      0x3f
 #define NAND_CMD_RNDOUTSTART	0xE0
 #define NAND_CMD_CACHEDPROG	0x15
 
@@ -1173,6 +1175,9 @@  struct nand_chip_ops {
 	int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
 	int (*choose_interface_config)(struct nand_chip *chip,
 				       struct nand_interface_config *iface);
+	bool conti_read_require;
+	bool conti_read_first_page;
+	int conti_readlen;
 };
 
 /**