diff mbox series

[RFC,1/6] spi: Extend the core to ease integration of SPI memory controllers

Message ID 20180205232120.5851-2-boris.brezillon@bootlin.com
State Superseded
Headers show
Series spi: Extend the framework to generically support memory devices | expand

Commit Message

Boris Brezillon Feb. 5, 2018, 11:21 p.m. UTC
From: Boris Brezillon <boris.brezillon@free-electrons.com>

Some controllers are exposing high-level interfaces to access various
kinds of SPI memories. Unfortunately they do not fit in the current
spi_controller model and usually have drivers placed in
drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
memories in general.

This is an attempt at defining a SPI memory interface which works for
all kinds of SPI memories (NORs, NANDs, SRAMs).

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
---
 drivers/spi/spi.c       | 423 +++++++++++++++++++++++++++++++++++++++++++++++-
 include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++
 2 files changed, 646 insertions(+), 3 deletions(-)

Comments

Maxime Chevallier Feb. 6, 2018, 9:43 a.m. UTC | #1
Hi Boris,

> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> ---
>  drivers/spi/spi.c       | 423
> +++++++++++++++++++++++++++++++++++++++++++++++-
> include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++ 2 files
> changed, 646 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> index b33a727a0158..57bc540a0521 100644
> --- a/drivers/spi/spi.c
> +++ b/drivers/spi/spi.c
> @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct
> spi_controller *ctlr) }
>  #endif
>  

[...]

> +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> +	struct spi_controller *ctlr = mem->spi->controller;
> +	struct spi_transfer xfers[4] = { };
> +	struct spi_message msg;
> +	u8 *tmpbuf;
> +	int ret;
> +
> +	if (!spi_mem_supports_op(mem, op))
> +		return -ENOTSUPP;
> +
> +	if (ctlr->mem_ops) {
> +		if (ctlr->auto_runtime_pm) {
> +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> +			if (ret < 0) {
> +				dev_err(&ctlr->dev,
> +					"Failed to power device:
> %d\n",
> +					ret);
> +				return ret;
> +			}
> +		}
> +
> +		mutex_lock(&ctlr->bus_lock_mutex);
> +		mutex_lock(&ctlr->io_mutex);
> +		ret = ctlr->mem_ops->exec_op(mem, op);

As a user, what prevented me from using spi_flash_read is that it
bypasses the message queue. I have a setup that uses spi_async and I
have to make sure everything goes in the right order, so I ended up
using spi_write_then_read instead.

Is there a way to make it so that messages that use exec_op are issued
in the correct order with regard to messages that are already queued?

Maybe we could extend spi_message or spi_transfer to store all
this opcode/dummy/addr information, so that we would use the standard
interfaces spi_sync / spi_async, and make this mechanism of exec_op
transparent from the user ?


> +		mutex_unlock(&ctlr->io_mutex);
> +		mutex_unlock(&ctlr->bus_lock_mutex);
> +
> +		if (ctlr->auto_runtime_pm)
> +			pm_runtime_put(ctlr->dev.parent);
> +
> +		/*
> +		 * Some controllers only optimize specific paths
> (typically the
> +		 * read path) and expect the core to use the regular
> SPI
> +		 * interface in these cases.
> +		 */
> +		if (!ret || ret != -ENOTSUPP)
> +			return ret;
> +	}
> +
> +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> +		     op->dummy.nbytes;
> +
> +	/*
> +	 * Allocate a buffer to transmit the CMD, ADDR cycles with
> kmalloc() so
> +	 * we're guaranteed that this buffer is DMA-able, as
> required by the
> +	 * SPI layer.
> +	 */
> +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> +	if (!tmpbuf)
> +		return -ENOMEM;
> +
> +	spi_message_init(&msg);
> +
> +	tmpbuf[0] = op->cmd.opcode;
> +	xfers[xferpos].tx_buf = tmpbuf;
> +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> +	spi_message_add_tail(&xfers[xferpos], &msg);
> +	xferpos++;
> +	totalxferlen++;
> +
> +	if (op->addr.nbytes) {
> +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + 1;
> +		xfers[xferpos].len = op->addr.nbytes;
> +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->addr.nbytes;
> +	}
> +
> +	if (op->dummy.nbytes) {
> +		memset(tmpbuf + op->addr.nbytes + 1, 0xff,
> op->dummy.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> +		xfers[xferpos].len = op->dummy.nbytes;
> +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->dummy.nbytes;
> +	}

Can't we use just one xfer for all the opcode, addr and dummy bytes ?

> +	if (op->data.nbytes) {
> +		if (op->data.dir == SPI_MEM_DATA_IN) {
> +			xfers[xferpos].rx_buf = op->data.buf.in;
> +			xfers[xferpos].rx_nbits = op->data.buswidth;
> +		} else {
> +			xfers[xferpos].tx_buf = op->data.buf.out;
> +			xfers[xferpos].tx_nbits = op->data.buswidth;
> +		}
> +
> +		xfers[xferpos].len = op->data.nbytes;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->data.nbytes;
> +	}
> +
> +	ret = spi_sync(mem->spi, &msg);
> +
> +	kfree(tmpbuf);
> +
> +	if (ret)
> +		return ret;
> +
> +	if (msg.actual_length != totalxferlen)
> +		return -EIO;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_exec_op);

[...]

Thanks,

Maxime
Boris Brezillon Feb. 6, 2018, 10:25 a.m. UTC | #2
Hi Maxime,

On Tue, 6 Feb 2018 10:43:30 +0100
Maxime Chevallier <maxime.chevallier@smile.fr> wrote:

> Hi Boris,
> 
> > From: Boris Brezillon <boris.brezillon@free-electrons.com>
> > 
> > Some controllers are exposing high-level interfaces to access various
> > kind of SPI memories. Unfortunately they do not fit in the current
> > spi_controller model and usually have drivers placed in
> > drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> > memories in general.
> > 
> > This is an attempt at defining a SPI memory interface which works for
> > all kinds of SPI memories (NORs, NANDs, SRAMs).
> > 
> > Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> > ---
> >  drivers/spi/spi.c       | 423
> > +++++++++++++++++++++++++++++++++++++++++++++++-
> > include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++ 2 files
> > changed, 646 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> > index b33a727a0158..57bc540a0521 100644
> > --- a/drivers/spi/spi.c
> > +++ b/drivers/spi/spi.c
> > @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct
> > spi_controller *ctlr) }
> >  #endif
> >    
> 
> [...]
> 
> > +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +	struct spi_transfer xfers[4] = { };
> > +	struct spi_message msg;
> > +	u8 *tmpbuf;
> > +	int ret;
> > +
> > +	if (!spi_mem_supports_op(mem, op))
> > +		return -ENOTSUPP;
> > +
> > +	if (ctlr->mem_ops) {
> > +		if (ctlr->auto_runtime_pm) {
> > +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> > +			if (ret < 0) {
> > +				dev_err(&ctlr->dev,
> > +					"Failed to power device:
> > %d\n",
> > +					ret);
> > +				return ret;
> > +			}
> > +		}
> > +
> > +		mutex_lock(&ctlr->bus_lock_mutex);
> > +		mutex_lock(&ctlr->io_mutex);
> > +		ret = ctlr->mem_ops->exec_op(mem, op);  
> 
> As a user, what prevented me from using spi_flash_read is that it
> bypasses the message queue. I have a setup that uses spi_async and I
> have to make sure everything goes in the right order, so I ended up
> using spi_write_then_read instead.
> 
> Is there a way to make so that the message that uses exec_op are issued
> in the correct order regarding messages that are already queued ?

Nope, that's not handled right now, and mem ops can indeed go in before
the already queued messages if spi_mem_exec_op() acquires the io_lock
before the dequeuing thread.

> 
> Maybe we could extend spi_message or spi_transfer to store all
> this opcode/dummy/addr information, so that we would use the standard
> interfaces spi_sync / spi_async, and make this mechanism of exec_op
> transparent from the user ?

That's a possibility. Note that SPI controllers are passed a spi_mem
object in ->exec_op(), not a spi_device. This is because I envision
we'll need to pass extra information to the controller to let it take
appropriate decisions (like the memory device size or other things that
may have an impact on how the controller handle the spi_mem_op
requests). 
Anyway, we could stuff a spi_mem_op and a spi_mem pointer in the
spi_message struct and adapt the queuing/dequeuing logic, but I fear
this will complicate the whole thing.

Another approach would be to flush the msg queue before calling
->exec_op(). That does not guarantee that the memory operation will be
placed exactly where it should (SPI messages might be queued while
we're pumping the queue), but I'm not sure we really care.

	/*
	 * When the generic queuing/dequeuing mechanism is in place
	 * pump all pending messages to enforce FIFO behavior, and
	 * execute the SPI mem operation after that.
	 */
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);
	ret = ctlr->mem_ops->exec_op(mem, op);
	...	

> 
> 
> > +		mutex_unlock(&ctlr->io_mutex);
> > +		mutex_unlock(&ctlr->bus_lock_mutex);
> > +
> > +		if (ctlr->auto_runtime_pm)
> > +			pm_runtime_put(ctlr->dev.parent);
> > +
> > +		/*
> > +		 * Some controllers only optimize specific paths
> > (typically the
> > +		 * read path) and expect the core to use the regular
> > SPI
> > +		 * interface in these cases.
> > +		 */
> > +		if (!ret || ret != -ENOTSUPP)
> > +			return ret;
> > +	}
> > +
> > +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> > +		     op->dummy.nbytes;
> > +
> > +	/*
> > +	 * Allocate a buffer to transmit the CMD, ADDR cycles with
> > kmalloc() so
> > +	 * we're guaranteed that this buffer is DMA-able, as
> > required by the
> > +	 * SPI layer.
> > +	 */
> > +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> > +	if (!tmpbuf)
> > +		return -ENOMEM;
> > +
> > +	spi_message_init(&msg);
> > +
> > +	tmpbuf[0] = op->cmd.opcode;
> > +	xfers[xferpos].tx_buf = tmpbuf;
> > +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> > +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> > +	spi_message_add_tail(&xfers[xferpos], &msg);
> > +	xferpos++;
> > +	totalxferlen++;
> > +
> > +	if (op->addr.nbytes) {
> > +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> > +		xfers[xferpos].tx_buf = tmpbuf + 1;
> > +		xfers[xferpos].len = op->addr.nbytes;
> > +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> > +		spi_message_add_tail(&xfers[xferpos], &msg);
> > +		xferpos++;
> > +		totalxferlen += op->addr.nbytes;
> > +	}
> > +
> > +	if (op->dummy.nbytes) {
> > +		memset(tmpbuf + op->addr.nbytes + 1, 0xff,
> > op->dummy.nbytes);
> > +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> > +		xfers[xferpos].len = op->dummy.nbytes;
> > +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> > +		spi_message_add_tail(&xfers[xferpos], &msg);
> > +		xferpos++;
> > +		totalxferlen += op->dummy.nbytes;
> > +	}  
> 
> Can't we use just one xfer for all the opcode, addr and dummy bytes ?

We can, if they have the same buswidth, but I didn't want to go that
far into optimization before making sure the approach was acceptable.
Definitely something I can add in a v2.

> 
> > +	if (op->data.nbytes) {
> > +		if (op->data.dir == SPI_MEM_DATA_IN) {
> > +			xfers[xferpos].rx_buf = op->data.buf.in;
> > +			xfers[xferpos].rx_nbits = op->data.buswidth;
> > +		} else {
> > +			xfers[xferpos].tx_buf = op->data.buf.out;
> > +			xfers[xferpos].tx_nbits = op->data.buswidth;
> > +		}
> > +
> > +		xfers[xferpos].len = op->data.nbytes;
> > +		spi_message_add_tail(&xfers[xferpos], &msg);
> > +		xferpos++;
> > +		totalxferlen += op->data.nbytes;
> > +	}
> > +
> > +	ret = spi_sync(mem->spi, &msg);
> > +
> > +	kfree(tmpbuf);
> > +
> > +	if (ret)
> > +		return ret;
> > +
> > +	if (msg.actual_length != totalxferlen)
> > +		return -EIO;
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(spi_mem_exec_op);  
> 
> [...]
> 

Thanks for your feedback.

Boris
Mark Brown Feb. 6, 2018, 12:06 p.m. UTC | #3
On Tue, Feb 06, 2018 at 10:43:30AM +0100, Maxime Chevallier wrote:

> As a user, what prevented me from using spi_flash_read is that it
> bypasses the message queue. I have a setup that uses spi_async and I
> have to make sure everything goes in the right order, so I ended up
> using spi_write_then_read instead.

> Is there a way to make so that the message that uses exec_op are issued
> in the correct order regarding messages that are already queued ?

> Maybe we could extend spi_message or spi_transfer to store all
> this opcode/dummy/addr information, so that we would use the standard
> interfaces spi_sync / spi_async, and make this mechanism of exec_op
> transparent from the user ?

Or have the flash read flush out the queue, perhaps — that might be
simpler. Pushing the memory-mapped read operations through the queue
does seem to defeat some of the purpose of having them, though I can
see it might be needed; I think the current applications were read-only
or at least read-mostly.
Miquel Raynal Feb. 9, 2018, 12:52 p.m. UTC | #4
Hi Boris,

Just a few comments on the form below.

On Tue,  6 Feb 2018 00:21:15 +0100, Boris Brezillon
<boris.brezillon@bootlin.com> wrote:

> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> ---
>  drivers/spi/spi.c       | 423 +++++++++++++++++++++++++++++++++++++++++++++++-
>  include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++
>  2 files changed, 646 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> index b33a727a0158..57bc540a0521 100644
> --- a/drivers/spi/spi.c
> +++ b/drivers/spi/spi.c
> @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct spi_controller *ctlr)
>  }
>  #endif
>  
> +static int spi_controller_check_ops(struct spi_controller *ctlr)
> +{
> +	/*
> +	 * The controller can implement only the high-level SPI-memory like
> +	 * operations if it does not support regular SPI transfers.
> +	 */
> +	if (ctlr->mem_ops) {
> +		if (!ctlr->mem_ops->supports_op ||
> +		    !ctlr->mem_ops->exec_op)
> +			return -EINVAL;
> +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> +		   !ctlr->transfer_one_message) {
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
>  /**
>   * spi_register_controller - register SPI master or slave controller
>   * @ctlr: initialized master, originally from spi_alloc_master() or
> @@ -2090,6 +2108,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  	if (!dev)
>  		return -ENODEV;
>  
> +	/*
> +	 * Make sure all necessary hooks are implemented before registering
> +	 * the SPI controller.
> +	 */
> +	status = spi_controller_check_ops(ctlr);
> +	if (status)
> +		return status;
> +
>  	if (!spi_controller_is_slave(ctlr)) {
>  		status = of_spi_register_master(ctlr);
>  		if (status)
> @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  			spi_controller_is_slave(ctlr) ? "slave" : "master",
>  			dev_name(&ctlr->dev));
>  
> -	/* If we're using a queued driver, start the queue */
> -	if (ctlr->transfer)
> +	/*
> +	 * If we're using a queued driver, start the queue. Note that we don't
> +	 * need the queueing logic if the driver is only supporting high-level
> +	 * memory operations.
> +	 */
> +	if (ctlr->transfer) {
>  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> -	else {
> +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
>  		status = spi_controller_initialize_queue(ctlr);
>  		if (status) {
>  			device_del(&ctlr->dev);
> @@ -2893,6 +2923,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
>  {
>  	struct spi_controller *ctlr = spi->controller;
>  
> +	/*
> +	 * Some controllers do not support doing regular SPI transfers. Return
> +	 * ENOTSUPP when this is the case.
> +	 */
> +	if (!ctlr->transfer)
> +		return -ENOTSUPP;
> +
>  	message->spi = spi;
>  
>  	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
> @@ -3321,6 +3358,386 @@ int spi_write_then_read(struct spi_device *spi,
>  }
>  EXPORT_SYMBOL_GPL(spi_write_then_read);
>  
> +/**
> + * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
> + *					  memory operation
> + * @ctlr: the SPI controller requesting this dma_map()
> + * @op: the memory operation containing the buffer to map
> + * @sgt: a pointer to a non-initialized sg_table that will be filled by this
> + *	 function
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares everything for you and provides a ready-to-use
> + * sg_table. This function is not intended to be called from spi drivers.
> + * Only SPI controller drivers should use it.
> + * Note that the caller must ensure the memory region pointed by
> + * op->data.buf.{in,out} is DMA-able before calling this function.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +				       const struct spi_mem_op *op,
> +				       struct sg_table *sgt)
> +{
> +	struct device *dmadev;
> +
> +	if (!op->data.nbytes)
> +		return -EINVAL;
> +
> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +	if (!dmadev)
> +		return -EINVAL;
> +
> +	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
> +			   op->data.dir == SPI_MEM_DATA_IN ?
> +			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
> +
> +/**
> + * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
> + *					    memory operation
> + * @ctlr: the SPI controller requesting this dma_unmap()
> + * @op: the memory operation containing the buffer to unmap
> + * @sgt: a pointer to an sg_table previously initialized by
> + *	 spi_controller_dma_map_mem_op_data()
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares things so that the CPU can access the
> + * op->data.buf.{in,out} buffer again.
> + *
> + * This function is not intended to be called from spi drivers. Only SPI

s/spi/SPI/

> + * controller drivers should use it.
> + *
> + * This function should be called after the DMA operation has finished an is

s/an/and/

> + * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
> + * 0.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +					  const struct spi_mem_op *op,
> +					  struct sg_table *sgt)
> +{
> +	struct device *dmadev;
> +
> +	if (!op->data.nbytes)
> +		return;
> +
> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +	spi_unmap_buf(ctlr, dmadev, sgt,
> +		      op->data.dir == SPI_MEM_DATA_IN ?
> +		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
> +
> +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> +{
> +	u32 mode = mem->spi->mode;
> +
> +	switch (buswidth) {
> +	case 1:
> +		return 0;
> +
> +	case 2:
> +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
> +			return 0;
> +
> +		break;
> +
> +	case 4:
> +		if ((tx && (mode & SPI_TX_QUAD)) ||
> +		    (!tx && (mode & SPI_RX_QUAD)))
> +			return 0;
> +
> +		break;
> +
> +	default:
> +		break;
> +	}
> +
> +	return -ENOTSUPP;
> +}
> +
> +/**
> + * spi_mem_supports_op() - Check if a memory device and the controller it is
> + *			   connected to support a specific memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to check
> + *
> + * Some controllers are only supporting Single or Dual IOs, others might only
> + * support specific opcodes, or it can even be that the controller and device
> + * both support Quad IOs but the hardware prevents you from using it because
> + * only 2 IO lines are connected.
> + *
> + * This function checks whether a specific operation is supported.
> + *
> + * Return: true if @op is supported, false otherwise.
> + */
> +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	struct spi_controller *ctlr = mem->spi->controller;
> +
> +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> +		return false;
> +
> +	if (op->addr.nbytes &&
> +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
> +		return false;
> +
> +	if (op->dummy.nbytes &&
> +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> +		return false;
> +
> +	if (op->data.nbytes &&
> +	    spi_check_buswidth_req(mem, op->data.buswidth,
> +				   op->data.dir == SPI_MEM_DATA_IN ?
> +				   false : true))

Why not just op->data.dir != SPI_MEM_DATA_IN or even better ==
SPI_MEM_DATA_OUT if it exists?

> +		return false;
> +
> +	if (ctlr->mem_ops)
> +		return ctlr->mem_ops->supports_op(mem, op);
> +
> +	return true;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
> +
> +/**
> + * spi_mem_exec_op() - Execute a memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to execute
> + *
> + * Executes a memory operation.
> + *
> + * This function first checks that @op is supported and then tries to execute
> + * it.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> +	struct spi_controller *ctlr = mem->spi->controller;
> +	struct spi_transfer xfers[4] = { };
> +	struct spi_message msg;
> +	u8 *tmpbuf;
> +	int ret;
> +
> +	if (!spi_mem_supports_op(mem, op))
> +		return -ENOTSUPP;
> +
> +	if (ctlr->mem_ops) {
> +		if (ctlr->auto_runtime_pm) {
> +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> +			if (ret < 0) {
> +				dev_err(&ctlr->dev,
> +					"Failed to power device: %d\n",
> +					ret);
> +				return ret;
> +			}
> +		}
> +
> +		mutex_lock(&ctlr->bus_lock_mutex);
> +		mutex_lock(&ctlr->io_mutex);
> +		ret = ctlr->mem_ops->exec_op(mem, op);
> +		mutex_unlock(&ctlr->io_mutex);
> +		mutex_unlock(&ctlr->bus_lock_mutex);

Isn't this a bit dangerous? I guess no one should release the bus
lock without having already released the IO lock, but maybe this should
be clearly mentioned in a comment in the original structure definition?

> +
> +		if (ctlr->auto_runtime_pm)
> +			pm_runtime_put(ctlr->dev.parent);
> +
> +		/*
> +		 * Some controllers only optimize specific paths (typically the
> +		 * read path) and expect the core to use the regular SPI
> +		 * interface in these cases.
> +		 */
> +		if (!ret || ret != -ENOTSUPP)
> +			return ret;
> +	}
> +
> +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> +		     op->dummy.nbytes;
> +
> +	/*
> +	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
> +	 * we're guaranteed that this buffer is DMA-able, as required by the
> +	 * SPI layer.
> +	 */
> +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> +	if (!tmpbuf)
> +		return -ENOMEM;
> +
> +	spi_message_init(&msg);
> +
> +	tmpbuf[0] = op->cmd.opcode;
> +	xfers[xferpos].tx_buf = tmpbuf;
> +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> +	spi_message_add_tail(&xfers[xferpos], &msg);
> +	xferpos++;
> +	totalxferlen++;
> +
> +	if (op->addr.nbytes) {
> +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + 1;
> +		xfers[xferpos].len = op->addr.nbytes;
> +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->addr.nbytes;
> +	}
> +
> +	if (op->dummy.nbytes) {
> +		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> +		xfers[xferpos].len = op->dummy.nbytes;
> +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->dummy.nbytes;
> +	}
> +
> +	if (op->data.nbytes) {
> +		if (op->data.dir == SPI_MEM_DATA_IN) {
> +			xfers[xferpos].rx_buf = op->data.buf.in;
> +			xfers[xferpos].rx_nbits = op->data.buswidth;
> +		} else {
> +			xfers[xferpos].tx_buf = op->data.buf.out;
> +			xfers[xferpos].tx_nbits = op->data.buswidth;
> +		}
> +
> +		xfers[xferpos].len = op->data.nbytes;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->data.nbytes;
> +	}
> +
> +	ret = spi_sync(mem->spi, &msg);
> +
> +	kfree(tmpbuf);
> +
> +	if (ret)
> +		return ret;
> +
> +	if (msg.actual_length != totalxferlen)
> +		return -EIO;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_exec_op);
> +
> +/**
> + * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
> + *			      match controller limitations
> + * @mem: the SPI memory
> + * @op: the operation to adjust
> + *
> + * Some controllers have FIFO limitations and must split a data transfer
> + * operation into multiple ones, others require a specific alignment for
> + * optimized accesses. This function allows SPI mem drivers to split a single
> + * operation into multiple sub-operations when required.
> + *
> + * Return: a negative error code if the controller can't properly adjust @op,
> + *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
> + *	   can't be handled in a single step.
> + */
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
> +{
> +	struct spi_controller *ctlr = mem->spi->controller;
> +
> +	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
> +		return ctlr->mem_ops->adjust_op_size(mem, op);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
> +
> +static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
> +{
> +	return container_of(drv, struct spi_mem_driver, spidrv.driver);
> +}
> +
> +static int spi_mem_probe(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem;
> +
> +	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
> +	if (!mem)
> +		return -ENOMEM;
> +
> +	mem->spi = spi;
> +	spi_set_drvdata(spi, mem);
> +
> +	return memdrv->probe(mem);
> +}
> +
> +static int spi_mem_remove(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +	if (memdrv->remove)
> +		return memdrv->remove(mem);
> +
> +	return 0;
> +}
> +
> +static void spi_mem_shutdown(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +	if (memdrv->shutdown)
> +		memdrv->shutdown(mem);
> +}
> +
> +/**
> + * spi_mem_driver_register_with_owner() - Register a SPI memory driver
> + * @memdrv: the SPI memory driver to register
> + * @owner: the owner of this driver
> + *
> + * Registers a SPI memory driver.
> + *
> + * Return: 0 in case of success, a negative error core otherwise.
> + */
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
> +				       struct module *owner)
> +{
> +	memdrv->spidrv.probe = spi_mem_probe;
> +	memdrv->spidrv.remove = spi_mem_remove;
> +	memdrv->spidrv.shutdown = spi_mem_shutdown;
> +
> +	return __spi_register_driver(owner, &memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
> +
> +/**
> + * spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
> + * @memdrv: the SPI memory driver to unregister
> + *
> + * Unregisters a SPI memory driver.
> + */
> +void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
> +{
> +	spi_unregister_driver(&memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
> +
>  /*-------------------------------------------------------------------------*/
>  
>  #if IS_ENABLED(CONFIG_OF_DYNAMIC)
> diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
> index 7b2170bfd6e7..af3c4ac62b55 100644
> --- a/include/linux/spi/spi.h
> +++ b/include/linux/spi/spi.h
> @@ -27,6 +27,7 @@ struct property_entry;
>  struct spi_controller;
>  struct spi_transfer;
>  struct spi_flash_read_message;
> +struct spi_controller_mem_ops;
>  
>  /*
>   * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
> @@ -376,6 +377,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
>   *                    transfer_one callback.
>   * @handle_err: the subsystem calls the driver to handle an error that occurs
>   *		in the generic implementation of transfer_one_message().
> + * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
> + *	     This field is optional and should only be implemented if the
> + *	     controller has native support for memory like operations.
>   * @unprepare_message: undo any work done by prepare_message().
>   * @slave_abort: abort the ongoing transfer request on an SPI slave controller
>   * @spi_flash_read: to support spi-controller hardwares that provide
> @@ -564,6 +568,9 @@ struct spi_controller {
>  	void (*handle_err)(struct spi_controller *ctlr,
>  			   struct spi_message *message);
>  
> +	/* Optimized handlers for SPI memory-like operations. */
> +	const struct spi_controller_mem_ops *mem_ops;
> +
>  	/* gpio chip select */
>  	int			*cs_gpios;
>  
> @@ -1227,6 +1234,225 @@ int spi_flash_read(struct spi_device *spi,
>  
>  /*---------------------------------------------------------------------------*/
>  
> +/* SPI memory related definitions. */
> +
> +#define SPI_MEM_OP_CMD(__opcode, __buswidth)			\
> +	{							\
> +		.buswidth = __buswidth,				\
> +		.opcode = __opcode,				\
> +	}
> +
> +#define SPI_MEM_OP_ADDRS(__nbytes, __buf, __buswidth)		\
> +	{							\
> +		.nbytes = __nbytes,				\
> +		.buf = __buf,					\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_ADDRS	{ }
> +
> +#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)			\
> +	{							\
> +		.nbytes = __nbytes,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_DUMMY	{ }
> +
> +#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)		\
> +	{							\
> +		.dir = SPI_MEM_DATA_IN,				\
> +		.nbytes = __nbytes,				\
> +		.buf.in = __buf,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)	\
> +	{							\
> +		.dir = SPI_MEM_DATA_OUT,			\
> +		.nbytes = __nbytes,				\
> +		.buf.out = __buf,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_DATA	{ }
> +
> +/**
> + * enum spi_mem_data_dir - describes the direction of a SPI memory data
> + *			   transfer from the controller perspective
> + * @SPI_MEM_DATA_IN: data coming from the SPI memory
> + * @SPI_MEM_DATA_OUT: data sent the SPI memory
> + */
> +enum spi_mem_data_dir {
> +	SPI_MEM_DATA_IN,
> +	SPI_MEM_DATA_OUT,
> +};
> +
> +/**
> + * struct spi_mem_op - describes a SPI memory operation
> + * @cmd.buswidth: number of IO lines used to transmit the command
> + * @cmd.opcode: operation opcode
> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> + *		 does not need to send an address
> + * @addr.buswidth: number of IO lines used to transmit the address cycles
> + * @addr.buf: buffer storing the address bytes
> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> + *		  be zero if the operation does not require dummy bytes
> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> + * @data.buswidth: number of IO lanes used to send/receive the data
> + * @data.dir: direction of the transfer
> + * @data.buf.in: input buffer
> + * @data.buf.out: output buffer
> + */
> +struct spi_mem_op {
> +	struct {
> +		u8 buswidth;
> +		u8 opcode;
> +	} cmd;
> +
> +	struct {
> +		u8 nbytes;
> +		u8 buswidth;
> +		const u8 *buf;
> +	} addr;
> +
> +	struct {
> +		u8 nbytes;
> +		u8 buswidth;
> +	} dummy;
> +
> +	struct {
> +		u8 buswidth;
> +		enum spi_mem_data_dir dir;
> +		unsigned int nbytes;
> +		/* buf.{in,out} must be DMA-able. */
> +		union {
> +			void *in;
> +			const void *out;
> +		} buf;
> +	} data;
> +};
> +
> +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)		\
> +	{							\
> +		.cmd = __cmd,					\
> +		.addr = __addr,					\
> +		.dummy = __dummy,				\
> +		.data = __data,					\
> +	}
> +
> +/**
> + * struct spi_mem - describes a SPI memory device
> + * @spi: the underlying SPI device
> + * @drvpriv: spi_mem_drviver private data
> + *
> + * Extra information that describe the SPI memory device and may be needed by
> + * the controller to properly handle this device should be placed here.
> + *
> + * One example would be the device size since some controller expose their SPI
> + * mem devices through a io-mapped region.
> + */
> +struct spi_mem {
> +	struct spi_device *spi;
> +	void *drvpriv;
> +};
> +
> +/**
> + * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem
> + *				  device
> + * @mem: memory device
> + * @data: data to attach to the memory device
> + */
> +static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
> +{
> +	mem->drvpriv = data;
> +}
> +
> +/**
> + * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem
> + *				  device
> + * @mem: memory device
> + *
> + * Return: the data attached to the mem device.
> + */
> +static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
> +{
> +	return mem->drvpriv;
> +}
> +
> +/**
> + * struct spi_controller_mem_ops - SPI memory operations
> + * @adjust_op_size: shrink the data xfer of an operation to match controller's
> + *		    limitations (can be alignment of max RX/TX size
> + *		    limitations)
> + * @supports_op: check if an operation is supported by the controller
> + * @exec_op: execute a SPI memory operation
> + *
> + * This interface should be implemented by SPI controllers providing an
> + * high-level interface to execute SPI memory operation, which is usually the
> + * case for QSPI controllers.
> + */
> +struct spi_controller_mem_ops {
> +	int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
> +	bool (*supports_op)(struct spi_mem *mem,
> +			    const struct spi_mem_op *op);
> +	int (*exec_op)(struct spi_mem *mem,
> +		       const struct spi_mem_op *op);

:)

> +};
> +
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +				       const struct spi_mem_op *op,
> +				       struct sg_table *sg);
> +
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +					  const struct spi_mem_op *op,
> +					  struct sg_table *sg);
> +
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
> +
> +bool spi_mem_supports_op(struct spi_mem *mem,
> +			 const struct spi_mem_op *op);
> +
> +int spi_mem_exec_op(struct spi_mem *mem,
> +		    const struct spi_mem_op *op);
> +
> +/**
> + * struct spi_mem_driver - SPI memory driver
> + * @spidrv: inherit from a SPI driver
> + * @probe: probe a SPI memory. Usually where detection/initialization takes
> + *	   place
> + * @remove: remove a SPI memory
> + * @shutdown: take appropriate action when the system is shutdown
> + *
> + * This is just a thin wrapper around a spi_driver. The core takes care of
> + * allocating the spi_mem object and forwarding the probe/remove/shutdown
> + * request to the spi_mem_driver. The reason we use this wrapper is because
> + * we might have to stuff more information into the spi_mem struct to let
> + * SPI controllers know more about the SPI memory they interact with, and
> + * having this intermediate layer allows us to do that without adding more
> + * useless fields to the spi_device object.
> + */
> +struct spi_mem_driver {
> +	struct spi_driver spidrv;
> +	int (*probe)(struct spi_mem *mem);
> +	int (*remove)(struct spi_mem *mem);
> +	void (*shutdown)(struct spi_mem *mem);
> +};
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
> +				       struct module *owner);
> +
> +#define spi_mem_driver_register(__drv)                                  \
> +	spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
> +
> +void spi_mem_driver_unregister(struct spi_mem_driver *drv);
> +
> +#define module_spi_mem_driver(__drv)                                    \
> +	module_driver(__drv, spi_mem_driver_register,                   \
> +		      spi_mem_driver_unregister)
> +
> +/*---------------------------------------------------------------------------*/
> +
>  /*
>   * INTERFACE between board init code and SPI infrastructure.
>   *


Best regards,
Miquèl
Boris Brezillon Feb. 11, 2018, 4 p.m. UTC | #5
Hi Miquel,

On Fri, 9 Feb 2018 13:52:05 +0100
Miquel Raynal <miquel.raynal@bootlin.com> wrote:

> > +/**
> > + * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
> > + *					    memory operation
> > + * @ctlr: the SPI controller requesting this dma_unmap()
> > + * @op: the memory operation containing the buffer to unmap
> > + * @sgt: a pointer to an sg_table previously initialized by
> > + *	 spi_controller_dma_map_mem_op_data()
> > + *
> > + * Some controllers might want to do DMA on the data buffer embedded in @op.
> > + * This helper prepares things so that the CPU can access the
> > + * op->data.buf.{in,out} buffer again.
> > + *
> > + * This function is not intended to be called from spi drivers. Only SPI  
> 
> s/spi/SPI/
> 
> > + * controller drivers should use it.
> > + *
> > + * This function should be called after the DMA operation has finished an is  
> 
> s/an/and/

Will fix both.

> 
> > + * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
> > + * 0.
> > + *
> > + * Return: 0 in case of success, a negative error code otherwise.
> > + */
> > +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> > +					  const struct spi_mem_op *op,
> > +					  struct sg_table *sgt)
> > +{
> > +	struct device *dmadev;
> > +
> > +	if (!op->data.nbytes)
> > +		return;
> > +
> > +	if (op->data.dir == SPI_MEM_DATA_OUT)
> > +		dmadev = ctlr->dma_tx ?
> > +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> > +	else
> > +		dmadev = ctlr->dma_rx ?
> > +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> > +
> > +	spi_unmap_buf(ctlr, dmadev, sgt,
> > +		      op->data.dir == SPI_MEM_DATA_IN ?
> > +		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> > +}
> > +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
> > +
> > +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> > +{
> > +	u32 mode = mem->spi->mode;
> > +
> > +	switch (buswidth) {
> > +	case 1:
> > +		return 0;
> > +
> > +	case 2:
> > +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> > +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
> > +			return 0;
> > +
> > +		break;
> > +
> > +	case 4:
> > +		if ((tx && (mode & SPI_TX_QUAD)) ||
> > +		    (!tx && (mode & SPI_RX_QUAD)))
> > +			return 0;
> > +
> > +		break;
> > +
> > +	default:
> > +		break;
> > +	}
> > +
> > +	return -ENOTSUPP;
> > +}
> > +
> > +/**
> > + * spi_mem_supports_op() - Check if a memory device and the controller it is
> > + *			   connected to support a specific memory operation
> > + * @mem: the SPI memory
> > + * @op: the memory operation to check
> > + *
> > + * Some controllers are only supporting Single or Dual IOs, others might only
> > + * support specific opcodes, or it can even be that the controller and device
> > + * both support Quad IOs but the hardware prevents you from using it because
> > + * only 2 IO lines are connected.
> > + *
> > + * This function checks whether a specific operation is supported.
> > + *
> > + * Return: true if @op is supported, false otherwise.
> > + */
> > +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +
> > +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> > +		return false;
> > +
> > +	if (op->addr.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
> > +		return false;
> > +
> > +	if (op->dummy.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> > +		return false;
> > +
> > +	if (op->data.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->data.buswidth,
> > +				   op->data.dir == SPI_MEM_DATA_IN ?
> > +				   false : true))  
> 
> Why not just op->data.dir != SPI_MEM_DATA_IN or even better ==
> SPI_MEM_DATA_OUT if it exists?

Indeed, I'll use op->data.dir == SPI_MEM_DATA_OUT.

> 
> > +		return false;
> > +
> > +	if (ctlr->mem_ops)
> > +		return ctlr->mem_ops->supports_op(mem, op);
> > +
> > +	return true;
> > +}
> > +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
> > +
> > +/**
> > + * spi_mem_exec_op() - Execute a memory operation
> > + * @mem: the SPI memory
> > + * @op: the memory operation to execute
> > + *
> > + * Executes a memory operation.
> > + *
> > + * This function first checks that @op is supported and then tries to execute
> > + * it.
> > + *
> > + * Return: 0 in case of success, a negative error code otherwise.
> > + */
> > +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +	struct spi_transfer xfers[4] = { };
> > +	struct spi_message msg;
> > +	u8 *tmpbuf;
> > +	int ret;
> > +
> > +	if (!spi_mem_supports_op(mem, op))
> > +		return -ENOTSUPP;
> > +
> > +	if (ctlr->mem_ops) {
> > +		if (ctlr->auto_runtime_pm) {
> > +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> > +			if (ret < 0) {
> > +				dev_err(&ctlr->dev,
> > +					"Failed to power device: %d\n",
> > +					ret);
> > +				return ret;
> > +			}
> > +		}
> > +
> > +		mutex_lock(&ctlr->bus_lock_mutex);
> > +		mutex_lock(&ctlr->io_mutex);
> > +		ret = ctlr->mem_ops->exec_op(mem, op);
> > +		mutex_unlock(&ctlr->io_mutex);
> > +		mutex_unlock(&ctlr->bus_lock_mutex);  
> 
> Is not this a bit dangerous? I guess that no one should release the bus
> lock without having already released the IO lock but maybe this should
> be clearly mentioned in a comment in the original structure definition?

It's not something new, spi_flash_read() was doing the same [1]. This
being said, if Mark agrees, I can add a comment in the spi_controller
struct def stating that ->bus_lock_mutex should always be acquired
before ->io_mutex.

Thanks for your review.

Boris

[1]http://elixir.free-electrons.com/linux/v4.15.2/source/drivers/spi/spi.c#L3045
Raghavendra, Vignesh Feb. 12, 2018, 11:50 a.m. UTC | #6
Hi,

On Tuesday 06 February 2018 04:51 AM, Boris Brezillon wrote:
> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).

Wouldn't it be better if spi_mem* implementations were in separate file?
drivers/spi/spi.c is 3.5K+ lines and is a bit difficult to follow.
Boris Brezillon Feb. 12, 2018, 12:28 p.m. UTC | #7
On Mon, 12 Feb 2018 17:20:46 +0530
Vignesh R <vigneshr@ti.com> wrote:

> Hi,
> 
> On Tuesday 06 February 2018 04:51 AM, Boris Brezillon wrote:
> > From: Boris Brezillon <boris.brezillon@free-electrons.com>
> > 
> > Some controllers are exposing high-level interfaces to access various
> > kind of SPI memories. Unfortunately they do not fit in the current
> > spi_controller model and usually have drivers placed in
> > drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> > memories in general.
> > 
> > This is an attempt at defining a SPI memory interface which works for
> > all kinds of SPI memories (NORs, NANDs, SRAMs).  
> 
> Wouldn't it be better if spi_mem* implementations were in separate file?
> drivers/spi/spi.c is 3.5K+ lines and is a bit difficult to follow.

No strong opinion about where the spi_mem code should lie, so if Mark
agrees I can move it to a different file and possibly make it optional
with a Kconfig option.
Mark Brown Feb. 19, 2018, 1:53 p.m. UTC | #8
On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:

> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.

> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).

It would be helpful if this changelog were to describe what the
problems are and how the patch addresses them.  Someone reading the git
history should be able to tell what the patch is doing without having to
google around to find a cover letter for the patch series.  It also
feels like this should be split up a bit - there's some preparatory
refactoring here, a new API, and an adaption layer to implement that API
with actual SPI controllers.  It's a very large patch doing several
different things and splitting it up would make it easier to review.

I have to say I'm rather unclear why this is being done in the SPI core
rather than as a layer on top of it - devices doing the new API can't
support normal SPI clients at all and everything about this is entirely
flash specific.  You've got a bit about that in your cover letter, I'll
reply there.

> @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  			spi_controller_is_slave(ctlr) ? "slave" : "master",
>  			dev_name(&ctlr->dev));
>  
> -	/* If we're using a queued driver, start the queue */
> -	if (ctlr->transfer)
> +	/*
> +	 * If we're using a queued driver, start the queue. Note that we don't
> +	 * need the queueing logic if the driver is only supporting high-level
> +	 * memory operations.
> +	 */
> +	if (ctlr->transfer) {
>  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> -	else {
> +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
>  		status = spi_controller_initialize_queue(ctlr);
>  		if (status) {
>  			device_del(&ctlr->dev);

This for example feels like a separate refactoring which could be split
out.

> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;

Please don't abuse the ternary operator like this :(
Mark Brown Feb. 19, 2018, 2 p.m. UTC | #9
On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:

> +	/*
> +	 * The controller can implement only the high-level SPI-memory like
> +	 * operations if it does not support regular SPI transfers.
> +	 */
> +	if (ctlr->mem_ops) {
> +		if (!ctlr->mem_ops->supports_op ||
> +		    !ctlr->mem_ops->exec_op)
> +			return -EINVAL;
> +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> +		   !ctlr->transfer_one_message) {
> +		return -EINVAL;
> +	}

BTW your comment isn't describing what the code does - the comment says
that having the memory operations means the driver can't be a regular
SPI controller while the code does not do that and only checks that if a
driver has memory ops it implements two required ones.  Indeed the
existing drivers that are updated to the new API continue to implement
normal SPI operations.
Boris Brezillon Feb. 19, 2018, 2:20 p.m. UTC | #10
Hi Mark,

On Mon, 19 Feb 2018 13:53:57 +0000
Mark Brown <broonie@kernel.org> wrote:

> On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:
> 
> > Some controllers are exposing high-level interfaces to access various
> > kind of SPI memories. Unfortunately they do not fit in the current
> > spi_controller model and usually have drivers placed in
> > drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> > memories in general.  
> 
> > This is an attempt at defining a SPI memory interface which works for
> > all kinds of SPI memories (NORs, NANDs, SRAMs).  
> 
> It would be helpful if this changelog were to describe what the
> problems are and how the patch addresses them.  Someone reading the git
> history should be able to tell what the patch is doing without having to
> google around to find a cover letter for the patch series.

Sure. I'll copy the explanation I give in my cover letter here.

> It also
> feels like this should be split up a bit - there's some preparatory
> refactoring here, a new API, and an adaption layer to implement that API
> with actual SPI controllers.  It's a very large patch doing several
> different things and splitting it up would make it easier to review.

Noted. I'll try to split things up.

> 
> I have to say I'm rather unclear why this is being done in the SPI core
> rather than as a layer on top of it - devices doing the new API can't
> support normal SPI clients at all and everything about this is entirely
> flash specific.  You've got a bit about that in your cover letter, I'll
> reply there.

Then I'll reply there.

> 
> > @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
> >  			spi_controller_is_slave(ctlr) ? "slave" : "master",
> >  			dev_name(&ctlr->dev));
> >  
> > -	/* If we're using a queued driver, start the queue */
> > -	if (ctlr->transfer)
> > +	/*
> > +	 * If we're using a queued driver, start the queue. Note that we don't
> > +	 * need the queueing logic if the driver is only supporting high-level
> > +	 * memory operations.
> > +	 */
> > +	if (ctlr->transfer) {
> >  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> > -	else {
> > +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
> >  		status = spi_controller_initialize_queue(ctlr);
> >  		if (status) {
> >  			device_del(&ctlr->dev);  
> 
> This for example feels like a separate refactoring which could be split
> out.

Hm, this change is not really required without the spi_mem stuff. I
mean, the only cases we have right now are:

1/ the controller implements ->transfer() on its own
2/ the controller implements ctlr->transfer_one()
   or ctlr->transfer_one_message() and relies on the default
   queueing/dequeuing mechanism

The spi_mem stuff adds a 3rd case:

3/ the controller only supports memory-like operations and in this case
   we don't need to initialize the queue

which is why I added this else if() at the same time I added the
spi_mem stuff.

> 
> > +	if (op->data.dir == SPI_MEM_DATA_OUT)
> > +		dmadev = ctlr->dma_tx ?
> > +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> > +	else
> > +		dmadev = ctlr->dma_rx ?
> > +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;  
> 
> Please don't abuse the ternary operator like this :(  

I'll try to remember that.

Thanks for your review.

Boris
Boris Brezillon Feb. 19, 2018, 2:32 p.m. UTC | #11
On Mon, 19 Feb 2018 14:00:03 +0000
Mark Brown <broonie@kernel.org> wrote:

> On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:
> 
> > +	/*
> > +	 * The controller can implement only the high-level SPI-memory like
> > +	 * operations if it does not support regular SPI transfers.
> > +	 */
> > +	if (ctlr->mem_ops) {
> > +		if (!ctlr->mem_ops->supports_op ||
> > +		    !ctlr->mem_ops->exec_op)
> > +			return -EINVAL;
> > +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> > +		   !ctlr->transfer_one_message) {
> > +		return -EINVAL;
> > +	}  
> 
> BTW your comment isn't describing what the code does - the comment says
> that having the memory operations means the driver can't be a regular
> SPI controller while the code does not do that and only checks that if a
> driver has memory ops it implements two required ones.  Indeed the
> existing drivers that are updated to the new API continue to implement
> normal SPI operations.

Definitely not what I wanted to say :-/. I guess replacing 'can' by
'may' would be more appropriate. What I want to say is that SPI
controllers do not have to implement the hooks for regular SPI
operations if they only support SPI-mem like operations, but of course
they can implement those hooks if they support both spi_mem and regular
SPI ops.

This check is here to allow registration of SPI controllers that
support spi_mem ops, regular ops or both, and prevent registration if
both spi_mem and regular hooks are missing.

Is it clearer?
Peter Pan Feb. 28, 2018, 7:51 a.m. UTC | #12
Hi Boris,

I'm away from work for the last month for some personal reason.
Now I'm trying to catch up your great effort on SPI memory stuff.

Below is some comments:


On Tue, Feb 6, 2018 at 7:21 AM, Boris Brezillon
<boris.brezillon@bootlin.com> wrote:
> From: Boris Brezillon <boris.brezillon@free-electrons.com>
>
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
>
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).
>
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> ---
>  drivers/spi/spi.c       | 423 +++++++++++++++++++++++++++++++++++++++++++++++-
>  include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++
>  2 files changed, 646 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> index b33a727a0158..57bc540a0521 100644
> --- a/drivers/spi/spi.c
> +++ b/drivers/spi/spi.c
> @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct spi_controller *ctlr)
>  }
>  #endif
>
> +static int spi_controller_check_ops(struct spi_controller *ctlr)
> +{
> +       /*
> +        * The controller can implement only the high-level SPI-memory like
> +        * operations if it does not support regular SPI transfers.
> +        */
> +       if (ctlr->mem_ops) {
> +               if (!ctlr->mem_ops->supports_op ||
> +                   !ctlr->mem_ops->exec_op)
> +                       return -EINVAL;
> +       } else if (!ctlr->transfer && !ctlr->transfer_one &&
> +                  !ctlr->transfer_one_message) {
> +               return -EINVAL;
> +       }
> +
> +       return 0;
> +}
> +
>  /**
>   * spi_register_controller - register SPI master or slave controller
>   * @ctlr: initialized master, originally from spi_alloc_master() or
> @@ -2090,6 +2108,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>         if (!dev)
>                 return -ENODEV;
>
> +       /*
> +        * Make sure all necessary hooks are implemented before registering
> +        * the SPI controller.
> +        */
> +       status = spi_controller_check_ops(ctlr);
> +       if (status)
> +               return status;
> +
>         if (!spi_controller_is_slave(ctlr)) {
>                 status = of_spi_register_master(ctlr);
>                 if (status)
> @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>                         spi_controller_is_slave(ctlr) ? "slave" : "master",
>                         dev_name(&ctlr->dev));
>
> -       /* If we're using a queued driver, start the queue */
> -       if (ctlr->transfer)
> +       /*
> +        * If we're using a queued driver, start the queue. Note that we don't
> +        * need the queueing logic if the driver is only supporting high-level
> +        * memory operations.
> +        */
> +       if (ctlr->transfer) {
>                 dev_info(dev, "controller is unqueued, this is deprecated\n");
> -       else {
> +       } else if (ctlr->transfer_one || ctlr->transfer_one_message) {
>                 status = spi_controller_initialize_queue(ctlr);
>                 if (status) {
>                         device_del(&ctlr->dev);
> @@ -2893,6 +2923,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
>  {
>         struct spi_controller *ctlr = spi->controller;
>
> +       /*
> +        * Some controllers do not support doing regular SPI transfers. Return
> +        * ENOTSUPP when this is the case.
> +        */
> +       if (!ctlr->transfer)
> +               return -ENOTSUPP;
> +
>         message->spi = spi;
>
>         SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
> @@ -3321,6 +3358,386 @@ int spi_write_then_read(struct spi_device *spi,
>  }
>  EXPORT_SYMBOL_GPL(spi_write_then_read);
>
> +/**
> + * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
> + *                                       memory operation
> + * @ctlr: the SPI controller requesting this dma_map()
> + * @op: the memory operation containing the buffer to map
> + * @sgt: a pointer to a non-initialized sg_table that will be filled by this
> + *      function
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares everything for you and provides a ready-to-use
> + * sg_table. This function is not intended to be called from spi drivers.
> + * Only SPI controller drivers should use it.
> + * Note that the caller must ensure the memory region pointed by
> + * op->data.buf.{in,out} is DMA-able before calling this function.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +                                      const struct spi_mem_op *op,
> +                                      struct sg_table *sgt)
> +{
> +       struct device *dmadev;
> +
> +       if (!op->data.nbytes)
> +               return -EINVAL;
> +
> +       if (op->data.dir == SPI_MEM_DATA_OUT)
> +               dmadev = ctlr->dma_tx ?
> +                        ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +       else
> +               dmadev = ctlr->dma_rx ?
> +                        ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +       if (!dmadev)
> +               return -EINVAL;
> +
> +       return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
> +                          op->data.dir == SPI_MEM_DATA_IN ?
> +                          DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
> +
> +/**
> + * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
> + *                                         memory operation
> + * @ctlr: the SPI controller requesting this dma_unmap()
> + * @op: the memory operation containing the buffer to unmap
> + * @sgt: a pointer to an sg_table previously initialized by
> + *      spi_controller_dma_map_mem_op_data()
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares things so that the CPU can access the
> + * op->data.buf.{in,out} buffer again.
> + *
> + * This function is not intended to be called from spi drivers. Only SPI
> + * controller drivers should use it.
> + *
> + * This function should be called after the DMA operation has finished an is
> + * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
> + * 0.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +                                         const struct spi_mem_op *op,
> +                                         struct sg_table *sgt)
> +{
> +       struct device *dmadev;
> +
> +       if (!op->data.nbytes)
> +               return;
> +
> +       if (op->data.dir == SPI_MEM_DATA_OUT)
> +               dmadev = ctlr->dma_tx ?
> +                        ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +       else
> +               dmadev = ctlr->dma_rx ?
> +                        ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +       spi_unmap_buf(ctlr, dmadev, sgt,
> +                     op->data.dir == SPI_MEM_DATA_IN ?
> +                     DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
> +
> +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> +{
> +       u32 mode = mem->spi->mode;
> +
> +       switch (buswidth) {
> +       case 1:
> +               return 0;
> +
> +       case 2:
> +               if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> +                   (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
> +                       return 0;
> +
> +               break;
> +
> +       case 4:
> +               if ((tx && (mode & SPI_TX_QUAD)) ||
> +                   (!tx && (mode & SPI_RX_QUAD)))
> +                       return 0;
> +
> +               break;
> +
> +       default:
> +               break;
> +       }
> +
> +       return -ENOTSUPP;
> +}
> +
> +/**
> + * spi_mem_supports_op() - Check if a memory device and the controller it is
> + *                        connected to support a specific memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to check
> + *
> + * Some controllers are only supporting Single or Dual IOs, others might only
> + * support specific opcodes, or it can even be that the controller and device
> + * both support Quad IOs but the hardware prevents you from using it because
> + * only 2 IO lines are connected.
> + *
> + * This function checks whether a specific operation is supported.
> + *
> + * Return: true if @op is supported, false otherwise.
> + */
> +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +       struct spi_controller *ctlr = mem->spi->controller;
> +
> +       if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> +               return false;
> +
> +       if (op->addr.nbytes &&
> +           spi_check_buswidth_req(mem, op->addr.buswidth, true))
> +               return false;
> +
> +       if (op->dummy.nbytes &&
> +           spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> +               return false;
> +
> +       if (op->data.nbytes &&
> +           spi_check_buswidth_req(mem, op->data.buswidth,
> +                                  op->data.dir == SPI_MEM_DATA_IN ?
> +                                  false : true))
> +               return false;
> +
> +       if (ctlr->mem_ops)
> +               return ctlr->mem_ops->supports_op(mem, op);
> +
> +       return true;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
> +
> +/**
> + * spi_mem_exec_op() - Execute a memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to execute
> + *
> + * Executes a memory operation.
> + *
> + * This function first checks that @op is supported and then tries to execute
> + * it.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +       unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> +       struct spi_controller *ctlr = mem->spi->controller;
> +       struct spi_transfer xfers[4] = { };
> +       struct spi_message msg;
> +       u8 *tmpbuf;
> +       int ret;
> +
> +       if (!spi_mem_supports_op(mem, op))
> +               return -ENOTSUPP;
> +
> +       if (ctlr->mem_ops) {
> +               if (ctlr->auto_runtime_pm) {
> +                       ret = pm_runtime_get_sync(ctlr->dev.parent);
> +                       if (ret < 0) {
> +                               dev_err(&ctlr->dev,
> +                                       "Failed to power device: %d\n",
> +                                       ret);
> +                               return ret;
> +                       }
> +               }
> +
> +               mutex_lock(&ctlr->bus_lock_mutex);
> +               mutex_lock(&ctlr->io_mutex);
> +               ret = ctlr->mem_ops->exec_op(mem, op);
> +               mutex_unlock(&ctlr->io_mutex);
> +               mutex_unlock(&ctlr->bus_lock_mutex);
> +
> +               if (ctlr->auto_runtime_pm)
> +                       pm_runtime_put(ctlr->dev.parent);
> +
> +               /*
> +                * Some controllers only optimize specific paths (typically the
> +                * read path) and expect the core to use the regular SPI
> +                * interface in these cases.
> +                */
> +               if (!ret || ret != -ENOTSUPP)
> +                       return ret;
> +       }
> +
> +       tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> +                    op->dummy.nbytes;

Here you are using sizeof(op->cmd.opcode) while the code after this
assumes opcode is u8 (i.e. "memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);").
This may be confusing.

Thanks,
Peter Pan

> +
> +       /*
> +        * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
> +        * we're guaranteed that this buffer is DMA-able, as required by the
> +        * SPI layer.
> +        */
> +       tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> +       if (!tmpbuf)
> +               return -ENOMEM;
> +
> +       spi_message_init(&msg);
> +
> +       tmpbuf[0] = op->cmd.opcode;
> +       xfers[xferpos].tx_buf = tmpbuf;
> +       xfers[xferpos].len = sizeof(op->cmd.opcode);
> +       xfers[xferpos].tx_nbits = op->cmd.buswidth;
> +       spi_message_add_tail(&xfers[xferpos], &msg);
> +       xferpos++;
> +       totalxferlen++;
> +
> +       if (op->addr.nbytes) {
> +               memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> +               xfers[xferpos].tx_buf = tmpbuf + 1;
> +               xfers[xferpos].len = op->addr.nbytes;
> +               xfers[xferpos].tx_nbits = op->addr.buswidth;
> +               spi_message_add_tail(&xfers[xferpos], &msg);
> +               xferpos++;
> +               totalxferlen += op->addr.nbytes;
> +       }
> +
> +       if (op->dummy.nbytes) {
> +               memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
> +               xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> +               xfers[xferpos].len = op->dummy.nbytes;
> +               xfers[xferpos].tx_nbits = op->dummy.buswidth;
> +               spi_message_add_tail(&xfers[xferpos], &msg);
> +               xferpos++;
> +               totalxferlen += op->dummy.nbytes;
> +       }
> +
> +       if (op->data.nbytes) {
> +               if (op->data.dir == SPI_MEM_DATA_IN) {
> +                       xfers[xferpos].rx_buf = op->data.buf.in;
> +                       xfers[xferpos].rx_nbits = op->data.buswidth;
> +               } else {
> +                       xfers[xferpos].tx_buf = op->data.buf.out;
> +                       xfers[xferpos].tx_nbits = op->data.buswidth;
> +               }
> +
> +               xfers[xferpos].len = op->data.nbytes;
> +               spi_message_add_tail(&xfers[xferpos], &msg);
> +               xferpos++;
> +               totalxferlen += op->data.nbytes;
> +       }
> +
> +       ret = spi_sync(mem->spi, &msg);
> +
> +       kfree(tmpbuf);
> +
> +       if (ret)
> +               return ret;
> +
> +       if (msg.actual_length != totalxferlen)
> +               return -EIO;
> +
> +       return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_exec_op);
> +
> +/**
> + * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
> + *                           match controller limitations
> + * @mem: the SPI memory
> + * @op: the operation to adjust
> + *
> + * Some controllers have FIFO limitations and must split a data transfer
> + * operation into multiple ones, others require a specific alignment for
> + * optimized accesses. This function allows SPI mem drivers to split a single
> + * operation into multiple sub-operations when required.
> + *
> + * Return: a negative error code if the controller can't properly adjust @op,
> + *        0 otherwise. Note that @op->data.nbytes will be updated if @op
> + *        can't be handled in a single step.
> + */
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
> +{
> +       struct spi_controller *ctlr = mem->spi->controller;
> +
> +       if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
> +               return ctlr->mem_ops->adjust_op_size(mem, op);
> +
> +       return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
> +
> +static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
> +{
> +       return container_of(drv, struct spi_mem_driver, spidrv.driver);
> +}
> +
> +static int spi_mem_probe(struct spi_device *spi)
> +{
> +       struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +       struct spi_mem *mem;
> +
> +       mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
> +       if (!mem)
> +               return -ENOMEM;
> +
> +       mem->spi = spi;
> +       spi_set_drvdata(spi, mem);
> +
> +       return memdrv->probe(mem);
> +}
> +
> +static int spi_mem_remove(struct spi_device *spi)
> +{
> +       struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +       struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +       if (memdrv->remove)
> +               return memdrv->remove(mem);
> +
> +       return 0;
> +}
> +
> +static void spi_mem_shutdown(struct spi_device *spi)
> +{
> +       struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +       struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +       if (memdrv->shutdown)
> +               memdrv->shutdown(mem);
> +}
> +
> +/**
> + * spi_mem_driver_register_with_owner() - Register a SPI memory driver
> + * @memdrv: the SPI memory driver to register
> + * @owner: the owner of this driver
> + *
> + * Registers a SPI memory driver.
> + *
> + * Return: 0 in case of success, a negative error core otherwise.
> + */
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
> +                                      struct module *owner)
> +{
> +       memdrv->spidrv.probe = spi_mem_probe;
> +       memdrv->spidrv.remove = spi_mem_remove;
> +       memdrv->spidrv.shutdown = spi_mem_shutdown;
> +
> +       return __spi_register_driver(owner, &memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
> +
> +/**
> + * spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
> + * @memdrv: the SPI memory driver to unregister
> + *
> + * Unregisters a SPI memory driver.
> + */
> +void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
> +{
> +       spi_unregister_driver(&memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
> +
>  /*-------------------------------------------------------------------------*/
>
>  #if IS_ENABLED(CONFIG_OF_DYNAMIC)
> diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
> index 7b2170bfd6e7..af3c4ac62b55 100644
> --- a/include/linux/spi/spi.h
> +++ b/include/linux/spi/spi.h
> @@ -27,6 +27,7 @@ struct property_entry;
>  struct spi_controller;
>  struct spi_transfer;
>  struct spi_flash_read_message;
> +struct spi_controller_mem_ops;
>
>  /*
>   * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
> @@ -376,6 +377,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
>   *                    transfer_one callback.
>   * @handle_err: the subsystem calls the driver to handle an error that occurs
>   *             in the generic implementation of transfer_one_message().
> + * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
> + *          This field is optional and should only be implemented if the
> + *          controller has native support for memory like operations.
>   * @unprepare_message: undo any work done by prepare_message().
>   * @slave_abort: abort the ongoing transfer request on an SPI slave controller
>   * @spi_flash_read: to support spi-controller hardwares that provide
> @@ -564,6 +568,9 @@ struct spi_controller {
>         void (*handle_err)(struct spi_controller *ctlr,
>                            struct spi_message *message);
>
> +       /* Optimized handlers for SPI memory-like operations. */
> +       const struct spi_controller_mem_ops *mem_ops;
> +
>         /* gpio chip select */
>         int                     *cs_gpios;
>
> @@ -1227,6 +1234,225 @@ int spi_flash_read(struct spi_device *spi,
>
>  /*---------------------------------------------------------------------------*/
>
> +/* SPI memory related definitions. */
> +
> +#define SPI_MEM_OP_CMD(__opcode, __buswidth)                   \
> +       {                                                       \
> +               .buswidth = __buswidth,                         \
> +               .opcode = __opcode,                             \
> +       }
> +
> +#define SPI_MEM_OP_ADDRS(__nbytes, __buf, __buswidth)          \
> +       {                                                       \
> +               .nbytes = __nbytes,                             \
> +               .buf = __buf,                                   \
> +               .buswidth = __buswidth,                         \
> +       }
> +
> +#define SPI_MEM_OP_NO_ADDRS    { }
> +
> +#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)                 \
> +       {                                                       \
> +               .nbytes = __nbytes,                             \
> +               .buswidth = __buswidth,                         \
> +       }
> +
> +#define SPI_MEM_OP_NO_DUMMY    { }
> +
> +#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)                \
> +       {                                                       \
> +               .dir = SPI_MEM_DATA_IN,                         \
> +               .nbytes = __nbytes,                             \
> +               .buf.in = __buf,                                \
> +               .buswidth = __buswidth,                         \
> +       }
> +
> +#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)       \
> +       {                                                       \
> +               .dir = SPI_MEM_DATA_OUT,                        \
> +               .nbytes = __nbytes,                             \
> +               .buf.out = __buf,                               \
> +               .buswidth = __buswidth,                         \
> +       }
> +
> +#define SPI_MEM_OP_NO_DATA     { }
> +
> +/**
> + * enum spi_mem_data_dir - describes the direction of a SPI memory data
> + *                        transfer from the controller perspective
> + * @SPI_MEM_DATA_IN: data coming from the SPI memory
> + * @SPI_MEM_DATA_OUT: data sent the SPI memory
> + */
> +enum spi_mem_data_dir {
> +       SPI_MEM_DATA_IN,
> +       SPI_MEM_DATA_OUT,
> +};
> +
> +/**
> + * struct spi_mem_op - describes a SPI memory operation
> + * @cmd.buswidth: number of IO lines used to transmit the command
> + * @cmd.opcode: operation opcode
> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> + *              does not need to send an address
> + * @addr.buswidth: number of IO lines used to transmit the address cycles
> + * @addr.buf: buffer storing the address bytes
> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> + *               be zero if the operation does not require dummy bytes
> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> + * @data.buswidth: number of IO lanes used to send/receive the data
> + * @data.dir: direction of the transfer
> + * @data.buf.in: input buffer
> + * @data.buf.out: output buffer
> + */
> +struct spi_mem_op {
> +       struct {
> +               u8 buswidth;
> +               u8 opcode;
> +       } cmd;
> +
> +       struct {
> +               u8 nbytes;
> +               u8 buswidth;
> +               const u8 *buf;
> +       } addr;
> +
> +       struct {
> +               u8 nbytes;
> +               u8 buswidth;
> +       } dummy;
> +
> +       struct {
> +               u8 buswidth;
> +               enum spi_mem_data_dir dir;
> +               unsigned int nbytes;
> +               /* buf.{in,out} must be DMA-able. */
> +               union {
> +                       void *in;
> +                       const void *out;
> +               } buf;
> +       } data;
> +};
> +
> +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)             \
> +       {                                                       \
> +               .cmd = __cmd,                                   \
> +               .addr = __addr,                                 \
> +               .dummy = __dummy,                               \
> +               .data = __data,                                 \
> +       }
> +
> +/**
> + * struct spi_mem - describes a SPI memory device
> + * @spi: the underlying SPI device
> + * @drvpriv: spi_mem_drviver private data
> + *
> + * Extra information that describe the SPI memory device and may be needed by
> + * the controller to properly handle this device should be placed here.
> + *
> + * One example would be the device size since some controller expose their SPI
> + * mem devices through a io-mapped region.
> + */
> +struct spi_mem {
> +       struct spi_device *spi;
> +       void *drvpriv;
> +};
> +
> +/**
> + * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem
> + *                               device
> + * @mem: memory device
> + * @data: data to attach to the memory device
> + */
> +static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
> +{
> +       mem->drvpriv = data;
> +}
> +
> +/**
> + * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem
> + *                               device
> + * @mem: memory device
> + *
> + * Return: the data attached to the mem device.
> + */
> +static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
> +{
> +       return mem->drvpriv;
> +}
> +
> +/**
> + * struct spi_controller_mem_ops - SPI memory operations
> + * @adjust_op_size: shrink the data xfer of an operation to match controller's
> + *                 limitations (can be alignment of max RX/TX size
> + *                 limitations)
> + * @supports_op: check if an operation is supported by the controller
> + * @exec_op: execute a SPI memory operation
> + *
> + * This interface should be implemented by SPI controllers providing an
> + * high-level interface to execute SPI memory operation, which is usually the
> + * case for QSPI controllers.
> + */
> +struct spi_controller_mem_ops {
> +       int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
> +       bool (*supports_op)(struct spi_mem *mem,
> +                           const struct spi_mem_op *op);
> +       int (*exec_op)(struct spi_mem *mem,
> +                      const struct spi_mem_op *op);
> +};
> +
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +                                      const struct spi_mem_op *op,
> +                                      struct sg_table *sg);
> +
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +                                         const struct spi_mem_op *op,
> +                                         struct sg_table *sg);
> +
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
> +
> +bool spi_mem_supports_op(struct spi_mem *mem,
> +                        const struct spi_mem_op *op);
> +
> +int spi_mem_exec_op(struct spi_mem *mem,
> +                   const struct spi_mem_op *op);
> +
> +/**
> + * struct spi_mem_driver - SPI memory driver
> + * @spidrv: inherit from a SPI driver
> + * @probe: probe a SPI memory. Usually where detection/initialization takes
> + *        place
> + * @remove: remove a SPI memory
> + * @shutdown: take appropriate action when the system is shutdown
> + *
> + * This is just a thin wrapper around a spi_driver. The core takes care of
> + * allocating the spi_mem object and forwarding the probe/remove/shutdown
> + * request to the spi_mem_driver. The reason we use this wrapper is because
> + * we might have to stuff more information into the spi_mem struct to let
> + * SPI controllers know more about the SPI memory they interact with, and
> + * having this intermediate layer allows us to do that without adding more
> + * useless fields to the spi_device object.
> + */
> +struct spi_mem_driver {
> +       struct spi_driver spidrv;
> +       int (*probe)(struct spi_mem *mem);
> +       int (*remove)(struct spi_mem *mem);
> +       void (*shutdown)(struct spi_mem *mem);
> +};
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
> +                                      struct module *owner);
> +
> +#define spi_mem_driver_register(__drv)                                  \
> +       spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
> +
> +void spi_mem_driver_unregister(struct spi_mem_driver *drv);
> +
> +#define module_spi_mem_driver(__drv)                                    \
> +       module_driver(__drv, spi_mem_driver_register,                   \
> +                     spi_mem_driver_unregister)
> +
> +/*---------------------------------------------------------------------------*/
> +
>  /*
>   * INTERFACE between board init code and SPI infrastructure.
>   *
> --
> 2.14.1
>
Boris Brezillon Feb. 28, 2018, 12:25 p.m. UTC | #13
On Wed, 28 Feb 2018 15:51:29 +0800
Peter Pan <peterpansjtu@gmail.com> wrote:

> > +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +       unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> > +       struct spi_controller *ctlr = mem->spi->controller;
> > +       struct spi_transfer xfers[4] = { };
> > +       struct spi_message msg;
> > +       u8 *tmpbuf;
> > +       int ret;
> > +
> > +       if (!spi_mem_supports_op(mem, op))
> > +               return -ENOTSUPP;
> > +
> > +       if (ctlr->mem_ops) {
> > +               if (ctlr->auto_runtime_pm) {
> > +                       ret = pm_runtime_get_sync(ctlr->dev.parent);
> > +                       if (ret < 0) {
> > +                               dev_err(&ctlr->dev,
> > +                                       "Failed to power device: %d\n",
> > +                                       ret);
> > +                               return ret;
> > +                       }
> > +               }
> > +
> > +               mutex_lock(&ctlr->bus_lock_mutex);
> > +               mutex_lock(&ctlr->io_mutex);
> > +               ret = ctlr->mem_ops->exec_op(mem, op);
> > +               mutex_unlock(&ctlr->io_mutex);
> > +               mutex_unlock(&ctlr->bus_lock_mutex);
> > +
> > +               if (ctlr->auto_runtime_pm)
> > +                       pm_runtime_put(ctlr->dev.parent);
> > +
> > +               /*
> > +                * Some controllers only optimize specific paths (typically the
> > +                * read path) and expect the core to use the regular SPI
> > +                * interface in these cases.
> > +                */
> > +               if (!ret || ret != -ENOTSUPP)
> > +                       return ret;
> > +       }
> > +
> > +       tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> > +                    op->dummy.nbytes;  
> 
> Here you are using sizeof(op->cmd.opcode) while the code after this
> assumes opcode is u8(ie. "memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);")
> It may be confused.

Will use sizeof(op->cmd.opcode) everywhere then.
Cyrille Pitchen March 4, 2018, 9:15 p.m. UTC | #14
Hi Boris,

Le 06/02/2018 à 00:21, Boris Brezillon a écrit :
> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> ---
>  drivers/spi/spi.c       | 423 +++++++++++++++++++++++++++++++++++++++++++++++-
>  include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++
>  2 files changed, 646 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> index b33a727a0158..57bc540a0521 100644
> --- a/drivers/spi/spi.c
> +++ b/drivers/spi/spi.c
> @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct spi_controller *ctlr)
>  }
>  #endif
>  
> +static int spi_controller_check_ops(struct spi_controller *ctlr)
> +{
> +	/*
> +	 * The controller can implement only the high-level SPI-memory like
> +	 * operations if it does not support regular SPI transfers.
> +	 */
> +	if (ctlr->mem_ops) {
> +		if (!ctlr->mem_ops->supports_op ||
> +		    !ctlr->mem_ops->exec_op)
> +			return -EINVAL;
> +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> +		   !ctlr->transfer_one_message) {
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
>  /**
>   * spi_register_controller - register SPI master or slave controller
>   * @ctlr: initialized master, originally from spi_alloc_master() or
> @@ -2090,6 +2108,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  	if (!dev)
>  		return -ENODEV;
>  
> +	/*
> +	 * Make sure all necessary hooks are implemented before registering
> +	 * the SPI controller.
> +	 */
> +	status = spi_controller_check_ops(ctlr);
> +	if (status)
> +		return status;
> +
>  	if (!spi_controller_is_slave(ctlr)) {
>  		status = of_spi_register_master(ctlr);
>  		if (status)
> @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  			spi_controller_is_slave(ctlr) ? "slave" : "master",
>  			dev_name(&ctlr->dev));
>  
> -	/* If we're using a queued driver, start the queue */
> -	if (ctlr->transfer)
> +	/*
> +	 * If we're using a queued driver, start the queue. Note that we don't
> +	 * need the queueing logic if the driver is only supporting high-level
> +	 * memory operations.
> +	 */
> +	if (ctlr->transfer) {
>  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> -	else {
> +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
>  		status = spi_controller_initialize_queue(ctlr);
>  		if (status) {
>  			device_del(&ctlr->dev);
> @@ -2893,6 +2923,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
>  {
>  	struct spi_controller *ctlr = spi->controller;
>  
> +	/*
> +	 * Some controllers do not support doing regular SPI transfers. Return
> +	 * ENOTSUPP when this is the case.
> +	 */
> +	if (!ctlr->transfer)
> +		return -ENOTSUPP;
> +
>  	message->spi = spi;
>  
>  	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
> @@ -3321,6 +3358,386 @@ int spi_write_then_read(struct spi_device *spi,
>  }
>  EXPORT_SYMBOL_GPL(spi_write_then_read);
>  
> +/**
> + * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
> + *					  memory operation
> + * @ctlr: the SPI controller requesting this dma_map()
> + * @op: the memory operation containing the buffer to map
> + * @sgt: a pointer to a non-initialized sg_table that will be filled by this
> + *	 function
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares everything for you and provides a ready-to-use
> + * sg_table. This function is not intended to be called from spi drivers.
> + * Only SPI controller drivers should use it.
> + * Note that the caller must ensure the memory region pointed by
> + * op->data.buf.{in,out} is DMA-able before calling this function.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +				       const struct spi_mem_op *op,
> +				       struct sg_table *sgt)
> +{
> +	struct device *dmadev;
> +
> +	if (!op->data.nbytes)
> +		return -EINVAL;
> +
> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +	if (!dmadev)
> +		return -EINVAL;
> +
> +	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
> +			   op->data.dir == SPI_MEM_DATA_IN ?
> +			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
> +
> +/**
> + * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
> + *					    memory operation
> + * @ctlr: the SPI controller requesting this dma_unmap()
> + * @op: the memory operation containing the buffer to unmap
> + * @sgt: a pointer to an sg_table previously initialized by
> + *	 spi_controller_dma_map_mem_op_data()
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares things so that the CPU can access the
> + * op->data.buf.{in,out} buffer again.
> + *
> + * This function is not intended to be called from spi drivers. Only SPI
> + * controller drivers should use it.
> + *
> + * This function should be called after the DMA operation has finished an is
> + * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
> + * 0.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +					  const struct spi_mem_op *op,
> +					  struct sg_table *sgt)
> +{
> +	struct device *dmadev;
> +
> +	if (!op->data.nbytes)
> +		return;
> +
> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +	spi_unmap_buf(ctlr, dmadev, sgt,
> +		      op->data.dir == SPI_MEM_DATA_IN ?
> +		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
> +
> +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> +{
> +	u32 mode = mem->spi->mode;
> +
> +	switch (buswidth) {
> +	case 1:
> +		return 0;
> +
> +	case 2:
> +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))

Some SPI (flash) controllers may support Quad protocols but no Dual
protocols at all. Hence you should only test SPI_{R,T}X_DUAL.

Anyway, I think the function should completely be removed because
SPI_{R,T}X_{DUAL,QUAD} flags should be deprecated.

They were fine when introduced long time ago when Quad SPI flashes only
supported Fast Read 1-1-4 and maybe few of them Page Program 1-1-4.

However, for many years now, Quad SPI flashes have supported far more SPI
protocols and commands. For instance:
Fast Read 1-1-4
Fast Read 1-4-4
(Fast Read 4-4-4)
Page Program 1-1-4
Page Program 1-4-4
Page Program 4-4-4

Fast Read x-y-z means that:
- the op code is transferred (tx) on x I/O line(s)
- the address, mode and dummies are transferred (tx) on y I/O line(s)
- the data are received (rx) on z I/O line(s)

Page Program x-y-z stands for:
- the op code being transferred (tx) on x I/O line(s)
- the address being transferred (tx) on y I/O line(s)
- the data being transferred (tx) on z I/O line(s)

Then some SPI flash controllers and/or memories may support Fast Read 1-4-4
but not Page Program 1-1-4 whereas others may support Page Program 1-1-4
but not Fast Read 1-4-4.

So using only SPI_{R,T}X_{DUAL,QUAD} flags, how do we tell the difference
between support of Fast Read 1-4-4 and Page Program 1-1-4, or between
Fast Read 1-1-4 and Fast Read 1-4-4?
Indeed some latest (Cypress) memories only support Fast Read 1-4-4 but no
longer Fast Read 1-1-4.

I guess we'd rather use something close to the SNOR_HWCAPS_* macros as
defined in include/linux/mtd/spi-nor.h

The SPI_{R,T}X_{DUAL,QUAD} flags have not been well suited to SPI flash
memories for a long time now.


> +			return 0;
> +
> +		break;
> +
> +	case 4:
> +		if ((tx && (mode & SPI_TX_QUAD)) ||
> +		    (!tx && (mode & SPI_RX_QUAD)))
> +			return 0;
> +
> +		break;
> +
> +	default:
> +		break;
> +	}
> +
> +	return -ENOTSUPP;
> +}
> +
> +/**
> + * spi_mem_supports_op() - Check if a memory device and the controller it is
> + *			   connected to support a specific memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to check
> + *
> + * Some controllers are only supporting Single or Dual IOs, others might only
> + * support specific opcodes, or it can even be that the controller and device
> + * both support Quad IOs but the hardware prevents you from using it because
> + * only 2 IO lines are connected.
> + *
> + * This function checks whether a specific operation is supported.
> + *
> + * Return: true if @op is supported, false otherwise.
> + */
> +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	struct spi_controller *ctlr = mem->spi->controller;
> +
> +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> +		return false;
> +
> +	if (op->addr.nbytes &&
> +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
> +		return false;
> +
> +	if (op->dummy.nbytes &&
> +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> +		return false;
> +
> +	if (op->data.nbytes &&
> +	    spi_check_buswidth_req(mem, op->data.buswidth,
> +				   op->data.dir == SPI_MEM_DATA_IN ?
> +				   false : true))
> +		return false;

You should remove the 4 tests and calls of spi_check_buswidth_req() above.
Based on spi_controller_check_ops(), when ctlr->mem_ops is not NULL,
mem_ops->supports_op() must be provided; this hook is mandatory.

So call this latter hook only, as done just below. You may want to keep the 4
tests above and spi_check_buswidth_req() but then move them into a
stand-alone function that could be used as a default implementation of
mem_ops->supports_op(), if you want to allow some SPI controller drivers not
to implement ->supports_op() on their own.

However, it should be considered transitional, and at some point I think
the SPI_{R,T}X_{DUAL,QUAD} flags should just be removed, because many
developers submitting patches to spi-nor have often been confused by how
to properly use those flags in their drivers.

Till then, if you removed the 4 tests above, at least new SPI flash
controller drivers won't have to set some odd combination of flags in
spi->mode to make their support of SPI flash memories work as they want.

> +
> +	if (ctlr->mem_ops)
> +		return ctlr->mem_ops->supports_op(mem, op);
> +
> +	return true;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
> +
> +/**
> + * spi_mem_exec_op() - Execute a memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to execute
> + *
> + * Executes a memory operation.
> + *
> + * This function first checks that @op is supported and then tries to execute
> + * it.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> +	struct spi_controller *ctlr = mem->spi->controller;
> +	struct spi_transfer xfers[4] = { };
> +	struct spi_message msg;
> +	u8 *tmpbuf;
> +	int ret;
> +
> +	if (!spi_mem_supports_op(mem, op))
> +		return -ENOTSUPP;
> +
> +	if (ctlr->mem_ops) {
> +		if (ctlr->auto_runtime_pm) {
> +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> +			if (ret < 0) {
> +				dev_err(&ctlr->dev,
> +					"Failed to power device: %d\n",
> +					ret);
> +				return ret;
> +			}
> +		}
> +
> +		mutex_lock(&ctlr->bus_lock_mutex);
> +		mutex_lock(&ctlr->io_mutex);
> +		ret = ctlr->mem_ops->exec_op(mem, op);
> +		mutex_unlock(&ctlr->io_mutex);
> +		mutex_unlock(&ctlr->bus_lock_mutex);
> +
> +		if (ctlr->auto_runtime_pm)
> +			pm_runtime_put(ctlr->dev.parent);
> +
> +		/*
> +		 * Some controllers only optimize specific paths (typically the
> +		 * read path) and expect the core to use the regular SPI
> +		 * interface in these cases.
> +		 */
> +		if (!ret || ret != -ENOTSUPP)
> +			return ret;
> +	}
> +
> +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> +		     op->dummy.nbytes;
> +
> +	/*
> +	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
> +	 * we're guaranteed that this buffer is DMA-able, as required by the
> +	 * SPI layer.
> +	 */
> +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> +	if (!tmpbuf)
> +		return -ENOMEM;
> +
> +	spi_message_init(&msg);
> +
> +	tmpbuf[0] = op->cmd.opcode;
> +	xfers[xferpos].tx_buf = tmpbuf;
> +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> +	spi_message_add_tail(&xfers[xferpos], &msg);
> +	xferpos++;
> +	totalxferlen++;
> +
> +	if (op->addr.nbytes) {
> +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + 1;
> +		xfers[xferpos].len = op->addr.nbytes;
> +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->addr.nbytes;
> +	}
> +
> +	if (op->dummy.nbytes) {
> +		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> +		xfers[xferpos].len = op->dummy.nbytes;
> +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->dummy.nbytes;
> +	}
> +
> +	if (op->data.nbytes) {
> +		if (op->data.dir == SPI_MEM_DATA_IN) {
> +			xfers[xferpos].rx_buf = op->data.buf.in;
> +			xfers[xferpos].rx_nbits = op->data.buswidth;
> +		} else {
> +			xfers[xferpos].tx_buf = op->data.buf.out;
> +			xfers[xferpos].tx_nbits = op->data.buswidth;
> +		}
> +
> +		xfers[xferpos].len = op->data.nbytes;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->data.nbytes;
> +	}
> +
> +	ret = spi_sync(mem->spi, &msg);
> +
> +	kfree(tmpbuf);
> +
> +	if (ret)
> +		return ret;
> +
> +	if (msg.actual_length != totalxferlen)
> +		return -EIO;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_exec_op);
> +
> +/**
> + * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
> + *			      match controller limitations
> + * @mem: the SPI memory
> + * @op: the operation to adjust
> + *
> + * Some controllers have FIFO limitations and must split a data transfer
> + * operation into multiple ones, others require a specific alignment for
> + * optimized accesses. This function allows SPI mem drivers to split a single
> + * operation into multiple sub-operations when required.
> + *
> + * Return: a negative error code if the controller can't properly adjust @op,
> + *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
> + *	   can't be handled in a single step.
> + */
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
> +{
> +	struct spi_controller *ctlr = mem->spi->controller;
> +
> +	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
> +		return ctlr->mem_ops->adjust_op_size(mem, op);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
> +
> +static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
> +{
> +	return container_of(drv, struct spi_mem_driver, spidrv.driver);
> +}
> +
> +static int spi_mem_probe(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem;
> +
> +	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
> +	if (!mem)
> +		return -ENOMEM;
> +
> +	mem->spi = spi;
> +	spi_set_drvdata(spi, mem);
> +
> +	return memdrv->probe(mem);
> +}
> +
> +static int spi_mem_remove(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +	if (memdrv->remove)
> +		return memdrv->remove(mem);
> +
> +	return 0;
> +}
> +
> +static void spi_mem_shutdown(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +	if (memdrv->shutdown)
> +		memdrv->shutdown(mem);
> +}
> +
> +/**
> + * spi_mem_driver_register_with_owner() - Register a SPI memory driver
> + * @memdrv: the SPI memory driver to register
> + * @owner: the owner of this driver
> + *
> + * Registers a SPI memory driver.
> + *
> + * Return: 0 in case of success, a negative error core otherwise.
> + */
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
> +				       struct module *owner)
> +{
> +	memdrv->spidrv.probe = spi_mem_probe;
> +	memdrv->spidrv.remove = spi_mem_remove;
> +	memdrv->spidrv.shutdown = spi_mem_shutdown;
> +
> +	return __spi_register_driver(owner, &memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
> +
> +/**
> + * spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
> + * @memdrv: the SPI memory driver to unregister
> + *
> + * Unregisters a SPI memory driver.
> + */
> +void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
> +{
> +	spi_unregister_driver(&memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
> +
>  /*-------------------------------------------------------------------------*/
>  
>  #if IS_ENABLED(CONFIG_OF_DYNAMIC)
> diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
> index 7b2170bfd6e7..af3c4ac62b55 100644
> --- a/include/linux/spi/spi.h
> +++ b/include/linux/spi/spi.h
> @@ -27,6 +27,7 @@ struct property_entry;
>  struct spi_controller;
>  struct spi_transfer;
>  struct spi_flash_read_message;
> +struct spi_controller_mem_ops;
>  
>  /*
>   * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
> @@ -376,6 +377,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
>   *                    transfer_one callback.
>   * @handle_err: the subsystem calls the driver to handle an error that occurs
>   *		in the generic implementation of transfer_one_message().
> + * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
> + *	     This field is optional and should only be implemented if the
> + *	     controller has native support for memory like operations.
>   * @unprepare_message: undo any work done by prepare_message().
>   * @slave_abort: abort the ongoing transfer request on an SPI slave controller
>   * @spi_flash_read: to support spi-controller hardwares that provide
> @@ -564,6 +568,9 @@ struct spi_controller {
>  	void (*handle_err)(struct spi_controller *ctlr,
>  			   struct spi_message *message);
>  
> +	/* Optimized handlers for SPI memory-like operations. */
> +	const struct spi_controller_mem_ops *mem_ops;
> +
>  	/* gpio chip select */
>  	int			*cs_gpios;
>  
> @@ -1227,6 +1234,225 @@ int spi_flash_read(struct spi_device *spi,
>  
>  /*---------------------------------------------------------------------------*/
>  
> +/* SPI memory related definitions. */
> +
> +#define SPI_MEM_OP_CMD(__opcode, __buswidth)			\
> +	{							\
> +		.buswidth = __buswidth,				\
> +		.opcode = __opcode,				\
> +	}
> +
> +#define SPI_MEM_OP_ADDRS(__nbytes, __buf, __buswidth)		\
> +	{							\
> +		.nbytes = __nbytes,				\
> +		.buf = __buf,					\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_ADDRS	{ }
> +
> +#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)			\
> +	{							\
> +		.nbytes = __nbytes,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_DUMMY	{ }
> +
> +#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)		\
> +	{							\
> +		.dir = SPI_MEM_DATA_IN,				\
> +		.nbytes = __nbytes,				\
> +		.buf.in = __buf,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)	\
> +	{							\
> +		.dir = SPI_MEM_DATA_OUT,			\
> +		.nbytes = __nbytes,				\
> +		.buf.out = __buf,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_DATA	{ }
> +
> +/**
> + * enum spi_mem_data_dir - describes the direction of a SPI memory data
> + *			   transfer from the controller perspective
> + * @SPI_MEM_DATA_IN: data coming from the SPI memory
> + * @SPI_MEM_DATA_OUT: data sent the SPI memory
> + */
> +enum spi_mem_data_dir {
> +	SPI_MEM_DATA_IN,
> +	SPI_MEM_DATA_OUT,
> +};
> +
> +/**
> + * struct spi_mem_op - describes a SPI memory operation
> + * @cmd.buswidth: number of IO lines used to transmit the command
> + * @cmd.opcode: operation opcode
> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> + *		 does not need to send an address
> + * @addr.buswidth: number of IO lines used to transmit the address cycles
> + * @addr.buf: buffer storing the address bytes
> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> + *		  be zero if the operation does not require dummy bytes
> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> + * @data.buswidth: number of IO lanes used to send/receive the data
> + * @data.dir: direction of the transfer
> + * @data.buf.in: input buffer
> + * @data.buf.out: output buffer
> + */
> +struct spi_mem_op {
> +	struct {
> +		u8 buswidth;
> +		u8 opcode;
> +	} cmd;
> +
> +	struct {
> +		u8 nbytes;
> +		u8 buswidth;
> +		const u8 *buf;

For the address value, I would rather use some loff_t type, instead of
const u8 *. 
Actually, most (if not all) SPI flash controllers have some hardware
register to set the address value of the SPI flash command to be
sent. Hence having this value directly in the right format would avoid a
conversion. AFAIK, only m25p80 needs to convert from loff_t to u8 * and only
when using the regular SPI API, i.e. spi_sync(), as the 'struct
spi_flash_read_message' already uses some integral type too.


> +	} addr;
> +
> +	struct {
> +		u8 nbytes;
> +		u8 buswidth;
> +	} dummy;
> +
> +	struct {
> +		u8 buswidth;
> +		enum spi_mem_data_dir dir;
> +		unsigned int nbytes;
> +		/* buf.{in,out} must be DMA-able. */
> +		union {
> +			void *in;
> +			const void *out;
> +		} buf;
> +	} data;

Also, you should add another member in this structure to set the 'type' of
operation for the SPI flash command:
- register read
- register write
- memory read
- memory write
- memory erase
[- SFDP read ? ]
[- OTP read/write ? ]

Indeed, some SPI flash controller drivers need this information.

For instance the Atmel QSPI controller has an optional feature that allows
scrambling data on the fly from/to the (Q)SPI memory. Hence the scrambling
should be enabled for memory {read,write} (Fast Read, Page Program)
commands but disabled for register {read,write} commands (Read ID, Write
Enable, Read/Write Status Register, ...).

SPI flash controller drivers *should not* have to check the SPI flash command
instruction op code to guess what kind of operation is being performed.

Another example:
Some (Q)SPI flash controllers map the SPI flash memory into the system
address space then perform some "memcpy-like" operations to access this SPI
flash memory whereas they rely on regular registers to handle SPI flash
commands reading from or writing into the internal registers of the SPI
flash. So those controllers too need to make the difference between register
and memory operations.

Best regards,

Cyrille

> +};
> +
> +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)		\
> +	{							\
> +		.cmd = __cmd,					\
> +		.addr = __addr,					\
> +		.dummy = __dummy,				\
> +		.data = __data,					\
> +	}
> +
> +/**
> + * struct spi_mem - describes a SPI memory device
> + * @spi: the underlying SPI device
> + * @drvpriv: spi_mem_drviver private data
> + *
> + * Extra information that describe the SPI memory device and may be needed by
> + * the controller to properly handle this device should be placed here.
> + *
> + * One example would be the device size since some controller expose their SPI
> + * mem devices through a io-mapped region.
> + */
> +struct spi_mem {
> +	struct spi_device *spi;
> +	void *drvpriv;
> +};
> +
> +/**
> + * struct spi_mem_set_drvdata() - attach driver private data to a SPI mem
> + *				  device
> + * @mem: memory device
> + * @data: data to attach to the memory device
> + */
> +static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
> +{
> +	mem->drvpriv = data;
> +}
> +
> +/**
> + * struct spi_mem_get_drvdata() - get driver private data attached to a SPI mem
> + *				  device
> + * @mem: memory device
> + *
> + * Return: the data attached to the mem device.
> + */
> +static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
> +{
> +	return mem->drvpriv;
> +}
> +
> +/**
> + * struct spi_controller_mem_ops - SPI memory operations
> + * @adjust_op_size: shrink the data xfer of an operation to match controller's
> + *		    limitations (can be alignment of max RX/TX size
> + *		    limitations)
> + * @supports_op: check if an operation is supported by the controller
> + * @exec_op: execute a SPI memory operation
> + *
> + * This interface should be implemented by SPI controllers providing an
> + * high-level interface to execute SPI memory operation, which is usually the
> + * case for QSPI controllers.
> + */
> +struct spi_controller_mem_ops {
> +	int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
> +	bool (*supports_op)(struct spi_mem *mem,
> +			    const struct spi_mem_op *op);
> +	int (*exec_op)(struct spi_mem *mem,
> +		       const struct spi_mem_op *op);
> +};
> +
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +				       const struct spi_mem_op *op,
> +				       struct sg_table *sg);
> +
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +					  const struct spi_mem_op *op,
> +					  struct sg_table *sg);
> +
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
> +
> +bool spi_mem_supports_op(struct spi_mem *mem,
> +			 const struct spi_mem_op *op);
> +
> +int spi_mem_exec_op(struct spi_mem *mem,
> +		    const struct spi_mem_op *op);
> +
> +/**
> + * struct spi_mem_driver - SPI memory driver
> + * @spidrv: inherit from a SPI driver
> + * @probe: probe a SPI memory. Usually where detection/initialization takes
> + *	   place
> + * @remove: remove a SPI memory
> + * @shutdown: take appropriate action when the system is shutdown
> + *
> + * This is just a thin wrapper around a spi_driver. The core takes care of
> + * allocating the spi_mem object and forwarding the probe/remove/shutdown
> + * request to the spi_mem_driver. The reason we use this wrapper is because
> + * we might have to stuff more information into the spi_mem struct to let
> + * SPI controllers know more about the SPI memory they interact with, and
> + * having this intermediate layer allows us to do that without adding more
> + * useless fields to the spi_device object.
> + */
> +struct spi_mem_driver {
> +	struct spi_driver spidrv;
> +	int (*probe)(struct spi_mem *mem);
> +	int (*remove)(struct spi_mem *mem);
> +	void (*shutdown)(struct spi_mem *mem);
> +};
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
> +				       struct module *owner);
> +
> +#define spi_mem_driver_register(__drv)                                  \
> +	spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
> +
> +void spi_mem_driver_unregister(struct spi_mem_driver *drv);
> +
> +#define module_spi_mem_driver(__drv)                                    \
> +	module_driver(__drv, spi_mem_driver_register,                   \
> +		      spi_mem_driver_unregister)
> +
> +/*---------------------------------------------------------------------------*/
> +
>  /*
>   * INTERFACE between board init code and SPI infrastructure.
>   *
>
Boris Brezillon March 5, 2018, 9 a.m. UTC | #15
Hello Cyrille,

On Sun, 4 Mar 2018 22:15:20 +0100
Cyrille Pitchen <cyrille.pitchen@wedev4u.fr> wrote:

> > +
> > +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> > +{
> > +	u32 mode = mem->spi->mode;
> > +
> > +	switch (buswidth) {
> > +	case 1:
> > +		return 0;
> > +
> > +	case 2:
> > +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> > +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))  
> 
> Some SPI (flash) controller may support Quad protocols but no Duals
> protocols at all. Hence you should only test SPI_{R,T]X_DUAL.

Hm, I'd rather not change that, since it's what has been used so far.
Note that a controller that wants to put more constraints can still
implement the ->supports_op() hook.

> 
> Anyway, I think the function should completely be removed because
> SPI_{R,T}X_{DUAL,QUAD} flags should be deprecated.
> 
> They were fine when introduced long time ago when Quad SPI flashes only
> supported Fast Read 1-1-4 and maybe few of them Page Program 1-1-4.
> 
> However for many years now, Quad SPI flashes support far much SPI protocols
> and commands. For instance:
> Fast Read 1-1-4
> Fast Read 1-4-4
> (Fast Read 4-4-4)
> Page Program 1-1-4
> Page Program 1-4-4
> Page Program 4-4-4
> 
> Fast Read x-y-z means that:
> - the op code is transfered (tx) on x I/O line(s)
> - the address, mode and dummies are transfered (tx) on y I/O line(s)
> - the data are received (rx) on z I/O line(s)
> 
> Page Program x-y-z stands for:
> - the op code being transfered (tx) on x I/O line(s)
> - the address is transfered (tx) on y I/O line(s)
> the data are transfered (tx) on z I/O line(s)
> 
> Then some SPI flash controllers and/or memories may support Fast Read 1-4-4
> but not Page Program 1-1-4 whereas others may support Page Program 1-1-4
> but not Fast Read 1-4-4.
> 
> So using only SPI_{R,T}X_{DUAL,QUAD} flags, how do we make the difference
> between support of Fast Read 1-4-4 and Page Program 1-1-4 or between
> Fast Read 1-1-4 and Fast Read 1-4-4 ?

I think you're mixing 2 different things:

1/ The RX/TX buswidth capabilities for generic SPI/DualSPI/QuadSPI
   transfers, which is, I guess, what these flags have been created for

2/ The buswidth of each instruction of a memory-like operation (by
   instruction I mean, CMD, ADDR, DATA cycles)

There is no reason for a generic SPI controller to specify things for
#2, because it doesn't know anything about this spi-mem protocol, all
it can do is send/recv data using a specific bus width.

> Indeed some latest (Cypress) memories only support Fast Read 1-4-4 but no
> longer Fast Read 1-1-4.

And here you're getting too specific. This framework is not here to fully
handle SPI NOR devices, it's here to abstract things enough to make SPI
controllers compatible with various SPI memories or devices that want
to use a similar protocol (FPGAs?).

> 
> I guess we'd rather use something close to the SNOR_HWCAPS_* macros as
> defined in include/linux/mtd/spi-nor.h

->supports_op() should be enough to check such constraints, I don't see
a need for yet another set of macros.

> 
> SPI_{R,T}X_{DUAL,QUAD} flags are no longer suited to SPI flash memories for
> a long time now.

See my explanation above. I don't think SPI_{R,T}X_{DUAL,QUAD} have
been created to handle SPI memories, it's just a way to specify the
supported buswidth for a SPI transfer, that's all. And we still need
them for regular SPI transfers.

> 
> 
> > +			return 0;
> > +
> > +		break;
> > +
> > +	case 4:
> > +		if ((tx && (mode & SPI_TX_QUAD)) ||
> > +		    (!tx && (mode & SPI_RX_QUAD)))
> > +			return 0;
> > +
> > +		break;
> > +
> > +	default:
> > +		break;
> > +	}
> > +
> > +	return -ENOTSUPP;
> > +}
> > +
> > +/**
> > + * spi_mem_supports_op() - Check if a memory device and the controller it is
> > + *			   connected to support a specific memory operation
> > + * @mem: the SPI memory
> > + * @op: the memory operation to check
> > + *
> > + * Some controllers are only supporting Single or Dual IOs, others might only
> > + * support specific opcodes, or it can even be that the controller and device
> > + * both support Quad IOs but the hardware prevents you from using it because
> > + * only 2 IO lines are connected.
> > + *
> > + * This function checks whether a specific operation is supported.
> > + *
> > + * Return: true if @op is supported, false otherwise.
> > + */
> > +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +
> > +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> > +		return false;
> > +
> > +	if (op->addr.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
> > +		return false;
> > +
> > +	if (op->dummy.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> > +		return false;
> > +
> > +	if (op->data.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->data.buswidth,
> > +				   op->data.dir == SPI_MEM_DATA_IN ?
> > +				   false : true))
> > +		return false;  
> 
> You should remove the 4 tests and calls of spi_check_buswidth_req() above.

Why?

> Based on spi_controller_check_ops(), when ctrl->mem_ops is not NULL,
> mem_ops->supports_op() must be provided, this hook is mandatory.

Well, this was a mistake on my side, see my reply to Vignesh.

> 
> So call this latest hook only, has done just below. You may want to keep the 4
> tests above and spi_check_buswidth_req() but then move them in a
> stand-alone function that could be used as a default implementation of
> mem_ops->supports_op() if you want to allow some SPI controller drivers not
> to implement ->supports_op() by their own.

Okay, that's an option: make ->supports_op() mandatory and then provide
a helper if the controller does not have any extra constraints. I see
one problem with this approach:

> 
> However it should be considered a transitionnal and at some point I think
> SPI_{R,T}X_{DUAL,QUAD} flags should just be removed because many
> developers submitting patches to spi-nor have often been confused by how
> properly use those flags in their drivers.

And here I disagree, again. SPI_{R,T}X_{DUAL,QUAD} are still useful for
regular SPI transfers. I guess not every Dual/Quad SPI device is using
the spi-mem protocol, so we need to support this case too.

> 
> Till then, if you removed the 4 tests above, at least new SPI flash
> controller drivers won't have to set some odd combination of flags in
> spi->mode to make their support of SPI flash memories work as they want.

Hm, I'm not sure about spi->mode. AFAICT, SPI_{R,T}X_{DUAL,QUAD}
flags are only encoding what the device supports, not the current mode.

But I guess you were talking about controller->mode_bits which encodes
the controller capabilities, and I think this one is still useful if
the controller is a generic SPI controller.

As a side note, I'll add that it's possible to have a device supporting
SPI+DualSPI+QuadSPI and still have this device partially connected to
the host controller (some pins left unconnected), hence the
spi-{tx,rx}-bus-width properties in DTs. So we really want to check
the device capabilities, the controller capabilities and the limitation
implied by the board design.

> > +
> > +/**
> > + * struct spi_mem_op - describes a SPI memory operation
> > + * @cmd.buswidth: number of IO lines used to transmit the command
> > + * @cmd.opcode: operation opcode
> > + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> > + *		 does not need to send an address
> > + * @addr.buswidth: number of IO lines used to transmit the address cycles
> > + * @addr.buf: buffer storing the address bytes
> > + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> > + *		  be zero if the operation does not require dummy bytes
> > + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> > + * @data.buswidth: number of IO lanes used to send/receive the data
> > + * @data.dir: direction of the transfer
> > + * @data.buf.in: input buffer
> > + * @data.buf.out: output buffer
> > + */
> > +struct spi_mem_op {
> > +	struct {
> > +		u8 buswidth;
> > +		u8 opcode;
> > +	} cmd;
> > +
> > +	struct {
> > +		u8 nbytes;
> > +		u8 buswidth;
> > +		const u8 *buf;  
> 
> For the address value, I would rather use some loff_t type, instead of
> const u8 *. 
> Actually, most (if not all) SPI flash controllers have some hardware
> register to set the address value of the SPI flash command to be
> sent. Hence having this value directly in the right format would avoid to
> convert.

What is the appropriate format? An address encoded in a 64-bit integer
does not give any information on how these bytes should be transmitted
on the bus. SPI NOR is passing the address in big endian, SPI NAND seems
to do the same, but we're not sure this will be the case for all kind of
memories or devices using this spi-mem protocol.

Also, by passing it through an integer we limit ourselves to 8 bytes.
That should be enough, but who knows :-).

If we go for this loff_t field, we'll have to add an endianness field
here and force all drivers to check it.

> AFAIK, only m25p80 needs to convert from loff_t to u8 * and only
> when using the regular SPI API, ie spi_sync(), as the 'struct
> spi_flash_read_messag' already uses some integral type too.

And I'm not sure this was a good idea.

> 
> 
> > +	} addr;
> > +
> > +	struct {
> > +		u8 nbytes;
> > +		u8 buswidth;
> > +	} dummy;
> > +
> > +	struct {
> > +		u8 buswidth;
> > +		enum spi_mem_data_dir dir;
> > +		unsigned int nbytes;
> > +		/* buf.{in,out} must be DMA-able. */
> > +		union {
> > +			void *in;
> > +			const void *out;
> > +		} buf;
> > +	} data;  
> 
> Also, you should add another member in this structure to set the 'type' of
> operation for the SPI flash command:
> - register read
> - register write
> - memory read
> - memory write
> - memory erase
> [- SFDP read ? ]
> [- OTP read/write ? ]

No, really, that doesn't belong here. It's entirely SPI NOR specific.

> 
> Indeed, some SPI flash controller drivers need this information.

Then they'll have to be converted/reworked to not depend on that. I did
it for the FSL QSPI controller, and it worked fine. I'm pretty sure
almost all controllers will fit well in the new framework. For those
controllers that really are only able to handle SPI NOR devices (I'm
thinking about the Intel one, but there may be others), then they should
not be converted to the spi_mem approach.

> 
> For instance the Atmel QSPI controller has some optional feature that allow
> to scramble data on the fly from/to the (Q)SPI memory. Hence the scrambling
> should be enabled for memory {read,write} (Fast Read, Page Programm)
> commands but disabled for register {read,write} commands (Read ID, Write
> Enable, Read/Write Status Register, ...).

If scrambling is really something that we need to support, then I think
adding a spi_mem_op->data.scramble boolean would be more appropriate.

> 
> SPI flash controller drivers *should not* have to check the SPI flash command
> instruction op code to guess what kind of operation is being performed.

I completely agree. Not only they shouldn't check the opcode to guess
what is being done, but, IMHO, they should also not try to determine
what kind of operation is being done, so your suggestion to add an
operation type information is not a good idea.

> 
> Another example:
> Some (Q)SPI flash controllers map the the SPI flash memory into the system
> address space then perform some "memcpy-like" operations to access this SPI
> flash memory whereas they rely on reguler register to handle SPI flash
> commands reading from or writing into the internal registers fo the SPI
> flash. So those controller too need to make the difference between register
> and memory operations.

I already thought about the direct mapping stuff, and I think this
should be supported through new hooks in spi_mem_ops (->map() +
->unmap()?).

Thanks for your review.

Boris
Cyrille Pitchen March 5, 2018, 1:01 p.m. UTC | #16
Hi Boris,

Le 05/03/2018 à 10:00, Boris Brezillon a écrit :
> Hello Cyrille,
> 
> On Sun, 4 Mar 2018 22:15:20 +0100
> Cyrille Pitchen <cyrille.pitchen@wedev4u.fr> wrote:
> 
>>> +
>>> +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
>>> +{
>>> +	u32 mode = mem->spi->mode;
>>> +
>>> +	switch (buswidth) {
>>> +	case 1:
>>> +		return 0;
>>> +
>>> +	case 2:
>>> +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
>>> +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))  
>>
>> Some SPI (flash) controller may support Quad protocols but no Duals
>> protocols at all. Hence you should only test SPI_{R,T]X_DUAL.
> 
> Hm, I'd rather not change that, since it's what have been used so far.
> Note that a controller that wants to put more constraints can still
> implement the ->supports_op() hook.
> 
>>
>> Anyway, I think the function should completely be removed because
>> SPI_{R,T}X_{DUAL,QUAD} flags should be deprecated.
>>
>> They were fine when introduced long time ago when Quad SPI flashes only
>> supported Fast Read 1-1-4 and maybe few of them Page Program 1-1-4.
>>
>> However for many years now, Quad SPI flashes support far much SPI protocols
>> and commands. For instance:
>> Fast Read 1-1-4
>> Fast Read 1-4-4
>> (Fast Read 4-4-4)
>> Page Program 1-1-4
>> Page Program 1-4-4
>> Page Program 4-4-4
>>
>> Fast Read x-y-z means that:
>> - the op code is transfered (tx) on x I/O line(s)
>> - the address, mode and dummies are transfered (tx) on y I/O line(s)
>> - the data are received (rx) on z I/O line(s)
>>
>> Page Program x-y-z stands for:
>> - the op code being transfered (tx) on x I/O line(s)
>> - the address is transfered (tx) on y I/O line(s)
>> the data are transfered (tx) on z I/O line(s)
>>
>> Then some SPI flash controllers and/or memories may support Fast Read 1-4-4
>> but not Page Program 1-1-4 whereas others may support Page Program 1-1-4
>> but not Fast Read 1-4-4.
>>
>> So using only SPI_{R,T}X_{DUAL,QUAD} flags, how do we make the difference
>> between support of Fast Read 1-4-4 and Page Program 1-1-4 or between
>> Fast Read 1-1-4 and Fast Read 1-4-4 ?
> 
> I think you're mixing 2 different things:
> 
> 1/ The RX/TX buswidth capabilities for generic SPI/DualSPI/QuadSPI
>    transfers, which is, I guess, what these flags have been created for
> 
> 2/ The buswidth of each instruction of a memory-like operation (by
>    instruction I mean, CMD, ADDR, DATA cycles)
> 
> There is no reason for a generic SPI controller to specify things for
> #2, because it doesn't know anything about this spi-mem protocol, all
> it can do is send/recv data using a specific bus width.
>

I might be wrong but I think SPI flashes are the only SPI devices using more
than one I/O line to send and receive data.

Anyway, let's assume devices other than SPI flash also use many I/O lines
and hence make use of the SPI_{R,T}X_{DUAL,QUAD} flags.

It looks like this series is dealing with a new SPI API dedicated to SPI memories.
Just my opinion but 1/ and the associated SPI_{R,T}X_{DUAL,QUAD} flags should
be left only for the generic SPI API, - with spi_sync(), 'struct spi_message',
'struct spi_transfer' and so on.

On the other hand, the new SPI flash dedicated API should only focus on 2/
and totally ignore the SPI_{R,T}X_{DUAL,QUAD} flags.

If we take the example of the sama5d2 QSPI controller, we can use more than one
I/O line only when configured in "serial flash memory" mode. However when in "spi"
mode, we can only use regular MOSI and MISO lines.

With spi_mem_supports_op() as proposed below, we would have to set both SPI_RX_QUAD
and SPI_TX_QUAD to allow Fast Read 1-4-4 when in "serial flash memory" mode.
However with other SPI devices, hence when in "spi" mode, the SPI_{R,T}X_QUAD flags
would still be set though the controller cannot support more than a single I/O line.

The SPI_{R,T}X_{DUAL,QUAD} flags could be tested in a default implementation of
spi_mem_supports_op() but should not be tested as mandatory flags for all SPI
(flash) controllers.

 
>> Indeed some latest (Cypress) memories only support Fast Read 1-4-4 but no
>> longer Fast Read 1-1-4.
> 
> And here you're going to specific. This framework is not here to fully
> handle SPI NOR devices, it's here to abstract things enough to make SPI
> controllers compatible with various SPI memories or devices that want
> to use a similar protocol (FPGAs?).
> 
>>
>> I guess we'd rather use something close to the SNOR_HWCAPS_* macros as
>> defined in include/linux/mtd/spi-nor.h
> 
> ->supports_op() should be enough to check such constraints, I don't see
> a need for yet another set of macros.
> 
>>
>> SPI_{R,T}X_{DUAL,QUAD} flags are no longer suited to SPI flash memories for
>> a long time now.
> 
> See my explanation above. I don't think SPI_{R,T}X_{DUAL,QUAD} have
> been created to handle SPI memories, it's just a way to specify the
> supported buswidth for a SPI transfer, that's all. And we still need
> them for regular SPI transfers.
> 
>>
>>
>>> +			return 0;
>>> +
>>> +		break;
>>> +
>>> +	case 4:
>>> +		if ((tx && (mode & SPI_TX_QUAD)) ||
>>> +		    (!tx && (mode & SPI_RX_QUAD)))
>>> +			return 0;
>>> +
>>> +		break;
>>> +
>>> +	default:
>>> +		break;
>>> +	}
>>> +
>>> +	return -ENOTSUPP;
>>> +}
>>> +
>>> +/**
>>> + * spi_mem_supports_op() - Check if a memory device and the controller it is
>>> + *			   connected to support a specific memory operation
>>> + * @mem: the SPI memory
>>> + * @op: the memory operation to check
>>> + *
>>> + * Some controllers are only supporting Single or Dual IOs, others might only
>>> + * support specific opcodes, or it can even be that the controller and device
>>> + * both support Quad IOs but the hardware prevents you from using it because
>>> + * only 2 IO lines are connected.
>>> + *
>>> + * This function checks whether a specific operation is supported.
>>> + *
>>> + * Return: true if @op is supported, false otherwise.
>>> + */
>>> +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
>>> +{
>>> +	struct spi_controller *ctlr = mem->spi->controller;
>>> +
>>> +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
>>> +		return false;
>>> +
>>> +	if (op->addr.nbytes &&
>>> +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
>>> +		return false;
>>> +
>>> +	if (op->dummy.nbytes &&
>>> +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
>>> +		return false;
>>> +
>>> +	if (op->data.nbytes &&
>>> +	    spi_check_buswidth_req(mem, op->data.buswidth,
>>> +				   op->data.dir == SPI_MEM_DATA_IN ?
>>> +				   false : true))
>>> +		return false;  
>>
>> You should remove the 4 tests and calls of spi_check_buswidth_req() above.
> 
> Why?
> 
>> Based on spi_controller_check_ops(), when ctrl->mem_ops is not NULL,
>> mem_ops->supports_op() must be provided, this hook is mandatory.
> 
> Well, this was a mistake on my side, see my reply to Vignesh.
> 
>>
>> So call this latest hook only, has done just below. You may want to keep the 4
>> tests above and spi_check_buswidth_req() but then move them in a
>> stand-alone function that could be used as a default implementation of
>> mem_ops->supports_op() if you want to allow some SPI controller drivers not
>> to implement ->supports_op() by their own.
> 
> Okay, that's an option: make ->supports_op() mandatory and then provide
> an helper if the controller does not have any extra constraints. I see
> one problem with this approach:
> 
>>
>> However it should be considered a transitionnal and at some point I think
>> SPI_{R,T}X_{DUAL,QUAD} flags should just be removed because many
>> developers submitting patches to spi-nor have often been confused by how
>> properly use those flags in their drivers.
> 
> And here I disagree, again. SPI_{R,T}X_{DUAL,QUAD} are still useful for
> regular SPI transfers. I guess not every Dual/Quad SPI device is using
> the spi-mem protocol, so we need to support this case too.
> 
>>
>> Till then, if you removed the 4 tests above, at least new SPI flash
>> controller drivers won't have to set some odd combination of flags in
>> spi->mode to make their support of SPI flash memories work as they want.
> 
> Hm, I'm not sure about spi->mode. AFAICT, SPI_{R,T}X_{DUAL,QUAD}
> flags are only encoding what the device supports, not the current mode.
> 
> But I guess you were talking about controller->mode_bits which encodes
> the controller capabilities, and I think this one is still useful if
> the controller is a generic SPI controller.
> 
> As a side note, I'll add that it's possible to have a device supporting
> SPI+DualSPI+QuadSPI and still have this device partially connected to
> the host controller (some pins left unconnected), hence the
> spi-{tx,rx}-bus-width properties in DTs. So we really want to check
> the device capabilities, the controller capabilities and the limitation
> implied by the board design.
> 
>>> +
>>> +/**
>>> + * struct spi_mem_op - describes a SPI memory operation
>>> + * @cmd.buswidth: number of IO lines used to transmit the command
>>> + * @cmd.opcode: operation opcode
>>> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
>>> + *		 does not need to send an address
>>> + * @addr.buswidth: number of IO lines used to transmit the address cycles
>>> + * @addr.buf: buffer storing the address bytes
>>> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
>>> + *		  be zero if the operation does not require dummy bytes
>>> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
>>> + * @data.buswidth: number of IO lanes used to send/receive the data
>>> + * @data.dir: direction of the transfer
>>> + * @data.buf.in: input buffer
>>> + * @data.buf.out: output buffer
>>> + */
>>> +struct spi_mem_op {
>>> +	struct {
>>> +		u8 buswidth;
>>> +		u8 opcode;
>>> +	} cmd;
>>> +
>>> +	struct {
>>> +		u8 nbytes;
>>> +		u8 buswidth;
>>> +		const u8 *buf;  
>>
>> For the address value, I would rather use some loff_t type, instead of
>> const u8 *. 
>> Actually, most (if not all) SPI flash controllers have some hardware
>> register to set the address value of the SPI flash command to be
>> sent. Hence having this value directly in the right format would avoid to
>> convert.
> 
> What is the appropriate format? An address encoded in a 64-bit integer
> does not give any information on how these bytes should be transmitted
> on the bus. SPI NOR is passing the address in big endian, SPI NAND seems
> to do the same, but we're not sure this will be the case for all kind of
> memories or devices using this spi-mem protocol.
> 
> Also, by passing it through an integer we limit ourselves to 8 bytes.
> That should be enough, but who knows :-).
> 
> If we go for this loff_t field, we'll have to add an endianness field
> here and force all drivers to check it.
>

I don't think we need any endianness field. We already use loff_t only,
without additional endianness parameter, in spi-nor but also in the above
MTD layer (see 'struct mtd_info') and till now it has just worked fine.

Most, if not all, SPI flash controllers have a dedicated field in one of
their registers to write the address value into it. Generally, this register
is written with an integer in the CPU native byte order so no need to convert.

And anyway, if some conversion is ever needed, the SPI flash controller
driver knows it so we could set as a rule that the integer value for the
address is always stored in the native CPU byte order. At least most SPI flash
controller driver won't have to convert from a const u8 * to some unsigned
integral type more suited to their register accesses.
 
>> AFAIK, only m25p80 needs to convert from loff_t to u8 * and only
>> when using the regular SPI API, ie spi_sync(), as the 'struct
>> spi_flash_read_messag' already uses some integral type too.
> 
> And I'm not sure this was a good idea.
> 
>>
>>
>>> +	} addr;
>>> +
>>> +	struct {
>>> +		u8 nbytes;
>>> +		u8 buswidth;
>>> +	} dummy;
>>> +
>>> +	struct {
>>> +		u8 buswidth;
>>> +		enum spi_mem_data_dir dir;
>>> +		unsigned int nbytes;
>>> +		/* buf.{in,out} must be DMA-able. */
>>> +		union {
>>> +			void *in;
>>> +			const void *out;
>>> +		} buf;
>>> +	} data;  
>>
>> Also, you should add another member in this structure to set the 'type' of
>> operation for the SPI flash command:
>> - register read
>> - register write
>> - memory read
>> - memory write
>> - memory erase
>> [- SFDP read ? ]
>> [- OTP read/write ? ]
> 
> No, really, that's not belonging here. It's entirely SPI NOR specific.
> 

I disagree with you on that. Except for SFDP of course, this will also apply to
SPI NAND as well. And this information should be passed down to the SPI (flash)
controller driver, instead of being lost at the SPI NAND/NOR levels, so the SPI
controller driver could take the proper actions (data cache flush/invalidation
for memory operations, enabling/disabling on-the-fly data scrambling, and other
controller specific constraints).

Best regards,

Cyrille

>>
>> Indeed, some SPI flash controller drivers need this information.
> 
> Then they'll have to be converted/reworked to not depend on that. I did
> it for the FSL QSPI controller, and it worked fine. I'm pretty sure
> almost all controllers will fit well in the new framework. For those
> controllers that really are only able to handle SPI NOR devices (I'm
> thinking about the Intel one, but there may be others), then they should
> not be converted to the spi_mem approach.
> 
>>
>> For instance the Atmel QSPI controller has some optional feature that allow
>> to scramble data on the fly from/to the (Q)SPI memory. Hence the scrambling
>> should be enabled for memory {read,write} (Fast Read, Page Programm)
>> commands but disabled for register {read,write} commands (Read ID, Write
>> Enable, Read/Write Status Register, ...).
> 
> If scrambling is really something that we need to support, then I think
> adding a spi_mem_op->data.scramble boolean would be more appropriate.
> 
>>
>> SPI flash controller drivers *should not* have to check the SPI flash command
>> instruction op code to guess what kind of operation is being performed.
> 
> I completely agree. Not only they shouldn't check the opcode to guess
> what is being done, but, IMHO, they should also not try to determine
> what kind of operation is being done, so your suggestion to add an
> operation type information is not a good idea.
> 
>>
>> Another example:
>> Some (Q)SPI flash controllers map the the SPI flash memory into the system
>> address space then perform some "memcpy-like" operations to access this SPI
>> flash memory whereas they rely on reguler register to handle SPI flash
>> commands reading from or writing into the internal registers fo the SPI
>> flash. So those controller too need to make the difference between register
>> and memory operations.
> 
> I already thought about the direct mapping stuff, and I think this
> should be supported through new hooks in spi_mem_ops (->map() +
> ->unmap()?).
> 
> Thanks for your review.
> 
> Boris
>
Boris Brezillon March 5, 2018, 1:47 p.m. UTC | #17
On Mon, 5 Mar 2018 14:01:59 +0100
Cyrille Pitchen <cyrille.pitchen@wedev4u.fr> wrote:

> Hi Boris,
> 
> Le 05/03/2018 à 10:00, Boris Brezillon a écrit :
> > Hello Cyrille,
> > 
> > On Sun, 4 Mar 2018 22:15:20 +0100
> > Cyrille Pitchen <cyrille.pitchen@wedev4u.fr> wrote:
> >   
> >>> +
> >>> +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> >>> +{
> >>> +	u32 mode = mem->spi->mode;
> >>> +
> >>> +	switch (buswidth) {
> >>> +	case 1:
> >>> +		return 0;
> >>> +
> >>> +	case 2:
> >>> +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> >>> +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))    
> >>
> >> Some SPI (flash) controller may support Quad protocols but no Duals
> >> protocols at all. Hence you should only test SPI_{R,T]X_DUAL.  
> > 
> > Hm, I'd rather not change that, since it's what have been used so far.
> > Note that a controller that wants to put more constraints can still
> > implement the ->supports_op() hook.
> >   
> >>
> >> Anyway, I think the function should completely be removed because
> >> SPI_{R,T}X_{DUAL,QUAD} flags should be deprecated.
> >>
> >> They were fine when introduced long time ago when Quad SPI flashes only
> >> supported Fast Read 1-1-4 and maybe few of them Page Program 1-1-4.
> >>
> >> However for many years now, Quad SPI flashes support far much SPI protocols
> >> and commands. For instance:
> >> Fast Read 1-1-4
> >> Fast Read 1-4-4
> >> (Fast Read 4-4-4)
> >> Page Program 1-1-4
> >> Page Program 1-4-4
> >> Page Program 4-4-4
> >>
> >> Fast Read x-y-z means that:
> >> - the op code is transfered (tx) on x I/O line(s)
> >> - the address, mode and dummies are transfered (tx) on y I/O line(s)
> >> - the data are received (rx) on z I/O line(s)
> >>
> >> Page Program x-y-z stands for:
> >> - the op code being transfered (tx) on x I/O line(s)
> >> - the address is transfered (tx) on y I/O line(s)
> >> the data are transfered (tx) on z I/O line(s)
> >>
> >> Then some SPI flash controllers and/or memories may support Fast Read 1-4-4
> >> but not Page Program 1-1-4 whereas others may support Page Program 1-1-4
> >> but not Fast Read 1-4-4.
> >>
> >> So using only SPI_{R,T}X_{DUAL,QUAD} flags, how do we make the difference
> >> between support of Fast Read 1-4-4 and Page Program 1-1-4 or between
> >> Fast Read 1-1-4 and Fast Read 1-4-4 ?  
> > 
> > I think you're mixing 2 different things:
> > 
> > 1/ The RX/TX buswidth capabilities for generic SPI/DualSPI/QuadSPI
> >    transfers, which is, I guess, what these flags have been created for
> > 
> > 2/ The buswidth of each instruction of a memory-like operation (by
> >    instruction I mean, CMD, ADDR, DATA cycles)
> > 
> > There is no reason for a generic SPI controller to specify things for
> > #2, because it doesn't know anything about this spi-mem protocol, all
> > it can do is send/recv data using a specific bus width.
> >  
> 
> I might be wrong but I think SPI flashes are the only SPI devices using more
> than one I/O line to send and receive data.

FPGAs, and there are and will be other devices using the spi-mem
protocol to address non-memory devices. After all, this protocol is
just enforcing a specific sequence that has nothing memory-specific.

> 
> Anyway, let assume devices other than SPI flash also use many I/O lines
> hence make use of the SPI_{R,T}X_{DUAL,QUAD} flags.
> 
> It looks like this series is dealing a new SPI API dedicated to SPI memories.

I said memory-like devices, and it's actually about supporting any kind
of devices that follow the spi-mem protocol, or in other words, any
kind of communication where transactions are of the following form:

1/ an opcode
2/ 0 to N address cycles
3/ 0 to N dummy cycles
4/ 0 to N data in/out cycles

> Just my opinion but 1/ and the associated SPI_{R,T}X_{DUAL,QUAD} flags should
> be left only for the generic SPI API, - with spi_sync(), 'struct spi_message',
> 'struct spi_transfer' and so on.

And that's pretty much what I'm doing: a controller implementing
the spi_mem_ops interface can check the operation by itself, if the
default check based on SPI_{R,T}X_{DUAL,QUAD} flags is not sufficient.

> 
> On the other hand, the new SPI flash dedicated API should only focus on 2/
> and totally ignore the SPI_{R,T}X_{DUAL,QUAD} flags.

Not when we fallback to the generic API, but otherwise, I'm fine with
making ->supports_op() mandatory and only providing the generic checks
as a helper.

> 
> If we take the example of the sama5d2 QSPI controller, we can use more than one
> I/O line only when configure in "serial flash memory" mode. However when in "spi"
> mode, when can only use regular MOSI and MISO lines.
> 
> With spi_mem_supports_op() as proposed below, we would have to set both SPI_RX_QUAD
> and SPI_TX_QUAD to allow Fast Read 1-4-4 when in "serial flash memory" mode.
> However with other SPI devices, hence when in "spi" mode, the SPI_{R,T}X_QUAD flags
> would still be set though the controller cannot support more than a single I/O line.

Okay, that's a good reason to get this generic check out of the
spi_mem_supports_op() path and instead provide a generic helper that
can be used by drivers which wants to rely on SPI_{R,T}X_{DUAL,QUAD}
flags.

> 
> The SPI_{R,T}_{DUAL,QUAD} flags could be tested in a default implementation of
> spi_mem_supports_op() but should not be tested as mandatory flags for all SPI
> (flash) controllers.

Agreed. I still think we need a way to restrict the number of IO-lines
based on board-related information, and if spi->mode is not appropriate
for that, we need it somewhere else. Otherwise, you might end-up
assuming QSPI is supported by both the device and the controller, while
the connections on the board is, for instance, limiting it to SPI or
DualSPI.


> >   
> >>> +
> >>> +/**
> >>> + * struct spi_mem_op - describes a SPI memory operation
> >>> + * @cmd.buswidth: number of IO lines used to transmit the command
> >>> + * @cmd.opcode: operation opcode
> >>> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> >>> + *		 does not need to send an address
> >>> + * @addr.buswidth: number of IO lines used to transmit the address cycles
> >>> + * @addr.buf: buffer storing the address bytes
> >>> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> >>> + *		  be zero if the operation does not require dummy bytes
> >>> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> >>> + * @data.buswidth: number of IO lanes used to send/receive the data
> >>> + * @data.dir: direction of the transfer
> >>> + * @data.buf.in: input buffer
> >>> + * @data.buf.out: output buffer
> >>> + */
> >>> +struct spi_mem_op {
> >>> +	struct {
> >>> +		u8 buswidth;
> >>> +		u8 opcode;
> >>> +	} cmd;
> >>> +
> >>> +	struct {
> >>> +		u8 nbytes;
> >>> +		u8 buswidth;
> >>> +		const u8 *buf;    
> >>
> >> For the address value, I would rather use some loff_t type, instead of
> >> const u8 *. 
> >> Actually, most (if not all) SPI flash controllers have some hardware
> >> register to set the address value of the SPI flash command to be
> >> sent. Hence having this value directly in the right format would avoid to
> >> convert.  
> > 
> > What is the appropriate format? An address encoded in a 64-bit integer
> > does not give any information on how these bytes should be transmitted
> > on the bus. SPI NOR is passing the address in big endian, SPI NAND seems
> > to do the same, but we're not sure this will be the case for all kind of
> > memories or devices using this spi-mem protocol.
> > 
> > Also, by passing it through an integer we limit ourselves to 8 bytes.
> > That should be enough, but who knows :-).
> > 
> > If we go for this loff_t field, we'll have to add an endianness field
> > here and force all drivers to check it.
> >  
> 
> I don't think we need any endianness field. We already use loff_t only,
> without additional endianness paramater, in spi-nor but also in the above
> MTD layer (see 'struct mtd_info') and till now it has just worked fine.

It's not about the MTD layer. Can you predict that all devices will
transmit the address in big-endian? I can't.

> 
> Most, if not all, SPI flash controllers have a dedicated field in one of
> their registers to write the address value into it. Generally, this register
> is written with an integer in the CPU native byte order so no need to convert.

It's not about how you write your register, but in which order address
bytes are transmitted on the SPI wire. Currently, SPI NOR use the MSB
first scheme, but that won't necessarily be the case for other SPI
devices.

> 
> And anyway, if some conversion is ever needed, the SPI flash controller
> driver knows it so we could set as a rule that the integer value for the
> address is always stored in the native CPU byte order. At least most SPI flash
> controller driver won't have to convert from a const u8 * to some unsigned
> integral type more suited to their register accesses.

They will have to at least make sure it's properly encoded: if the
device is expecting the address to be transmitted LSB first (AKA
little-endian), they'll have to swap address bytes before writing the
value to the register.

>  
> >> AFAIK, only m25p80 needs to convert from loff_t to u8 * and only
> >> when using the regular SPI API, ie spi_sync(), as the 'struct
> >> spi_flash_read_messag' already uses some integral type too.  
> > 
> > And I'm not sure this was a good idea.
> >   
> >>
> >>  
> >>> +	} addr;
> >>> +
> >>> +	struct {
> >>> +		u8 nbytes;
> >>> +		u8 buswidth;
> >>> +	} dummy;
> >>> +
> >>> +	struct {
> >>> +		u8 buswidth;
> >>> +		enum spi_mem_data_dir dir;
> >>> +		unsigned int nbytes;
> >>> +		/* buf.{in,out} must be DMA-able. */
> >>> +		union {
> >>> +			void *in;
> >>> +			const void *out;
> >>> +		} buf;
> >>> +	} data;    
> >>
> >> Also, you should add another member in this structure to set the 'type' of
> >> operation for the SPI flash command:
> >> - register read
> >> - register write
> >> - memory read
> >> - memory write
> >> - memory erase
> >> [- SFDP read ? ]
> >> [- OTP read/write ? ]  
> > 
> > No, really, that's not belonging here. It's entirely SPI NOR specific.
> >   
> 
> I disagree with you on that. Except for SFDP of course, this will also apply to
> SPI NAND as well. And this information should be passed down to the SPI (flash)
> controller driver, instead of being lost at the SPI NAND/NOR levels,

I'm not saying we should keep these information at the spi-nor/nand
level, just saying we should find a way to make it look like a generic
solution so that SPI controllers don't have to guess what they should
do based on the memory operation type.

Typically, why should a SPI controller care about whether the operation
is an erase, a reg read or a reg write. From the controller PoV, it's
just an spi-mem operation.
The use cases you want to optimize are read/write user-data, and this
can be done in different ways (direct memory mapping is one solution).

> so the SPI
> controller driver could take the proper actions (data cache flush/invalidation
> for memory operations,

This one has to do with direct mapping, so an API to allow direct
mapping should help address that.

> enabling/disabling on-the-fly data scrambling,

And what's the problem with passing the scramble information on a
per-operation basis and letting the upper layer decide when it's a good
idea to use it or not?

> and other
> controller specific constraints).

Could you be more specific?

I really have the feeling you're reviewing that with your SPI NOR
maintainer eye, while, as I said several times, I'm trying to
address the problem more generically. My intent is not to move every
bits you have in the spi-nor layer to the spi layer. Instead, we can
progressively provide a consistent API that helps you deal with memory
devices (I guess supporting direct memory mapping is the next step if
we want to get the best perfs out of these advanced controllers) in a
device agnostic fashion.

Regards,

Boris
Boris Brezillon March 8, 2018, 2:07 p.m. UTC | #18
Hi Cyrille,

On Mon, 5 Mar 2018 14:47:02 +0100
Boris Brezillon <boris.brezillon@bootlin.com> wrote:

> > >     
> > >>> +
> > >>> +/**
> > >>> + * struct spi_mem_op - describes a SPI memory operation
> > >>> + * @cmd.buswidth: number of IO lines used to transmit the command
> > >>> + * @cmd.opcode: operation opcode
> > >>> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> > >>> + *		 does not need to send an address
> > >>> + * @addr.buswidth: number of IO lines used to transmit the address cycles
> > >>> + * @addr.buf: buffer storing the address bytes
> > >>> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> > >>> + *		  be zero if the operation does not require dummy bytes
> > >>> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> > >>> + * @data.buswidth: number of IO lanes used to send/receive the data
> > >>> + * @data.dir: direction of the transfer
> > >>> + * @data.buf.in: input buffer
> > >>> + * @data.buf.out: output buffer
> > >>> + */
> > >>> +struct spi_mem_op {
> > >>> +	struct {
> > >>> +		u8 buswidth;
> > >>> +		u8 opcode;
> > >>> +	} cmd;
> > >>> +
> > >>> +	struct {
> > >>> +		u8 nbytes;
> > >>> +		u8 buswidth;
> > >>> +		const u8 *buf;      
> > >>
> > >> For the address value, I would rather use some loff_t type, instead of
> > >> const u8 *. 
> > >> Actually, most (if not all) SPI flash controllers have some hardware
> > >> register to set the address value of the SPI flash command to be
> > >> sent. Hence having this value directly in the right format would avoid to
> > >> convert.    
> > > 
> > > What is the appropriate format? An address encoded in a 64-bit integer
> > > does not give any information on how these bytes should be transmitted
> > > on the bus. SPI NOR is passing the address in big endian, SPI NAND seems
> > > to do the same, but we're not sure this will be the case for all kind of
> > > memories or devices using this spi-mem protocol.
> > > 
> > > Also, by passing it through an integer we limit ourselves to 8 bytes.
> > > That should be enough, but who knows :-).
> > > 
> > > If we go for this loff_t field, we'll have to add an endianness field
> > > here and force all drivers to check it.
> > >    
> > 
> > I don't think we need any endianness field. We already use loff_t only,
> > without additional endianness paramater, in spi-nor but also in the above
> > MTD layer (see 'struct mtd_info') and till now it has just worked fine.  
> 
> It's not about the MTD layer. Can you predict that all devices will
> transmit the address in big-endian? I can't.

If you really want an loff_t (or u64) field here I'll change it (even
if I'm not convinced this is a good idea). In this case we'll just
assume that all devices expect addresses to be sent in big-endian on
the wire (MSB first).


> 
> >    
> > >> AFAIK, only m25p80 needs to convert from loff_t to u8 * and only
> > >> when using the regular SPI API, ie spi_sync(), as the 'struct
> > >> spi_flash_read_messag' already uses some integral type too.    
> > > 
> > > And I'm not sure this was a good idea.
> > >     
> > >>
> > >>    
> > >>> +	} addr;
> > >>> +
> > >>> +	struct {
> > >>> +		u8 nbytes;
> > >>> +		u8 buswidth;
> > >>> +	} dummy;
> > >>> +
> > >>> +	struct {
> > >>> +		u8 buswidth;
> > >>> +		enum spi_mem_data_dir dir;
> > >>> +		unsigned int nbytes;
> > >>> +		/* buf.{in,out} must be DMA-able. */
> > >>> +		union {
> > >>> +			void *in;
> > >>> +			const void *out;
> > >>> +		} buf;
> > >>> +	} data;      
> > >>
> > >> Also, you should add another member in this structure to set the 'type' of
> > >> operation for the SPI flash command:
> > >> - register read
> > >> - register write
> > >> - memory read
> > >> - memory write
> > >> - memory erase
> > >> [- SFDP read ? ]
> > >> [- OTP read/write ? ]    
> > > 
> > > No, really, that's not belonging here. It's entirely SPI NOR specific.
> > >     
> > 
> > I disagree with you on that. Except for SFDP of course, this will also apply to
> > SPI NAND as well. And this information should be passed down to the SPI (flash)
> > controller driver, instead of being lost at the SPI NAND/NOR levels,  
> 
> I'm not saying we should keep these information at the spi-nor/nand
> level, just saying we should find a way to make it look like a generic
> solution so that SPI controllers don't have to guess what they should
> do based on the memory operation type.
> 
> Typically, why should a SPI controller care about whether the operation
> is an erase, a reg read or a reg write. From the controller PoV, it's
> just an spi-mem operation.
> The use cases you want to optimize are read/write user-data, and this
> can be done in different ways (direct memory mapping is one solution).
> 
> > so the SPI
> > controller driver could take the proper actions (data cache flush/invalidation
> > for memory operations,  
> 
> This one has to do with direct mapping, so an API to allow direct
> mapping should help address that.

> 
> > enabling/disabling on-the-fly data scrambling,  
> 
> And what's the problem with passing the scramble information on a
> per-operation basis and letting the upper layer decide when it's a good
> idea to use it or not?
> 
> > and other
> > controller specific constraints).  
> 

Can we leave those additional improvements for later? I mean, we'd
better start passing extra information in the spi_mem_op struct when we
actually know what's needed instead of speculating on what the drivers
will need before having tried to convert them to the new approach.
Actually, I exercised the new interface by converting the fsl-qspi
driver and I didn't need the access-type information you're mentioning
here even though I'm using direct AHB accesses for the read path. I'm
not saying 'never', but 'let's wait until we have a real user'.

If you don't mind, I'd like to send a new version addressing all the
comments I received so far.

Regards,

Boris
diff mbox series

Patch

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b33a727a0158..57bc540a0521 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2057,6 +2057,24 @@  static int of_spi_register_master(struct spi_controller *ctlr)
 }
 #endif
 
+static int spi_controller_check_ops(struct spi_controller *ctlr)
+{
+	/*
+	 * The controller can implement only the high-level SPI-memory like
+	 * operations if it does not support regular SPI transfers.
+	 */
+	if (ctlr->mem_ops) {
+		if (!ctlr->mem_ops->supports_op ||
+		    !ctlr->mem_ops->exec_op)
+			return -EINVAL;
+	} else if (!ctlr->transfer && !ctlr->transfer_one &&
+		   !ctlr->transfer_one_message) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * spi_register_controller - register SPI master or slave controller
  * @ctlr: initialized master, originally from spi_alloc_master() or
@@ -2090,6 +2108,14 @@  int spi_register_controller(struct spi_controller *ctlr)
 	if (!dev)
 		return -ENODEV;
 
+	/*
+	 * Make sure all necessary hooks are implemented before registering
+	 * the SPI controller.
+	 */
+	status = spi_controller_check_ops(ctlr);
+	if (status)
+		return status;
+
 	if (!spi_controller_is_slave(ctlr)) {
 		status = of_spi_register_master(ctlr);
 		if (status)
@@ -2155,10 +2181,14 @@  int spi_register_controller(struct spi_controller *ctlr)
 			spi_controller_is_slave(ctlr) ? "slave" : "master",
 			dev_name(&ctlr->dev));
 
-	/* If we're using a queued driver, start the queue */
-	if (ctlr->transfer)
+	/*
+	 * If we're using a queued driver, start the queue. Note that we don't
+	 * need the queueing logic if the driver is only supporting high-level
+	 * memory operations.
+	 */
+	if (ctlr->transfer) {
 		dev_info(dev, "controller is unqueued, this is deprecated\n");
-	else {
+	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
 		status = spi_controller_initialize_queue(ctlr);
 		if (status) {
 			device_del(&ctlr->dev);
@@ -2893,6 +2923,13 @@  static int __spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_controller *ctlr = spi->controller;
 
+	/*
+	 * Some controllers do not support doing regular SPI transfers. Return
+	 * ENOTSUPP when this is the case.
+	 */
+	if (!ctlr->transfer)
+		return -ENOTSUPP;
+
 	message->spi = spi;
 
 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
@@ -3321,6 +3358,386 @@  int spi_write_then_read(struct spi_device *spi,
 }
 EXPORT_SYMBOL_GPL(spi_write_then_read);
 
+/**
+ * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
+ *					  memory operation
+ * @ctlr: the SPI controller requesting this dma_map()
+ * @op: the memory operation containing the buffer to map
+ * @sgt: a pointer to a non-initialized sg_table that will be filled by this
+ *	 function
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares everything for you and provides a ready-to-use
+ * sg_table. This function is not intended to be called from spi drivers.
+ * Only SPI controller drivers should use it.
+ * Note that the caller must ensure the memory region pointed by
+ * op->data.buf.{in,out} is DMA-able before calling this function.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+				       const struct spi_mem_op *op,
+				       struct sg_table *sgt)
+{
+	struct device *dmadev;
+
+	if (!op->data.nbytes)
+		return -EINVAL;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		dmadev = ctlr->dma_tx ?
+			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
+	else
+		dmadev = ctlr->dma_rx ?
+			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
+
+	if (!dmadev)
+		return -EINVAL;
+
+	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
+			   op->data.dir == SPI_MEM_DATA_IN ?
+			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
+
+/**
+ * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
+ *					    memory operation
+ * @ctlr: the SPI controller requesting this dma_unmap()
+ * @op: the memory operation containing the buffer to unmap
+ * @sgt: a pointer to an sg_table previously initialized by
+ *	 spi_controller_dma_map_mem_op_data()
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares things so that the CPU can access the
+ * op->data.buf.{in,out} buffer again.
+ *
+ * This function is not intended to be called from spi drivers. Only SPI
+ * controller drivers should use it.
+ *
+ * This function should be called after the DMA operation has finished and is
+ * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
+ * 0.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+					  const struct spi_mem_op *op,
+					  struct sg_table *sgt)
+{
+	struct device *dmadev;
+
+	if (!op->data.nbytes)
+		return;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		dmadev = ctlr->dma_tx ?
+			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
+	else
+		dmadev = ctlr->dma_rx ?
+			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
+
+	spi_unmap_buf(ctlr, dmadev, sgt,
+		      op->data.dir == SPI_MEM_DATA_IN ?
+		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
+
+static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
+{
+	u32 mode = mem->spi->mode;
+
+	switch (buswidth) {
+	case 1:
+		return 0;
+
+	case 2:
+		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
+		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
+			return 0;
+
+		break;
+
+	case 4:
+		if ((tx && (mode & SPI_TX_QUAD)) ||
+		    (!tx && (mode & SPI_RX_QUAD)))
+			return 0;
+
+		break;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
+/**
+ * spi_mem_supports_op() - Check if a memory device and the controller it is
+ *			   connected to support a specific memory operation
+ * @mem: the SPI memory
+ * @op: the memory operation to check
+ *
+ * Some controllers are only supporting Single or Dual IOs, others might only
+ * support specific opcodes, or it can even be that the controller and device
+ * both support Quad IOs but the hardware prevents you from using it because
+ * only 2 IO lines are connected.
+ *
+ * This function checks whether a specific operation is supported.
+ *
+ * Return: true if @op is supported, false otherwise.
+ */
+bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+
+	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
+		return false;
+
+	if (op->addr.nbytes &&
+	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
+		return false;
+
+	if (op->dummy.nbytes &&
+	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
+		return false;
+
+	if (op->data.nbytes &&
+	    spi_check_buswidth_req(mem, op->data.buswidth,
+				   op->data.dir == SPI_MEM_DATA_IN ?
+				   false : true))
+		return false;
+
+	if (ctlr->mem_ops)
+		return ctlr->mem_ops->supports_op(mem, op);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(spi_mem_supports_op);
+
+/**
+ * spi_mem_exec_op() - Execute a memory operation
+ * @mem: the SPI memory
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first checks that @op is supported and then tries to execute
+ * it.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
+	struct spi_controller *ctlr = mem->spi->controller;
+	struct spi_transfer xfers[4] = { };
+	struct spi_message msg;
+	u8 *tmpbuf;
+	int ret;
+
+	if (!spi_mem_supports_op(mem, op))
+		return -ENOTSUPP;
+
+	if (ctlr->mem_ops) {
+		if (ctlr->auto_runtime_pm) {
+			ret = pm_runtime_get_sync(ctlr->dev.parent);
+			if (ret < 0) {
+				dev_err(&ctlr->dev,
+					"Failed to power device: %d\n",
+					ret);
+				return ret;
+			}
+		}
+
+		mutex_lock(&ctlr->bus_lock_mutex);
+		mutex_lock(&ctlr->io_mutex);
+		ret = ctlr->mem_ops->exec_op(mem, op);
+		mutex_unlock(&ctlr->io_mutex);
+		mutex_unlock(&ctlr->bus_lock_mutex);
+
+		if (ctlr->auto_runtime_pm)
+			pm_runtime_put(ctlr->dev.parent);
+
+		/*
+		 * Some controllers only optimize specific paths (typically the
+		 * read path) and expect the core to use the regular SPI
+		 * interface in these cases.
+		 */
+		if (!ret || ret != -ENOTSUPP)
+			return ret;
+	}
+
+	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
+		     op->dummy.nbytes;
+
+	/*
+	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
+	 * we're guaranteed that this buffer is DMA-able, as required by the
+	 * SPI layer.
+	 */
+	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	spi_message_init(&msg);
+
+	tmpbuf[0] = op->cmd.opcode;
+	xfers[xferpos].tx_buf = tmpbuf;
+	xfers[xferpos].len = sizeof(op->cmd.opcode);
+	xfers[xferpos].tx_nbits = op->cmd.buswidth;
+	spi_message_add_tail(&xfers[xferpos], &msg);
+	xferpos++;
+	totalxferlen++;
+
+	if (op->addr.nbytes) {
+		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
+		xfers[xferpos].tx_buf = tmpbuf + 1;
+		xfers[xferpos].len = op->addr.nbytes;
+		xfers[xferpos].tx_nbits = op->addr.buswidth;
+		spi_message_add_tail(&xfers[xferpos], &msg);
+		xferpos++;
+		totalxferlen += op->addr.nbytes;
+	}
+
+	if (op->dummy.nbytes) {
+		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
+		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
+		xfers[xferpos].len = op->dummy.nbytes;
+		xfers[xferpos].tx_nbits = op->dummy.buswidth;
+		spi_message_add_tail(&xfers[xferpos], &msg);
+		xferpos++;
+		totalxferlen += op->dummy.nbytes;
+	}
+
+	if (op->data.nbytes) {
+		if (op->data.dir == SPI_MEM_DATA_IN) {
+			xfers[xferpos].rx_buf = op->data.buf.in;
+			xfers[xferpos].rx_nbits = op->data.buswidth;
+		} else {
+			xfers[xferpos].tx_buf = op->data.buf.out;
+			xfers[xferpos].tx_nbits = op->data.buswidth;
+		}
+
+		xfers[xferpos].len = op->data.nbytes;
+		spi_message_add_tail(&xfers[xferpos], &msg);
+		xferpos++;
+		totalxferlen += op->data.nbytes;
+	}
+
+	ret = spi_sync(mem->spi, &msg);
+
+	kfree(tmpbuf);
+
+	if (ret)
+		return ret;
+
+	if (msg.actual_length != totalxferlen)
+		return -EIO;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_exec_op);
+
+/**
+ * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
+ *			      match controller limitations
+ * @mem: the SPI memory
+ * @op: the operation to adjust
+ *
+ * Some controllers have FIFO limitations and must split a data transfer
+ * operation into multiple ones, others require a specific alignment for
+ * optimized accesses. This function allows SPI mem drivers to split a single
+ * operation into multiple sub-operations when required.
+ *
+ * Return: a negative error code if the controller can't properly adjust @op,
+ *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
+ *	   can't be handled in a single step.
+ */
+int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+
+	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
+		return ctlr->mem_ops->adjust_op_size(mem, op);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+
+static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
+{
+	return container_of(drv, struct spi_mem_driver, spidrv.driver);
+}
+
+static int spi_mem_probe(struct spi_device *spi)
+{
+	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+	struct spi_mem *mem;
+
+	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	mem->spi = spi;
+	spi_set_drvdata(spi, mem);
+
+	return memdrv->probe(mem);
+}
+
+static int spi_mem_remove(struct spi_device *spi)
+{
+	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+	struct spi_mem *mem = spi_get_drvdata(spi);
+
+	if (memdrv->remove)
+		return memdrv->remove(mem);
+
+	return 0;
+}
+
+static void spi_mem_shutdown(struct spi_device *spi)
+{
+	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+	struct spi_mem *mem = spi_get_drvdata(spi);
+
+	if (memdrv->shutdown)
+		memdrv->shutdown(mem);
+}
+
+/**
+ * spi_mem_driver_register_with_owner() - Register a SPI memory driver
+ * @memdrv: the SPI memory driver to register
+ * @owner: the owner of this driver
+ *
+ * Registers a SPI memory driver.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
+				       struct module *owner)
+{
+	memdrv->spidrv.probe = spi_mem_probe;
+	memdrv->spidrv.remove = spi_mem_remove;
+	memdrv->spidrv.shutdown = spi_mem_shutdown;
+
+	return __spi_register_driver(owner, &memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
+
+/**
+ * spi_mem_driver_unregister() - Unregister a SPI memory driver
+ * @memdrv: the SPI memory driver to unregister
+ *
+ * Unregisters a SPI memory driver.
+ */
+void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
+{
+	spi_unregister_driver(&memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
+
 /*-------------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7b2170bfd6e7..af3c4ac62b55 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -27,6 +27,7 @@  struct property_entry;
 struct spi_controller;
 struct spi_transfer;
 struct spi_flash_read_message;
+struct spi_controller_mem_ops;
 
 /*
  * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
@@ -376,6 +377,9 @@  static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *                    transfer_one callback.
  * @handle_err: the subsystem calls the driver to handle an error that occurs
  *		in the generic implementation of transfer_one_message().
+ * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
+ *	     This field is optional and should only be implemented if the
+ *	     controller has native support for memory like operations.
  * @unprepare_message: undo any work done by prepare_message().
  * @slave_abort: abort the ongoing transfer request on an SPI slave controller
  * @spi_flash_read: to support spi-controller hardwares that provide
@@ -564,6 +568,9 @@  struct spi_controller {
 	void (*handle_err)(struct spi_controller *ctlr,
 			   struct spi_message *message);
 
+	/* Optimized handlers for SPI memory-like operations. */
+	const struct spi_controller_mem_ops *mem_ops;
+
 	/* gpio chip select */
 	int			*cs_gpios;
 
@@ -1227,6 +1234,225 @@  int spi_flash_read(struct spi_device *spi,
 
 /*---------------------------------------------------------------------------*/
 
+/* SPI memory related definitions. */
+
+#define SPI_MEM_OP_CMD(__opcode, __buswidth)			\
+	{							\
+		.buswidth = __buswidth,				\
+		.opcode = __opcode,				\
+	}
+
+#define SPI_MEM_OP_ADDRS(__nbytes, __buf, __buswidth)		\
+	{							\
+		.nbytes = __nbytes,				\
+		.buf = __buf,					\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_NO_ADDRS	{ }
+
+#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)			\
+	{							\
+		.nbytes = __nbytes,				\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_NO_DUMMY	{ }
+
+#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)		\
+	{							\
+		.dir = SPI_MEM_DATA_IN,				\
+		.nbytes = __nbytes,				\
+		.buf.in = __buf,				\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)	\
+	{							\
+		.dir = SPI_MEM_DATA_OUT,			\
+		.nbytes = __nbytes,				\
+		.buf.out = __buf,				\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_NO_DATA	{ }
+
+/**
+ * enum spi_mem_data_dir - describes the direction of a SPI memory data
+ *			   transfer from the controller perspective
+ * @SPI_MEM_DATA_IN: data coming from the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
+ */
+enum spi_mem_data_dir {
+	SPI_MEM_DATA_IN,
+	SPI_MEM_DATA_OUT,
+};
+
+/**
+ * struct spi_mem_op - describes a SPI memory operation
+ * @cmd.buswidth: number of IO lines used to transmit the command
+ * @cmd.opcode: operation opcode
+ * @addr.nbytes: number of address bytes to send. Can be zero if the operation
+ *		 does not need to send an address
+ * @addr.buswidth: number of IO lines used to transmit the address cycles
+ * @addr.buf: buffer storing the address bytes
+ * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
+ *		  be zero if the operation does not require dummy bytes
+ * @dummy.buswidth: number of IO lines used to transmit the dummy bytes
+ * @data.buswidth: number of IO lines used to send/receive the data
+ * @data.dir: direction of the transfer
+ * @data.buf.in: input buffer
+ * @data.buf.out: output buffer
+ */
+struct spi_mem_op {
+	struct {
+		u8 buswidth;
+		u8 opcode;
+	} cmd;
+
+	struct {
+		u8 nbytes;
+		u8 buswidth;
+		const u8 *buf;
+	} addr;
+
+	struct {
+		u8 nbytes;
+		u8 buswidth;
+	} dummy;
+
+	struct {
+		u8 buswidth;
+		enum spi_mem_data_dir dir;
+		unsigned int nbytes;
+		/* buf.{in,out} must be DMA-able. */
+		union {
+			void *in;
+			const void *out;
+		} buf;
+	} data;
+};
+
+#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)		\
+	{							\
+		.cmd = __cmd,					\
+		.addr = __addr,					\
+		.dummy = __dummy,				\
+		.data = __data,					\
+	}
+
+/**
+ * struct spi_mem - describes a SPI memory device
+ * @spi: the underlying SPI device
+ * @drvpriv: spi_mem_driver private data
+ *
+ * Extra information that describe the SPI memory device and may be needed by
+ * the controller to properly handle this device should be placed here.
+ *
+ * One example would be the device size since some controllers expose their SPI
+ * mem devices through an io-mapped region.
+ */
+struct spi_mem {
+	struct spi_device *spi;
+	void *drvpriv;
+};
+
+/**
+ * spi_mem_set_drvdata() - attach driver private data to a SPI mem
+ *				  device
+ * @mem: memory device
+ * @data: data to attach to the memory device
+ */
+static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
+{
+	mem->drvpriv = data;
+}
+
+/**
+ * spi_mem_get_drvdata() - get driver private data attached to a SPI mem
+ *				  device
+ * @mem: memory device
+ *
+ * Return: the data attached to the mem device.
+ */
+static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
+{
+	return mem->drvpriv;
+}
+
+/**
+ * struct spi_controller_mem_ops - SPI memory operations
+ * @adjust_op_size: shrink the data xfer of an operation to match controller's
+ *		    limitations (can be alignment of max RX/TX size
+ *		    limitations)
+ * @supports_op: check if an operation is supported by the controller
+ * @exec_op: execute a SPI memory operation
+ *
+ * This interface should be implemented by SPI controllers providing a
+ * high-level interface to execute SPI memory operations, which is usually the
+ * case for QSPI controllers.
+ */
+struct spi_controller_mem_ops {
+	int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
+	bool (*supports_op)(struct spi_mem *mem,
+			    const struct spi_mem_op *op);
+	int (*exec_op)(struct spi_mem *mem,
+		       const struct spi_mem_op *op);
+};
+
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+				       const struct spi_mem_op *op,
+				       struct sg_table *sg);
+
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+					  const struct spi_mem_op *op,
+					  struct sg_table *sg);
+
+int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
+
+bool spi_mem_supports_op(struct spi_mem *mem,
+			 const struct spi_mem_op *op);
+
+int spi_mem_exec_op(struct spi_mem *mem,
+		    const struct spi_mem_op *op);
+
+/**
+ * struct spi_mem_driver - SPI memory driver
+ * @spidrv: inherit from a SPI driver
+ * @probe: probe a SPI memory. Usually where detection/initialization takes
+ *	   place
+ * @remove: remove a SPI memory
+ * @shutdown: take appropriate action when the system is shutdown
+ *
+ * This is just a thin wrapper around a spi_driver. The core takes care of
+ * allocating the spi_mem object and forwarding the probe/remove/shutdown
+ * request to the spi_mem_driver. The reason we use this wrapper is because
+ * we might have to stuff more information into the spi_mem struct to let
+ * SPI controllers know more about the SPI memory they interact with, and
+ * having this intermediate layer allows us to do that without adding more
+ * useless fields to the spi_device object.
+ */
+struct spi_mem_driver {
+	struct spi_driver spidrv;
+	int (*probe)(struct spi_mem *mem);
+	int (*remove)(struct spi_mem *mem);
+	void (*shutdown)(struct spi_mem *mem);
+};
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
+				       struct module *owner);
+
+#define spi_mem_driver_register(__drv)                                  \
+	spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
+
+void spi_mem_driver_unregister(struct spi_mem_driver *drv);
+
+#define module_spi_mem_driver(__drv)                                    \
+	module_driver(__drv, spi_mem_driver_register,                   \
+		      spi_mem_driver_unregister)
+
+/*---------------------------------------------------------------------------*/
+
 /*
  * INTERFACE between board init code and SPI infrastructure.
  *