[RFC,1/6] spi: Extend the core to ease integration of SPI memory controllers

Message ID 20180205232120.5851-2-boris.brezillon@bootlin.com
State New
Headers show
Series
  • spi: Extend the framework to generically support memory devices
Related show

Commit Message

Boris Brezillon Feb. 5, 2018, 11:21 p.m.
From: Boris Brezillon <boris.brezillon@free-electrons.com>

Some controllers are exposing high-level interfaces to access various
kind of SPI memories. Unfortunately they do not fit in the current
spi_controller model and usually have drivers placed in
drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
memories in general.

This is an attempt at defining a SPI memory interface which works for
all kinds of SPI memories (NORs, NANDs, SRAMs).

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
---
 drivers/spi/spi.c       | 423 +++++++++++++++++++++++++++++++++++++++++++++++-
 include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++
 2 files changed, 646 insertions(+), 3 deletions(-)

Comments

Maxime Chevallier Feb. 6, 2018, 9:43 a.m. | #1
Hi Boris,

> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> ---
>  drivers/spi/spi.c       | 423
> +++++++++++++++++++++++++++++++++++++++++++++++-
> include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++ 2 files
> changed, 646 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> index b33a727a0158..57bc540a0521 100644
> --- a/drivers/spi/spi.c
> +++ b/drivers/spi/spi.c
> @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct
> spi_controller *ctlr) }
>  #endif
>  

[...]

> +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> +	struct spi_controller *ctlr = mem->spi->controller;
> +	struct spi_transfer xfers[4] = { };
> +	struct spi_message msg;
> +	u8 *tmpbuf;
> +	int ret;
> +
> +	if (!spi_mem_supports_op(mem, op))
> +		return -ENOTSUPP;
> +
> +	if (ctlr->mem_ops) {
> +		if (ctlr->auto_runtime_pm) {
> +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> +			if (ret < 0) {
> +				dev_err(&ctlr->dev,
> +					"Failed to power device:
> %d\n",
> +					ret);
> +				return ret;
> +			}
> +		}
> +
> +		mutex_lock(&ctlr->bus_lock_mutex);
> +		mutex_lock(&ctlr->io_mutex);
> +		ret = ctlr->mem_ops->exec_op(mem, op);

As a user, what prevented me from using spi_flash_read is that it
bypasses the message queue. I have a setup that uses spi_async and I
have to make sure everything goes in the right order, so I ended up
using spi_write_then_read instead.

Is there a way to make it so that the messages that use exec_op are issued
in the correct order with regard to messages that are already queued?

Maybe we could extend spi_message or spi_transfer to store all
this opcode/dummy/addr information, so that we would use the standard
interfaces spi_sync / spi_async, and make this mechanism of exec_op
transparent from the user ?


> +		mutex_unlock(&ctlr->io_mutex);
> +		mutex_unlock(&ctlr->bus_lock_mutex);
> +
> +		if (ctlr->auto_runtime_pm)
> +			pm_runtime_put(ctlr->dev.parent);
> +
> +		/*
> +		 * Some controllers only optimize specific paths
> (typically the
> +		 * read path) and expect the core to use the regular
> SPI
> +		 * interface in these cases.
> +		 */
> +		if (!ret || ret != -ENOTSUPP)
> +			return ret;
> +	}
> +
> +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> +		     op->dummy.nbytes;
> +
> +	/*
> +	 * Allocate a buffer to transmit the CMD, ADDR cycles with
> kmalloc() so
> +	 * we're guaranteed that this buffer is DMA-able, as
> required by the
> +	 * SPI layer.
> +	 */
> +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> +	if (!tmpbuf)
> +		return -ENOMEM;
> +
> +	spi_message_init(&msg);
> +
> +	tmpbuf[0] = op->cmd.opcode;
> +	xfers[xferpos].tx_buf = tmpbuf;
> +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> +	spi_message_add_tail(&xfers[xferpos], &msg);
> +	xferpos++;
> +	totalxferlen++;
> +
> +	if (op->addr.nbytes) {
> +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + 1;
> +		xfers[xferpos].len = op->addr.nbytes;
> +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->addr.nbytes;
> +	}
> +
> +	if (op->dummy.nbytes) {
> +		memset(tmpbuf + op->addr.nbytes + 1, 0xff,
> op->dummy.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> +		xfers[xferpos].len = op->dummy.nbytes;
> +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->dummy.nbytes;
> +	}

Can't we use just one xfer for all the opcode, addr and dummy bytes ?

> +	if (op->data.nbytes) {
> +		if (op->data.dir == SPI_MEM_DATA_IN) {
> +			xfers[xferpos].rx_buf = op->data.buf.in;
> +			xfers[xferpos].rx_nbits = op->data.buswidth;
> +		} else {
> +			xfers[xferpos].tx_buf = op->data.buf.out;
> +			xfers[xferpos].tx_nbits = op->data.buswidth;
> +		}
> +
> +		xfers[xferpos].len = op->data.nbytes;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->data.nbytes;
> +	}
> +
> +	ret = spi_sync(mem->spi, &msg);
> +
> +	kfree(tmpbuf);
> +
> +	if (ret)
> +		return ret;
> +
> +	if (msg.actual_length != totalxferlen)
> +		return -EIO;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_exec_op);

[...]

Thanks,

Maxime
Boris Brezillon Feb. 6, 2018, 10:25 a.m. | #2
Hi Maxime,

On Tue, 6 Feb 2018 10:43:30 +0100
Maxime Chevallier <maxime.chevallier@smile.fr> wrote:

> Hi Boris,
> 
> > From: Boris Brezillon <boris.brezillon@free-electrons.com>
> > 
> > Some controllers are exposing high-level interfaces to access various
> > kind of SPI memories. Unfortunately they do not fit in the current
> > spi_controller model and usually have drivers placed in
> > drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> > memories in general.
> > 
> > This is an attempt at defining a SPI memory interface which works for
> > all kinds of SPI memories (NORs, NANDs, SRAMs).
> > 
> > Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> > ---
> >  drivers/spi/spi.c       | 423
> > +++++++++++++++++++++++++++++++++++++++++++++++-
> > include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++ 2 files
> > changed, 646 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> > index b33a727a0158..57bc540a0521 100644
> > --- a/drivers/spi/spi.c
> > +++ b/drivers/spi/spi.c
> > @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct
> > spi_controller *ctlr) }
> >  #endif
> >    
> 
> [...]
> 
> > +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +	struct spi_transfer xfers[4] = { };
> > +	struct spi_message msg;
> > +	u8 *tmpbuf;
> > +	int ret;
> > +
> > +	if (!spi_mem_supports_op(mem, op))
> > +		return -ENOTSUPP;
> > +
> > +	if (ctlr->mem_ops) {
> > +		if (ctlr->auto_runtime_pm) {
> > +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> > +			if (ret < 0) {
> > +				dev_err(&ctlr->dev,
> > +					"Failed to power device:
> > %d\n",
> > +					ret);
> > +				return ret;
> > +			}
> > +		}
> > +
> > +		mutex_lock(&ctlr->bus_lock_mutex);
> > +		mutex_lock(&ctlr->io_mutex);
> > +		ret = ctlr->mem_ops->exec_op(mem, op);  
> 
> As a user, what prevented me from using spi_flash_read is that it
> bypasses the message queue. I have a setup that uses spi_async and I
> have to make sure everything goes in the right order, so I ended up
> using spi_write_then_read instead.
> 
> Is there a way to make it so that the messages that use exec_op are issued
> in the correct order with regard to messages that are already queued?

Nope, that's not handled right now, and mem ops can indeed go in before
the already queued messages if spi_mem_exec_op() acquires the io_lock
before the dequeuing thread.

> 
> Maybe we could extend spi_message or spi_transfer to store all
> this opcode/dummy/addr information, so that we would use the standard
> interfaces spi_sync / spi_async, and make this mechanism of exec_op
> transparent from the user ?

That's a possibility. Note that SPI controllers are passed a spi_mem
object in ->exec_op(), not a spi_device. This is because I envision
we'll need to pass extra information to the controller to let it make
appropriate decisions (like the memory device size or other things that
may have an impact on how the controller handles the spi_mem_op
requests).
Anyway, we could stuff a spi_mem_op and a spi_mem pointer into the
spi_message struct and adapt the queuing/dequeuing logic, but I fear
this would complicate the whole thing.

Another approach would be to flush the msg queue before calling
->exec_op(). That does not guarantee that the memory operation will be
placed exactly where it should (SPI messages might be queued while
we're pumping the queue), but I'm not sure we really care.

	/*
	 * When the generic queuing/dequeuing mechanism is in place
	 * pump all pending messages to enforce FIFO behavior, and
	 * execute the SPI mem operation after that.
	 */
	if (ctlr->transfer == spi_queued_transfer)
		__spi_pump_messages(ctlr, false);

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);
	ret = ctlr->mem_ops->exec_op(mem, op);
	...	

> 
> 
> > +		mutex_unlock(&ctlr->io_mutex);
> > +		mutex_unlock(&ctlr->bus_lock_mutex);
> > +
> > +		if (ctlr->auto_runtime_pm)
> > +			pm_runtime_put(ctlr->dev.parent);
> > +
> > +		/*
> > +		 * Some controllers only optimize specific paths
> > (typically the
> > +		 * read path) and expect the core to use the regular
> > SPI
> > +		 * interface in these cases.
> > +		 */
> > +		if (!ret || ret != -ENOTSUPP)
> > +			return ret;
> > +	}
> > +
> > +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> > +		     op->dummy.nbytes;
> > +
> > +	/*
> > +	 * Allocate a buffer to transmit the CMD, ADDR cycles with
> > kmalloc() so
> > +	 * we're guaranteed that this buffer is DMA-able, as
> > required by the
> > +	 * SPI layer.
> > +	 */
> > +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> > +	if (!tmpbuf)
> > +		return -ENOMEM;
> > +
> > +	spi_message_init(&msg);
> > +
> > +	tmpbuf[0] = op->cmd.opcode;
> > +	xfers[xferpos].tx_buf = tmpbuf;
> > +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> > +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> > +	spi_message_add_tail(&xfers[xferpos], &msg);
> > +	xferpos++;
> > +	totalxferlen++;
> > +
> > +	if (op->addr.nbytes) {
> > +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> > +		xfers[xferpos].tx_buf = tmpbuf + 1;
> > +		xfers[xferpos].len = op->addr.nbytes;
> > +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> > +		spi_message_add_tail(&xfers[xferpos], &msg);
> > +		xferpos++;
> > +		totalxferlen += op->addr.nbytes;
> > +	}
> > +
> > +	if (op->dummy.nbytes) {
> > +		memset(tmpbuf + op->addr.nbytes + 1, 0xff,
> > op->dummy.nbytes);
> > +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> > +		xfers[xferpos].len = op->dummy.nbytes;
> > +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> > +		spi_message_add_tail(&xfers[xferpos], &msg);
> > +		xferpos++;
> > +		totalxferlen += op->dummy.nbytes;
> > +	}  
> 
> Can't we use just one xfer for all the opcode, addr and dummy bytes ?

We can, if they have the same buswidth, but I didn't want to go that
far into optimization before making sure the approach was acceptable.
Definitely something I can add in a v2.

> 
> > +	if (op->data.nbytes) {
> > +		if (op->data.dir == SPI_MEM_DATA_IN) {
> > +			xfers[xferpos].rx_buf = op->data.buf.in;
> > +			xfers[xferpos].rx_nbits = op->data.buswidth;
> > +		} else {
> > +			xfers[xferpos].tx_buf = op->data.buf.out;
> > +			xfers[xferpos].tx_nbits = op->data.buswidth;
> > +		}
> > +
> > +		xfers[xferpos].len = op->data.nbytes;
> > +		spi_message_add_tail(&xfers[xferpos], &msg);
> > +		xferpos++;
> > +		totalxferlen += op->data.nbytes;
> > +	}
> > +
> > +	ret = spi_sync(mem->spi, &msg);
> > +
> > +	kfree(tmpbuf);
> > +
> > +	if (ret)
> > +		return ret;
> > +
> > +	if (msg.actual_length != totalxferlen)
> > +		return -EIO;
> > +
> > +	return 0;
> > +}
> > +EXPORT_SYMBOL_GPL(spi_mem_exec_op);  
> 
> [...]
> 

Thanks for your feedback.

Boris
Mark Brown Feb. 6, 2018, 12:06 p.m. | #3
On Tue, Feb 06, 2018 at 10:43:30AM +0100, Maxime Chevallier wrote:

> As a user, what prevented me from using spi_flash_read is that it
> bypasses the message queue. I have a setup that uses spi_async and I
> have to make sure everything goes in the right order, so I ended up
> using spi_write_then_read instead.

> Is there a way to make it so that the messages that use exec_op are issued
> in the correct order with regard to messages that are already queued?

> Maybe we could extend spi_message or spi_transfer to store all
> this opcode/dummy/addr information, so that we would use the standard
> interfaces spi_sync / spi_async, and make this mechanism of exec_op
> transparent from the user ?

Or have the flash read flush out the queue, perhaps — that might be simpler.
Pushing the memory-mapped read operations through the queue does seem to
defeat some of the purpose of having them, though I can see it might be
needed; I think the current applications were read-only, or at least
read-mostly.
Miquel Raynal Feb. 9, 2018, 12:52 p.m. | #4
Hi Boris,

Just a few comments on the form below.

On Tue,  6 Feb 2018 00:21:15 +0100, Boris Brezillon
<boris.brezillon@bootlin.com> wrote:

> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).
> 
> Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
> ---
>  drivers/spi/spi.c       | 423 +++++++++++++++++++++++++++++++++++++++++++++++-
>  include/linux/spi/spi.h | 226 ++++++++++++++++++++++++++
>  2 files changed, 646 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
> index b33a727a0158..57bc540a0521 100644
> --- a/drivers/spi/spi.c
> +++ b/drivers/spi/spi.c
> @@ -2057,6 +2057,24 @@ static int of_spi_register_master(struct spi_controller *ctlr)
>  }
>  #endif
>  
> +static int spi_controller_check_ops(struct spi_controller *ctlr)
> +{
> +	/*
> +	 * The controller can implement only the high-level SPI-memory like
> +	 * operations if it does not support regular SPI transfers.
> +	 */
> +	if (ctlr->mem_ops) {
> +		if (!ctlr->mem_ops->supports_op ||
> +		    !ctlr->mem_ops->exec_op)
> +			return -EINVAL;
> +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> +		   !ctlr->transfer_one_message) {
> +		return -EINVAL;
> +	}
> +
> +	return 0;
> +}
> +
>  /**
>   * spi_register_controller - register SPI master or slave controller
>   * @ctlr: initialized master, originally from spi_alloc_master() or
> @@ -2090,6 +2108,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  	if (!dev)
>  		return -ENODEV;
>  
> +	/*
> +	 * Make sure all necessary hooks are implemented before registering
> +	 * the SPI controller.
> +	 */
> +	status = spi_controller_check_ops(ctlr);
> +	if (status)
> +		return status;
> +
>  	if (!spi_controller_is_slave(ctlr)) {
>  		status = of_spi_register_master(ctlr);
>  		if (status)
> @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  			spi_controller_is_slave(ctlr) ? "slave" : "master",
>  			dev_name(&ctlr->dev));
>  
> -	/* If we're using a queued driver, start the queue */
> -	if (ctlr->transfer)
> +	/*
> +	 * If we're using a queued driver, start the queue. Note that we don't
> +	 * need the queueing logic if the driver is only supporting high-level
> +	 * memory operations.
> +	 */
> +	if (ctlr->transfer) {
>  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> -	else {
> +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
>  		status = spi_controller_initialize_queue(ctlr);
>  		if (status) {
>  			device_del(&ctlr->dev);
> @@ -2893,6 +2923,13 @@ static int __spi_async(struct spi_device *spi, struct spi_message *message)
>  {
>  	struct spi_controller *ctlr = spi->controller;
>  
> +	/*
> +	 * Some controllers do not support doing regular SPI transfers. Return
> +	 * ENOTSUPP when this is the case.
> +	 */
> +	if (!ctlr->transfer)
> +		return -ENOTSUPP;
> +
>  	message->spi = spi;
>  
>  	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
> @@ -3321,6 +3358,386 @@ int spi_write_then_read(struct spi_device *spi,
>  }
>  EXPORT_SYMBOL_GPL(spi_write_then_read);
>  
> +/**
> + * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
> + *					  memory operation
> + * @ctlr: the SPI controller requesting this dma_map()
> + * @op: the memory operation containing the buffer to map
> + * @sgt: a pointer to a non-initialized sg_table that will be filled by this
> + *	 function
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares everything for you and provides a ready-to-use
> + * sg_table. This function is not intended to be called from spi drivers.
> + * Only SPI controller drivers should use it.
> + * Note that the caller must ensure the memory region pointed by
> + * op->data.buf.{in,out} is DMA-able before calling this function.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +				       const struct spi_mem_op *op,
> +				       struct sg_table *sgt)
> +{
> +	struct device *dmadev;
> +
> +	if (!op->data.nbytes)
> +		return -EINVAL;
> +
> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +	if (!dmadev)
> +		return -EINVAL;
> +
> +	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
> +			   op->data.dir == SPI_MEM_DATA_IN ?
> +			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
> +
> +/**
> + * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
> + *					    memory operation
> + * @ctlr: the SPI controller requesting this dma_unmap()
> + * @op: the memory operation containing the buffer to unmap
> + * @sgt: a pointer to an sg_table previously initialized by
> + *	 spi_controller_dma_map_mem_op_data()
> + *
> + * Some controllers might want to do DMA on the data buffer embedded in @op.
> + * This helper prepares things so that the CPU can access the
> + * op->data.buf.{in,out} buffer again.
> + *
> + * This function is not intended to be called from spi drivers. Only SPI

s/spi/SPI/

> + * controller drivers should use it.
> + *
> + * This function should be called after the DMA operation has finished an is

s/an/and/

> + * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
> + * 0.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +					  const struct spi_mem_op *op,
> +					  struct sg_table *sgt)
> +{
> +	struct device *dmadev;
> +
> +	if (!op->data.nbytes)
> +		return;
> +
> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> +
> +	spi_unmap_buf(ctlr, dmadev, sgt,
> +		      op->data.dir == SPI_MEM_DATA_IN ?
> +		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +}
> +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
> +
> +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> +{
> +	u32 mode = mem->spi->mode;
> +
> +	switch (buswidth) {
> +	case 1:
> +		return 0;
> +
> +	case 2:
> +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
> +			return 0;
> +
> +		break;
> +
> +	case 4:
> +		if ((tx && (mode & SPI_TX_QUAD)) ||
> +		    (!tx && (mode & SPI_RX_QUAD)))
> +			return 0;
> +
> +		break;
> +
> +	default:
> +		break;
> +	}
> +
> +	return -ENOTSUPP;
> +}
> +
> +/**
> + * spi_mem_supports_op() - Check if a memory device and the controller it is
> + *			   connected to support a specific memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to check
> + *
> + * Some controllers are only supporting Single or Dual IOs, others might only
> + * support specific opcodes, or it can even be that the controller and device
> + * both support Quad IOs but the hardware prevents you from using it because
> + * only 2 IO lines are connected.
> + *
> + * This function checks whether a specific operation is supported.
> + *
> + * Return: true if @op is supported, false otherwise.
> + */
> +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	struct spi_controller *ctlr = mem->spi->controller;
> +
> +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> +		return false;
> +
> +	if (op->addr.nbytes &&
> +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
> +		return false;
> +
> +	if (op->dummy.nbytes &&
> +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> +		return false;
> +
> +	if (op->data.nbytes &&
> +	    spi_check_buswidth_req(mem, op->data.buswidth,
> +				   op->data.dir == SPI_MEM_DATA_IN ?
> +				   false : true))

Why not just op->data.dir != SPI_MEM_DATA_IN or even better ==
SPI_MEM_DATA_OUT if it exists?

> +		return false;
> +
> +	if (ctlr->mem_ops)
> +		return ctlr->mem_ops->supports_op(mem, op);
> +
> +	return true;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
> +
> +/**
> + * spi_mem_exec_op() - Execute a memory operation
> + * @mem: the SPI memory
> + * @op: the memory operation to execute
> + *
> + * Executes a memory operation.
> + *
> + * This function first checks that @op is supported and then tries to execute
> + * it.
> + *
> + * Return: 0 in case of success, a negative error code otherwise.
> + */
> +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> +{
> +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> +	struct spi_controller *ctlr = mem->spi->controller;
> +	struct spi_transfer xfers[4] = { };
> +	struct spi_message msg;
> +	u8 *tmpbuf;
> +	int ret;
> +
> +	if (!spi_mem_supports_op(mem, op))
> +		return -ENOTSUPP;
> +
> +	if (ctlr->mem_ops) {
> +		if (ctlr->auto_runtime_pm) {
> +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> +			if (ret < 0) {
> +				dev_err(&ctlr->dev,
> +					"Failed to power device: %d\n",
> +					ret);
> +				return ret;
> +			}
> +		}
> +
> +		mutex_lock(&ctlr->bus_lock_mutex);
> +		mutex_lock(&ctlr->io_mutex);
> +		ret = ctlr->mem_ops->exec_op(mem, op);
> +		mutex_unlock(&ctlr->io_mutex);
> +		mutex_unlock(&ctlr->bus_lock_mutex);

Isn't this a bit dangerous? I guess no one should release the bus
lock without having already released the IO lock, but maybe this should
be clearly mentioned in a comment in the original structure definition?

> +
> +		if (ctlr->auto_runtime_pm)
> +			pm_runtime_put(ctlr->dev.parent);
> +
> +		/*
> +		 * Some controllers only optimize specific paths (typically the
> +		 * read path) and expect the core to use the regular SPI
> +		 * interface in these cases.
> +		 */
> +		if (!ret || ret != -ENOTSUPP)
> +			return ret;
> +	}
> +
> +	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
> +		     op->dummy.nbytes;
> +
> +	/*
> +	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
> +	 * we're guaranteed that this buffer is DMA-able, as required by the
> +	 * SPI layer.
> +	 */
> +	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
> +	if (!tmpbuf)
> +		return -ENOMEM;
> +
> +	spi_message_init(&msg);
> +
> +	tmpbuf[0] = op->cmd.opcode;
> +	xfers[xferpos].tx_buf = tmpbuf;
> +	xfers[xferpos].len = sizeof(op->cmd.opcode);
> +	xfers[xferpos].tx_nbits = op->cmd.buswidth;
> +	spi_message_add_tail(&xfers[xferpos], &msg);
> +	xferpos++;
> +	totalxferlen++;
> +
> +	if (op->addr.nbytes) {
> +		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + 1;
> +		xfers[xferpos].len = op->addr.nbytes;
> +		xfers[xferpos].tx_nbits = op->addr.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->addr.nbytes;
> +	}
> +
> +	if (op->dummy.nbytes) {
> +		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
> +		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
> +		xfers[xferpos].len = op->dummy.nbytes;
> +		xfers[xferpos].tx_nbits = op->dummy.buswidth;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->dummy.nbytes;
> +	}
> +
> +	if (op->data.nbytes) {
> +		if (op->data.dir == SPI_MEM_DATA_IN) {
> +			xfers[xferpos].rx_buf = op->data.buf.in;
> +			xfers[xferpos].rx_nbits = op->data.buswidth;
> +		} else {
> +			xfers[xferpos].tx_buf = op->data.buf.out;
> +			xfers[xferpos].tx_nbits = op->data.buswidth;
> +		}
> +
> +		xfers[xferpos].len = op->data.nbytes;
> +		spi_message_add_tail(&xfers[xferpos], &msg);
> +		xferpos++;
> +		totalxferlen += op->data.nbytes;
> +	}
> +
> +	ret = spi_sync(mem->spi, &msg);
> +
> +	kfree(tmpbuf);
> +
> +	if (ret)
> +		return ret;
> +
> +	if (msg.actual_length != totalxferlen)
> +		return -EIO;
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_exec_op);
> +
> +/**
> + * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
> + *			      match controller limitations
> + * @mem: the SPI memory
> + * @op: the operation to adjust
> + *
> + * Some controllers have FIFO limitations and must split a data transfer
> + * operation into multiple ones, others require a specific alignment for
> + * optimized accesses. This function allows SPI mem drivers to split a single
> + * operation into multiple sub-operations when required.
> + *
> + * Return: a negative error code if the controller can't properly adjust @op,
> + *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
> + *	   can't be handled in a single step.
> + */
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
> +{
> +	struct spi_controller *ctlr = mem->spi->controller;
> +
> +	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
> +		return ctlr->mem_ops->adjust_op_size(mem, op);
> +
> +	return 0;
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
> +
> +static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
> +{
> +	return container_of(drv, struct spi_mem_driver, spidrv.driver);
> +}
> +
> +static int spi_mem_probe(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem;
> +
> +	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
> +	if (!mem)
> +		return -ENOMEM;
> +
> +	mem->spi = spi;
> +	spi_set_drvdata(spi, mem);
> +
> +	return memdrv->probe(mem);
> +}
> +
> +static int spi_mem_remove(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +	if (memdrv->remove)
> +		return memdrv->remove(mem);
> +
> +	return 0;
> +}
> +
> +static void spi_mem_shutdown(struct spi_device *spi)
> +{
> +	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
> +	struct spi_mem *mem = spi_get_drvdata(spi);
> +
> +	if (memdrv->shutdown)
> +		memdrv->shutdown(mem);
> +}
> +
> +/**
> + * spi_mem_driver_register_with_owner() - Register a SPI memory driver
> + * @memdrv: the SPI memory driver to register
> + * @owner: the owner of this driver
> + *
> + * Registers a SPI memory driver.
> + *
> + * Return: 0 in case of success, a negative error core otherwise.
> + */
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
> +				       struct module *owner)
> +{
> +	memdrv->spidrv.probe = spi_mem_probe;
> +	memdrv->spidrv.remove = spi_mem_remove;
> +	memdrv->spidrv.shutdown = spi_mem_shutdown;
> +
> +	return __spi_register_driver(owner, &memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
> +
> +/**
> + * spi_mem_driver_unregister_with_owner() - Unregister a SPI memory driver
> + * @memdrv: the SPI memory driver to unregister
> + *
> + * Unregisters a SPI memory driver.
> + */
> +void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
> +{
> +	spi_unregister_driver(&memdrv->spidrv);
> +}
> +EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
> +
>  /*-------------------------------------------------------------------------*/
>  
>  #if IS_ENABLED(CONFIG_OF_DYNAMIC)
> diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
> index 7b2170bfd6e7..af3c4ac62b55 100644
> --- a/include/linux/spi/spi.h
> +++ b/include/linux/spi/spi.h
> @@ -27,6 +27,7 @@ struct property_entry;
>  struct spi_controller;
>  struct spi_transfer;
>  struct spi_flash_read_message;
> +struct spi_controller_mem_ops;
>  
>  /*
>   * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
> @@ -376,6 +377,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
>   *                    transfer_one callback.
>   * @handle_err: the subsystem calls the driver to handle an error that occurs
>   *		in the generic implementation of transfer_one_message().
> + * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
> + *	     This field is optional and should only be implemented if the
> + *	     controller has native support for memory like operations.
>   * @unprepare_message: undo any work done by prepare_message().
>   * @slave_abort: abort the ongoing transfer request on an SPI slave controller
>   * @spi_flash_read: to support spi-controller hardwares that provide
> @@ -564,6 +568,9 @@ struct spi_controller {
>  	void (*handle_err)(struct spi_controller *ctlr,
>  			   struct spi_message *message);
>  
> +	/* Optimized handlers for SPI memory-like operations. */
> +	const struct spi_controller_mem_ops *mem_ops;
> +
>  	/* gpio chip select */
>  	int			*cs_gpios;
>  
> @@ -1227,6 +1234,225 @@ int spi_flash_read(struct spi_device *spi,
>  
>  /*---------------------------------------------------------------------------*/
>  
> +/* SPI memory related definitions. */
> +
> +#define SPI_MEM_OP_CMD(__opcode, __buswidth)			\
> +	{							\
> +		.buswidth = __buswidth,				\
> +		.opcode = __opcode,				\
> +	}
> +
> +#define SPI_MEM_OP_ADDRS(__nbytes, __buf, __buswidth)		\
> +	{							\
> +		.nbytes = __nbytes,				\
> +		.buf = __buf,					\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_ADDRS	{ }
> +
> +#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)			\
> +	{							\
> +		.nbytes = __nbytes,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_DUMMY	{ }
> +
> +#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)		\
> +	{							\
> +		.dir = SPI_MEM_DATA_IN,				\
> +		.nbytes = __nbytes,				\
> +		.buf.in = __buf,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)	\
> +	{							\
> +		.dir = SPI_MEM_DATA_OUT,			\
> +		.nbytes = __nbytes,				\
> +		.buf.out = __buf,				\
> +		.buswidth = __buswidth,				\
> +	}
> +
> +#define SPI_MEM_OP_NO_DATA	{ }
> +
> +/**
> + * enum spi_mem_data_dir - describes the direction of a SPI memory data
> + *			   transfer from the controller perspective
> + * @SPI_MEM_DATA_IN: data coming from the SPI memory
> + * @SPI_MEM_DATA_OUT: data sent to the SPI memory
> + */
> +enum spi_mem_data_dir {
> +	SPI_MEM_DATA_IN,
> +	SPI_MEM_DATA_OUT,
> +};
> +
> +/**
> + * struct spi_mem_op - describes a SPI memory operation
> + * @cmd.buswidth: number of IO lines used to transmit the command
> + * @cmd.opcode: operation opcode
> + * @addr.nbytes: number of address bytes to send. Can be zero if the operation
> + *		 does not need to send an address
> + * @addr.buswidth: number of IO lines used to transmit the address cycles
> + * @addr.buf: buffer storing the address bytes
> + * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
> + *		  be zero if the operation does not require dummy bytes
> + * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
> + * @data.buswidth: number of IO lanes used to send/receive the data
> + * @data.dir: direction of the transfer
> + * @data.buf.in: input buffer
> + * @data.buf.out: output buffer
> + */
> +struct spi_mem_op {
> +	struct {
> +		u8 buswidth;
> +		u8 opcode;
> +	} cmd;
> +
> +	struct {
> +		u8 nbytes;
> +		u8 buswidth;
> +		const u8 *buf;
> +	} addr;
> +
> +	struct {
> +		u8 nbytes;
> +		u8 buswidth;
> +	} dummy;
> +
> +	struct {
> +		u8 buswidth;
> +		enum spi_mem_data_dir dir;
> +		unsigned int nbytes;
> +		/* buf.{in,out} must be DMA-able. */
> +		union {
> +			void *in;
> +			const void *out;
> +		} buf;
> +	} data;
> +};
> +
> +#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)		\
> +	{							\
> +		.cmd = __cmd,					\
> +		.addr = __addr,					\
> +		.dummy = __dummy,				\
> +		.data = __data,					\
> +	}
> +
> +/**
> + * struct spi_mem - describes a SPI memory device
> + * @spi: the underlying SPI device
> + * @drvpriv: spi_mem_driver private data
> + *
> + * Extra information that describes the SPI memory device and may be needed by
> + * the controller to properly handle this device should be placed here.
> + *
> + * One example would be the device size, since some controllers expose their SPI
> + * mem devices through an io-mapped region.
> + */
> +struct spi_mem {
> +	struct spi_device *spi;
> +	void *drvpriv;
> +};
> +
> +/**
> + * spi_mem_set_drvdata() - attach driver private data to a SPI mem
> + *				  device
> + * @mem: memory device
> + * @data: data to attach to the memory device
> + */
> +static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
> +{
> +	mem->drvpriv = data;
> +}
> +
> +/**
> + * spi_mem_get_drvdata() - get driver private data attached to a SPI mem
> + *				  device
> + * @mem: memory device
> + *
> + * Return: the data attached to the mem device.
> + */
> +static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
> +{
> +	return mem->drvpriv;
> +}
> +
> +/**
> + * struct spi_controller_mem_ops - SPI memory operations
> + * @adjust_op_size: shrink the data xfer of an operation to match controller's
> + *		    limitations (can be alignment of max RX/TX size
> + *		    limitations)
> + * @supports_op: check if an operation is supported by the controller
> + * @exec_op: execute a SPI memory operation
> + *
> + * This interface should be implemented by SPI controllers providing a
> + * high-level interface to execute SPI memory operations, which is usually the
> + * case for QSPI controllers.
> + */
> +struct spi_controller_mem_ops {
> +	int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
> +	bool (*supports_op)(struct spi_mem *mem,
> +			    const struct spi_mem_op *op);
> +	int (*exec_op)(struct spi_mem *mem,
> +		       const struct spi_mem_op *op);

:)

> +};
> +
> +int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
> +				       const struct spi_mem_op *op,
> +				       struct sg_table *sg);
> +
> +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> +					  const struct spi_mem_op *op,
> +					  struct sg_table *sg);
> +
> +int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
> +
> +bool spi_mem_supports_op(struct spi_mem *mem,
> +			 const struct spi_mem_op *op);
> +
> +int spi_mem_exec_op(struct spi_mem *mem,
> +		    const struct spi_mem_op *op);
> +
> +/**
> + * struct spi_mem_driver - SPI memory driver
> + * @spidrv: inherit from a SPI driver
> + * @probe: probe a SPI memory. Usually where detection/initialization takes
> + *	   place
> + * @remove: remove a SPI memory
> + * @shutdown: take appropriate action when the system is shutdown
> + *
> + * This is just a thin wrapper around a spi_driver. The core takes care of
> + * allocating the spi_mem object and forwarding the probe/remove/shutdown
> + * request to the spi_mem_driver. The reason we use this wrapper is because
> + * we might have to stuff more information into the spi_mem struct to let
> + * SPI controllers know more about the SPI memory they interact with, and
> + * having this intermediate layer allows us to do that without adding more
> + * useless fields to the spi_device object.
> + */
> +struct spi_mem_driver {
> +	struct spi_driver spidrv;
> +	int (*probe)(struct spi_mem *mem);
> +	int (*remove)(struct spi_mem *mem);
> +	void (*shutdown)(struct spi_mem *mem);
> +};
> +
> +int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
> +				       struct module *owner);
> +
> +#define spi_mem_driver_register(__drv)                                  \
> +	spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
> +
> +void spi_mem_driver_unregister(struct spi_mem_driver *drv);
> +
> +#define module_spi_mem_driver(__drv)                                    \
> +	module_driver(__drv, spi_mem_driver_register,                   \
> +		      spi_mem_driver_unregister)
> +
> +/*---------------------------------------------------------------------------*/
> +
>  /*
>   * INTERFACE between board init code and SPI infrastructure.
>   *


Best regards,
Miquèl
Boris Brezillon Feb. 11, 2018, 4 p.m. | #5
Hi Miquel,

On Fri, 9 Feb 2018 13:52:05 +0100
Miquel Raynal <miquel.raynal@bootlin.com> wrote:

> > +/**
> > + * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
> > + *					    memory operation
> > + * @ctlr: the SPI controller requesting this dma_unmap()
> > + * @op: the memory operation containing the buffer to unmap
> > + * @sgt: a pointer to an sg_table previously initialized by
> > + *	 spi_controller_dma_map_mem_op_data()
> > + *
> > + * Some controllers might want to do DMA on the data buffer embedded in @op.
> > + * This helper prepares things so that the CPU can access the
> > + * op->data.buf.{in,out} buffer again.
> > + *
> > + * This function is not intended to be called from spi drivers. Only SPI  
> 
> s/spi/SPI/
> 
> > + * controller drivers should use it.
> > + *
> > + * This function should be called after the DMA operation has finished an is  
> 
> s/an/and/

Will fix both.

> 
> > + * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
> > + * 0.
> > + *
> > + * Return: 0 in case of success, a negative error code otherwise.
> > + */
> > +void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
> > +					  const struct spi_mem_op *op,
> > +					  struct sg_table *sgt)
> > +{
> > +	struct device *dmadev;
> > +
> > +	if (!op->data.nbytes)
> > +		return;
> > +
> > +	if (op->data.dir == SPI_MEM_DATA_OUT)
> > +		dmadev = ctlr->dma_tx ?
> > +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> > +	else
> > +		dmadev = ctlr->dma_rx ?
> > +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
> > +
> > +	spi_unmap_buf(ctlr, dmadev, sgt,
> > +		      op->data.dir == SPI_MEM_DATA_IN ?
> > +		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
> > +}
> > +EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
> > +
> > +static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
> > +{
> > +	u32 mode = mem->spi->mode;
> > +
> > +	switch (buswidth) {
> > +	case 1:
> > +		return 0;
> > +
> > +	case 2:
> > +		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
> > +		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
> > +			return 0;
> > +
> > +		break;
> > +
> > +	case 4:
> > +		if ((tx && (mode & SPI_TX_QUAD)) ||
> > +		    (!tx && (mode & SPI_RX_QUAD)))
> > +			return 0;
> > +
> > +		break;
> > +
> > +	default:
> > +		break;
> > +	}
> > +
> > +	return -ENOTSUPP;
> > +}
> > +
> > +/**
> > + * spi_mem_supports_op() - Check if a memory device and the controller it is
> > + *			   connected to support a specific memory operation
> > + * @mem: the SPI memory
> > + * @op: the memory operation to check
> > + *
> > + * Some controllers are only supporting Single or Dual IOs, others might only
> > + * support specific opcodes, or it can even be that the controller and device
> > + * both support Quad IOs but the hardware prevents you from using it because
> > + * only 2 IO lines are connected.
> > + *
> > + * This function checks whether a specific operation is supported.
> > + *
> > + * Return: true if @op is supported, false otherwise.
> > + */
> > +bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +
> > +	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
> > +		return false;
> > +
> > +	if (op->addr.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
> > +		return false;
> > +
> > +	if (op->dummy.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
> > +		return false;
> > +
> > +	if (op->data.nbytes &&
> > +	    spi_check_buswidth_req(mem, op->data.buswidth,
> > +				   op->data.dir == SPI_MEM_DATA_IN ?
> > +				   false : true))  
> 
> Why not just op->data.dir != SPI_MEM_DATA_IN or even better ==
> SPI_MEM_DATA_OUT if it exists?

Indeed, I'll use op->data.dir == SPI_MEM_DATA_OUT.

> 
> > +		return false;
> > +
> > +	if (ctlr->mem_ops)
> > +		return ctlr->mem_ops->supports_op(mem, op);
> > +
> > +	return true;
> > +}
> > +EXPORT_SYMBOL_GPL(spi_mem_supports_op);
> > +
> > +/**
> > + * spi_mem_exec_op() - Execute a memory operation
> > + * @mem: the SPI memory
> > + * @op: the memory operation to execute
> > + *
> > + * Executes a memory operation.
> > + *
> > + * This function first checks that @op is supported and then tries to execute
> > + * it.
> > + *
> > + * Return: 0 in case of success, a negative error code otherwise.
> > + */
> > +int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
> > +{
> > +	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
> > +	struct spi_controller *ctlr = mem->spi->controller;
> > +	struct spi_transfer xfers[4] = { };
> > +	struct spi_message msg;
> > +	u8 *tmpbuf;
> > +	int ret;
> > +
> > +	if (!spi_mem_supports_op(mem, op))
> > +		return -ENOTSUPP;
> > +
> > +	if (ctlr->mem_ops) {
> > +		if (ctlr->auto_runtime_pm) {
> > +			ret = pm_runtime_get_sync(ctlr->dev.parent);
> > +			if (ret < 0) {
> > +				dev_err(&ctlr->dev,
> > +					"Failed to power device: %d\n",
> > +					ret);
> > +				return ret;
> > +			}
> > +		}
> > +
> > +		mutex_lock(&ctlr->bus_lock_mutex);
> > +		mutex_lock(&ctlr->io_mutex);
> > +		ret = ctlr->mem_ops->exec_op(mem, op);
> > +		mutex_unlock(&ctlr->io_mutex);
> > +		mutex_unlock(&ctlr->bus_lock_mutex);  
> 
> Is not this a bit dangerous? I guess that no one should release the bus
> lock without having already released the IO lock but maybe this should
> be clearly mentioned in a comment in the original structure definition?

It's not something new, spi_flash_read() was doing the same [1]. This
being said, if Mark agrees, I can add a comment in the spi_controller
struct def stating that ->bus_lock_mutex should always be acquired
before ->io_mutex.

Thanks for your review.

Boris

[1]http://elixir.free-electrons.com/linux/v4.15.2/source/drivers/spi/spi.c#L3045
Vignesh R Feb. 12, 2018, 11:50 a.m. | #6
Hi,

On Tuesday 06 February 2018 04:51 AM, Boris Brezillon wrote:
> From: Boris Brezillon <boris.brezillon@free-electrons.com>
> 
> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.
> 
> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).

Wouldn't it be better if spi_mem* implementations were in separate file?
drivers/spi/spi.c is 3.5K+ lines and is bit difficult to follow.
Boris Brezillon Feb. 12, 2018, 12:28 p.m. | #7
On Mon, 12 Feb 2018 17:20:46 +0530
Vignesh R <vigneshr@ti.com> wrote:

> Hi,
> 
> On Tuesday 06 February 2018 04:51 AM, Boris Brezillon wrote:
> > From: Boris Brezillon <boris.brezillon@free-electrons.com>
> > 
> > Some controllers are exposing high-level interfaces to access various
> > kind of SPI memories. Unfortunately they do not fit in the current
> > spi_controller model and usually have drivers placed in
> > drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> > memories in general.
> > 
> > This is an attempt at defining a SPI memory interface which works for
> > all kinds of SPI memories (NORs, NANDs, SRAMs).  
> 
> Wouldn't it be better if spi_mem* implementations were in separate file?
> drivers/spi/spi.c is 3.5K+ lines and is bit difficult to follow.

No strong opinion about where the spi_mem code should lie, so if Mark
agrees I can move it to a different file and possibly make it optional
with a Kconfig option.
Mark Brown Feb. 19, 2018, 1:53 p.m. | #8
On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:

> Some controllers are exposing high-level interfaces to access various
> kind of SPI memories. Unfortunately they do not fit in the current
> spi_controller model and usually have drivers placed in
> drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> memories in general.

> This is an attempt at defining a SPI memory interface which works for
> all kinds of SPI memories (NORs, NANDs, SRAMs).

It would be helpful if this changelog were to describe what the
problems are and how the patch addresses them.  Someone reading the git
history should be able to tell what the patch is doing without having to
google around to find a cover letter for the patch series.  It also
feels like this should be split up a bit - there's some preparatory
refactoring here, a new API, and an adaption layer to implement that API
with actual SPI controllers.  It's a very large patch doing several
different things and splitting it up would make it easier to review.

I have to say I'm rather unclear why this is being done in the SPI core
rather than as a layer on top of it - devices doing the new API can't
support normal SPI clients at all and everything about this is entirely
flash specific.  You've got a bit about that in your cover letter, I'll
reply there.

> @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
>  			spi_controller_is_slave(ctlr) ? "slave" : "master",
>  			dev_name(&ctlr->dev));
>  
> -	/* If we're using a queued driver, start the queue */
> -	if (ctlr->transfer)
> +	/*
> +	 * If we're using a queued driver, start the queue. Note that we don't
> +	 * need the queueing logic if the driver is only supporting high-level
> +	 * memory operations.
> +	 */
> +	if (ctlr->transfer) {
>  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> -	else {
> +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
>  		status = spi_controller_initialize_queue(ctlr);
>  		if (status) {
>  			device_del(&ctlr->dev);

This for example feels like a separate refactoring which could be split
out.

> +	if (op->data.dir == SPI_MEM_DATA_OUT)
> +		dmadev = ctlr->dma_tx ?
> +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> +	else
> +		dmadev = ctlr->dma_rx ?
> +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;

Please don't abuse the ternery operator like this :(
Mark Brown Feb. 19, 2018, 2 p.m. | #9
On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:

> +	/*
> +	 * The controller can implement only the high-level SPI-memory like
> +	 * operations if it does not support regular SPI transfers.
> +	 */
> +	if (ctlr->mem_ops) {
> +		if (!ctlr->mem_ops->supports_op ||
> +		    !ctlr->mem_ops->exec_op)
> +			return -EINVAL;
> +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> +		   !ctlr->transfer_one_message) {
> +		return -EINVAL;
> +	}

BTW your comment isn't describing what the code does - the comment says
that having the memory operations means the driver can't be a regular
SPI controller while the code does not do that and only checks that if a
driver has memory ops it implements two required ones.  Indeed the
existing drivers that are updated to the new API continue to implement
normal SPI operations.
Boris Brezillon Feb. 19, 2018, 2:20 p.m. | #10
Hi Mark,

On Mon, 19 Feb 2018 13:53:57 +0000
Mark Brown <broonie@kernel.org> wrote:

> On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:
> 
> > Some controllers are exposing high-level interfaces to access various
> > kind of SPI memories. Unfortunately they do not fit in the current
> > spi_controller model and usually have drivers placed in
> > drivers/mtd/spi-nor which are only supporting SPI NORs and not SPI
> > memories in general.  
> 
> > This is an attempt at defining a SPI memory interface which works for
> > all kinds of SPI memories (NORs, NANDs, SRAMs).  
> 
> It would be helpful if this changelog were to describe what the
> problems are and how the patch addresses them.  Someone reading the git
> history should be able to tell what the patch is doing without having to
> google around to find a cover letter for the patch series.

Sure. I'll copy the explanation I give in my cover letter here.

> It also
> feels like this should be split up a bit - there's some preparatory
> refactoring here, a new API, and an adaption layer to implement that API
> with actual SPI controllers.  It's a very large patch doing several
> different things and splitting it up would make it easier to review.

Noted. I'll try to spit things up.

> 
> I have to say I'm rather unclear why this is being done in the SPI core
> rather than as a layer on top of it - devices doing the new API can't
> support normal SPI clients at all and everything about this is entirely
> flash specific.  You've got a bit about that in your cover letter, I'll
> reply there.

Then I'll reply there.

> 
> > @@ -2155,10 +2181,14 @@ int spi_register_controller(struct spi_controller *ctlr)
> >  			spi_controller_is_slave(ctlr) ? "slave" : "master",
> >  			dev_name(&ctlr->dev));
> >  
> > -	/* If we're using a queued driver, start the queue */
> > -	if (ctlr->transfer)
> > +	/*
> > +	 * If we're using a queued driver, start the queue. Note that we don't
> > +	 * need the queueing logic if the driver is only supporting high-level
> > +	 * memory operations.
> > +	 */
> > +	if (ctlr->transfer) {
> >  		dev_info(dev, "controller is unqueued, this is deprecated\n");
> > -	else {
> > +	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
> >  		status = spi_controller_initialize_queue(ctlr);
> >  		if (status) {
> >  			device_del(&ctlr->dev);  
> 
> This for example feels like a separate refactoring which could be split
> out.

Hm, this change is not really required without the spi_mem stuff. I
mean, the only cases we have right now are:

1/ the controller implements ->transfer() on its own
2/ the controller implements ctlr->transfer_one()
   or ctlr->transfer_one_message() and relies on the default
   queueing/dequeuing mechanism

The spi_mem stuff adds a 3rd case:

3/ the controller only supports memory-like operation and in this case
   we don't need to initialize the queue

which is why I added this else if() at the same time I added the
spi_mem stuff.

> 
> > +	if (op->data.dir == SPI_MEM_DATA_OUT)
> > +		dmadev = ctlr->dma_tx ?
> > +			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
> > +	else
> > +		dmadev = ctlr->dma_rx ?
> > +			 ctlr->dma_rx->device->dev : ctlr->dev.parent;  
> 
> Please don't abuse the ternery operator like this :(

I'll try to remember that.

Thanks for your review.

Boris
Boris Brezillon Feb. 19, 2018, 2:32 p.m. | #11
On Mon, 19 Feb 2018 14:00:03 +0000
Mark Brown <broonie@kernel.org> wrote:

> On Tue, Feb 06, 2018 at 12:21:15AM +0100, Boris Brezillon wrote:
> 
> > +	/*
> > +	 * The controller can implement only the high-level SPI-memory like
> > +	 * operations if it does not support regular SPI transfers.
> > +	 */
> > +	if (ctlr->mem_ops) {
> > +		if (!ctlr->mem_ops->supports_op ||
> > +		    !ctlr->mem_ops->exec_op)
> > +			return -EINVAL;
> > +	} else if (!ctlr->transfer && !ctlr->transfer_one &&
> > +		   !ctlr->transfer_one_message) {
> > +		return -EINVAL;
> > +	}  
> 
> BTW your comment isn't describing what the code does - the comment says
> that having the memory operations means the driver can't be a regular
> SPI controller while the code does not do that and only checks that if a
> driver has memory ops it implements two required ones.  Indeed the
> existing drivers that are updated to the new API continue to implement
> normal SPI operations.

Definitely not what I wanted to say :-/. I guess replacing 'can' by
'may' would be more appropriate. What I want to say is that SPI
controllers do not have to implement the hooks for regular SPI
operations if they only support SPI-mem like operations, but of course
they can implement those hooks if they support both spi_mem and regular
SPI ops.

This check is here to allow registration of SPI controllers that
support spi_mem ops, regular ops or both, and prevent registration if
both spi_mem and regular hooks are missing.

Is it clearer?

Patch

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b33a727a0158..57bc540a0521 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2057,6 +2057,24 @@  static int of_spi_register_master(struct spi_controller *ctlr)
 }
 #endif
 
+static int spi_controller_check_ops(struct spi_controller *ctlr)
+{
+	/*
+	 * The controller may implement only the high-level SPI-memory like
+	 * operations if it does not support regular SPI transfers.
+	 */
+	if (ctlr->mem_ops) {
+		if (!ctlr->mem_ops->supports_op ||
+		    !ctlr->mem_ops->exec_op)
+			return -EINVAL;
+	} else if (!ctlr->transfer && !ctlr->transfer_one &&
+		   !ctlr->transfer_one_message) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /**
  * spi_register_controller - register SPI master or slave controller
  * @ctlr: initialized master, originally from spi_alloc_master() or
@@ -2090,6 +2108,14 @@  int spi_register_controller(struct spi_controller *ctlr)
 	if (!dev)
 		return -ENODEV;
 
+	/*
+	 * Make sure all necessary hooks are implemented before registering
+	 * the SPI controller.
+	 */
+	status = spi_controller_check_ops(ctlr);
+	if (status)
+		return status;
+
 	if (!spi_controller_is_slave(ctlr)) {
 		status = of_spi_register_master(ctlr);
 		if (status)
@@ -2155,10 +2181,14 @@  int spi_register_controller(struct spi_controller *ctlr)
 			spi_controller_is_slave(ctlr) ? "slave" : "master",
 			dev_name(&ctlr->dev));
 
-	/* If we're using a queued driver, start the queue */
-	if (ctlr->transfer)
+	/*
+	 * If we're using a queued driver, start the queue. Note that we don't
+	 * need the queueing logic if the driver is only supporting high-level
+	 * memory operations.
+	 */
+	if (ctlr->transfer) {
 		dev_info(dev, "controller is unqueued, this is deprecated\n");
-	else {
+	} else if (ctlr->transfer_one || ctlr->transfer_one_message) {
 		status = spi_controller_initialize_queue(ctlr);
 		if (status) {
 			device_del(&ctlr->dev);
@@ -2893,6 +2923,13 @@  static int __spi_async(struct spi_device *spi, struct spi_message *message)
 {
 	struct spi_controller *ctlr = spi->controller;
 
+	/*
+	 * Some controllers do not support doing regular SPI transfers. Return
+	 * -ENOTSUPP when this is the case.
+	 */
+	if (!ctlr->transfer)
+		return -ENOTSUPP;
+
 	message->spi = spi;
 
 	SPI_STATISTICS_INCREMENT_FIELD(&ctlr->statistics, spi_async);
@@ -3321,6 +3358,386 @@  int spi_write_then_read(struct spi_device *spi,
 }
 EXPORT_SYMBOL_GPL(spi_write_then_read);
 
+/**
+ * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
+ *					  memory operation
+ * @ctlr: the SPI controller requesting this dma_map()
+ * @op: the memory operation containing the buffer to map
+ * @sgt: a pointer to a non-initialized sg_table that will be filled by this
+ *	 function
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares everything for you and provides a ready-to-use
+ * sg_table. This function is not intended to be called from spi drivers.
+ * Only SPI controller drivers should use it.
+ * Note that the caller must ensure the memory region pointed by
+ * op->data.buf.{in,out} is DMA-able before calling this function.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+				       const struct spi_mem_op *op,
+				       struct sg_table *sgt)
+{
+	struct device *dmadev;
+
+	if (!op->data.nbytes)
+		return -EINVAL;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		dmadev = ctlr->dma_tx ?
+			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
+	else
+		dmadev = ctlr->dma_rx ?
+			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
+
+	if (!dmadev)
+		return -EINVAL;
+
+	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
+			   op->data.dir == SPI_MEM_DATA_IN ?
+			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);
+
+/**
+ * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
+ *					    memory operation
+ * @ctlr: the SPI controller requesting this dma_unmap()
+ * @op: the memory operation containing the buffer to unmap
+ * @sgt: a pointer to an sg_table previously initialized by
+ *	 spi_controller_dma_map_mem_op_data()
+ *
+ * Some controllers might want to do DMA on the data buffer embedded in @op.
+ * This helper prepares things so that the CPU can access the
+ * op->data.buf.{in,out} buffer again.
+ *
+ * This function is not intended to be called from SPI drivers. Only SPI
+ * controller drivers should use it.
+ *
+ * This function should be called after the DMA operation has finished and is
+ * only valid if the previous spi_controller_dma_map_mem_op_data() has returned
+ * 0.
+ *
+ * Note: this function returns void, unlike spi_controller_dma_map_mem_op_data().
+ */
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+					  const struct spi_mem_op *op,
+					  struct sg_table *sgt)
+{
+	struct device *dmadev;
+
+	if (!op->data.nbytes)
+		return;
+
+	if (op->data.dir == SPI_MEM_DATA_OUT)
+		dmadev = ctlr->dma_tx ?
+			 ctlr->dma_tx->device->dev : ctlr->dev.parent;
+	else
+		dmadev = ctlr->dma_rx ?
+			 ctlr->dma_rx->device->dev : ctlr->dev.parent;
+
+	spi_unmap_buf(ctlr, dmadev, sgt,
+		      op->data.dir == SPI_MEM_DATA_IN ?
+		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
+}
+EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
+
+static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
+{
+	u32 mode = mem->spi->mode;
+
+	switch (buswidth) {
+	case 1:
+		return 0;
+
+	case 2:
+		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
+		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
+			return 0;
+
+		break;
+
+	case 4:
+		if ((tx && (mode & SPI_TX_QUAD)) ||
+		    (!tx && (mode & SPI_RX_QUAD)))
+			return 0;
+
+		break;
+
+	default:
+		break;
+	}
+
+	return -ENOTSUPP;
+}
+
+/**
+ * spi_mem_supports_op() - Check if a memory device and the controller it is
+ *			   connected to support a specific memory operation
+ * @mem: the SPI memory
+ * @op: the memory operation to check
+ *
+ * Some controllers are only supporting Single or Dual IOs, others might only
+ * support specific opcodes, or it can even be that the controller and device
+ * both support Quad IOs but the hardware prevents you from using it because
+ * only 2 IO lines are connected.
+ *
+ * This function checks whether a specific operation is supported.
+ *
+ * Return: true if @op is supported, false otherwise.
+ */
+bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+
+	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
+		return false;
+
+	if (op->addr.nbytes &&
+	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
+		return false;
+
+	if (op->dummy.nbytes &&
+	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
+		return false;
+
+	if (op->data.nbytes &&
+	    spi_check_buswidth_req(mem, op->data.buswidth,
+				   op->data.dir == SPI_MEM_DATA_IN ?
+				   false : true))
+		return false;
+
+	if (ctlr->mem_ops)
+		return ctlr->mem_ops->supports_op(mem, op);
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(spi_mem_supports_op);
+
+/**
+ * spi_mem_exec_op() - Execute a memory operation
+ * @mem: the SPI memory
+ * @op: the memory operation to execute
+ *
+ * Executes a memory operation.
+ *
+ * This function first checks that @op is supported and then tries to execute
+ * it.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
+{
+	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
+	struct spi_controller *ctlr = mem->spi->controller;
+	struct spi_transfer xfers[4] = { };
+	struct spi_message msg;
+	u8 *tmpbuf;
+	int ret;
+
+	if (!spi_mem_supports_op(mem, op))
+		return -ENOTSUPP;
+
+	if (ctlr->mem_ops) {
+		if (ctlr->auto_runtime_pm) {
+			ret = pm_runtime_get_sync(ctlr->dev.parent);
+			if (ret < 0) {
+				dev_err(&ctlr->dev,
+					"Failed to power device: %d\n",
+					ret);
+				return ret;
+			}
+		}
+
+		mutex_lock(&ctlr->bus_lock_mutex);
+		mutex_lock(&ctlr->io_mutex);
+		ret = ctlr->mem_ops->exec_op(mem, op);
+		mutex_unlock(&ctlr->io_mutex);
+		mutex_unlock(&ctlr->bus_lock_mutex);
+
+		if (ctlr->auto_runtime_pm)
+			pm_runtime_put(ctlr->dev.parent);
+
+		/*
+		 * Some controllers only optimize specific paths (typically the
+		 * read path) and expect the core to use the regular SPI
+		 * interface in these cases.
+		 */
+		if (!ret || ret != -ENOTSUPP)
+			return ret;
+	}
+
+	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
+		     op->dummy.nbytes;
+
+	/*
+	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
+	 * we're guaranteed that this buffer is DMA-able, as required by the
+	 * SPI layer.
+	 */
+	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
+	if (!tmpbuf)
+		return -ENOMEM;
+
+	spi_message_init(&msg);
+
+	tmpbuf[0] = op->cmd.opcode;
+	xfers[xferpos].tx_buf = tmpbuf;
+	xfers[xferpos].len = sizeof(op->cmd.opcode);
+	xfers[xferpos].tx_nbits = op->cmd.buswidth;
+	spi_message_add_tail(&xfers[xferpos], &msg);
+	xferpos++;
+	totalxferlen++;
+
+	if (op->addr.nbytes) {
+		memcpy(tmpbuf + 1, op->addr.buf, op->addr.nbytes);
+		xfers[xferpos].tx_buf = tmpbuf + 1;
+		xfers[xferpos].len = op->addr.nbytes;
+		xfers[xferpos].tx_nbits = op->addr.buswidth;
+		spi_message_add_tail(&xfers[xferpos], &msg);
+		xferpos++;
+		totalxferlen += op->addr.nbytes;
+	}
+
+	if (op->dummy.nbytes) {
+		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
+		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
+		xfers[xferpos].len = op->dummy.nbytes;
+		xfers[xferpos].tx_nbits = op->dummy.buswidth;
+		spi_message_add_tail(&xfers[xferpos], &msg);
+		xferpos++;
+		totalxferlen += op->dummy.nbytes;
+	}
+
+	if (op->data.nbytes) {
+		if (op->data.dir == SPI_MEM_DATA_IN) {
+			xfers[xferpos].rx_buf = op->data.buf.in;
+			xfers[xferpos].rx_nbits = op->data.buswidth;
+		} else {
+			xfers[xferpos].tx_buf = op->data.buf.out;
+			xfers[xferpos].tx_nbits = op->data.buswidth;
+		}
+
+		xfers[xferpos].len = op->data.nbytes;
+		spi_message_add_tail(&xfers[xferpos], &msg);
+		xferpos++;
+		totalxferlen += op->data.nbytes;
+	}
+
+	ret = spi_sync(mem->spi, &msg);
+
+	kfree(tmpbuf);
+
+	if (ret)
+		return ret;
+
+	if (msg.actual_length != totalxferlen)
+		return -EIO;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_exec_op);
+
+/**
+ * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
+ *			      match controller limitations
+ * @mem: the SPI memory
+ * @op: the operation to adjust
+ *
+ * Some controllers have FIFO limitations and must split a data transfer
+ * operation into multiple ones, others require a specific alignment for
+ * optimized accesses. This function allows SPI mem drivers to split a single
+ * operation into multiple sub-operations when required.
+ *
+ * Return: a negative error code if the controller can't properly adjust @op,
+ *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
+ *	   can't be handled in a single step.
+ */
+int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+
+	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
+		return ctlr->mem_ops->adjust_op_size(mem, op);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
+
+static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
+{
+	return container_of(drv, struct spi_mem_driver, spidrv.driver);
+}
+
+static int spi_mem_probe(struct spi_device *spi)
+{
+	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+	struct spi_mem *mem;
+
+	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
+	if (!mem)
+		return -ENOMEM;
+
+	mem->spi = spi;
+	spi_set_drvdata(spi, mem);
+
+	return memdrv->probe(mem);
+}
+
+static int spi_mem_remove(struct spi_device *spi)
+{
+	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+	struct spi_mem *mem = spi_get_drvdata(spi);
+
+	if (memdrv->remove)
+		return memdrv->remove(mem);
+
+	return 0;
+}
+
+static void spi_mem_shutdown(struct spi_device *spi)
+{
+	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
+	struct spi_mem *mem = spi_get_drvdata(spi);
+
+	if (memdrv->shutdown)
+		memdrv->shutdown(mem);
+}
+
+/**
+ * spi_mem_driver_register_with_owner() - Register a SPI memory driver
+ * @memdrv: the SPI memory driver to register
+ * @owner: the owner of this driver
+ *
+ * Registers a SPI memory driver.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
+				       struct module *owner)
+{
+	memdrv->spidrv.probe = spi_mem_probe;
+	memdrv->spidrv.remove = spi_mem_remove;
+	memdrv->spidrv.shutdown = spi_mem_shutdown;
+
+	return __spi_register_driver(owner, &memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
+
+/**
+ * spi_mem_driver_unregister() - Unregister a SPI memory driver
+ * @memdrv: the SPI memory driver to unregister
+ *
+ * Unregisters a SPI memory driver.
+ */
+void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
+{
+	spi_unregister_driver(&memdrv->spidrv);
+}
+EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
+
 /*-------------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_OF_DYNAMIC)
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7b2170bfd6e7..af3c4ac62b55 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -27,6 +27,7 @@  struct property_entry;
 struct spi_controller;
 struct spi_transfer;
 struct spi_flash_read_message;
+struct spi_controller_mem_ops;
 
 /*
  * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
@@ -376,6 +377,9 @@  static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *                    transfer_one callback.
  * @handle_err: the subsystem calls the driver to handle an error that occurs
  *		in the generic implementation of transfer_one_message().
+ * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
+ *	     This field is optional and should only be implemented if the
+ *	     controller has native support for memory-like operations.
  * @unprepare_message: undo any work done by prepare_message().
  * @slave_abort: abort the ongoing transfer request on an SPI slave controller
  * @spi_flash_read: to support spi-controller hardwares that provide
@@ -564,6 +568,9 @@  struct spi_controller {
 	void (*handle_err)(struct spi_controller *ctlr,
 			   struct spi_message *message);
 
+	/* Optimized handlers for SPI memory-like operations. */
+	const struct spi_controller_mem_ops *mem_ops;
+
 	/* gpio chip select */
 	int			*cs_gpios;
 
@@ -1227,6 +1234,225 @@  int spi_flash_read(struct spi_device *spi,
 
 /*---------------------------------------------------------------------------*/
 
+/* SPI memory related definitions. */
+
+#define SPI_MEM_OP_CMD(__opcode, __buswidth)			\
+	{							\
+		.buswidth = __buswidth,				\
+		.opcode = __opcode,				\
+	}
+
+#define SPI_MEM_OP_ADDRS(__nbytes, __buf, __buswidth)		\
+	{							\
+		.nbytes = __nbytes,				\
+		.buf = __buf,					\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_NO_ADDRS	{ }
+
+#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)			\
+	{							\
+		.nbytes = __nbytes,				\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_NO_DUMMY	{ }
+
+#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)		\
+	{							\
+		.dir = SPI_MEM_DATA_IN,				\
+		.nbytes = __nbytes,				\
+		.buf.in = __buf,				\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)	\
+	{							\
+		.dir = SPI_MEM_DATA_OUT,			\
+		.nbytes = __nbytes,				\
+		.buf.out = __buf,				\
+		.buswidth = __buswidth,				\
+	}
+
+#define SPI_MEM_OP_NO_DATA	{ }
+
+/**
+ * enum spi_mem_data_dir - describes the direction of a SPI memory data
+ *			   transfer from the controller perspective
+ * @SPI_MEM_DATA_IN: data coming from the SPI memory
+ * @SPI_MEM_DATA_OUT: data sent to the SPI memory
+ */
+enum spi_mem_data_dir {
+	SPI_MEM_DATA_IN,
+	SPI_MEM_DATA_OUT,
+};
+
+/**
+ * struct spi_mem_op - describes a SPI memory operation
+ * @cmd.buswidth: number of IO lines used to transmit the command
+ * @cmd.opcode: operation opcode
+ * @addr.nbytes: number of address bytes to send. Can be zero if the operation
+ *		 does not need to send an address
+ * @addr.buswidth: number of IO lines used to transmit the address cycles
+ * @addr.buf: buffer storing the address bytes
+ * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
+ *		  be zero if the operation does not require dummy bytes
+ * @dummy.buswidth: number of IO lines used to transmit the dummy bytes
+ * @data.buswidth: number of IO lines used to send/receive the data
+ * @data.dir: direction of the transfer
+ * @data.buf.in: input buffer
+ * @data.buf.out: output buffer
+ */
+struct spi_mem_op {
+	struct {
+		u8 buswidth;
+		u8 opcode;
+	} cmd;
+
+	struct {
+		u8 nbytes;
+		u8 buswidth;
+		const u8 *buf;
+	} addr;
+
+	struct {
+		u8 nbytes;
+		u8 buswidth;
+	} dummy;
+
+	struct {
+		u8 buswidth;
+		enum spi_mem_data_dir dir;
+		unsigned int nbytes;
+		/* buf.{in,out} must be DMA-able. */
+		union {
+			void *in;
+			const void *out;
+		} buf;
+	} data;
+};
+
+#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)		\
+	{							\
+		.cmd = __cmd,					\
+		.addr = __addr,					\
+		.dummy = __dummy,				\
+		.data = __data,					\
+	}
+
+/**
+ * struct spi_mem - describes a SPI memory device
+ * @spi: the underlying SPI device
+ * @drvpriv: spi_mem_driver private data
+ *
+ * Extra information that describe the SPI memory device and may be needed by
+ * the controller to properly handle this device should be placed here.
+ *
+ * One example would be the device size, since some controllers expose their SPI
+ * mem devices through an io-mapped region.
+ */
+struct spi_mem {
+	struct spi_device *spi;
+	void *drvpriv;
+};
+
+/**
+ * spi_mem_set_drvdata() - attach driver private data to a SPI mem
+ *				  device
+ * @mem: memory device
+ * @data: data to attach to the memory device
+ */
+static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
+{
+	mem->drvpriv = data;
+}
+
+/**
+ * spi_mem_get_drvdata() - get driver private data attached to a SPI mem
+ *				  device
+ * @mem: memory device
+ *
+ * Return: the data attached to the mem device.
+ */
+static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
+{
+	return mem->drvpriv;
+}
+
+/**
+ * struct spi_controller_mem_ops - SPI memory operations
+ * @adjust_op_size: shrink the data xfer of an operation to match controller's
+ *		    limitations (can be alignment of max RX/TX size
+ *		    limitations)
+ * @supports_op: check if an operation is supported by the controller
+ * @exec_op: execute a SPI memory operation
+ *
+ * This interface should be implemented by SPI controllers providing a
+ * high-level interface to execute SPI memory operations, which is usually the
+ * case for QSPI controllers.
+ */
+struct spi_controller_mem_ops {
+	int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
+	bool (*supports_op)(struct spi_mem *mem,
+			    const struct spi_mem_op *op);
+	int (*exec_op)(struct spi_mem *mem,
+		       const struct spi_mem_op *op);
+};
+
+int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
+				       const struct spi_mem_op *op,
+				       struct sg_table *sg);
+
+void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
+					  const struct spi_mem_op *op,
+					  struct sg_table *sg);
+
+int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
+
+bool spi_mem_supports_op(struct spi_mem *mem,
+			 const struct spi_mem_op *op);
+
+int spi_mem_exec_op(struct spi_mem *mem,
+		    const struct spi_mem_op *op);
+
+/**
+ * struct spi_mem_driver - SPI memory driver
+ * @spidrv: inherit from a SPI driver
+ * @probe: probe a SPI memory. Usually where detection/initialization takes
+ *	   place
+ * @remove: remove a SPI memory
+ * @shutdown: take appropriate action when the system is shutdown
+ *
+ * This is just a thin wrapper around a spi_driver. The core takes care of
+ * allocating the spi_mem object and forwarding the probe/remove/shutdown
+ * request to the spi_mem_driver. The reason we use this wrapper is because
+ * we might have to stuff more information into the spi_mem struct to let
+ * SPI controllers know more about the SPI memory they interact with, and
+ * having this intermediate layer allows us to do that without adding more
+ * useless fields to the spi_device object.
+ */
+struct spi_mem_driver {
+	struct spi_driver spidrv;
+	int (*probe)(struct spi_mem *mem);
+	int (*remove)(struct spi_mem *mem);
+	void (*shutdown)(struct spi_mem *mem);
+};
+
+int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
+				       struct module *owner);
+
+#define spi_mem_driver_register(__drv)                                  \
+	spi_mem_driver_register_with_owner(__drv, THIS_MODULE)
+
+void spi_mem_driver_unregister(struct spi_mem_driver *drv);
+
+#define module_spi_mem_driver(__drv)                                    \
+	module_driver(__drv, spi_mem_driver_register,                   \
+		      spi_mem_driver_unregister)
+
+/*---------------------------------------------------------------------------*/
+
 /*
  * INTERFACE between board init code and SPI infrastructure.
  *