[CFT,09/11] mtd: omap2: add DMA engine support

Message ID E1Scaaa-0003uB-1l@rmk-PC.arm.linux.org.uk
State Accepted
Commit 763e735910922382c2577e820e2a51df0a7cf17c

Commit Message

Russell King June 7, 2012, 11:09 a.m. UTC
Add DMA engine support to the OMAP2 NAND driver.  This supplements the
private DMA API implementation contained within this driver, and the
driver can be independently switched at build time between using DMA
engine and the private DMA API.

Tested-by: Grazvydas Ignotas <notasas@gmail.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
---
 drivers/mtd/nand/omap2.c |   92 +++++++++++++++++++++++++++++++++++++++++++++-
 1 files changed, 91 insertions(+), 1 deletions(-)
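
For readers following along, the probe-time pattern the patch introduces works like this: both back ends stay compiled in, the driver first tries to obtain a generic DMA engine channel, and it falls back to the private OMAP DMA API if none is available. A minimal sketch of that pattern, assuming <linux/dmaengine.h> and <linux/omap-dma.h>; the function and parameter names here are illustrative, not the driver's own:

static struct dma_chan *request_nand_dma(dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,	/* reads drain the GPMC FIFO */
		.dst_addr	= fifo_addr,	/* writes fill the GPMC FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 16,
		.dst_maxburst	= 16,
	};
	dma_cap_mask_t mask;
	unsigned sig = OMAP24XX_DMA_GPMC;	/* GPMC DMA request line */
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);		/* we need a slave channel */
	chan = dma_request_channel(mask, omap_dma_filter_fn, &sig);
	if (!chan)
		return NULL;	/* caller falls back to the private DMA API */

	if (dmaengine_slave_config(chan, &cfg)) {
		dma_release_channel(chan);
		return NULL;
	}
	return chan;
}

Note that in the patch itself a failed slave config is treated as a hard probe error rather than a fallback; the sketch folds both checks into one helper for brevity.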

Comments

Artem Bityutskiy June 7, 2012, 12:49 p.m. UTC | #1
On Thu, 2012-06-07 at 12:09 +0100, Russell King wrote:
> Add DMA engine support to the OMAP2 NAND driver.  This supplements the
> private DMA API implementation contained within this driver, and the
> driver can be independently switched at build time between using DMA
> engine and the private DMA API.
> 
> Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

I guess it makes sense to make this stuff go in via the OMAP tree.
Russell King - ARM Linux June 7, 2012, 1:11 p.m. UTC | #2
On Thu, Jun 07, 2012 at 03:49:35PM +0300, Artem Bityutskiy wrote:
> On Thu, 2012-06-07 at 12:09 +0100, Russell King wrote:
> > Add DMA engine support to the OMAP2 NAND driver.  This supplements the
> > private DMA API implementation contained within this driver, and the
> > driver can be independently switched at build time between using DMA
> > engine and the private DMA API.
> > 
> > Tested-by: Grazvydas Ignotas <notasas@gmail.com>
> > Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
> 
> I guess it makes sense to make this stuff go in via the OMAP tree.

No, it makes sense to get this stuff via a single tree all together,
because, as you can see from the thread structure, it isn't purely
an OMAP thing.

The OMAP stuff depends on a core set, as does a bunch of PL08x and
SA11x0 changes.  We can't stuff all that through the OMAP tree, that
wouldn't make any sense.

What probably should happen is that the tip of the OMAP stuff gets
pulled by Tony into his tree, and we share those commits between my
tree and his - and then it doesn't matter what goes in when and by
whom.
Artem Bityutskiy June 7, 2012, 1:28 p.m. UTC | #3
On Thu, 2012-06-07 at 14:11 +0100, Russell King - ARM Linux wrote:
> No, it makes sense to get this stuff via a single tree all together,
> because, as you can see from the thread structure, it isn't purely
> an OMAP thing.
> 
> The OMAP stuff depends on a core set, as does a bunch of PL08x and
> SA11x0 changes.  We can't stuff all that through the OMAP tree, that
> wouldn't make any sense.
> 
> What probably should happen is that the tip of the OMAP stuff gets
> pulled by Tony into his tree, and we share those commits between my
> tree and his - and then it doesn't matter what goes in when and by
> whom.

Oh, sure, sorry, I actually wanted to say that these two patches should
_not_ go via the MTD tree.
Tony Lindgren June 7, 2012, 5:10 p.m. UTC | #4
* Artem Bityutskiy <dedekind1@gmail.com> [120607 06:28]:
> On Thu, 2012-06-07 at 14:11 +0100, Russell King - ARM Linux wrote:
> > No, it makes sense to get this stuff via a single tree all together,
> > because, as you can see from the thread structure, it isn't purely
> > an OMAP thing.
> > 
> > The OMAP stuff depends on a core set, as does a bunch of PL08x and
> > SA11x0 changes.  We can't stuff all that through the OMAP tree, that
> > wouldn't make any sense.
> > 
> > What probably should happen is that the tip of the OMAP stuff gets
> > pulled by Tony into his tree, and we share those commits between my
> > tree and his - and then it doesn't matter what goes in when and by
> > whom.
> 
> Oh, sure, sorry, I actually wanted to say that these two patches should
> _not_ go via the MTD tree.

What Russell is suggesting works well for me.

Regards,

Tony
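
Before the diff itself, a note on the transfer path it adds: the dmaengine flow is always the same sequence of map the buffer, prepare a slave descriptor, install a completion callback, submit, issue pending, then sleep until the callback fires. A condensed sketch of that sequence under the same assumptions as above (illustrative names; <linux/dmaengine.h>, <linux/scatterlist.h> and <linux/completion.h>):

static void xfer_done(void *data)
{
	complete((struct completion *)data);	/* wake the sleeping submitter */
}

/* One slave-DMA transfer of len bytes at buf; returns 0 or -EIO. */
static int slave_dma_xfer(struct dma_chan *chan, void *buf, size_t len,
			  bool is_write)
{
	/* two distinct direction enums: dma_data_direction for the
	 * mapping, dma_transfer_direction for the descriptor */
	enum dma_data_direction ddir = is_write ? DMA_TO_DEVICE
						: DMA_FROM_DEVICE;
	struct dma_async_tx_descriptor *tx;
	struct completion done;
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, ddir))
		return -EIO;

	tx = dmaengine_prep_slave_sg(chan, &sg, 1,
			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx) {
		dma_unmap_sg(chan->device->dev, &sg, 1, ddir);
		return -EIO;
	}

	init_completion(&done);
	tx->callback = xfer_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* start the engine */
	wait_for_completion(&done);

	dma_unmap_sg(chan->device->dev, &sg, 1, ddir);
	return 0;
}

In the patch this sequence is interleaved with starting the GPMC prefetch engine and draining it afterwards, which is OMAP-specific; the dmaengine calls themselves are the generic part.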

Patch

diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index d7f681d..2912d6c 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -9,6 +9,7 @@ 
  */
 
 #include <linux/platform_device.h>
+#include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/module.h>
@@ -18,6 +19,7 @@ 
 #include <linux/mtd/mtd.h>
 #include <linux/mtd/nand.h>
 #include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
 #include <linux/io.h>
 #include <linux/slab.h>
 
@@ -123,6 +125,7 @@  struct omap_nand_info {
 	int				gpmc_cs;
 	unsigned long			phys_base;
 	struct completion		comp;
+	struct dma_chan			*dma;
 	int				dma_ch;
 	int				gpmc_irq;
 	enum {
@@ -345,6 +348,10 @@  static void omap_nand_dma_cb(int lch, u16 ch_status, void *data)
 {
 	complete((struct completion *) data);
 }
+static void omap_nand_dma_callback(void *data)
+{
+	complete((struct completion *) data);
+}
 
 /*
  * omap_nand_dma_transfer: configure and start dma transfer
@@ -382,6 +389,56 @@  static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK);
 	}
 
+	if (info->dma) {
+		struct dma_async_tx_descriptor *tx;
+		struct scatterlist sg;
+		unsigned n;
+
+		sg_init_one(&sg, addr, len);
+		n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+		if (n == 0) {
+			dev_err(&info->pdev->dev,
+				"Couldn't DMA map a %d byte buffer\n", len);
+			goto out_copy;
+		}
+
+		tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+			is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+		if (!tx) {
+			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+			goto out_copy;
+		}
+		tx->callback = omap_nand_dma_callback;
+		tx->callback_param = &info->comp;
+		dmaengine_submit(tx);
+
+		/*  configure and start prefetch transfer */
+		ret = gpmc_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
+		if (ret) {
+			/* PFPW engine is busy, use cpu copy method */
+			dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+			goto out_copy;
+		}
+
+		init_completion(&info->comp);
+		dma_async_issue_pending(info->dma);
+
+		/* wait for the DMA transfer to complete */
+		wait_for_completion(&info->comp);
+		tim = 0;
+		limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		while (gpmc_read_status(GPMC_PREFETCH_COUNT) && (tim++ < limit))
+			cpu_relax();
+
+		/* disable and stop the PFPW engine */
+		gpmc_prefetch_reset(info->gpmc_cs);
+
+		dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+		return 0;
+	}
+
 	dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir);
 	if (dma_mapping_error(&info->pdev->dev, dma_addr)) {
 		dev_err(&info->pdev->dev,
@@ -414,7 +471,6 @@  static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
 		goto out_copy_unmap;
 
 	init_completion(&info->comp);
-
 	omap_start_dma(info->dma_ch);
 
 	/* setup and start DMA using dma_addr */
@@ -1164,6 +1220,8 @@  static int __devinit omap_nand_probe(struct platform_device *pdev)
 	struct omap_nand_platform_data	*pdata;
 	int				err;
 	int				i, offset;
+	dma_cap_mask_t mask;
+	unsigned sig;
 
 	pdata = pdev->dev.platform_data;
 	if (pdata == NULL) {
@@ -1244,6 +1302,33 @@  static int __devinit omap_nand_probe(struct platform_device *pdev)
 		break;
 
 	case NAND_OMAP_PREFETCH_DMA:
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		sig = OMAP24XX_DMA_GPMC;
+		info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig);
+		if (!info->dma) {
+			dev_warn(&pdev->dev, "DMA engine request failed\n");
+		} else {
+			struct dma_slave_config cfg;
+			int rc;
+
+			memset(&cfg, 0, sizeof(cfg));
+			cfg.src_addr = info->phys_base;
+			cfg.dst_addr = info->phys_base;
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.src_maxburst = 16;
+			cfg.dst_maxburst = 16;
+			rc = dmaengine_slave_config(info->dma, &cfg);
+			if (rc) {
+				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+					rc);
+				goto out_release_mem_region;
+			}
+			info->nand.read_buf   = omap_read_buf_dma_pref;
+			info->nand.write_buf  = omap_write_buf_dma_pref;
+			break;
+		}
 		err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND",
 				omap_nand_dma_cb, &info->comp, &info->dma_ch);
 		if (err < 0) {
@@ -1358,6 +1443,8 @@  static int __devinit omap_nand_probe(struct platform_device *pdev)
 	return 0;
 
 out_release_mem_region:
+	if (info->dma)
+		dma_release_channel(info->dma);
 	release_mem_region(info->phys_base, NAND_IO_SIZE);
 out_free_info:
 	kfree(info);
@@ -1376,6 +1463,9 @@  static int omap_nand_remove(struct platform_device *pdev)
 	if (info->dma_ch != -1)
 		omap_free_dma(info->dma_ch);
 
+	if (info->dma)
+		dma_release_channel(info->dma);
+
 	if (info->gpmc_irq)
 		free_irq(info->gpmc_irq, info);