
[RFC,2/7] mtd: nand: move code to rawnand/ subdir

Message ID 1474539180-5863-3-git-send-email-boris.brezillon@free-electrons.com
State Superseded

Commit Message

Boris Brezillon Sept. 22, 2016, 10:12 a.m. UTC
As part of the process of sharing more code between different NAND-based
devices, we need to move all the raw NAND related code to the rawnand/
subdirectory.
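
The move itself is mechanical: the core and driver files are renamed
as-is, and the only other change is a one-line update to
drivers/mtd/sm_ftl.c (presumably its reference to the relocated
sm_common.h). After this patch, drivers/mtd/nand/ keeps only thin stubs
that descend into the new subdirectory: its Kconfig is reduced to a
single 'source "drivers/mtd/nand/rawnand/Kconfig"' line, and its
Makefile to the fragment below (both visible in the diff):

    # drivers/mtd/nand/Makefile after this patch
    obj-y	+= rawnand/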

Signed-off-by: Boris Brezillon <boris.brezillon@free-electrons.com>
---
 drivers/mtd/nand/Kconfig                           |  573 +--
 drivers/mtd/nand/Makefile                          |   63 +-
 drivers/mtd/nand/ams-delta.c                       |  291 --
 drivers/mtd/nand/atmel_nand.c                      | 2481 ----------
 drivers/mtd/nand/atmel_nand_ecc.h                  |  163 -
 drivers/mtd/nand/atmel_nand_nfc.h                  |  103 -
 drivers/mtd/nand/au1550nd.c                        |  518 ---
 drivers/mtd/nand/bcm47xxnflash/Makefile            |    4 -
 drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h     |   25 -
 drivers/mtd/nand/bcm47xxnflash/main.c              |   81 -
 drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c       |  454 --
 drivers/mtd/nand/bf5xx_nand.c                      |  860 ----
 drivers/mtd/nand/brcmnand/Makefile                 |    7 -
 drivers/mtd/nand/brcmnand/bcm63138_nand.c          |  109 -
 drivers/mtd/nand/brcmnand/bcm6368_nand.c           |  142 -
 drivers/mtd/nand/brcmnand/brcmnand.c               | 2561 -----------
 drivers/mtd/nand/brcmnand/brcmnand.h               |   74 -
 drivers/mtd/nand/brcmnand/brcmstb_nand.c           |   44 -
 drivers/mtd/nand/brcmnand/iproc_nand.c             |  160 -
 drivers/mtd/nand/cafe_nand.c                       |  898 ----
 drivers/mtd/nand/cmx270_nand.c                     |  246 -
 drivers/mtd/nand/cs553x_nand.c                     |  358 --
 drivers/mtd/nand/davinci_nand.c                    |  862 ----
 drivers/mtd/nand/denali.c                          | 1663 -------
 drivers/mtd/nand/denali.h                          |  484 --
 drivers/mtd/nand/denali_dt.c                       |  131 -
 drivers/mtd/nand/denali_pci.c                      |  121 -
 drivers/mtd/nand/diskonchip.c                      | 1712 -------
 drivers/mtd/nand/docg4.c                           | 1410 ------
 drivers/mtd/nand/fsl_elbc_nand.c                   |  977 ----
 drivers/mtd/nand/fsl_ifc_nand.c                    | 1095 -----
 drivers/mtd/nand/fsl_upm.c                         |  363 --
 drivers/mtd/nand/fsmc_nand.c                       | 1100 -----
 drivers/mtd/nand/gpio.c                            |  322 --
 drivers/mtd/nand/gpmi-nand/Makefile                |    3 -
 drivers/mtd/nand/gpmi-nand/bch-regs.h              |  128 -
 drivers/mtd/nand/gpmi-nand/gpmi-lib.c              | 1508 ------
 drivers/mtd/nand/gpmi-nand/gpmi-nand.c             | 2193 ---------
 drivers/mtd/nand/gpmi-nand/gpmi-nand.h             |  310 --
 drivers/mtd/nand/gpmi-nand/gpmi-regs.h             |  187 -
 drivers/mtd/nand/hisi504_nand.c                    |  898 ----
 drivers/mtd/nand/jz4740_nand.c                     |  557 ---
 drivers/mtd/nand/jz4780_bch.c                      |  380 --
 drivers/mtd/nand/jz4780_bch.h                      |   43 -
 drivers/mtd/nand/jz4780_nand.c                     |  416 --
 drivers/mtd/nand/lpc32xx_mlc.c                     |  902 ----
 drivers/mtd/nand/lpc32xx_slc.c                     | 1041 -----
 drivers/mtd/nand/mpc5121_nfc.c                     |  855 ----
 drivers/mtd/nand/mtk_ecc.c                         |  530 ---
 drivers/mtd/nand/mtk_ecc.h                         |   50 -
 drivers/mtd/nand/mtk_nand.c                        | 1526 ------
 drivers/mtd/nand/mxc_nand.c                        | 1857 --------
 drivers/mtd/nand/nand_base.c                       | 4840 --------------------
 drivers/mtd/nand/nand_bbt.c                        | 1452 ------
 drivers/mtd/nand/nand_bch.c                        |  234 -
 drivers/mtd/nand/nand_ecc.c                        |  533 ---
 drivers/mtd/nand/nand_ids.c                        |  193 -
 drivers/mtd/nand/nand_timings.c                    |  311 --
 drivers/mtd/nand/nandsim.c                         | 2431 ----------
 drivers/mtd/nand/ndfc.c                            |  286 --
 drivers/mtd/nand/nuc900_nand.c                     |  306 --
 drivers/mtd/nand/omap2.c                           | 2214 ---------
 drivers/mtd/nand/omap_elm.c                        |  578 ---
 drivers/mtd/nand/orion_nand.c                      |  218 -
 drivers/mtd/nand/pasemi_nand.c                     |  233 -
 drivers/mtd/nand/plat_nand.c                       |  145 -
 drivers/mtd/nand/pxa3xx_nand.c                     | 2067 ---------
 drivers/mtd/nand/qcom_nandc.c                      | 2208 ---------
 drivers/mtd/nand/r852.c                            | 1082 -----
 drivers/mtd/nand/r852.h                            |  160 -
 drivers/mtd/nand/rawnand/Kconfig                   |  572 +++
 drivers/mtd/nand/rawnand/Makefile                  |   62 +
 drivers/mtd/nand/rawnand/ams-delta.c               |  291 ++
 drivers/mtd/nand/rawnand/atmel_nand.c              | 2481 ++++++++++
 drivers/mtd/nand/rawnand/atmel_nand_ecc.h          |  163 +
 drivers/mtd/nand/rawnand/atmel_nand_nfc.h          |  103 +
 drivers/mtd/nand/rawnand/au1550nd.c                |  518 +++
 drivers/mtd/nand/rawnand/bcm47xxnflash/Makefile    |    4 +
 .../mtd/nand/rawnand/bcm47xxnflash/bcm47xxnflash.h |   25 +
 drivers/mtd/nand/rawnand/bcm47xxnflash/main.c      |   81 +
 .../mtd/nand/rawnand/bcm47xxnflash/ops_bcm4706.c   |  454 ++
 drivers/mtd/nand/rawnand/bf5xx_nand.c              |  860 ++++
 drivers/mtd/nand/rawnand/brcmnand/Makefile         |    7 +
 drivers/mtd/nand/rawnand/brcmnand/bcm63138_nand.c  |  109 +
 drivers/mtd/nand/rawnand/brcmnand/bcm6368_nand.c   |  142 +
 drivers/mtd/nand/rawnand/brcmnand/brcmnand.c       | 2561 +++++++++++
 drivers/mtd/nand/rawnand/brcmnand/brcmnand.h       |   74 +
 drivers/mtd/nand/rawnand/brcmnand/brcmstb_nand.c   |   44 +
 drivers/mtd/nand/rawnand/brcmnand/iproc_nand.c     |  160 +
 drivers/mtd/nand/rawnand/cafe_nand.c               |  898 ++++
 drivers/mtd/nand/rawnand/cmx270_nand.c             |  246 +
 drivers/mtd/nand/rawnand/cs553x_nand.c             |  358 ++
 drivers/mtd/nand/rawnand/davinci_nand.c            |  862 ++++
 drivers/mtd/nand/rawnand/denali.c                  | 1663 +++++++
 drivers/mtd/nand/rawnand/denali.h                  |  484 ++
 drivers/mtd/nand/rawnand/denali_dt.c               |  131 +
 drivers/mtd/nand/rawnand/denali_pci.c              |  121 +
 drivers/mtd/nand/rawnand/diskonchip.c              | 1712 +++++++
 drivers/mtd/nand/rawnand/docg4.c                   | 1410 ++++++
 drivers/mtd/nand/rawnand/fsl_elbc_nand.c           |  977 ++++
 drivers/mtd/nand/rawnand/fsl_ifc_nand.c            | 1095 +++++
 drivers/mtd/nand/rawnand/fsl_upm.c                 |  363 ++
 drivers/mtd/nand/rawnand/fsmc_nand.c               | 1100 +++++
 drivers/mtd/nand/rawnand/gpio.c                    |  322 ++
 drivers/mtd/nand/rawnand/gpmi-nand/Makefile        |    3 +
 drivers/mtd/nand/rawnand/gpmi-nand/bch-regs.h      |  128 +
 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-lib.c      | 1508 ++++++
 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.c     | 2193 +++++++++
 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.h     |  310 ++
 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-regs.h     |  187 +
 drivers/mtd/nand/rawnand/hisi504_nand.c            |  898 ++++
 drivers/mtd/nand/rawnand/jz4740_nand.c             |  557 +++
 drivers/mtd/nand/rawnand/jz4780_bch.c              |  380 ++
 drivers/mtd/nand/rawnand/jz4780_bch.h              |   43 +
 drivers/mtd/nand/rawnand/jz4780_nand.c             |  416 ++
 drivers/mtd/nand/rawnand/lpc32xx_mlc.c             |  902 ++++
 drivers/mtd/nand/rawnand/lpc32xx_slc.c             | 1041 +++++
 drivers/mtd/nand/rawnand/mpc5121_nfc.c             |  855 ++++
 drivers/mtd/nand/rawnand/mtk_ecc.c                 |  530 +++
 drivers/mtd/nand/rawnand/mtk_ecc.h                 |   50 +
 drivers/mtd/nand/rawnand/mtk_nand.c                | 1526 ++++++
 drivers/mtd/nand/rawnand/mxc_nand.c                | 1857 ++++++++
 drivers/mtd/nand/rawnand/nand_base.c               | 4840 ++++++++++++++++++++
 drivers/mtd/nand/rawnand/nand_bbt.c                | 1452 ++++++
 drivers/mtd/nand/rawnand/nand_bch.c                |  234 +
 drivers/mtd/nand/rawnand/nand_ecc.c                |  533 +++
 drivers/mtd/nand/rawnand/nand_ids.c                |  193 +
 drivers/mtd/nand/rawnand/nand_timings.c            |  311 ++
 drivers/mtd/nand/rawnand/nandsim.c                 | 2431 ++++++++++
 drivers/mtd/nand/rawnand/ndfc.c                    |  286 ++
 drivers/mtd/nand/rawnand/nuc900_nand.c             |  306 ++
 drivers/mtd/nand/rawnand/omap2.c                   | 2214 +++++++++
 drivers/mtd/nand/rawnand/omap_elm.c                |  578 +++
 drivers/mtd/nand/rawnand/orion_nand.c              |  218 +
 drivers/mtd/nand/rawnand/pasemi_nand.c             |  233 +
 drivers/mtd/nand/rawnand/plat_nand.c               |  145 +
 drivers/mtd/nand/rawnand/pxa3xx_nand.c             | 2067 +++++++++
 drivers/mtd/nand/rawnand/qcom_nandc.c              | 2208 +++++++++
 drivers/mtd/nand/rawnand/r852.c                    | 1082 +++++
 drivers/mtd/nand/rawnand/r852.h                    |  160 +
 drivers/mtd/nand/rawnand/s3c2410.c                 | 1165 +++++
 drivers/mtd/nand/rawnand/sh_flctl.c                | 1251 +++++
 drivers/mtd/nand/rawnand/sharpsl.c                 |  235 +
 drivers/mtd/nand/rawnand/sm_common.c               |  202 +
 drivers/mtd/nand/rawnand/sm_common.h               |   61 +
 drivers/mtd/nand/rawnand/socrates_nand.c           |  251 +
 drivers/mtd/nand/rawnand/sunxi_nand.c              | 2291 +++++++++
 drivers/mtd/nand/rawnand/tmio_nand.c               |  510 +++
 drivers/mtd/nand/rawnand/txx9ndfmc.c               |  423 ++
 drivers/mtd/nand/rawnand/vf610_nfc.c               |  846 ++++
 drivers/mtd/nand/rawnand/xway_nand.c               |  248 +
 drivers/mtd/nand/s3c2410.c                         | 1165 -----
 drivers/mtd/nand/sh_flctl.c                        | 1251 -----
 drivers/mtd/nand/sharpsl.c                         |  235 -
 drivers/mtd/nand/sm_common.c                       |  202 -
 drivers/mtd/nand/sm_common.h                       |   61 -
 drivers/mtd/nand/socrates_nand.c                   |  251 -
 drivers/mtd/nand/sunxi_nand.c                      | 2291 ---------
 drivers/mtd/nand/tmio_nand.c                       |  510 ---
 drivers/mtd/nand/txx9ndfmc.c                       |  423 --
 drivers/mtd/nand/vf610_nfc.c                       |  846 ----
 drivers/mtd/nand/xway_nand.c                       |  248 -
 drivers/mtd/sm_ftl.c                               |    2 +-
 163 files changed, 60814 insertions(+), 60812 deletions(-)
 delete mode 100644 drivers/mtd/nand/ams-delta.c
 delete mode 100644 drivers/mtd/nand/atmel_nand.c
 delete mode 100644 drivers/mtd/nand/atmel_nand_ecc.h
 delete mode 100644 drivers/mtd/nand/atmel_nand_nfc.h
 delete mode 100644 drivers/mtd/nand/au1550nd.c
 delete mode 100644 drivers/mtd/nand/bcm47xxnflash/Makefile
 delete mode 100644 drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
 delete mode 100644 drivers/mtd/nand/bcm47xxnflash/main.c
 delete mode 100644 drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
 delete mode 100644 drivers/mtd/nand/bf5xx_nand.c
 delete mode 100644 drivers/mtd/nand/brcmnand/Makefile
 delete mode 100644 drivers/mtd/nand/brcmnand/bcm63138_nand.c
 delete mode 100644 drivers/mtd/nand/brcmnand/bcm6368_nand.c
 delete mode 100644 drivers/mtd/nand/brcmnand/brcmnand.c
 delete mode 100644 drivers/mtd/nand/brcmnand/brcmnand.h
 delete mode 100644 drivers/mtd/nand/brcmnand/brcmstb_nand.c
 delete mode 100644 drivers/mtd/nand/brcmnand/iproc_nand.c
 delete mode 100644 drivers/mtd/nand/cafe_nand.c
 delete mode 100644 drivers/mtd/nand/cmx270_nand.c
 delete mode 100644 drivers/mtd/nand/cs553x_nand.c
 delete mode 100644 drivers/mtd/nand/davinci_nand.c
 delete mode 100644 drivers/mtd/nand/denali.c
 delete mode 100644 drivers/mtd/nand/denali.h
 delete mode 100644 drivers/mtd/nand/denali_dt.c
 delete mode 100644 drivers/mtd/nand/denali_pci.c
 delete mode 100644 drivers/mtd/nand/diskonchip.c
 delete mode 100644 drivers/mtd/nand/docg4.c
 delete mode 100644 drivers/mtd/nand/fsl_elbc_nand.c
 delete mode 100644 drivers/mtd/nand/fsl_ifc_nand.c
 delete mode 100644 drivers/mtd/nand/fsl_upm.c
 delete mode 100644 drivers/mtd/nand/fsmc_nand.c
 delete mode 100644 drivers/mtd/nand/gpio.c
 delete mode 100644 drivers/mtd/nand/gpmi-nand/Makefile
 delete mode 100644 drivers/mtd/nand/gpmi-nand/bch-regs.h
 delete mode 100644 drivers/mtd/nand/gpmi-nand/gpmi-lib.c
 delete mode 100644 drivers/mtd/nand/gpmi-nand/gpmi-nand.c
 delete mode 100644 drivers/mtd/nand/gpmi-nand/gpmi-nand.h
 delete mode 100644 drivers/mtd/nand/gpmi-nand/gpmi-regs.h
 delete mode 100644 drivers/mtd/nand/hisi504_nand.c
 delete mode 100644 drivers/mtd/nand/jz4740_nand.c
 delete mode 100644 drivers/mtd/nand/jz4780_bch.c
 delete mode 100644 drivers/mtd/nand/jz4780_bch.h
 delete mode 100644 drivers/mtd/nand/jz4780_nand.c
 delete mode 100644 drivers/mtd/nand/lpc32xx_mlc.c
 delete mode 100644 drivers/mtd/nand/lpc32xx_slc.c
 delete mode 100644 drivers/mtd/nand/mpc5121_nfc.c
 delete mode 100644 drivers/mtd/nand/mtk_ecc.c
 delete mode 100644 drivers/mtd/nand/mtk_ecc.h
 delete mode 100644 drivers/mtd/nand/mtk_nand.c
 delete mode 100644 drivers/mtd/nand/mxc_nand.c
 delete mode 100644 drivers/mtd/nand/nand_base.c
 delete mode 100644 drivers/mtd/nand/nand_bbt.c
 delete mode 100644 drivers/mtd/nand/nand_bch.c
 delete mode 100644 drivers/mtd/nand/nand_ecc.c
 delete mode 100644 drivers/mtd/nand/nand_ids.c
 delete mode 100644 drivers/mtd/nand/nand_timings.c
 delete mode 100644 drivers/mtd/nand/nandsim.c
 delete mode 100644 drivers/mtd/nand/ndfc.c
 delete mode 100644 drivers/mtd/nand/nuc900_nand.c
 delete mode 100644 drivers/mtd/nand/omap2.c
 delete mode 100644 drivers/mtd/nand/omap_elm.c
 delete mode 100644 drivers/mtd/nand/orion_nand.c
 delete mode 100644 drivers/mtd/nand/pasemi_nand.c
 delete mode 100644 drivers/mtd/nand/plat_nand.c
 delete mode 100644 drivers/mtd/nand/pxa3xx_nand.c
 delete mode 100644 drivers/mtd/nand/qcom_nandc.c
 delete mode 100644 drivers/mtd/nand/r852.c
 delete mode 100644 drivers/mtd/nand/r852.h
 create mode 100644 drivers/mtd/nand/rawnand/Kconfig
 create mode 100644 drivers/mtd/nand/rawnand/Makefile
 create mode 100644 drivers/mtd/nand/rawnand/ams-delta.c
 create mode 100644 drivers/mtd/nand/rawnand/atmel_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/atmel_nand_ecc.h
 create mode 100644 drivers/mtd/nand/rawnand/atmel_nand_nfc.h
 create mode 100644 drivers/mtd/nand/rawnand/au1550nd.c
 create mode 100644 drivers/mtd/nand/rawnand/bcm47xxnflash/Makefile
 create mode 100644 drivers/mtd/nand/rawnand/bcm47xxnflash/bcm47xxnflash.h
 create mode 100644 drivers/mtd/nand/rawnand/bcm47xxnflash/main.c
 create mode 100644 drivers/mtd/nand/rawnand/bcm47xxnflash/ops_bcm4706.c
 create mode 100644 drivers/mtd/nand/rawnand/bf5xx_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/Makefile
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/bcm63138_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/bcm6368_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/brcmnand.c
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/brcmnand.h
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/brcmstb_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/brcmnand/iproc_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/cafe_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/cmx270_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/cs553x_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/davinci_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/denali.c
 create mode 100644 drivers/mtd/nand/rawnand/denali.h
 create mode 100644 drivers/mtd/nand/rawnand/denali_dt.c
 create mode 100644 drivers/mtd/nand/rawnand/denali_pci.c
 create mode 100644 drivers/mtd/nand/rawnand/diskonchip.c
 create mode 100644 drivers/mtd/nand/rawnand/docg4.c
 create mode 100644 drivers/mtd/nand/rawnand/fsl_elbc_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/fsl_ifc_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/fsl_upm.c
 create mode 100644 drivers/mtd/nand/rawnand/fsmc_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/gpio.c
 create mode 100644 drivers/mtd/nand/rawnand/gpmi-nand/Makefile
 create mode 100644 drivers/mtd/nand/rawnand/gpmi-nand/bch-regs.h
 create mode 100644 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-lib.c
 create mode 100644 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.c
 create mode 100644 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.h
 create mode 100644 drivers/mtd/nand/rawnand/gpmi-nand/gpmi-regs.h
 create mode 100644 drivers/mtd/nand/rawnand/hisi504_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/jz4740_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/jz4780_bch.c
 create mode 100644 drivers/mtd/nand/rawnand/jz4780_bch.h
 create mode 100644 drivers/mtd/nand/rawnand/jz4780_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/lpc32xx_mlc.c
 create mode 100644 drivers/mtd/nand/rawnand/lpc32xx_slc.c
 create mode 100644 drivers/mtd/nand/rawnand/mpc5121_nfc.c
 create mode 100644 drivers/mtd/nand/rawnand/mtk_ecc.c
 create mode 100644 drivers/mtd/nand/rawnand/mtk_ecc.h
 create mode 100644 drivers/mtd/nand/rawnand/mtk_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/mxc_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/nand_base.c
 create mode 100644 drivers/mtd/nand/rawnand/nand_bbt.c
 create mode 100644 drivers/mtd/nand/rawnand/nand_bch.c
 create mode 100644 drivers/mtd/nand/rawnand/nand_ecc.c
 create mode 100644 drivers/mtd/nand/rawnand/nand_ids.c
 create mode 100644 drivers/mtd/nand/rawnand/nand_timings.c
 create mode 100644 drivers/mtd/nand/rawnand/nandsim.c
 create mode 100644 drivers/mtd/nand/rawnand/ndfc.c
 create mode 100644 drivers/mtd/nand/rawnand/nuc900_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/omap2.c
 create mode 100644 drivers/mtd/nand/rawnand/omap_elm.c
 create mode 100644 drivers/mtd/nand/rawnand/orion_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/pasemi_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/plat_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/pxa3xx_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/qcom_nandc.c
 create mode 100644 drivers/mtd/nand/rawnand/r852.c
 create mode 100644 drivers/mtd/nand/rawnand/r852.h
 create mode 100644 drivers/mtd/nand/rawnand/s3c2410.c
 create mode 100644 drivers/mtd/nand/rawnand/sh_flctl.c
 create mode 100644 drivers/mtd/nand/rawnand/sharpsl.c
 create mode 100644 drivers/mtd/nand/rawnand/sm_common.c
 create mode 100644 drivers/mtd/nand/rawnand/sm_common.h
 create mode 100644 drivers/mtd/nand/rawnand/socrates_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/sunxi_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/tmio_nand.c
 create mode 100644 drivers/mtd/nand/rawnand/txx9ndfmc.c
 create mode 100644 drivers/mtd/nand/rawnand/vf610_nfc.c
 create mode 100644 drivers/mtd/nand/rawnand/xway_nand.c
 delete mode 100644 drivers/mtd/nand/s3c2410.c
 delete mode 100644 drivers/mtd/nand/sh_flctl.c
 delete mode 100644 drivers/mtd/nand/sharpsl.c
 delete mode 100644 drivers/mtd/nand/sm_common.c
 delete mode 100644 drivers/mtd/nand/sm_common.h
 delete mode 100644 drivers/mtd/nand/socrates_nand.c
 delete mode 100644 drivers/mtd/nand/sunxi_nand.c
 delete mode 100644 drivers/mtd/nand/tmio_nand.c
 delete mode 100644 drivers/mtd/nand/txx9ndfmc.c
 delete mode 100644 drivers/mtd/nand/vf610_nfc.c
 delete mode 100644 drivers/mtd/nand/xway_nand.c

Patch

diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7b7a887b4709..7db2386af665 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,572 +1 @@ 
-config MTD_NAND_ECC
-	tristate
-
-config MTD_NAND_ECC_SMC
-	bool "NAND ECC Smart Media byte order"
-	depends on MTD_NAND_ECC
-	default n
-	help
-	  Software ECC according to the Smart Media Specification.
-	  The original Linux implementation had byte 0 and 1 swapped.
-
-
-menuconfig MTD_NAND
-	tristate "NAND Device Support"
-	depends on MTD
-	select MTD_NAND_IDS
-	select MTD_NAND_ECC
-	help
-	  This enables support for accessing all type of NAND flash
-	  devices. For further information see
-	  <http://www.linux-mtd.infradead.org/doc/nand.html>.
-
-if MTD_NAND
-
-config MTD_NAND_BCH
-	tristate
-	select BCH
-	depends on MTD_NAND_ECC_BCH
-	default MTD_NAND
-
-config MTD_NAND_ECC_BCH
-	bool "Support software BCH ECC"
-	default n
-	help
-	  This enables support for software BCH error correction. Binary BCH
-	  codes are more powerful and cpu intensive than traditional Hamming
-	  ECC codes. They are used with NAND devices requiring more than 1 bit
-	  of error correction.
-
-config MTD_SM_COMMON
-	tristate
-	default n
-
-config MTD_NAND_DENALI
-	tristate
-
-config MTD_NAND_DENALI_PCI
-        tristate "Support Denali NAND controller on Intel Moorestown"
-	select MTD_NAND_DENALI
-	depends on HAS_DMA && PCI
-        help
-          Enable the driver for NAND flash on Intel Moorestown, using the
-          Denali NAND controller core.
-
-config MTD_NAND_DENALI_DT
-	tristate "Support Denali NAND controller as a DT device"
-	select MTD_NAND_DENALI
-	depends on HAS_DMA && HAVE_CLK && OF
-	help
-	  Enable the driver for NAND flash on platforms using a Denali NAND
-	  controller as a DT device.
-
-config MTD_NAND_DENALI_SCRATCH_REG_ADDR
-        hex "Denali NAND size scratch register address"
-        default "0xFF108018"
-        depends on MTD_NAND_DENALI_PCI
-        help
-          Some platforms place the NAND chip size in a scratch register
-          because (some versions of) the driver aren't able to automatically
-          determine the size of certain chips. Set the address of the
-          scratch register here to enable this feature. On Intel Moorestown
-          boards, the scratch register is at 0xFF108018.
-
-config MTD_NAND_GPIO
-	tristate "GPIO assisted NAND Flash driver"
-	depends on GPIOLIB || COMPILE_TEST
-	depends on HAS_IOMEM
-	help
-	  This enables a NAND flash driver where control signals are
-	  connected to GPIO pins, and commands and data are communicated
-	  via a memory mapped interface.
-
-config MTD_NAND_AMS_DELTA
-	tristate "NAND Flash device on Amstrad E3"
-	depends on MACH_AMS_DELTA
-	default y
-	help
-	  Support for NAND flash on Amstrad E3 (Delta).
-
-config MTD_NAND_OMAP2
-	tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
-	depends on (ARCH_OMAP2PLUS || ARCH_KEYSTONE)
-	help
-          Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
-	  and Keystone platforms.
-
-config MTD_NAND_OMAP_BCH
-	depends on MTD_NAND_OMAP2
-	bool "Support hardware based BCH error correction"
-	default n
-	select BCH
-	help
-	  This config enables the ELM hardware engine, which can be used to
-	  locate and correct errors when using BCH ECC scheme. This offloads
-	  the cpu from doing ECC error searching and correction. However some
-	  legacy OMAP families like OMAP2xxx, OMAP3xxx do not have ELM engine
-	  so this is optional for them.
-
-config MTD_NAND_OMAP_BCH_BUILD
-	def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
-
-config MTD_NAND_IDS
-	tristate
-
-config MTD_NAND_RICOH
-	tristate "Ricoh xD card reader"
-	default n
-	depends on PCI
-	select MTD_SM_COMMON
-	help
-	  Enable support for Ricoh R5C852 xD card reader
-	  You also need to enable ether
-	  NAND SSFDC (SmartMedia) read only translation layer' or new
-	  expermental, readwrite
-	  'SmartMedia/xD new translation layer'
-
-config MTD_NAND_AU1550
-	tristate "Au1550/1200 NAND support"
-	depends on MIPS_ALCHEMY
-	help
-	  This enables the driver for the NAND flash controller on the
-	  AMD/Alchemy 1550 SOC.
-
-config MTD_NAND_BF5XX
-	tristate "Blackfin on-chip NAND Flash Controller driver"
-	depends on BF54x || BF52x
-	help
-	  This enables the Blackfin on-chip NAND flash controller
-
-	  No board specific support is done by this driver, each board
-	  must advertise a platform_device for the driver to attach.
-
-	  This driver can also be built as a module. If so, the module
-	  will be called bf5xx-nand.
-
-config MTD_NAND_BF5XX_HWECC
-	bool "BF5XX NAND Hardware ECC"
-	default y
-	depends on MTD_NAND_BF5XX
-	help
-	  Enable the use of the BF5XX's internal ECC generator when
-	  using NAND.
-
-config MTD_NAND_BF5XX_BOOTROM_ECC
-	bool "Use Blackfin BootROM ECC Layout"
-	default n
-	depends on MTD_NAND_BF5XX_HWECC
-	help
-	  If you wish to modify NAND pages and allow the Blackfin on-chip
-	  BootROM to boot from them, say Y here.  This is only necessary
-	  if you are booting U-Boot out of NAND and you wish to update
-	  U-Boot from Linux' userspace.  Otherwise, you should say N here.
-
-	  If unsure, say N.
-
-config MTD_NAND_S3C2410
-	tristate "NAND Flash support for Samsung S3C SoCs"
-	depends on ARCH_S3C24XX || ARCH_S3C64XX
-	help
-	  This enables the NAND flash controller on the S3C24xx and S3C64xx
-	  SoCs
-
-	  No board specific support is done by this driver, each board
-	  must advertise a platform_device for the driver to attach.
-
-config MTD_NAND_S3C2410_DEBUG
-	bool "Samsung S3C NAND driver debug"
-	depends on MTD_NAND_S3C2410
-	help
-	  Enable debugging of the S3C NAND driver
-
-config MTD_NAND_S3C2410_HWECC
-	bool "Samsung S3C NAND Hardware ECC"
-	depends on MTD_NAND_S3C2410
-	help
-	  Enable the use of the controller's internal ECC generator when
-	  using NAND. Early versions of the chips have had problems with
-	  incorrect ECC generation, and if using these, the default of
-	  software ECC is preferable.
-
-config MTD_NAND_NDFC
-	tristate "NDFC NanD Flash Controller"
-	depends on 4xx
-	select MTD_NAND_ECC_SMC
-	help
-	 NDFC Nand Flash Controllers are integrated in IBM/AMCC's 4xx SoCs
-
-config MTD_NAND_S3C2410_CLKSTOP
-	bool "Samsung S3C NAND IDLE clock stop"
-	depends on MTD_NAND_S3C2410
-	default n
-	help
-	  Stop the clock to the NAND controller when there is no chip
-	  selected to save power. This will mean there is a small delay
-	  when the is NAND chip selected or released, but will save
-	  approximately 5mA of power when there is nothing happening.
-
-config MTD_NAND_DISKONCHIP
-	tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
-	depends on HAS_IOMEM
-	select REED_SOLOMON
-	select REED_SOLOMON_DEC16
-	help
-	  This is a reimplementation of M-Systems DiskOnChip 2000,
-	  Millennium and Millennium Plus as a standard NAND device driver,
-	  as opposed to the earlier self-contained MTD device drivers.
-	  This should enable, among other things, proper JFFS2 operation on
-	  these devices.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
-        bool "Advanced detection options for DiskOnChip"
-        depends on MTD_NAND_DISKONCHIP
-        help
-          This option allows you to specify nonstandard address at which to
-          probe for a DiskOnChip, or to change the detection options.  You
-          are unlikely to need any of this unless you are using LinuxBIOS.
-          Say 'N'.
-
-config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
-        hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
-        depends on MTD_NAND_DISKONCHIP
-        default "0"
-        ---help---
-        By default, the probe for DiskOnChip devices will look for a
-        DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
-        This option allows you to specify a single address at which to probe
-        for the device, which is useful if you have other devices in that
-        range which get upset when they are probed.
-
-        (Note that on PowerPC, the normal probe will only check at
-        0xE4000000.)
-
-        Normally, you should leave this set to zero, to allow the probe at
-        the normal addresses.
-
-config MTD_NAND_DISKONCHIP_PROBE_HIGH
-        bool "Probe high addresses"
-        depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
-        help
-          By default, the probe for DiskOnChip devices will look for a
-          DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
-          This option changes to make it probe between 0xFFFC8000 and
-          0xFFFEE000.  Unless you are using LinuxBIOS, this is unlikely to be
-          useful to you.  Say 'N'.
-
-config MTD_NAND_DISKONCHIP_BBTWRITE
-	bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
-	depends on MTD_NAND_DISKONCHIP
-	help
-	  On DiskOnChip devices shipped with the INFTL filesystem (Millennium
-	  and 2000 TSOP/Alon), Linux reserves some space at the end of the
-	  device for the Bad Block Table (BBT).  If you have existing INFTL
-	  data on your device (created by non-Linux tools such as M-Systems'
-	  DOS drivers), your data might overlap the area Linux wants to use for
-	  the BBT.  If this is a concern for you, leave this option disabled and
-	  Linux will not write BBT data into this area.
-	  The downside of leaving this option disabled is that if bad blocks
-	  are detected by Linux, they will not be recorded in the BBT, which
-	  could cause future problems.
-	  Once you enable this option, new filesystems (INFTL or others, created
-	  in Linux or other operating systems) will not use the reserved area.
-	  The only reason not to enable this option is to prevent damage to
-	  preexisting filesystems.
-	  Even if you leave this disabled, you can enable BBT writes at module
-	  load time (assuming you build diskonchip as a module) with the module
-	  parameter "inftl_bbt_write=1".
-
-config MTD_NAND_DOCG4
-	tristate "Support for DiskOnChip G4"
-	depends on HAS_IOMEM
-	select BCH
-	select BITREVERSE
-	help
-	  Support for diskonchip G4 nand flash, found in various smartphones and
-	  PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
-	  Portege G900, Asus P526, and O2 XDA Zinc.
-
-	  With this driver you will be able to use UBI and create a ubifs on the
-	  device, so you may wish to consider enabling UBI and UBIFS as well.
-
-	  These devices ship with the Mys/Sandisk SAFTL formatting, for which
-	  there is currently no mtd parser, so you may want to use command line
-	  partitioning to segregate write-protected blocks. On the Treo680, the
-	  first five erase blocks (256KiB each) are write-protected, followed
-	  by the block containing the saftl partition table.  This is probably
-	  typical.
-
-config MTD_NAND_SHARPSL
-	tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
-	depends on ARCH_PXA
-
-config MTD_NAND_CAFE
-	tristate "NAND support for OLPC CAFÉ chip"
-	depends on PCI
-	select REED_SOLOMON
-	select REED_SOLOMON_DEC16
-	help
-	  Use NAND flash attached to the CAFÉ chip designed for the OLPC
-	  laptop.
-
-config MTD_NAND_CS553X
-	tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
-	depends on X86_32
-	depends on !UML && HAS_IOMEM
-	help
-	  The CS553x companion chips for the AMD Geode processor
-	  include NAND flash controllers with built-in hardware ECC
-	  capabilities; enabling this option will allow you to use
-	  these. The driver will check the MSRs to verify that the
-	  controller is enabled for NAND, and currently requires that
-	  the controller be in MMIO mode.
-
-	  If you say "m", the module will be called cs553x_nand.
-
-config MTD_NAND_ATMEL
-	tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
-	depends on ARCH_AT91 || AVR32
-	help
-	  Enables support for NAND Flash / Smart Media Card interface
-	  on Atmel AT91 and AVR32 processors.
-
-config MTD_NAND_PXA3xx
-	tristate "NAND support on PXA3xx and Armada 370/XP"
-	depends on PXA3xx || ARCH_MMP || PLAT_ORION
-	help
-	  This enables the driver for the NAND flash device found on
-	  PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
-
-config MTD_NAND_SLC_LPC32XX
-	tristate "NXP LPC32xx SLC Controller"
-	depends on ARCH_LPC32XX
-	help
-	  Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
-	  chips) NAND controller. This is the default for the PHYTEC 3250
-	  reference board which contains a NAND256R3A2CZA6 chip.
-
-	  Please check the actual NAND chip connected and its support
-	  by the SLC NAND controller.
-
-config MTD_NAND_MLC_LPC32XX
-	tristate "NXP LPC32xx MLC Controller"
-	depends on ARCH_LPC32XX
-	help
-	  Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
-	  controller. This is the default for the WORK92105 controller
-	  board.
-
-	  Please check the actual NAND chip connected and its support
-	  by the MLC NAND controller.
-
-config MTD_NAND_CM_X270
-	tristate "Support for NAND Flash on CM-X270 modules"
-	depends on MACH_ARMCORE
-
-config MTD_NAND_PASEMI
-	tristate "NAND support for PA Semi PWRficient"
-	depends on PPC_PASEMI
-	help
-	  Enables support for NAND Flash interface on PA Semi PWRficient
-	  based boards
-
-config MTD_NAND_TMIO
-	tristate "NAND Flash device on Toshiba Mobile IO Controller"
-	depends on MFD_TMIO
-	help
-	  Support for NAND flash connected to a Toshiba Mobile IO
-	  Controller in some PDAs, including the Sharp SL6000x.
-
-config MTD_NAND_NANDSIM
-	tristate "Support for NAND Flash Simulator"
-	help
-	  The simulator may simulate various NAND flash chips for the
-	  MTD nand layer.
-
-config MTD_NAND_GPMI_NAND
-        tristate "GPMI NAND Flash Controller driver"
-        depends on MTD_NAND && MXS_DMA
-        help
-	 Enables NAND Flash support for IMX23, IMX28 or IMX6.
-	 The GPMI controller is very powerful, with the help of BCH
-	 module, it can do the hardware ECC. The GPMI supports several
-	 NAND flashs at the same time. The GPMI may conflicts with other
-	 block, such as SD card. So pay attention to it when you enable
-	 the GPMI.
-
-config MTD_NAND_BRCMNAND
-	tristate "Broadcom STB NAND controller"
-	depends on ARM || ARM64 || MIPS
-	help
-	  Enables the Broadcom NAND controller driver. The controller was
-	  originally designed for Set-Top Box but is used on various BCM7xxx,
-	  BCM3xxx, BCM63xxx, iProc/Cygnus and more.
-
-config MTD_NAND_BCM47XXNFLASH
-	tristate "Support for NAND flash on BCM4706 BCMA bus"
-	depends on BCMA_NFLASH
-	help
-	  BCMA bus can have various flash memories attached, they are
-	  registered by bcma as platform devices. This enables driver for
-	  NAND flash memories. For now only BCM4706 is supported.
-
-config MTD_NAND_PLATFORM
-	tristate "Support for generic platform NAND driver"
-	depends on HAS_IOMEM
-	help
-	  This implements a generic NAND driver for on-SOC platform
-	  devices. You will need to provide platform-specific functions
-	  via platform_data.
-
-config MTD_NAND_ORION
-	tristate "NAND Flash support for Marvell Orion SoC"
-	depends on PLAT_ORION
-	help
-	  This enables the NAND flash controller on Orion machines.
-
-	  No board specific support is done by this driver, each board
-	  must advertise a platform_device for the driver to attach.
-
-config MTD_NAND_FSL_ELBC
-	tristate "NAND support for Freescale eLBC controllers"
-	depends on FSL_SOC
-	select FSL_LBC
-	help
-	  Various Freescale chips, including the 8313, include a NAND Flash
-	  Controller Module with built-in hardware ECC capabilities.
-	  Enabling this option will enable you to use this to control
-	  external NAND devices.
-
-config MTD_NAND_FSL_IFC
-	tristate "NAND support for Freescale IFC controller"
-	depends on FSL_SOC || ARCH_LAYERSCAPE
-	select FSL_IFC
-	select MEMORY
-	help
-	  Various Freescale chips e.g P1010, include a NAND Flash machine
-	  with built-in hardware ECC capabilities.
-	  Enabling this option will enable you to use this to control
-	  external NAND devices.
-
-config MTD_NAND_FSL_UPM
-	tristate "Support for NAND on Freescale UPM"
-	depends on PPC_83xx || PPC_85xx
-	select FSL_LBC
-	help
-	  Enables support for NAND Flash chips wired onto Freescale PowerPC
-	  processor localbus with User-Programmable Machine support.
-
-config MTD_NAND_MPC5121_NFC
-	tristate "MPC5121 built-in NAND Flash Controller support"
-	depends on PPC_MPC512x
-	help
-	  This enables the driver for the NAND flash controller on the
-	  MPC5121 SoC.
-
-config MTD_NAND_VF610_NFC
-	tristate "Support for Freescale NFC for VF610/MPC5125"
-	depends on (SOC_VF610 || COMPILE_TEST)
-	depends on HAS_IOMEM
-	help
-	  Enables support for NAND Flash Controller on some Freescale
-	  processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
-	  The driver supports a maximum 2k page size. With 2k pages and
-	  64 bytes or more of OOB, hardware ECC with up to 32-bit error
-	  correction is supported. Hardware ECC is only enabled through
-	  device tree.
-
-config MTD_NAND_MXC
-	tristate "MXC NAND support"
-	depends on ARCH_MXC
-	help
-	  This enables the driver for the NAND flash controller on the
-	  MXC processors.
-
-config MTD_NAND_SH_FLCTL
-	tristate "Support for NAND on Renesas SuperH FLCTL"
-	depends on SUPERH || COMPILE_TEST
-	depends on HAS_IOMEM
-	depends on HAS_DMA
-	help
-	  Several Renesas SuperH CPU has FLCTL. This option enables support
-	  for NAND Flash using FLCTL.
-
-config MTD_NAND_DAVINCI
-        tristate "Support NAND on DaVinci/Keystone SoC"
-        depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
-        help
-	  Enable the driver for NAND flash chips on Texas Instruments
-	  DaVinci/Keystone processors.
-
-config MTD_NAND_TXX9NDFMC
-	tristate "NAND Flash support for TXx9 SoC"
-	depends on SOC_TX4938 || SOC_TX4939
-	help
-	  This enables the NAND flash controller on the TXx9 SoCs.
-
-config MTD_NAND_SOCRATES
-	tristate "Support for NAND on Socrates board"
-	depends on SOCRATES
-	help
-	  Enables support for NAND Flash chips wired onto Socrates board.
-
-config MTD_NAND_NUC900
-	tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
-	depends on ARCH_W90X900
-	help
-	  This enables the driver for the NAND Flash on evaluation board based
-	  on w90p910 / NUC9xx.
-
-config MTD_NAND_JZ4740
-	tristate "Support for JZ4740 SoC NAND controller"
-	depends on MACH_JZ4740
-	help
-		Enables support for NAND Flash on JZ4740 SoC based boards.
-
-config MTD_NAND_JZ4780
-	tristate "Support for NAND on JZ4780 SoC"
-	depends on MACH_JZ4780 && JZ4780_NEMC
-	help
-	  Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
-	  based boards, using the BCH controller for hardware error correction.
-
-config MTD_NAND_FSMC
-	tristate "Support for NAND on ST Micros FSMC"
-	depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
-	help
-	  Enables support for NAND Flash chips on the ST Microelectronics
-	  Flexible Static Memory Controller (FSMC)
-
-config MTD_NAND_XWAY
-	tristate "Support for NAND on Lantiq XWAY SoC"
-	depends on LANTIQ && SOC_TYPE_XWAY
-	help
-	  Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
-	  to the External Bus Unit (EBU).
-
-config MTD_NAND_SUNXI
-	tristate "Support for NAND on Allwinner SoCs"
-	depends on ARCH_SUNXI
-	help
-	  Enables support for NAND Flash chips on Allwinner SoCs.
-
-config MTD_NAND_HISI504
-	tristate "Support for NAND controller on Hisilicon SoC Hip04"
-	depends on HAS_DMA
-	help
-	  Enables support for NAND controller on Hisilicon SoC Hip04.
-
-config MTD_NAND_QCOM
-	tristate "Support for NAND on QCOM SoCs"
-	depends on ARCH_QCOM
-	help
-	  Enables support for NAND flash chips on SoCs containing the EBI2 NAND
-	  controller. This controller is found on IPQ806x SoC.
-
-config MTD_NAND_MTK
-	tristate "Support for NAND controller on MTK SoCs"
-	depends on HAS_DMA
-	help
-	  Enables support for NAND controller on MTK SoCs.
-	  This controller is found on mt27xx, mt81xx, mt65xx SoCs.
-
-endif # MTD_NAND
+source "drivers/mtd/nand/rawnand/Kconfig"
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index cafde6f3d957..6553278cdc6d 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -1,62 +1 @@ 
-#
-# linux/drivers/nand/Makefile
-#
-
-obj-$(CONFIG_MTD_NAND)			+= nand.o
-obj-$(CONFIG_MTD_NAND_ECC)		+= nand_ecc.o
-obj-$(CONFIG_MTD_NAND_BCH)		+= nand_bch.o
-obj-$(CONFIG_MTD_NAND_IDS)		+= nand_ids.o
-obj-$(CONFIG_MTD_SM_COMMON) 		+= sm_common.o
-
-obj-$(CONFIG_MTD_NAND_CAFE)		+= cafe_nand.o
-obj-$(CONFIG_MTD_NAND_AMS_DELTA)	+= ams-delta.o
-obj-$(CONFIG_MTD_NAND_DENALI)		+= denali.o
-obj-$(CONFIG_MTD_NAND_DENALI_PCI)	+= denali_pci.o
-obj-$(CONFIG_MTD_NAND_DENALI_DT)	+= denali_dt.o
-obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
-obj-$(CONFIG_MTD_NAND_BF5XX)		+= bf5xx_nand.o
-obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
-obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
-obj-$(CONFIG_MTD_NAND_DISKONCHIP)	+= diskonchip.o
-obj-$(CONFIG_MTD_NAND_DOCG4)		+= docg4.o
-obj-$(CONFIG_MTD_NAND_FSMC)		+= fsmc_nand.o
-obj-$(CONFIG_MTD_NAND_SHARPSL)		+= sharpsl.o
-obj-$(CONFIG_MTD_NAND_NANDSIM)		+= nandsim.o
-obj-$(CONFIG_MTD_NAND_CS553X)		+= cs553x_nand.o
-obj-$(CONFIG_MTD_NAND_NDFC)		+= ndfc.o
-obj-$(CONFIG_MTD_NAND_ATMEL)		+= atmel_nand.o
-obj-$(CONFIG_MTD_NAND_GPIO)		+= gpio.o
-omap2_nand-objs := omap2.o
-obj-$(CONFIG_MTD_NAND_OMAP2) 		+= omap2_nand.o
-obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD)	+= omap_elm.o
-obj-$(CONFIG_MTD_NAND_CM_X270)		+= cmx270_nand.o
-obj-$(CONFIG_MTD_NAND_PXA3xx)		+= pxa3xx_nand.o
-obj-$(CONFIG_MTD_NAND_TMIO)		+= tmio_nand.o
-obj-$(CONFIG_MTD_NAND_PLATFORM)		+= plat_nand.o
-obj-$(CONFIG_MTD_NAND_PASEMI)		+= pasemi_nand.o
-obj-$(CONFIG_MTD_NAND_ORION)		+= orion_nand.o
-obj-$(CONFIG_MTD_NAND_FSL_ELBC)		+= fsl_elbc_nand.o
-obj-$(CONFIG_MTD_NAND_FSL_IFC)		+= fsl_ifc_nand.o
-obj-$(CONFIG_MTD_NAND_FSL_UPM)		+= fsl_upm.o
-obj-$(CONFIG_MTD_NAND_SLC_LPC32XX)      += lpc32xx_slc.o
-obj-$(CONFIG_MTD_NAND_MLC_LPC32XX)      += lpc32xx_mlc.o
-obj-$(CONFIG_MTD_NAND_SH_FLCTL)		+= sh_flctl.o
-obj-$(CONFIG_MTD_NAND_MXC)		+= mxc_nand.o
-obj-$(CONFIG_MTD_NAND_SOCRATES)		+= socrates_nand.o
-obj-$(CONFIG_MTD_NAND_TXX9NDFMC)	+= txx9ndfmc.o
-obj-$(CONFIG_MTD_NAND_NUC900)		+= nuc900_nand.o
-obj-$(CONFIG_MTD_NAND_MPC5121_NFC)	+= mpc5121_nfc.o
-obj-$(CONFIG_MTD_NAND_VF610_NFC)	+= vf610_nfc.o
-obj-$(CONFIG_MTD_NAND_RICOH)		+= r852.o
-obj-$(CONFIG_MTD_NAND_JZ4740)		+= jz4740_nand.o
-obj-$(CONFIG_MTD_NAND_JZ4780)		+= jz4780_nand.o jz4780_bch.o
-obj-$(CONFIG_MTD_NAND_GPMI_NAND)	+= gpmi-nand/
-obj-$(CONFIG_MTD_NAND_XWAY)		+= xway_nand.o
-obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)	+= bcm47xxnflash/
-obj-$(CONFIG_MTD_NAND_SUNXI)		+= sunxi_nand.o
-obj-$(CONFIG_MTD_NAND_HISI504)	        += hisi504_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= brcmnand/
-obj-$(CONFIG_MTD_NAND_QCOM)		+= qcom_nandc.o
-obj-$(CONFIG_MTD_NAND_MTK)		+= mtk_nand.o mtk_ecc.o
-
-nand-objs := nand_base.o nand_bbt.o nand_timings.o
+obj-y	+= rawnand/
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
deleted file mode 100644
index 0972493b6cd2..000000000000
--- a/drivers/mtd/nand/ams-delta.c
+++ /dev/null
@@ -1,291 +0,0 @@ 
-/*
- *  drivers/mtd/nand/ams-delta.c
- *
- *  Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
- *
- *  Derived from drivers/mtd/toto.c
- *  Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
- *  Partially stolen from drivers/mtd/nand/plat_nand.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   Amstrad E3 (Delta).
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/gpio.h>
-#include <linux/platform_data/gpio-omap.h>
-
-#include <asm/io.h>
-#include <asm/sizes.h>
-
-#include <mach/board-ams-delta.h>
-
-#include <mach/hardware.h>
-
-/*
- * MTD structure for E3 (Delta)
- */
-static struct mtd_info *ams_delta_mtd = NULL;
-
-/*
- * Define partitions for flash devices
- */
-
-static struct mtd_partition partition_info[] = {
-	{ .name		= "Kernel",
-	  .offset	= 0,
-	  .size		= 3 * SZ_1M + SZ_512K },
-	{ .name		= "u-boot",
-	  .offset	= 3 * SZ_1M + SZ_512K,
-	  .size		= SZ_256K },
-	{ .name		= "u-boot params",
-	  .offset	= 3 * SZ_1M + SZ_512K + SZ_256K,
-	  .size		= SZ_256K },
-	{ .name		= "Amstrad LDR",
-	  .offset	= 4 * SZ_1M,
-	  .size		= SZ_256K },
-	{ .name		= "File system",
-	  .offset	= 4 * SZ_1M + 1 * SZ_256K,
-	  .size		= 27 * SZ_1M },
-	{ .name		= "PBL reserved",
-	  .offset	= 32 * SZ_1M - 3 * SZ_256K,
-	  .size		=  3 * SZ_256K },
-};
-
-static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	void __iomem *io_base = (void __iomem *)nand_get_controller_data(this);
-
-	writew(0, io_base + OMAP_MPUIO_IO_CNTL);
-	writew(byte, this->IO_ADDR_W);
-	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
-	ndelay(40);
-	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
-}
-
-static u_char ams_delta_read_byte(struct mtd_info *mtd)
-{
-	u_char res;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	void __iomem *io_base = (void __iomem *)nand_get_controller_data(this);
-
-	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
-	ndelay(40);
-	writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
-	res = readw(this->IO_ADDR_R);
-	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
-
-	return res;
-}
-
-static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf,
-				int len)
-{
-	int i;
-
-	for (i=0; i<len; i++)
-		ams_delta_write_byte(mtd, buf[i]);
-}
-
-static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	int i;
-
-	for (i=0; i<len; i++)
-		buf[i] = ams_delta_read_byte(mtd);
-}
-
-/*
- * Command control function
- *
- * ctrl:
- * NAND_NCE: bit 0 -> bit 2
- * NAND_CLE: bit 1 -> bit 7
- * NAND_ALE: bit 2 -> bit 6
- */
-static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
-				unsigned int ctrl)
-{
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
-				(ctrl & NAND_NCE) == 0);
-		gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
-				(ctrl & NAND_CLE) != 0);
-		gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
-				(ctrl & NAND_ALE) != 0);
-	}
-
-	if (cmd != NAND_CMD_NONE)
-		ams_delta_write_byte(mtd, cmd);
-}
-
-static int ams_delta_nand_ready(struct mtd_info *mtd)
-{
-	return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
-}
-
-static const struct gpio _mandatory_gpio[] = {
-	{
-		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NCE,
-		.flags	= GPIOF_OUT_INIT_HIGH,
-		.label	= "nand_nce",
-	},
-	{
-		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NRE,
-		.flags	= GPIOF_OUT_INIT_HIGH,
-		.label	= "nand_nre",
-	},
-	{
-		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NWP,
-		.flags	= GPIOF_OUT_INIT_HIGH,
-		.label	= "nand_nwp",
-	},
-	{
-		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NWE,
-		.flags	= GPIOF_OUT_INIT_HIGH,
-		.label	= "nand_nwe",
-	},
-	{
-		.gpio	= AMS_DELTA_GPIO_PIN_NAND_ALE,
-		.flags	= GPIOF_OUT_INIT_LOW,
-		.label	= "nand_ale",
-	},
-	{
-		.gpio	= AMS_DELTA_GPIO_PIN_NAND_CLE,
-		.flags	= GPIOF_OUT_INIT_LOW,
-		.label	= "nand_cle",
-	},
-};
-
-/*
- * Main initialization routine
- */
-static int ams_delta_init(struct platform_device *pdev)
-{
-	struct nand_chip *this;
-	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	void __iomem *io_base;
-	int err = 0;
-
-	if (!res)
-		return -ENXIO;
-
-	/* Allocate memory for MTD device structure and private data */
-	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!this) {
-		printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
-		err = -ENOMEM;
-		goto out;
-	}
-
-	ams_delta_mtd = nand_to_mtd(this);
-	ams_delta_mtd->owner = THIS_MODULE;
-
-	/*
-	 * Don't try to request the memory region from here,
-	 * it should have been already requested from the
-	 * gpio-omap driver and requesting it again would fail.
-	 */
-
-	io_base = ioremap(res->start, resource_size(res));
-	if (io_base == NULL) {
-		dev_err(&pdev->dev, "ioremap failed\n");
-		err = -EIO;
-		goto out_free;
-	}
-
-	nand_set_controller_data(this, (void *)io_base);
-
-	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
-	this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
-	this->read_byte = ams_delta_read_byte;
-	this->write_buf = ams_delta_write_buf;
-	this->read_buf = ams_delta_read_buf;
-	this->cmd_ctrl = ams_delta_hwcontrol;
-	if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
-		this->dev_ready = ams_delta_nand_ready;
-	} else {
-		this->dev_ready = NULL;
-		printk(KERN_NOTICE "Couldn't request gpio for Delta NAND ready.\n");
-	}
-	/* 25 us command delay time */
-	this->chip_delay = 30;
-	this->ecc.mode = NAND_ECC_SOFT;
-	this->ecc.algo = NAND_ECC_HAMMING;
-
-	platform_set_drvdata(pdev, io_base);
-
-	/* Set chip enabled, but  */
-	err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
-	if (err)
-		goto out_gpio;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(ams_delta_mtd, 1)) {
-		err = -ENXIO;
-		goto out_mtd;
-	}
-
-	/* Register the partitions */
-	mtd_device_register(ams_delta_mtd, partition_info,
-			    ARRAY_SIZE(partition_info));
-
-	goto out;
-
- out_mtd:
-	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
-out_gpio:
-	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
-	iounmap(io_base);
-out_free:
-	kfree(this);
- out:
-	return err;
-}
-
-/*
- * Clean up routine
- */
-static int ams_delta_cleanup(struct platform_device *pdev)
-{
-	void __iomem *io_base = platform_get_drvdata(pdev);
-
-	/* Release resources, unregister device */
-	nand_release(ams_delta_mtd);
-
-	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
-	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
-	iounmap(io_base);
-
-	/* Free the MTD device structure */
-	kfree(mtd_to_nand(ams_delta_mtd));
-
-	return 0;
-}
-
-static struct platform_driver ams_delta_nand_driver = {
-	.probe		= ams_delta_init,
-	.remove		= ams_delta_cleanup,
-	.driver		= {
-		.name	= "ams-delta-nand",
-	},
-};
-
-module_platform_driver(ams_delta_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
-MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
deleted file mode 100644
index fbb7e5da2541..000000000000
--- a/drivers/mtd/nand/atmel_nand.c
+++ /dev/null
@@ -1,2481 +0,0 @@ 
-/*
- *  Copyright © 2003 Rick Bronson
- *
- *  Derived from drivers/mtd/nand/autcpu12.c
- *	 Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- *  Derived from drivers/mtd/spia.c
- *	 Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
- *
- *
- *  Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
- *     Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
- *
- *     Derived from Das U-Boot source code
- *     		(u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
- *     © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
- *
- *  Add Programmable Multibit ECC support for various AT91 SoC
- *     © Copyright 2012 ATMEL, Hong Xu
- *
- *  Add Nand Flash Controller support for SAMA5 SoC
- *     © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/platform_data/atmel.h>
-
-static int use_dma = 1;
-module_param(use_dma, int, 0);
-
-static int on_flash_bbt = 0;
-module_param(on_flash_bbt, int, 0);
-
-/* Register access macros */
-#define ecc_readl(add, reg)				\
-	__raw_readl(add + ATMEL_ECC_##reg)
-#define ecc_writel(add, reg, value)			\
-	__raw_writel((value), add + ATMEL_ECC_##reg)
-
-#include "atmel_nand_ecc.h"	/* Hardware ECC registers */
-#include "atmel_nand_nfc.h"	/* Nand Flash Controller definition */
-
-struct atmel_nand_caps {
-	bool pmecc_correct_erase_page;
-	uint8_t pmecc_max_correction;
-};
-
-/*
- * oob layout for large page size
- * bad block info is on bytes 0 and 1
- * the bytes have to be consecutives to avoid
- * several NAND_CMD_RNDOUT during read
- *
- * oob layout for small page size
- * bad block info is on bytes 4 and 5
- * the bytes have to be consecutives to avoid
- * several NAND_CMD_RNDOUT during read
- */
-static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = 4;
-	oobregion->offset = 0;
-
-	return 0;
-}
-
-static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section,
-				   struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 6;
-	oobregion->length = mtd->oobsize - oobregion->offset;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = {
-	.ecc = atmel_ooblayout_ecc_sp,
-	.free = atmel_ooblayout_free_sp,
-};
-
-struct atmel_nfc {
-	void __iomem		*base_cmd_regs;
-	void __iomem		*hsmc_regs;
-	void			*sram_bank0;
-	dma_addr_t		sram_bank0_phys;
-	bool			use_nfc_sram;
-	bool			write_by_sram;
-
-	struct clk		*clk;
-
-	bool			is_initialized;
-	struct completion	comp_ready;
-	struct completion	comp_cmd_done;
-	struct completion	comp_xfer_done;
-
-	/* Point to the sram bank which include readed data via NFC */
-	void			*data_in_sram;
-	bool			will_write_sram;
-};
-static struct atmel_nfc	nand_nfc;
-
-struct atmel_nand_host {
-	struct nand_chip	nand_chip;
-	void __iomem		*io_base;
-	dma_addr_t		io_phys;
-	struct atmel_nand_data	board;
-	struct device		*dev;
-	void __iomem		*ecc;
-
-	struct completion	comp;
-	struct dma_chan		*dma_chan;
-
-	struct atmel_nfc	*nfc;
-
-	const struct atmel_nand_caps	*caps;
-	bool			has_pmecc;
-	u8			pmecc_corr_cap;
-	u16			pmecc_sector_size;
-	bool			has_no_lookup_table;
-	u32			pmecc_lookup_table_offset;
-	u32			pmecc_lookup_table_offset_512;
-	u32			pmecc_lookup_table_offset_1024;
-
-	int			pmecc_degree;	/* Degree of remainders */
-	int			pmecc_cw_len;	/* Length of codeword */
-
-	void __iomem		*pmerrloc_base;
-	void __iomem		*pmerrloc_el_base;
-	void __iomem		*pmecc_rom_base;
-
-	/* lookup table for alpha_to and index_of */
-	void __iomem		*pmecc_alpha_to;
-	void __iomem		*pmecc_index_of;
-
-	/* data for pmecc computation */
-	int16_t			*pmecc_partial_syn;
-	int16_t			*pmecc_si;
-	int16_t			*pmecc_smu;	/* Sigma table */
-	int16_t			*pmecc_lmu;	/* polynomal order */
-	int			*pmecc_mu;
-	int			*pmecc_dmu;
-	int			*pmecc_delta;
-};
-
-/*
- * Enable NAND.
- */
-static void atmel_nand_enable(struct atmel_nand_host *host)
-{
-	if (gpio_is_valid(host->board.enable_pin))
-		gpio_set_value(host->board.enable_pin, 0);
-}
-
-/*
- * Disable NAND.
- */
-static void atmel_nand_disable(struct atmel_nand_host *host)
-{
-	if (gpio_is_valid(host->board.enable_pin))
-		gpio_set_value(host->board.enable_pin, 1);
-}
-
-/*
- * Hardware specific access to control-lines
- */
-static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if (ctrl & NAND_NCE)
-			atmel_nand_enable(host);
-		else
-			atmel_nand_disable(host);
-	}
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		writeb(cmd, host->io_base + (1 << host->board.cle));
-	else
-		writeb(cmd, host->io_base + (1 << host->board.ale));
-}
-
-/*
- * Read the Device Ready pin.
- */
-static int atmel_nand_device_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	return gpio_get_value(host->board.rdy_pin) ^
-                !!host->board.rdy_pin_active_low;
-}
-
-/* Set up for hardware ready pin and enable pin. */
-static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	int res = 0;
-
-	if (gpio_is_valid(host->board.rdy_pin)) {
-		res = devm_gpio_request(host->dev,
-				host->board.rdy_pin, "nand_rdy");
-		if (res < 0) {
-			dev_err(host->dev,
-				"can't request rdy gpio %d\n",
-				host->board.rdy_pin);
-			return res;
-		}
-
-		res = gpio_direction_input(host->board.rdy_pin);
-		if (res < 0) {
-			dev_err(host->dev,
-				"can't request input direction rdy gpio %d\n",
-				host->board.rdy_pin);
-			return res;
-		}
-
-		chip->dev_ready = atmel_nand_device_ready;
-	}
-
-	if (gpio_is_valid(host->board.enable_pin)) {
-		res = devm_gpio_request(host->dev,
-				host->board.enable_pin, "nand_enable");
-		if (res < 0) {
-			dev_err(host->dev,
-				"can't request enable gpio %d\n",
-				host->board.enable_pin);
-			return res;
-		}
-
-		res = gpio_direction_output(host->board.enable_pin, 1);
-		if (res < 0) {
-			dev_err(host->dev,
-				"can't request output direction enable gpio %d\n",
-				host->board.enable_pin);
-			return res;
-		}
-	}
-
-	return res;
-}
-
-/*
- * Minimal-overhead PIO for data access.
- */
-static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
-		memcpy(buf, host->nfc->data_in_sram, len);
-		host->nfc->data_in_sram += len;
-	} else {
-		__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
-	}
-}
-
-static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
-		memcpy(buf, host->nfc->data_in_sram, len);
-		host->nfc->data_in_sram += len;
-	} else {
-		__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
-	}
-}
-
-static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
-
-	__raw_writesb(nand_chip->IO_ADDR_W, buf, len);
-}
-
-static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
-
-	__raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
-}
-
-static void dma_complete_func(void *completion)
-{
-	complete(completion);
-}
-
-static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
-{
-	/* NFC only has two banks. Must be 0 or 1 */
-	if (bank > 1)
-		return -EINVAL;
-
-	if (bank) {
-		struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-
-		/* The NFC can only handle two banks for 2k-page or smaller flashes */
-		if (mtd->writesize > 2048)
-			return -EINVAL;
-		nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
-	} else {
-		nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
-	}
-
-	return 0;
-}
-
-static uint nfc_get_sram_off(struct atmel_nand_host *host)
-{
-	if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
-		return NFC_SRAM_BANK1_OFFSET;
-	else
-		return 0;
-}
-
-static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
-{
-	if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
-		return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
-	else
-		return host->nfc->sram_bank0_phys;
-}
-
-static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
-			       int is_read)
-{
-	struct dma_device *dma_dev;
-	enum dma_ctrl_flags flags;
-	dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
-	struct dma_async_tx_descriptor *tx = NULL;
-	dma_cookie_t cookie;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	void *p = buf;
-	int err = -EIO;
-	enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-	struct atmel_nfc *nfc = host->nfc;
-
-	if (buf >= high_memory)
-		goto err_buf;
-
-	dma_dev = host->dma_chan->device;
-
-	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-
-	phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
-	if (dma_mapping_error(dma_dev->dev, phys_addr)) {
-		dev_err(host->dev, "Failed to dma_map_single\n");
-		goto err_buf;
-	}
-
-	if (is_read) {
-		if (nfc && nfc->data_in_sram)
-			dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
-				- (nfc->sram_bank0 + nfc_get_sram_off(host)));
-		else
-			dma_src_addr = host->io_phys;
-
-		dma_dst_addr = phys_addr;
-	} else {
-		dma_src_addr = phys_addr;
-
-		if (nfc && nfc->write_by_sram)
-			dma_dst_addr = nfc_sram_phys(host);
-		else
-			dma_dst_addr = host->io_phys;
-	}
-
-	tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
-					     dma_src_addr, len, flags);
-	if (!tx) {
-		dev_err(host->dev, "Failed to prepare DMA memcpy\n");
-		goto err_dma;
-	}
-
-	init_completion(&host->comp);
-	tx->callback = dma_complete_func;
-	tx->callback_param = &host->comp;
-
-	cookie = tx->tx_submit(tx);
-	if (dma_submit_error(cookie)) {
-		dev_err(host->dev, "Failed to do DMA tx_submit\n");
-		goto err_dma;
-	}
-
-	dma_async_issue_pending(host->dma_chan);
-	wait_for_completion(&host->comp);
-
-	if (is_read && nfc && nfc->data_in_sram)
-		/* After reading data from SRAM, advance the position */
-		nfc->data_in_sram += len;
-
-	err = 0;
-
-err_dma:
-	dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
-err_buf:
-	if (err != 0)
-		dev_dbg(host->dev, "Fall back to CPU I/O\n");
-	return err;
-}
-
-static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (use_dma && len > mtd->oobsize)
-		/* only use DMA for transfers bigger than the OOB size: better performance */
-		if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
-			return;
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		atmel_read_buf16(mtd, buf, len);
-	else
-		atmel_read_buf8(mtd, buf, len);
-}
-
-static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (use_dma && len > mtd->oobsize)
-		/* only use DMA for transfers bigger than the OOB size: better performance */
-		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
-			return;
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		atmel_write_buf16(mtd, buf, len);
-	else
-		atmel_write_buf8(mtd, buf, len);
-}
-
-/*
- * Return the number of ECC bytes per sector according to the sector size
- * and correction capability.
- *
- * The following table shows what the at91 PMECC supports:
- * Correction Capability	Sector_512_bytes	Sector_1024_bytes
- * =====================	================	=================
- *                2-bits                 4-bytes                  4-bytes
- *                4-bits                 7-bytes                  7-bytes
- *                8-bits                13-bytes                 14-bytes
- *               12-bits                20-bytes                 21-bytes
- *               24-bits                39-bytes                 42-bytes
- *               32-bits                52-bytes                 56-bytes
- */
-static int pmecc_get_ecc_bytes(int cap, int sector_size)
-{
-	int m = 12 + sector_size / 512;
-	return (m * cap + 7) / 8;
-}
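-
-/*
- * Worked example (added for illustration, not from the original source):
- * with an 8-bit correction capability over 512-byte sectors,
- * m = 12 + 512 / 512 = 13, so this helper returns (13 * 8 + 7) / 8 = 13
- * bytes, matching the table above; over 1024-byte sectors m = 14 and the
- * same capability needs (14 * 8 + 7) / 8 = 14 bytes.
- */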
-
-static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
-{
-	int table_size;
-
-	table_size = host->pmecc_sector_size == 512 ?
-		PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
-
-	return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
-			table_size * sizeof(int16_t);
-}
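-
-/*
- * Layout note (added for clarity, assuming the layout implied by
- * create_lookup_table() below): the lookup table region stores index_of[]
- * first and alpha_to[] immediately after it, each table_size 16-bit entries
- * long, which is why alpha_to is found table_size * sizeof(int16_t) bytes
- * past the configured lookup table offset.
- */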
-
-static int pmecc_data_alloc(struct atmel_nand_host *host)
-{
-	const int cap = host->pmecc_corr_cap;
-	int size;
-
-	size = (2 * cap + 1) * sizeof(int16_t);
-	host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
-	host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
-	host->pmecc_lmu = devm_kzalloc(host->dev,
-			(cap + 1) * sizeof(int16_t), GFP_KERNEL);
-	host->pmecc_smu = devm_kzalloc(host->dev,
-			(cap + 2) * size, GFP_KERNEL);
-
-	size = (cap + 1) * sizeof(int);
-	host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
-	host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
-	host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);
-
-	if (!host->pmecc_partial_syn ||
-		!host->pmecc_si ||
-		!host->pmecc_lmu ||
-		!host->pmecc_smu ||
-		!host->pmecc_mu ||
-		!host->pmecc_dmu ||
-		!host->pmecc_delta)
-		return -ENOMEM;
-
-	return 0;
-}
-
-static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	int i;
-	uint32_t value;
-
-	/* Fill odd syndromes */
-	for (i = 0; i < host->pmecc_corr_cap; i++) {
-		value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
-		if (i & 1)
-			value >>= 16;
-		value &= 0xffff;
-		host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
-	}
-}
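-
-/*
- * Note (added for clarity): the loop above reads pmecc_corr_cap partial
- * syndromes, packed two per 32-bit remainder register (low half-word first),
- * and stores them at the odd positions 1, 3, 5, ... of pmecc_partial_syn[].
- * The even syndromes are derived afterwards in pmecc_substitute() using
- * S(2k) = S(k)^2 over the Galois field.
- */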
-
-static void pmecc_substitute(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	int16_t __iomem *alpha_to = host->pmecc_alpha_to;
-	int16_t __iomem *index_of = host->pmecc_index_of;
-	int16_t *partial_syn = host->pmecc_partial_syn;
-	const int cap = host->pmecc_corr_cap;
-	int16_t *si;
-	int i, j;
-
-	/* si[] is a table that holds the current syndrome values;
-	 * each element of the table belongs to the field
-	 */
-	si = host->pmecc_si;
-
-	memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
-
-	/* Compute the 2t syndromes based on S(x) */
-	/* Odd syndromes */
-	for (i = 1; i < 2 * cap; i += 2) {
-		for (j = 0; j < host->pmecc_degree; j++) {
-			if (partial_syn[i] & ((unsigned short)0x1 << j))
-				si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
-		}
-	}
-	/* Even syndrome = (Odd syndrome) ** 2 */
-	for (i = 2, j = 1; j <= cap; i = ++j << 1) {
-		if (si[j] == 0) {
-			si[i] = 0;
-		} else {
-			int16_t tmp;
-
-			tmp = readw_relaxed(index_of + si[j]);
-			tmp = (tmp * 2) % host->pmecc_cw_len;
-			si[i] = readw_relaxed(alpha_to + tmp);
-		}
-	}
-
-	return;
-}
-
-static void pmecc_get_sigma(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	int16_t *lmu = host->pmecc_lmu;
-	int16_t *si = host->pmecc_si;
-	int *mu = host->pmecc_mu;
-	int *dmu = host->pmecc_dmu;	/* Discrepancy */
-	int *delta = host->pmecc_delta; /* Delta order */
-	int cw_len = host->pmecc_cw_len;
-	const int16_t cap = host->pmecc_corr_cap;
-	const int num = 2 * cap + 1;
-	int16_t __iomem	*index_of = host->pmecc_index_of;
-	int16_t __iomem	*alpha_to = host->pmecc_alpha_to;
-	int i, j, k;
-	uint32_t dmu_0_count, tmp;
-	int16_t *smu = host->pmecc_smu;
-
-	/* index of largest delta */
-	int ro;
-	int largest;
-	int diff;
-
-	dmu_0_count = 0;
-
-	/* First Row */
-
-	/* Mu */
-	mu[0] = -1;
-
-	memset(smu, 0, sizeof(int16_t) * num);
-	smu[0] = 1;
-
-	/* discrepancy set to 1 */
-	dmu[0] = 1;
-	/* polynomial order set to 0 */
-	lmu[0] = 0;
-	delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
-
-	/* Second Row */
-
-	/* Mu */
-	mu[1] = 0;
-	/* Sigma(x) set to 1 */
-	memset(&smu[num], 0, sizeof(int16_t) * num);
-	smu[num] = 1;
-
-	/* discrepancy set to S1 */
-	dmu[1] = si[1];
-
-	/* polynomial order set to 0 */
-	lmu[1] = 0;
-
-	delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
-
-	/* Init the Sigma(x) last row */
-	memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
-
-	for (i = 1; i <= cap; i++) {
-		mu[i + 1] = i << 1;
-		/* Begin Computing Sigma (Mu+1) and L(mu) */
-		/* check if discrepancy is set to 0 */
-		if (dmu[i] == 0) {
-			dmu_0_count++;
-
-			tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
-			if ((cap - (lmu[i] >> 1) - 1) & 0x1)
-				tmp += 2;
-			else
-				tmp += 1;
-
-			if (dmu_0_count == tmp) {
-				for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
-					smu[(cap + 1) * num + j] =
-							smu[i * num + j];
-
-				lmu[cap + 1] = lmu[i];
-				return;
-			}
-
-			/* copy polynomial */
-			for (j = 0; j <= lmu[i] >> 1; j++)
-				smu[(i + 1) * num + j] = smu[i * num + j];
-
-			/* copy the previous polynomial order to the next */
-			lmu[i + 1] = lmu[i];
-		} else {
-			ro = 0;
-			largest = -1;
-			/* find largest delta with dmu != 0 */
-			for (j = 0; j < i; j++) {
-				if ((dmu[j]) && (delta[j] > largest)) {
-					largest = delta[j];
-					ro = j;
-				}
-			}
-
-			/* compute difference */
-			diff = (mu[i] - mu[ro]);
-
-			/* Compute degree of the new smu polynomial */
-			if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
-				lmu[i + 1] = lmu[i];
-			else
-				lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
-
-			/* Init smu[i+1] with 0 */
-			for (k = 0; k < num; k++)
-				smu[(i + 1) * num + k] = 0;
-
-			/* Compute smu[i+1] */
-			for (k = 0; k <= lmu[ro] >> 1; k++) {
-				int16_t a, b, c;
-
-				if (!(smu[ro * num + k] && dmu[i]))
-					continue;
-				a = readw_relaxed(index_of + dmu[i]);
-				b = readw_relaxed(index_of + dmu[ro]);
-				c = readw_relaxed(index_of + smu[ro * num + k]);
-				tmp = a + (cw_len - b) + c;
-				a = readw_relaxed(alpha_to + tmp % cw_len);
-				smu[(i + 1) * num + (k + diff)] = a;
-			}
-
-			for (k = 0; k <= lmu[i] >> 1; k++)
-				smu[(i + 1) * num + k] ^= smu[i * num + k];
-		}
-
-		/* End Computing Sigma (Mu+1) and L(mu) */
-		/* In either case compute delta */
-		delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
-
-		/* Do not compute discrepancy for the last iteration */
-		if (i >= cap)
-			continue;
-
-		for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
-			tmp = 2 * (i - 1);
-			if (k == 0) {
-				dmu[i + 1] = si[tmp + 3];
-			} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
-				int16_t a, b, c;
-				a = readw_relaxed(index_of +
-						smu[(i + 1) * num + k]);
-				b = si[2 * (i - 1) + 3 - k];
-				c = readw_relaxed(index_of + b);
-				tmp = a + c;
-				tmp %= cw_len;
-				dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
-					dmu[i + 1];
-			}
-		}
-	}
-
-	return;
-}
-
-static int pmecc_err_location(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	unsigned long end_time;
-	const int cap = host->pmecc_corr_cap;
-	const int num = 2 * cap + 1;
-	int sector_size = host->pmecc_sector_size;
-	int err_nbr = 0;	/* number of errors */
-	int roots_nbr;		/* number of roots */
-	int i;
-	uint32_t val;
-	int16_t *smu = host->pmecc_smu;
-
-	pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
-
-	for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
-		pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
-				      smu[(cap + 1) * num + i]);
-		err_nbr++;
-	}
-
-	val = (err_nbr - 1) << 16;
-	if (sector_size == 1024)
-		val |= 1;
-
-	pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
-	pmerrloc_writel(host->pmerrloc_base, ELEN,
-			sector_size * 8 + host->pmecc_degree * cap);
-
-	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
-	while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
-		 & PMERRLOC_CALC_DONE)) {
-		if (unlikely(time_after(jiffies, end_time))) {
-			dev_err(host->dev, "PMECC: Timeout to calculate error location.\n");
-			return -1;
-		}
-		cpu_relax();
-	}
-
-	roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
-		& PMERRLOC_ERR_NUM_MASK) >> 8;
-	/* Number of roots == degree of smu hence <= cap */
-	if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
-		return err_nbr - 1;
-
-	/* The number of roots does not match the degree of smu;
-	 * unable to correct the errors */
-	return -1;
-}
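-
-/*
- * Worked example (illustrative): for 512-byte sectors (Galois field degree
- * 13) and a 4-bit correction capability, the search length written to ELEN
- * above is 512 * 8 + 13 * 4 = 4148 bits, i.e. the sector's data bits plus
- * its m * t parity bits.
- */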
-
-static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
-		int sector_num, int extra_bytes, int err_nbr)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	int i = 0;
-	int byte_pos, bit_pos, sector_size, pos;
-	uint32_t tmp;
-	uint8_t err_byte;
-
-	sector_size = host->pmecc_sector_size;
-
-	while (err_nbr) {
-		tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1;
-		byte_pos = tmp / 8;
-		bit_pos  = tmp % 8;
-
-		if (byte_pos >= (sector_size + extra_bytes))
-			BUG();	/* should never happen */
-
-		if (byte_pos < sector_size) {
-			err_byte = *(buf + byte_pos);
-			*(buf + byte_pos) ^= (1 << bit_pos);
-
-			pos = sector_num * host->pmecc_sector_size + byte_pos;
-			dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
-				pos, bit_pos, err_byte, *(buf + byte_pos));
-		} else {
-			struct mtd_oob_region oobregion;
-
-			/* Bit flip in OOB area */
-			tmp = sector_num * nand_chip->ecc.bytes
-					+ (byte_pos - sector_size);
-			err_byte = ecc[tmp];
-			ecc[tmp] ^= (1 << bit_pos);
-
-			mtd_ooblayout_ecc(mtd, 0, &oobregion);
-			pos = tmp + oobregion.offset;
-			dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
-				pos, bit_pos, err_byte, ecc[tmp]);
-		}
-
-		i++;
-		err_nbr--;
-	}
-
-	return;
-}
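-
-/*
- * Worked example (illustrative): with 512-byte sectors, an error location of
- * 4100 reported by the error locator gives tmp = 4099, i.e. byte 512, bit 3.
- * That position is past the sector data, so the flip is corrected in the ECC
- * bytes stored in the OOB area instead of in the data buffer.
- */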
-
-static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
-	u8 *ecc)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	int i, err_nbr;
-	uint8_t *buf_pos;
-	int max_bitflips = 0;
-
-	for (i = 0; i < nand_chip->ecc.steps; i++) {
-		err_nbr = 0;
-		if (pmecc_stat & 0x1) {
-			buf_pos = buf + i * host->pmecc_sector_size;
-
-			pmecc_gen_syndrome(mtd, i);
-			pmecc_substitute(mtd);
-			pmecc_get_sigma(mtd);
-
-			err_nbr = pmecc_err_location(mtd);
-			if (err_nbr >= 0) {
-				pmecc_correct_data(mtd, buf_pos, ecc, i,
-						   nand_chip->ecc.bytes,
-						   err_nbr);
-			} else if (!host->caps->pmecc_correct_erase_page) {
-				u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes);
-
-				/* Try to detect erased pages */
-				err_nbr = nand_check_erased_ecc_chunk(buf_pos,
-							host->pmecc_sector_size,
-							ecc_pos,
-							nand_chip->ecc.bytes,
-							NULL, 0,
-							nand_chip->ecc.strength);
-			}
-
-			if (err_nbr < 0) {
-				dev_err(host->dev, "PMECC: Too many errors\n");
-				mtd->ecc_stats.failed++;
-				return -EIO;
-			}
-
-			mtd->ecc_stats.corrected += err_nbr;
-			max_bitflips = max_t(int, max_bitflips, err_nbr);
-		}
-		pmecc_stat >>= 1;
-	}
-
-	return max_bitflips;
-}
-
-static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
-{
-	u32 val;
-
-	if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
-		dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
-		return;
-	}
-
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-	val = pmecc_readl_relaxed(host->ecc, CFG);
-
-	if (ecc_op == NAND_ECC_READ)
-		pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
-			| PMECC_CFG_AUTO_ENABLE);
-	else
-		pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
-			& ~PMECC_CFG_AUTO_ENABLE);
-
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
-}
-
-static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
-{
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	int eccsize = chip->ecc.size * chip->ecc.steps;
-	uint8_t *oob = chip->oob_poi;
-	uint32_t stat;
-	unsigned long end_time;
-	int bitflips = 0;
-
-	if (!host->nfc || !host->nfc->use_nfc_sram)
-		pmecc_enable(host, NAND_ECC_READ);
-
-	chip->read_buf(mtd, buf, eccsize);
-	chip->read_buf(mtd, oob, mtd->oobsize);
-
-	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
-	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
-		if (unlikely(time_after(jiffies, end_time))) {
-			dev_err(host->dev, "PMECC: Timeout to get error status.\n");
-			return -EIO;
-		}
-		cpu_relax();
-	}
-
-	stat = pmecc_readl_relaxed(host->ecc, ISR);
-	if (stat != 0) {
-		struct mtd_oob_region oobregion;
-
-		mtd_ooblayout_ecc(mtd, 0, &oobregion);
-		bitflips = pmecc_correction(mtd, stat, buf,
-					    &oob[oobregion.offset]);
-		if (bitflips < 0)
-			/* uncorrectable errors */
-			return 0;
-	}
-
-	return bitflips;
-}
-
-static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
-		struct nand_chip *chip, const uint8_t *buf, int oob_required,
-		int page)
-{
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	struct mtd_oob_region oobregion = { };
-	int i, j, section = 0;
-	unsigned long end_time;
-
-	if (!host->nfc || !host->nfc->write_by_sram) {
-		pmecc_enable(host, NAND_ECC_WRITE);
-		chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
-	}
-
-	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
-	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
-		if (unlikely(time_after(jiffies, end_time))) {
-			dev_err(host->dev, "PMECC: Timeout to get ECC value.\n");
-			return -EIO;
-		}
-		cpu_relax();
-	}
-
-	for (i = 0; i < chip->ecc.steps; i++) {
-		for (j = 0; j < chip->ecc.bytes; j++) {
-			if (!oobregion.length)
-				mtd_ooblayout_ecc(mtd, section, &oobregion);
-
-			chip->oob_poi[oobregion.offset] =
-				pmecc_readb_ecc_relaxed(host->ecc, i, j);
-			oobregion.length--;
-			oobregion.offset++;
-			section++;
-		}
-	}
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-static void atmel_pmecc_core_init(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
-	uint32_t val = 0;
-	struct mtd_oob_region oobregion;
-
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-
-	switch (host->pmecc_corr_cap) {
-	case 2:
-		val = PMECC_CFG_BCH_ERR2;
-		break;
-	case 4:
-		val = PMECC_CFG_BCH_ERR4;
-		break;
-	case 8:
-		val = PMECC_CFG_BCH_ERR8;
-		break;
-	case 12:
-		val = PMECC_CFG_BCH_ERR12;
-		break;
-	case 24:
-		val = PMECC_CFG_BCH_ERR24;
-		break;
-	case 32:
-		val = PMECC_CFG_BCH_ERR32;
-		break;
-	}
-
-	if (host->pmecc_sector_size == 512)
-		val |= PMECC_CFG_SECTOR512;
-	else if (host->pmecc_sector_size == 1024)
-		val |= PMECC_CFG_SECTOR1024;
-
-	switch (nand_chip->ecc.steps) {
-	case 1:
-		val |= PMECC_CFG_PAGE_1SECTOR;
-		break;
-	case 2:
-		val |= PMECC_CFG_PAGE_2SECTORS;
-		break;
-	case 4:
-		val |= PMECC_CFG_PAGE_4SECTORS;
-		break;
-	case 8:
-		val |= PMECC_CFG_PAGE_8SECTORS;
-		break;
-	}
-
-	val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
-		| PMECC_CFG_AUTO_DISABLE);
-	pmecc_writel(host->ecc, CFG, val);
-
-	pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
-	mtd_ooblayout_ecc(mtd, 0, &oobregion);
-	pmecc_writel(host->ecc, SADDR, oobregion.offset);
-	pmecc_writel(host->ecc, EADDR,
-		     oobregion.offset + eccbytes - 1);
-	/* See datasheet about PMECC Clock Control Register */
-	pmecc_writel(host->ecc, CLK, 2);
-	pmecc_writel(host->ecc, IDR, 0xff);
-	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
-}
-
-/*
- * Get the minimum ECC requirements from the NAND.
- * If pmecc-cap and pmecc-sector-size are not specified in the DTS, this
- * function sets them according to the minimum ECC requirement. Otherwise,
- * the values from the DTS file are used.
- * Returns 0 on success, otherwise an error code.
- */
-static int pmecc_choose_ecc(struct atmel_nand_host *host,
-		int *cap, int *sector_size)
-{
-	/* Get minimum ECC requirements */
-	if (host->nand_chip.ecc_strength_ds) {
-		*cap = host->nand_chip.ecc_strength_ds;
-		*sector_size = host->nand_chip.ecc_step_ds;
-		dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
-				*cap, *sector_size);
-	} else {
-		*cap = 2;
-		*sector_size = 512;
-		dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
-	}
-
-	/* If device tree doesn't specify, use NAND's minimum ECC parameters */
-	if (host->pmecc_corr_cap == 0) {
-		if (*cap > host->caps->pmecc_max_correction)
-			return -EINVAL;
-
-		/* use the closest supported ECC strength (the next bigger one) */
-		if (*cap <= 2)
-			host->pmecc_corr_cap = 2;
-		else if (*cap <= 4)
-			host->pmecc_corr_cap = 4;
-		else if (*cap <= 8)
-			host->pmecc_corr_cap = 8;
-		else if (*cap <= 12)
-			host->pmecc_corr_cap = 12;
-		else if (*cap <= 24)
-			host->pmecc_corr_cap = 24;
-		else if (*cap <= 32)
-			host->pmecc_corr_cap = 32;
-		else
-			return -EINVAL;
-	}
-	if (host->pmecc_sector_size == 0) {
-		/* use the closest supported sector size (the next smaller one) */
-		if (*sector_size >= 1024)
-			host->pmecc_sector_size = 1024;
-		else if (*sector_size >= 512)
-			host->pmecc_sector_size = 512;
-		else
-			return -EINVAL;
-	}
-	return 0;
-}
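-
-/*
- * Example (illustrative): a NAND reporting a minimum requirement of 6 bits
- * per 512 bytes, with atmel,pmecc-cap and atmel,pmecc-sector-size left unset
- * in the DT, ends up with pmecc_corr_cap = 8 (the next supported strength)
- * and pmecc_sector_size = 512.
- */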
-
-static inline int deg(unsigned int poly)
-{
-	/* polynomial degree is the most-significant bit index */
-	return fls(poly) - 1;
-}
-
-static int build_gf_tables(int mm, unsigned int poly,
-		int16_t *index_of, int16_t *alpha_to)
-{
-	unsigned int i, x = 1;
-	const unsigned int k = 1 << deg(poly);
-	unsigned int nn = (1 << mm) - 1;
-
-	/* primitive polynomial must be of degree m */
-	if (k != (1u << mm))
-		return -EINVAL;
-
-	for (i = 0; i < nn; i++) {
-		alpha_to[i] = x;
-		index_of[x] = i;
-		if (i && (x == 1))
-			/* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
-			return -EINVAL;
-		x <<= 1;
-		if (x & k)
-			x ^= poly;
-	}
-	alpha_to[nn] = 1;
-	index_of[0] = 0;
-
-	return 0;
-}
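-
-/*
- * Illustrative sketch (not part of the driver): building the tables for the
- * small field GF(2^4) with the primitive polynomial x^4 + x + 1 (0x13) gives
- *
- *	int16_t index_of[1 << 4], alpha_to[1 << 4];
- *
- *	if (build_gf_tables(4, 0x13, index_of, alpha_to))
- *		pr_err("0x13 is not primitive over GF(2^4)\n");
- *
- * alpha_to[] = { 1, 2, 4, 8, 3, 6, 12, 11, 5, 10, 7, 14, 15, 13, 9, 1 } and
- * index_of[] as its inverse (e.g. index_of[3] = 4 because a^4 = a + 1). The
- * driver does the same with the degree-13/14 PMECC polynomials.
- */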
-
-static uint16_t *create_lookup_table(struct device *dev, int sector_size)
-{
-	int degree = (sector_size == 512) ?
-			PMECC_GF_DIMENSION_13 :
-			PMECC_GF_DIMENSION_14;
-	unsigned int poly = (sector_size == 512) ?
-			PMECC_GF_13_PRIMITIVE_POLY :
-			PMECC_GF_14_PRIMITIVE_POLY;
-	int table_size = (sector_size == 512) ?
-			PMECC_LOOKUP_TABLE_SIZE_512 :
-			PMECC_LOOKUP_TABLE_SIZE_1024;
-
-	int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t),
-			GFP_KERNEL);
-	if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
-		return NULL;
-
-	return addr;
-}
-
-static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
-					 struct atmel_nand_host *host)
-{
-	struct nand_chip *nand_chip = &host->nand_chip;
-	struct mtd_info *mtd = nand_to_mtd(nand_chip);
-	struct resource *regs, *regs_pmerr, *regs_rom;
-	uint16_t *galois_table;
-	int cap, sector_size, err_no;
-
-	err_no = pmecc_choose_ecc(host, &cap, &sector_size);
-	if (err_no) {
-		dev_err(host->dev, "The NAND flash's ECC requirement are not support!");
-		return err_no;
-	}
-
-	if (cap > host->pmecc_corr_cap ||
-			sector_size != host->pmecc_sector_size)
-		dev_info(host->dev, "WARNING: Be Caution! Using different PMECC parameters from Nand ONFI ECC reqirement.\n");
-
-	cap = host->pmecc_corr_cap;
-	sector_size = host->pmecc_sector_size;
-	host->pmecc_lookup_table_offset = (sector_size == 512) ?
-			host->pmecc_lookup_table_offset_512 :
-			host->pmecc_lookup_table_offset_1024;
-
-	dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
-		 cap, sector_size);
-
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!regs) {
-		dev_warn(host->dev,
-			"Can't get I/O resource regs for PMECC controller, rolling back on software ECC\n");
-		nand_chip->ecc.mode = NAND_ECC_SOFT;
-		nand_chip->ecc.algo = NAND_ECC_HAMMING;
-		return 0;
-	}
-
-	host->ecc = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(host->ecc)) {
-		err_no = PTR_ERR(host->ecc);
-		goto err;
-	}
-
-	regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
-	if (IS_ERR(host->pmerrloc_base)) {
-		err_no = PTR_ERR(host->pmerrloc_base);
-		goto err;
-	}
-	host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx +
-		(host->caps->pmecc_max_correction + 1) * 4;
-
-	if (!host->has_no_lookup_table) {
-		regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
-		host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev,
-								regs_rom);
-		if (IS_ERR(host->pmecc_rom_base)) {
-			dev_err(host->dev, "Can not get I/O resource for ROM, will build a lookup table in runtime!\n");
-			host->has_no_lookup_table = true;
-		}
-	}
-
-	if (host->has_no_lookup_table) {
-		/* Build the look-up table at runtime */
-		galois_table = create_lookup_table(host->dev, sector_size);
-		if (!galois_table) {
-			dev_err(host->dev, "Failed to build a lookup table in runtime!\n");
-			err_no = -EINVAL;
-			goto err;
-		}
-
-		host->pmecc_rom_base = (void __iomem *)galois_table;
-		host->pmecc_lookup_table_offset = 0;
-	}
-
-	nand_chip->ecc.size = sector_size;
-
-	/* set ECC page size and oob layout */
-	switch (mtd->writesize) {
-	case 512:
-	case 1024:
-	case 2048:
-	case 4096:
-	case 8192:
-		if (sector_size > mtd->writesize) {
-			dev_err(host->dev, "pmecc sector size is bigger than the page size!\n");
-			err_no = -EINVAL;
-			goto err;
-		}
-
-		host->pmecc_degree = (sector_size == 512) ?
-			PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
-		host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
-		host->pmecc_alpha_to = pmecc_get_alpha_to(host);
-		host->pmecc_index_of = host->pmecc_rom_base +
-			host->pmecc_lookup_table_offset;
-
-		nand_chip->ecc.strength = cap;
-		nand_chip->ecc.bytes = pmecc_get_ecc_bytes(cap, sector_size);
-		nand_chip->ecc.steps = mtd->writesize / sector_size;
-		nand_chip->ecc.total = nand_chip->ecc.bytes *
-			nand_chip->ecc.steps;
-		if (nand_chip->ecc.total >
-				mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
-			dev_err(host->dev, "No room for ECC bytes\n");
-			err_no = -EINVAL;
-			goto err;
-		}
-
-		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-		break;
-	default:
-		dev_warn(host->dev,
-			"Unsupported page size for PMECC, use Software ECC\n");
-		/* page size not handled by HW ECC */
-		/* switching back to soft ECC */
-		nand_chip->ecc.mode = NAND_ECC_SOFT;
-		nand_chip->ecc.algo = NAND_ECC_HAMMING;
-		return 0;
-	}
-
-	/* Allocate data for PMECC computation */
-	err_no = pmecc_data_alloc(host);
-	if (err_no) {
-		dev_err(host->dev,
-				"Cannot allocate memory for PMECC computation!\n");
-		goto err;
-	}
-
-	nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
-	nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
-	nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
-
-	atmel_pmecc_core_init(mtd);
-
-	return 0;
-
-err:
-	return err_no;
-}
-
-/*
- * Calculate HW ECC
- *
- * function called after a write
- *
- * mtd:        MTD block structure
- * dat:        raw data (unused)
- * ecc_code:   buffer for ECC
- */
-static int atmel_nand_calculate(struct mtd_info *mtd,
-		const u_char *dat, unsigned char *ecc_code)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	unsigned int ecc_value;
-
-	/* get the first 2 ECC bytes */
-	ecc_value = ecc_readl(host->ecc, PR);
-
-	ecc_code[0] = ecc_value & 0xFF;
-	ecc_code[1] = (ecc_value >> 8) & 0xFF;
-
-	/* get the last 2 ECC bytes */
-	ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
-
-	ecc_code[2] = ecc_value & 0xFF;
-	ecc_code[3] = (ecc_value >> 8) & 0xFF;
-
-	return 0;
-}
-
-/*
- * HW ECC read page function
- *
- * mtd:        mtd info structure
- * chip:       nand chip info structure
- * buf:        buffer to store read data
- * oob_required:    caller expects OOB data read to chip->oob_poi
- */
-static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	int eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	uint8_t *p = buf;
-	uint8_t *oob = chip->oob_poi;
-	uint8_t *ecc_pos;
-	int stat;
-	unsigned int max_bitflips = 0;
-	struct mtd_oob_region oobregion = {};
-
-	/*
-	 * Errata: ALE is incorrectly wired up to the ECC controller
-	 * on the AP7000, so it will include the address cycles in the
-	 * ECC calculation.
-	 *
-	 * Workaround: Reset the parity registers before reading the
-	 * actual data.
-	 */
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	if (host->board.need_reset_workaround)
-		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
-
-	/* read the page */
-	chip->read_buf(mtd, p, eccsize);
-
-	/* move to ECC position if needed */
-	mtd_ooblayout_ecc(mtd, 0, &oobregion);
-	if (oobregion.offset != 0) {
-		/*
-		 * This only works on large pages because the ECC controller
-		 * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT.
-		 * Anyway, for small pages, the first ECC byte is at offset
-		 * 0 in the OOB area.
-		 */
-		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
-			      mtd->writesize + oobregion.offset, -1);
-	}
-
-	/* the ECC controller needs to read the ECC just after the data */
-	ecc_pos = oob + oobregion.offset;
-	chip->read_buf(mtd, ecc_pos, eccbytes);
-
-	/* check if there's an error */
-	stat = chip->ecc.correct(mtd, p, oob, NULL);
-
-	if (stat < 0) {
-		mtd->ecc_stats.failed++;
-	} else {
-		mtd->ecc_stats.corrected += stat;
-		max_bitflips = max_t(unsigned int, max_bitflips, stat);
-	}
-
-	/* get back to oob start (end of page) */
-	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
-
-	/* read the oob */
-	chip->read_buf(mtd, oob, mtd->oobsize);
-
-	return max_bitflips;
-}
-
-/*
- * HW ECC Correction
- *
- * function called after a read
- *
- * mtd:        MTD block structure
- * dat:        raw data read from the chip
- * read_ecc:   ECC from the chip (unused)
- * isnull:     unused
- *
- * Detect and correct a 1 bit error for a page
- */
-static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
-		u_char *read_ecc, u_char *isnull)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-	unsigned int ecc_status;
-	unsigned int ecc_word, ecc_bit;
-
-	/* get the status from the Status Register */
-	ecc_status = ecc_readl(host->ecc, SR);
-
-	/* if there's no error */
-	if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
-		return 0;
-
-	/* get error bit offset (4 bits) */
-	ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
-	/* get word address (12 bits) */
-	ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
-	ecc_word >>= 4;
-
-	/* if there are multiple errors */
-	if (ecc_status & ATMEL_ECC_MULERR) {
-		/* check if it is a freshly erased block
-		 * (filled with 0xff) */
-		if ((ecc_bit == ATMEL_ECC_BITADDR)
-				&& (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
-			/* the block has just been erased, return OK */
-			return 0;
-		}
-		/* it doesn't seem to be a freshly
-		 * erased block.
-		 * We can't correct so many errors */
-		dev_dbg(host->dev, "atmel_nand : multiple errors detected."
-				" Unable to correct.\n");
-		return -EBADMSG;
-	}
-
-	/* if there's a single bit error : we can correct it */
-	if (ecc_status & ATMEL_ECC_ECCERR) {
-		/* there's nothing much to do here.
-		 * the bit error is on the ECC itself.
-		 */
-		dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
-				" Nothing to correct\n");
-		return 0;
-	}
-
-	dev_dbg(host->dev, "atmel_nand : one bit error on data."
-			" (word offset in the page :"
-			" 0x%x bit offset : 0x%x)\n",
-			ecc_word, ecc_bit);
-	/* correct the error */
-	if (nand_chip->options & NAND_BUSWIDTH_16) {
-		/* 16 bits words */
-		((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
-	} else {
-		/* 8 bits words */
-		dat[ecc_word] ^= (1 << ecc_bit);
-	}
-	dev_dbg(host->dev, "atmel_nand : error corrected\n");
-	return 1;
-}
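-
-/*
- * Example (illustrative, assuming the 4-bit/12-bit field split described in
- * the comments above): if the parity register reads 0x235, the single bit
- * error is bit 5 of 16-bit word 0x23 on a 16-bit bus, or bit 5 of byte 0x23
- * on an 8-bit bus.
- */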
-
-/*
- * Enable HW ECC : unused on most chips
- */
-static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (host->board.need_reset_workaround)
-		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
-}
-
-static int atmel_of_init_ecc(struct atmel_nand_host *host,
-			     struct device_node *np)
-{
-	u32 offset[2];
-	u32 val;
-
-	host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
-
-	/* Not using PMECC */
-	if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc)
-		return 0;
-
-	/* use PMECC, get correction capability, sector size and lookup
-	 * table offset.
-	 * If correction bits and sector size are not specified, then find
-	 * them from NAND ONFI parameters.
-	 */
-	if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
-		if (val > host->caps->pmecc_max_correction) {
-			dev_err(host->dev,
-				"Required ECC strength too high: %u max %u\n",
-				val, host->caps->pmecc_max_correction);
-			return -EINVAL;
-		}
-		if ((val != 2)  && (val != 4)  && (val != 8) &&
-		    (val != 12) && (val != 24) && (val != 32)) {
-			dev_err(host->dev,
-				"Required ECC strength not supported: %u\n",
-				val);
-			return -EINVAL;
-		}
-		host->pmecc_corr_cap = (u8)val;
-	}
-
-	if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
-		if ((val != 512) && (val != 1024)) {
-			dev_err(host->dev,
-				"Required ECC sector size not supported: %u\n",
-				val);
-			return -EINVAL;
-		}
-		host->pmecc_sector_size = (u16)val;
-	}
-
-	if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
-			offset, 2) != 0) {
-		dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table in runtime.\n");
-		host->has_no_lookup_table = true;
-		/* Will build a lookup table and initialize the offset later */
-		return 0;
-	}
-
-	if (!offset[0] && !offset[1]) {
-		dev_err(host->dev, "Invalid PMECC lookup table offset\n");
-		return -EINVAL;
-	}
-
-	host->pmecc_lookup_table_offset_512 = offset[0];
-	host->pmecc_lookup_table_offset_1024 = offset[1];
-
-	return 0;
-}
-
-static int atmel_of_init_port(struct atmel_nand_host *host,
-			      struct device_node *np)
-{
-	u32 val;
-	struct atmel_nand_data *board = &host->board;
-	enum of_gpio_flags flags = 0;
-
-	host->caps = (struct atmel_nand_caps *)
-		of_device_get_match_data(host->dev);
-
-	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
-		if (val >= 32) {
-			dev_err(host->dev, "invalid addr-offset %u\n", val);
-			return -EINVAL;
-		}
-		board->ale = val;
-	}
-
-	if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
-		if (val >= 32) {
-			dev_err(host->dev, "invalid cmd-offset %u\n", val);
-			return -EINVAL;
-		}
-		board->cle = val;
-	}
-
-	board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
-
-	board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
-	board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
-
-	board->enable_pin = of_get_gpio(np, 1);
-	board->det_pin = of_get_gpio(np, 2);
-
-	/* load the NFC driver if there is one */
-	of_platform_populate(np, NULL, NULL, host->dev);
-
-	/*
-	 * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value
-	 * even if the nand-ecc-mode property is not defined.
-	 */
-	host->nand_chip.ecc.mode = NAND_ECC_SOFT;
-	host->nand_chip.ecc.algo = NAND_ECC_HAMMING;
-
-	return 0;
-}
-
-static int atmel_hw_nand_init_params(struct platform_device *pdev,
-					 struct atmel_nand_host *host)
-{
-	struct nand_chip *nand_chip = &host->nand_chip;
-	struct mtd_info *mtd = nand_to_mtd(nand_chip);
-	struct resource		*regs;
-
-	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!regs) {
-		dev_err(host->dev,
-			"Can't get I/O resource regs, use software ECC\n");
-		nand_chip->ecc.mode = NAND_ECC_SOFT;
-		nand_chip->ecc.algo = NAND_ECC_HAMMING;
-		return 0;
-	}
-
-	host->ecc = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(host->ecc))
-		return PTR_ERR(host->ecc);
-
-	/* ECC is calculated for the whole page (1 step) */
-	nand_chip->ecc.size = mtd->writesize;
-
-	/* set ECC page size and oob layout */
-	switch (mtd->writesize) {
-	case 512:
-		mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops);
-		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
-		break;
-	case 1024:
-		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
-		break;
-	case 2048:
-		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
-		break;
-	case 4096:
-		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
-		break;
-	default:
-		/* page size not handled by HW ECC */
-		/* switching back to soft ECC */
-		nand_chip->ecc.mode = NAND_ECC_SOFT;
-		nand_chip->ecc.algo = NAND_ECC_HAMMING;
-		return 0;
-	}
-
-	/* set up for HW ECC */
-	nand_chip->ecc.calculate = atmel_nand_calculate;
-	nand_chip->ecc.correct = atmel_nand_correct;
-	nand_chip->ecc.hwctl = atmel_nand_hwctl;
-	nand_chip->ecc.read_page = atmel_nand_read_page;
-	nand_chip->ecc.bytes = 4;
-	nand_chip->ecc.strength = 1;
-
-	return 0;
-}
-
-static inline u32 nfc_read_status(struct atmel_nand_host *host)
-{
-	u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE;
-	u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR);
-
-	if (unlikely(nfc_status & err_flags)) {
-		if (nfc_status & NFC_SR_DTOE)
-			dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n");
-		else if (nfc_status & NFC_SR_UNDEF)
-			dev_err(host->dev, "NFC: Access Undefined Area Error\n");
-		else if (nfc_status & NFC_SR_AWB)
-			dev_err(host->dev, "NFC: Access memory While NFC is busy\n");
-		else if (nfc_status & NFC_SR_ASE)
-			dev_err(host->dev, "NFC: Access memory Size Error\n");
-	}
-
-	return nfc_status;
-}
-
-/* SMC interrupt service routine */
-static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
-{
-	struct atmel_nand_host *host = dev_id;
-	u32 status, mask, pending;
-	irqreturn_t ret = IRQ_NONE;
-
-	status = nfc_read_status(host);
-	mask = nfc_readl(host->nfc->hsmc_regs, IMR);
-	pending = status & mask;
-
-	if (pending & NFC_SR_XFR_DONE) {
-		complete(&host->nfc->comp_xfer_done);
-		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
-		ret = IRQ_HANDLED;
-	}
-	if (pending & NFC_SR_RB_EDGE) {
-		complete(&host->nfc->comp_ready);
-		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
-		ret = IRQ_HANDLED;
-	}
-	if (pending & NFC_SR_CMD_DONE) {
-		complete(&host->nfc->comp_cmd_done);
-		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
-		ret = IRQ_HANDLED;
-	}
-
-	return ret;
-}
-
-/* NFC(Nand Flash Controller) related functions */
-static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
-{
-	if (flag & NFC_SR_XFR_DONE)
-		init_completion(&host->nfc->comp_xfer_done);
-
-	if (flag & NFC_SR_RB_EDGE)
-		init_completion(&host->nfc->comp_ready);
-
-	if (flag & NFC_SR_CMD_DONE)
-		init_completion(&host->nfc->comp_cmd_done);
-
-	/* Enable the interrupts that we need to wait for */
-	nfc_writel(host->nfc->hsmc_regs, IER, flag);
-}
-
-static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
-{
-	int i, index = 0;
-	struct completion *comp[3];	/* Supports up to 3 interrupt completions */
-
-	if (flag & NFC_SR_XFR_DONE)
-		comp[index++] = &host->nfc->comp_xfer_done;
-
-	if (flag & NFC_SR_RB_EDGE)
-		comp[index++] = &host->nfc->comp_ready;
-
-	if (flag & NFC_SR_CMD_DONE)
-		comp[index++] = &host->nfc->comp_cmd_done;
-
-	if (index == 0) {
-		dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
-		return -EINVAL;
-	}
-
-	for (i = 0; i < index; i++) {
-		if (wait_for_completion_timeout(comp[i],
-				msecs_to_jiffies(NFC_TIME_OUT_MS)))
-			continue;	/* wait for next completion */
-		else
-			goto err_timeout;
-	}
-
-	return 0;
-
-err_timeout:
-	dev_err(host->dev, "Time out to wait for interrupt: 0x%08x\n", flag);
-	/* Disable the interrupt as it is not handled by interrupt handler */
-	nfc_writel(host->nfc->hsmc_regs, IDR, flag);
-	return -ETIMEDOUT;
-}
-
-static int nfc_send_command(struct atmel_nand_host *host,
-	unsigned int cmd, unsigned int addr, unsigned char cycle0)
-{
-	unsigned long timeout;
-	u32 flag = NFC_SR_CMD_DONE;
-	flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0;
-
-	dev_dbg(host->dev,
-		"nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
-		cmd, addr, cycle0);
-
-	timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
-	while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) {
-		if (time_after(jiffies, timeout)) {
-			dev_err(host->dev,
-				"Time out to wait for NFC ready!\n");
-			return -ETIMEDOUT;
-		}
-	}
-
-	nfc_prepare_interrupt(host, flag);
-	nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
-	nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
-	return nfc_wait_interrupt(host, flag);
-}
-
-static int nfc_device_ready(struct mtd_info *mtd)
-{
-	u32 status, mask;
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	status = nfc_read_status(host);
-	mask = nfc_readl(host->nfc->hsmc_regs, IMR);
-
-	/* The mask should be 0. If not, we may have lost interrupts */
-	if (unlikely(mask & status))
-		dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
-				mask & status);
-
-	return status & NFC_SR_RB_EDGE;
-}
-
-static void nfc_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (chip == -1)
-		nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
-	else
-		nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
-}
-
-static int nfc_make_addr(struct mtd_info *mtd, int command, int column,
-		int page_addr, unsigned int *addr1234, unsigned int *cycle0)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	int acycle = 0;
-	unsigned char addr_bytes[8];
-	int index = 0, bit_shift;
-
-	BUG_ON(addr1234 == NULL || cycle0 == NULL);
-
-	*cycle0 = 0;
-	*addr1234 = 0;
-
-	if (column != -1) {
-		if (chip->options & NAND_BUSWIDTH_16 &&
-				!nand_opcode_8bits(command))
-			column >>= 1;
-		addr_bytes[acycle++] = column & 0xff;
-		if (mtd->writesize > 512)
-			addr_bytes[acycle++] = (column >> 8) & 0xff;
-	}
-
-	if (page_addr != -1) {
-		addr_bytes[acycle++] = page_addr & 0xff;
-		addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
-		if (chip->chipsize > (128 << 20))
-			addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
-	}
-
-	if (acycle > 4)
-		*cycle0 = addr_bytes[index++];
-
-	for (bit_shift = 0; index < acycle; bit_shift += 8)
-		*addr1234 += addr_bytes[index++] << bit_shift;
-
-	/* return acycle in cmd register */
-	return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
-}
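-
-/*
- * Worked example (illustrative, assuming an 8-bit bus): reading page 0x20100
- * at column 0x80 on a 2048-byte-page chip larger than 128MiB needs five
- * address bytes: 0x80, 0x00, 0x00, 0x01, 0x02. Since that is more than four
- * cycles, the first byte goes into *cycle0 and the remaining four are packed
- * little-endian into *addr1234, giving *cycle0 = 0x80 and
- * *addr1234 = 0x02010000.
- */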
-
-static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
-				int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	unsigned long timeout;
-	unsigned int nfc_addr_cmd = 0;
-
-	unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
-
-	/* Set default settings: no cmd2, no address cycles, read from NAND */
-	unsigned int cmd2 = 0;
-	unsigned int vcmd2 = 0;
-	int acycle = NFCADDR_CMD_ACYCLE_NONE;
-	int csid = NFCADDR_CMD_CSID_3;
-	int dataen = NFCADDR_CMD_DATADIS;
-	int nfcwr = NFCADDR_CMD_NFCRD;
-	unsigned int addr1234 = 0;
-	unsigned int cycle0 = 0;
-	bool do_addr = true;
-	host->nfc->data_in_sram = NULL;
-
-	dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
-	     __func__, command, column, page_addr);
-
-	switch (command) {
-	case NAND_CMD_RESET:
-		nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
-		nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
-		udelay(chip->chip_delay);
-
-		nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
-		timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
-		while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
-			if (time_after(jiffies, timeout)) {
-				dev_err(host->dev,
-					"Time out to wait status ready!\n");
-				break;
-			}
-		}
-		return;
-	case NAND_CMD_STATUS:
-		do_addr = false;
-		break;
-	case NAND_CMD_PARAM:
-	case NAND_CMD_READID:
-		do_addr = false;
-		acycle = NFCADDR_CMD_ACYCLE_1;
-		if (column != -1)
-			addr1234 = column;
-		break;
-	case NAND_CMD_RNDOUT:
-		cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
-		vcmd2 = NFCADDR_CMD_VCMD2;
-		break;
-	case NAND_CMD_READ0:
-	case NAND_CMD_READOOB:
-		if (command == NAND_CMD_READOOB) {
-			column += mtd->writesize;
-			command = NAND_CMD_READ0; /* only READ0 is valid */
-			cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
-		}
-		if (host->nfc->use_nfc_sram) {
-			/* Enable data transfer to the SRAM */
-			dataen = NFCADDR_CMD_DATAEN;
-
-			/* Need to enable PMECC now, since the NFC will transfer
-			 * data on the bus right after the NFC read command is sent.
-			 */
-			if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
-				pmecc_enable(host, NAND_ECC_READ);
-		}
-
-		cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
-		vcmd2 = NFCADDR_CMD_VCMD2;
-		break;
-	/* For programming commands, the command needs to be set to write enable */
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_RNDIN:
-		nfcwr = NFCADDR_CMD_NFCWR;
-		if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
-			dataen = NFCADDR_CMD_DATAEN;
-		break;
-	default:
-		break;
-	}
-
-	if (do_addr)
-		acycle = nfc_make_addr(mtd, command, column, page_addr,
-				&addr1234, &cycle0);
-
-	nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
-	nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
-
-	/*
-	 * Program and erase have their own busy handlers; status, sequential
-	 * in, and deplete1 need no delay.
-	 */
-	switch (command) {
-	case NAND_CMD_CACHEDPROG:
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_RNDIN:
-	case NAND_CMD_STATUS:
-	case NAND_CMD_RNDOUT:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_READID:
-		return;
-
-	case NAND_CMD_READ0:
-		if (dataen == NFCADDR_CMD_DATAEN) {
-			host->nfc->data_in_sram = host->nfc->sram_bank0 +
-				nfc_get_sram_off(host);
-			return;
-		}
-		/* fall through */
-	default:
-		nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
-		nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
-	}
-}
-
-static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			uint32_t offset, int data_len, const uint8_t *buf,
-			int oob_required, int page, int cached, int raw)
-{
-	int cfg, len;
-	int status = 0;
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
-
-	/* Subpage write is not supported */
-	if (offset || (data_len < mtd->writesize))
-		return -EINVAL;
-
-	len = mtd->writesize;
-	/* Copy the page data to the SRAM; it will be written to NAND via the NFC */
-	if (use_dma) {
-		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
-			/* Fall back to a CPU copy */
-			memcpy(sram, buf, len);
-	} else {
-		memcpy(sram, buf, len);
-	}
-
-	cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
-	if (unlikely(raw) && oob_required) {
-		memcpy(sram + len, chip->oob_poi, mtd->oobsize);
-		len += mtd->oobsize;
-		nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
-	} else {
-		nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
-	}
-
-	if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
-		/*
-		 * When using the NFC SRAM, PMECC must be set up before the
-		 * NAND_CMD_SEQIN command is sent, since the NFC transfers data
-		 * between the SRAM and the NAND as soon as the command is sent.
-		 */
-		pmecc_enable(host, NAND_ECC_WRITE);
-
-	host->nfc->will_write_sram = true;
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-	host->nfc->will_write_sram = false;
-
-	if (likely(!raw))
-		/* Need to write the ECC into the OOB area */
-		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
-					      page);
-
-	if (status < 0)
-		return status;
-
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-	status = chip->waitfunc(mtd, chip);
-
-	if ((status & NAND_STATUS_FAIL) && (chip->errstat))
-		status = chip->errstat(mtd, chip, FL_WRITING, status, page);
-
-	if (status & NAND_STATUS_FAIL)
-		return -EIO;
-
-	return 0;
-}
-
-static int nfc_sram_init(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct atmel_nand_host *host = nand_get_controller_data(chip);
-	int res = 0;
-
-	/* Initialize the NFC CFG register */
-	unsigned int cfg_nfc = 0;
-
-	/* set page size and oob layout */
-	switch (mtd->writesize) {
-	case 512:
-		cfg_nfc = NFC_CFG_PAGESIZE_512;
-		break;
-	case 1024:
-		cfg_nfc = NFC_CFG_PAGESIZE_1024;
-		break;
-	case 2048:
-		cfg_nfc = NFC_CFG_PAGESIZE_2048;
-		break;
-	case 4096:
-		cfg_nfc = NFC_CFG_PAGESIZE_4096;
-		break;
-	case 8192:
-		cfg_nfc = NFC_CFG_PAGESIZE_8192;
-		break;
-	default:
-		dev_err(host->dev, "Unsupported page size for NFC.\n");
-		res = -ENXIO;
-		return res;
-	}
-
-	/* OOB size = (NFCSPARESIZE + 1) * 4.
-	 * The maximum supported spare size is 512 bytes. */
-	cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
-		& NFC_CFG_NFC_SPARESIZE);
-	/* set the maximum timeout by default */
-	cfg_nfc |= NFC_CFG_RSPARE |
-			NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
-
-	nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
-
-	host->nfc->will_write_sram = false;
-	nfc_set_sram_bank(host, 0);
-
-	/* Use Write page with NFC SRAM only for PMECC or ECC NONE. */
-	if (host->nfc->write_by_sram) {
-		if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
-				chip->ecc.mode == NAND_ECC_NONE)
-			chip->write_page = nfc_sram_write_page;
-		else
-			host->nfc->write_by_sram = false;
-	}
-
-	dev_info(host->dev, "Using NFC Sram read %s\n",
-			host->nfc->write_by_sram ? "and write" : "");
-	return 0;
-}
-
-static struct platform_driver atmel_nand_nfc_driver;
-/*
- * Probe for the NAND device.
- */
-static int atmel_nand_probe(struct platform_device *pdev)
-{
-	struct atmel_nand_host *host;
-	struct mtd_info *mtd;
-	struct nand_chip *nand_chip;
-	struct resource *mem;
-	int res, irq;
-
-	/* Allocate memory for the device structure (and zero it) */
-	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-
-	res = platform_driver_register(&atmel_nand_nfc_driver);
-	if (res)
-		dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");
-
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	host->io_base = devm_ioremap_resource(&pdev->dev, mem);
-	if (IS_ERR(host->io_base)) {
-		res = PTR_ERR(host->io_base);
-		goto err_nand_ioremap;
-	}
-	host->io_phys = (dma_addr_t)mem->start;
-
-	nand_chip = &host->nand_chip;
-	mtd = nand_to_mtd(nand_chip);
-	host->dev = &pdev->dev;
-	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
-		nand_set_flash_node(nand_chip, pdev->dev.of_node);
-		/* of_node can only be parsed when CONFIG_OF is enabled */
-		res = atmel_of_init_port(host, pdev->dev.of_node);
-		if (res)
-			goto err_nand_ioremap;
-	} else {
-		memcpy(&host->board, dev_get_platdata(&pdev->dev),
-		       sizeof(struct atmel_nand_data));
-		nand_chip->ecc.mode = host->board.ecc_mode;
-
-		/*
-		 * When using software ECC every supported avr32 board means
-		 * Hamming algorithm. If that ever changes we'll need to add
-		 * ecc_algo field to the struct atmel_nand_data.
-		 */
-		if (nand_chip->ecc.mode == NAND_ECC_SOFT)
-			nand_chip->ecc.algo = NAND_ECC_HAMMING;
-
-		/* 16-bit bus width */
-		if (host->board.bus_width_16)
-			nand_chip->options |= NAND_BUSWIDTH_16;
-	}
-
-	 /* link the private data structures */
-	nand_set_controller_data(nand_chip, host);
-	mtd->dev.parent = &pdev->dev;
-
-	/* Set address of NAND IO lines */
-	nand_chip->IO_ADDR_R = host->io_base;
-	nand_chip->IO_ADDR_W = host->io_base;
-
-	if (nand_nfc.is_initialized) {
-		/* NFC driver is probed and initialized */
-		host->nfc = &nand_nfc;
-
-		nand_chip->select_chip = nfc_select_chip;
-		nand_chip->dev_ready = nfc_device_ready;
-		nand_chip->cmdfunc = nfc_nand_command;
-
-		/* Initialize the interrupt for NFC */
-		irq = platform_get_irq(pdev, 0);
-		if (irq < 0) {
-			dev_err(host->dev, "Cannot get HSMC irq!\n");
-			res = irq;
-			goto err_nand_ioremap;
-		}
-
-		res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
-				0, "hsmc", host);
-		if (res) {
-			dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
-				irq);
-			goto err_nand_ioremap;
-		}
-	} else {
-		res = atmel_nand_set_enable_ready_pins(mtd);
-		if (res)
-			goto err_nand_ioremap;
-
-		nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
-	}
-
-	nand_chip->chip_delay = 40;		/* 40us command delay time */
-
-
-	nand_chip->read_buf = atmel_read_buf;
-	nand_chip->write_buf = atmel_write_buf;
-
-	platform_set_drvdata(pdev, host);
-	atmel_nand_enable(host);
-
-	if (gpio_is_valid(host->board.det_pin)) {
-		res = devm_gpio_request(&pdev->dev,
-				host->board.det_pin, "nand_det");
-		if (res < 0) {
-			dev_err(&pdev->dev,
-				"can't request det gpio %d\n",
-				host->board.det_pin);
-			goto err_no_card;
-		}
-
-		res = gpio_direction_input(host->board.det_pin);
-		if (res < 0) {
-			dev_err(&pdev->dev,
-				"can't request input direction det gpio %d\n",
-				host->board.det_pin);
-			goto err_no_card;
-		}
-
-		if (gpio_get_value(host->board.det_pin)) {
-			dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
-			res = -ENXIO;
-			goto err_no_card;
-		}
-	}
-
-	if (!host->board.has_dma)
-		use_dma = 0;
-
-	if (use_dma) {
-		dma_cap_mask_t mask;
-
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_MEMCPY, mask);
-		host->dma_chan = dma_request_channel(mask, NULL, NULL);
-		if (!host->dma_chan) {
-			dev_err(host->dev, "Failed to request DMA channel\n");
-			use_dma = 0;
-		}
-	}
-	if (use_dma)
-		dev_info(host->dev, "Using %s for DMA transfers.\n",
-					dma_chan_name(host->dma_chan));
-	else
-		dev_info(host->dev, "No DMA support for NAND access.\n");
-
-	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
-		goto err_scan_ident;
-	}
-
-	if (host->board.on_flash_bbt || on_flash_bbt)
-		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
-
-	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
-		dev_info(&pdev->dev, "Use On Flash BBT\n");
-
-	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
-		res = atmel_of_init_ecc(host, pdev->dev.of_node);
-		if (res)
-			goto err_hw_ecc;
-	}
-
-	if (nand_chip->ecc.mode == NAND_ECC_HW) {
-		if (host->has_pmecc)
-			res = atmel_pmecc_nand_init_params(pdev, host);
-		else
-			res = atmel_hw_nand_init_params(pdev, host);
-
-		if (res != 0)
-			goto err_hw_ecc;
-	}
-
-	/* initialize the nfc configuration register */
-	if (host->nfc && host->nfc->use_nfc_sram) {
-		res = nfc_sram_init(mtd);
-		if (res) {
-			host->nfc->use_nfc_sram = false;
-			dev_err(host->dev, "Failed to init the NFC SRAM, disabling it for data transfer.\n");
-		}
-	}
-
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
-		goto err_scan_tail;
-	}
-
-	mtd->name = "atmel_nand";
-	res = mtd_device_register(mtd, host->board.parts,
-				  host->board.num_parts);
-	if (!res)
-		return res;
-
-err_scan_tail:
-	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
-		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-err_hw_ecc:
-err_scan_ident:
-err_no_card:
-	atmel_nand_disable(host);
-	if (host->dma_chan)
-		dma_release_channel(host->dma_chan);
-err_nand_ioremap:
-	return res;
-}
-
-/*
- * Remove a NAND device.
- */
-static int atmel_nand_remove(struct platform_device *pdev)
-{
-	struct atmel_nand_host *host = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-
-	nand_release(mtd);
-
-	atmel_nand_disable(host);
-
-	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
-		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
-		pmerrloc_writel(host->pmerrloc_base, ELDIS,
-				PMERRLOC_DISABLE);
-	}
-
-	if (host->dma_chan)
-		dma_release_channel(host->dma_chan);
-
-	platform_driver_unregister(&atmel_nand_nfc_driver);
-
-	return 0;
-}
-
-/*
- * AT91RM9200 does not have the PMECC or PMECC Errloc peripherals used
- * for BCH ECC. Combined with the "atmel,has-pmecc" property, this entry
- * also describes devices from the SAM9 family that do have them.
- */
-static const struct atmel_nand_caps at91rm9200_caps = {
-	.pmecc_correct_erase_page = false,
-	.pmecc_max_correction = 24,
-};
-
-static const struct atmel_nand_caps sama5d4_caps = {
-	.pmecc_correct_erase_page = true,
-	.pmecc_max_correction = 24,
-};
-
-/*
- * The PMECC Errloc controller starting in SAMA5D2 is not compatible,
- * as the increased correction strength requires more registers.
- */
-static const struct atmel_nand_caps sama5d2_caps = {
-	.pmecc_correct_erase_page = true,
-	.pmecc_max_correction = 32,
-};
-
-static const struct of_device_id atmel_nand_dt_ids[] = {
-	{ .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
-	{ .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
-	{ .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps },
-	{ /* sentinel */ }
-};
-
-MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
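The caps structures above travel as OF match data on the compatible strings. A minimal sketch of how a probe path could pull them back out (illustrative only: the helper name atmel_nand_get_caps is made up, and it assumes <linux/of_device.h> is included):

	static const struct atmel_nand_caps *
	atmel_nand_get_caps(struct platform_device *pdev)
	{
		const struct of_device_id *match;

		/* Match against the table defined above and return its .data */
		match = of_match_device(atmel_nand_dt_ids, &pdev->dev);
		return match ? match->data : NULL;
	}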
-
-static int atmel_nand_nfc_probe(struct platform_device *pdev)
-{
-	struct atmel_nfc *nfc = &nand_nfc;
-	struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
-	int ret;
-
-	nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
-	if (IS_ERR(nfc->base_cmd_regs))
-		return PTR_ERR(nfc->base_cmd_regs);
-
-	nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
-	if (IS_ERR(nfc->hsmc_regs))
-		return PTR_ERR(nfc->hsmc_regs);
-
-	nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-	if (nfc_sram) {
-		nfc->sram_bank0 = (void * __force)
-				devm_ioremap_resource(&pdev->dev, nfc_sram);
-		if (IS_ERR(nfc->sram_bank0)) {
-			dev_warn(&pdev->dev, "Failed to ioremap the NFC SRAM (error %ld), disabling NFC SRAM.\n",
-					PTR_ERR(nfc->sram_bank0));
-		} else {
-			nfc->use_nfc_sram = true;
-			nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;
-
-			if (pdev->dev.of_node)
-				nfc->write_by_sram = of_property_read_bool(
-						pdev->dev.of_node,
-						"atmel,write-by-sram");
-		}
-	}
-
-	nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
-	nfc_readl(nfc->hsmc_regs, SR);	/* clear the NFC_SR */
-
-	nfc->clk = devm_clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(nfc->clk)) {
-		ret = clk_prepare_enable(nfc->clk);
-		if (ret)
-			return ret;
-	} else {
-		dev_warn(&pdev->dev, "NFC clock missing, update your Device Tree");
-	}
-
-	nfc->is_initialized = true;
-	dev_info(&pdev->dev, "NFC is probed.\n");
-
-	return 0;
-}
-
-static int atmel_nand_nfc_remove(struct platform_device *pdev)
-{
-	struct atmel_nfc *nfc = &nand_nfc;
-
-	if (!IS_ERR(nfc->clk))
-		clk_disable_unprepare(nfc->clk);
-
-	return 0;
-}
-
-static const struct of_device_id atmel_nand_nfc_match[] = {
-	{ .compatible = "atmel,sama5d3-nfc" },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
-
-static struct platform_driver atmel_nand_nfc_driver = {
-	.driver = {
-		.name = "atmel_nand_nfc",
-		.of_match_table = of_match_ptr(atmel_nand_nfc_match),
-	},
-	.probe = atmel_nand_nfc_probe,
-	.remove = atmel_nand_nfc_remove,
-};
-
-static struct platform_driver atmel_nand_driver = {
-	.probe		= atmel_nand_probe,
-	.remove		= atmel_nand_remove,
-	.driver		= {
-		.name	= "atmel_nand",
-		.of_match_table	= of_match_ptr(atmel_nand_dt_ids),
-	},
-};
-
-module_platform_driver(atmel_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rick Bronson");
-MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
-MODULE_ALIAS("platform:atmel_nand");
diff --git a/drivers/mtd/nand/atmel_nand_ecc.h b/drivers/mtd/nand/atmel_nand_ecc.h
deleted file mode 100644
index 834d694487bd..000000000000
--- a/drivers/mtd/nand/atmel_nand_ecc.h
+++ /dev/null
@@ -1,163 +0,0 @@ 
-/*
- * Error Corrected Code Controller (ECC) - System peripherals registers.
- * Based on AT91SAM9260 datasheet revision B.
- *
- * Copyright (C) 2007 Andrew Victor
- * Copyright (C) 2007 - 2012 Atmel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef ATMEL_NAND_ECC_H
-#define ATMEL_NAND_ECC_H
-
-#define ATMEL_ECC_CR		0x00			/* Control register */
-#define		ATMEL_ECC_RST		(1 << 0)		/* Reset parity */
-
-#define ATMEL_ECC_MR		0x04			/* Mode register */
-#define		ATMEL_ECC_PAGESIZE	(3 << 0)		/* Page Size */
-#define			ATMEL_ECC_PAGESIZE_528		(0)
-#define			ATMEL_ECC_PAGESIZE_1056		(1)
-#define			ATMEL_ECC_PAGESIZE_2112		(2)
-#define			ATMEL_ECC_PAGESIZE_4224		(3)
-
-#define ATMEL_ECC_SR		0x08			/* Status register */
-#define		ATMEL_ECC_RECERR		(1 << 0)		/* Recoverable Error */
-#define		ATMEL_ECC_ECCERR		(1 << 1)		/* ECC Single Bit Error */
-#define		ATMEL_ECC_MULERR		(1 << 2)		/* Multiple Errors */
-
-#define ATMEL_ECC_PR		0x0c			/* Parity register */
-#define		ATMEL_ECC_BITADDR	(0xf << 0)		/* Bit Error Address */
-#define		ATMEL_ECC_WORDADDR	(0xfff << 4)		/* Word Error Address */
-
-#define ATMEL_ECC_NPR		0x10			/* NParity register */
-#define		ATMEL_ECC_NPARITY	(0xffff << 0)		/* NParity */
-
-/* PMECC Register Definitions */
-#define ATMEL_PMECC_CFG			0x000	/* Configuration Register */
-#define		PMECC_CFG_BCH_ERR2		(0 << 0)
-#define		PMECC_CFG_BCH_ERR4		(1 << 0)
-#define		PMECC_CFG_BCH_ERR8		(2 << 0)
-#define		PMECC_CFG_BCH_ERR12		(3 << 0)
-#define		PMECC_CFG_BCH_ERR24		(4 << 0)
-#define		PMECC_CFG_BCH_ERR32		(5 << 0)
-
-#define		PMECC_CFG_SECTOR512		(0 << 4)
-#define		PMECC_CFG_SECTOR1024		(1 << 4)
-
-#define		PMECC_CFG_PAGE_1SECTOR		(0 << 8)
-#define		PMECC_CFG_PAGE_2SECTORS		(1 << 8)
-#define		PMECC_CFG_PAGE_4SECTORS		(2 << 8)
-#define		PMECC_CFG_PAGE_8SECTORS		(3 << 8)
-
-#define		PMECC_CFG_READ_OP		(0 << 12)
-#define		PMECC_CFG_WRITE_OP		(1 << 12)
-
-#define		PMECC_CFG_SPARE_ENABLE		(1 << 16)
-#define		PMECC_CFG_SPARE_DISABLE		(0 << 16)
-
-#define		PMECC_CFG_AUTO_ENABLE		(1 << 20)
-#define		PMECC_CFG_AUTO_DISABLE		(0 << 20)
-
-#define ATMEL_PMECC_SAREA		0x004	/* Spare area size */
-#define ATMEL_PMECC_SADDR		0x008	/* PMECC starting address */
-#define ATMEL_PMECC_EADDR		0x00c	/* PMECC ending address */
-#define ATMEL_PMECC_CLK			0x010	/* PMECC clock control */
-#define		PMECC_CLK_133MHZ		(2 << 0)
-
-#define ATMEL_PMECC_CTRL		0x014	/* PMECC control register */
-#define		PMECC_CTRL_RST			(1 << 0)
-#define		PMECC_CTRL_DATA			(1 << 1)
-#define		PMECC_CTRL_USER			(1 << 2)
-#define		PMECC_CTRL_ENABLE		(1 << 4)
-#define		PMECC_CTRL_DISABLE		(1 << 5)
-
-#define ATMEL_PMECC_SR			0x018	/* PMECC status register */
-#define		PMECC_SR_BUSY			(1 << 0)
-#define		PMECC_SR_ENABLE			(1 << 4)
-
-#define ATMEL_PMECC_IER			0x01c	/* PMECC interrupt enable */
-#define		PMECC_IER_ENABLE		(1 << 0)
-#define ATMEL_PMECC_IDR			0x020	/* PMECC interrupt disable */
-#define		PMECC_IER_DISABLE		(1 << 0)
-#define ATMEL_PMECC_IMR			0x024	/* PMECC interrupt mask */
-#define		PMECC_IER_MASK			(1 << 0)
-#define ATMEL_PMECC_ISR			0x028	/* PMECC interrupt status */
-#define ATMEL_PMECC_ECCx		0x040	/* PMECC ECC x */
-#define ATMEL_PMECC_REMx		0x240	/* PMECC REM x */
-
-/* PMERRLOC Register Definitions */
-#define ATMEL_PMERRLOC_ELCFG		0x000	/* Error location config */
-#define		PMERRLOC_ELCFG_SECTOR_512	(0 << 0)
-#define		PMERRLOC_ELCFG_SECTOR_1024	(1 << 0)
-#define		PMERRLOC_ELCFG_NUM_ERRORS(n)	((n) << 16)
-
-#define ATMEL_PMERRLOC_ELPRIM		0x004	/* Error location primitive */
-#define ATMEL_PMERRLOC_ELEN		0x008	/* Error location enable */
-#define ATMEL_PMERRLOC_ELDIS		0x00c	/* Error location disable */
-#define		PMERRLOC_DISABLE		(1 << 0)
-
-#define ATMEL_PMERRLOC_ELSR		0x010	/* Error location status */
-#define		PMERRLOC_ELSR_BUSY		(1 << 0)
-#define ATMEL_PMERRLOC_ELIER		0x014	/* Error location int enable */
-#define ATMEL_PMERRLOC_ELIDR		0x018	/* Error location int disable */
-#define ATMEL_PMERRLOC_ELIMR		0x01c	/* Error location int mask */
-#define ATMEL_PMERRLOC_ELISR		0x020	/* Error location int status */
-#define		PMERRLOC_ERR_NUM_MASK		(0x1f << 8)
-#define		PMERRLOC_CALC_DONE		(1 << 0)
-#define ATMEL_PMERRLOC_SIGMAx		0x028	/* Error location SIGMA x */
-
-/*
- * The ATMEL_PMERRLOC_ELx register location depends from the number of
- * bits corrected by the PMECC controller. Do not use it.
- */
-
-/* Register access macros for PMECC */
-#define pmecc_readl_relaxed(addr, reg) \
-	readl_relaxed((addr) + ATMEL_PMECC_##reg)
-
-#define pmecc_writel(addr, reg, value) \
-	writel((value), (addr) + ATMEL_PMECC_##reg)
-
-#define pmecc_readb_ecc_relaxed(addr, sector, n) \
-	readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
-
-#define pmecc_readl_rem_relaxed(addr, sector, n) \
-	readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
-
-#define pmerrloc_readl_relaxed(addr, reg) \
-	readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
-
-#define pmerrloc_writel(addr, reg, value) \
-	writel((value), (addr) + ATMEL_PMERRLOC_##reg)
-
-#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
-	writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
-
-#define pmerrloc_readl_sigma_relaxed(addr, n) \
-	readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
-
-#define pmerrloc_readl_el_relaxed(addr, n) \
-	readl_relaxed((addr) + ((n) * 4))
-
-/* Galois field dimension */
-#define PMECC_GF_DIMENSION_13			13
-#define PMECC_GF_DIMENSION_14			14
-
-/* Primitive Polynomial used by PMECC */
-#define PMECC_GF_13_PRIMITIVE_POLY		0x201b
-#define PMECC_GF_14_PRIMITIVE_POLY		0x4443
-
-#define PMECC_LOOKUP_TABLE_SIZE_512		0x2000
-#define PMECC_LOOKUP_TABLE_SIZE_1024		0x4000
-
-/* Time out value for reading PMECC status register */
-#define PMECC_MAX_TIMEOUT_MS			100
-
-/* Reserved bytes in oob area */
-#define PMECC_OOB_RESERVED_BYTES		2
-
-#endif
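For readers tracing the accessor macros above: the register name is token-pasted onto the block prefix, so the PMECC disable write used in the driver's error paths expands as sketched here (illustration only, with host->ecc standing for the ioremapped PMECC base):

	/* pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE) expands to: */
	writel(PMECC_CTRL_DISABLE, host->ecc + ATMEL_PMECC_CTRL);
	/* i.e. a 32-bit write of (1 << 5) to offset 0x014 of the PMECC block */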
diff --git a/drivers/mtd/nand/atmel_nand_nfc.h b/drivers/mtd/nand/atmel_nand_nfc.h
deleted file mode 100644
index 4d5d26221a7e..000000000000
--- a/drivers/mtd/nand/atmel_nand_nfc.h
+++ /dev/null
@@ -1,103 +0,0 @@ 
-/*
- * Atmel Nand Flash Controller (NFC) - System peripherals registers.
- * Based on SAMA5D3 datasheet.
- *
- * © Copyright 2013 Atmel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef ATMEL_NAND_NFC_H
-#define ATMEL_NAND_NFC_H
-
-/*
- * HSMC NFC registers
- */
-#define ATMEL_HSMC_NFC_CFG	0x00		/* NFC Configuration Register */
-#define		NFC_CFG_PAGESIZE	(7 << 0)
-#define			NFC_CFG_PAGESIZE_512	(0 << 0)
-#define			NFC_CFG_PAGESIZE_1024	(1 << 0)
-#define			NFC_CFG_PAGESIZE_2048	(2 << 0)
-#define			NFC_CFG_PAGESIZE_4096	(3 << 0)
-#define			NFC_CFG_PAGESIZE_8192	(4 << 0)
-#define		NFC_CFG_WSPARE		(1 << 8)
-#define		NFC_CFG_RSPARE		(1 << 9)
-#define		NFC_CFG_NFC_DTOCYC	(0xf << 16)
-#define		NFC_CFG_NFC_DTOMUL	(0x7 << 20)
-#define		NFC_CFG_NFC_SPARESIZE	(0x7f << 24)
-#define		NFC_CFG_NFC_SPARESIZE_BIT_POS	24
-
-#define ATMEL_HSMC_NFC_CTRL	0x04		/* NFC Control Register */
-#define		NFC_CTRL_ENABLE		(1 << 0)
-#define		NFC_CTRL_DISABLE	(1 << 1)
-
-#define ATMEL_HSMC_NFC_SR	0x08		/* NFC Status Register */
-#define		NFC_SR_BUSY		(1 << 8)
-#define		NFC_SR_XFR_DONE		(1 << 16)
-#define		NFC_SR_CMD_DONE		(1 << 17)
-#define		NFC_SR_DTOE		(1 << 20)
-#define		NFC_SR_UNDEF		(1 << 21)
-#define		NFC_SR_AWB		(1 << 22)
-#define		NFC_SR_ASE		(1 << 23)
-#define		NFC_SR_RB_EDGE		(1 << 24)
-
-#define ATMEL_HSMC_NFC_IER	0x0c
-#define ATMEL_HSMC_NFC_IDR	0x10
-#define ATMEL_HSMC_NFC_IMR	0x14
-#define ATMEL_HSMC_NFC_CYCLE0	0x18		/* NFC Address Cycle Zero */
-#define		ATMEL_HSMC_NFC_ADDR_CYCLE0	(0xff)
-
-#define ATMEL_HSMC_NFC_BANK	0x1c		/* NFC Bank Register */
-#define		ATMEL_HSMC_NFC_BANK0		(0 << 0)
-#define		ATMEL_HSMC_NFC_BANK1		(1 << 0)
-
-#define nfc_writel(addr, reg, value) \
-	writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
-
-#define nfc_readl(addr, reg) \
-	readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
-
-/*
- * NFC Address Command definitions
- */
-#define NFCADDR_CMD_CMD1	(0xff << 2)	/* Command for Cycle 1 */
-#define NFCADDR_CMD_CMD1_BIT_POS	2
-#define NFCADDR_CMD_CMD2	(0xff << 10)	/* Command for Cycle 2 */
-#define NFCADDR_CMD_CMD2_BIT_POS	10
-#define NFCADDR_CMD_VCMD2	(0x1 << 18)	/* Valid Cycle 2 Command */
-#define NFCADDR_CMD_ACYCLE	(0x7 << 19)	/* Number of Address required */
-#define		NFCADDR_CMD_ACYCLE_NONE		(0x0 << 19)
-#define		NFCADDR_CMD_ACYCLE_1		(0x1 << 19)
-#define		NFCADDR_CMD_ACYCLE_2		(0x2 << 19)
-#define		NFCADDR_CMD_ACYCLE_3		(0x3 << 19)
-#define		NFCADDR_CMD_ACYCLE_4		(0x4 << 19)
-#define		NFCADDR_CMD_ACYCLE_5		(0x5 << 19)
-#define NFCADDR_CMD_ACYCLE_BIT_POS	19
-#define NFCADDR_CMD_CSID	(0x7 << 22)	/* Chip Select Identifier */
-#define		NFCADDR_CMD_CSID_0		(0x0 << 22)
-#define		NFCADDR_CMD_CSID_1		(0x1 << 22)
-#define		NFCADDR_CMD_CSID_2		(0x2 << 22)
-#define		NFCADDR_CMD_CSID_3		(0x3 << 22)
-#define		NFCADDR_CMD_CSID_4		(0x4 << 22)
-#define		NFCADDR_CMD_CSID_5		(0x5 << 22)
-#define		NFCADDR_CMD_CSID_6		(0x6 << 22)
-#define		NFCADDR_CMD_CSID_7		(0x7 << 22)
-#define NFCADDR_CMD_DATAEN	(0x1 << 25)	/* Data Transfer Enable */
-#define NFCADDR_CMD_DATADIS	(0x0 << 25)	/* Data Transfer Disable */
-#define NFCADDR_CMD_NFCRD	(0x0 << 26)	/* NFC Read Enable */
-#define NFCADDR_CMD_NFCWR	(0x1 << 26)	/* NFC Write Enable */
-#define NFCADDR_CMD_NFCBUSY	(0x1 << 27)	/* NFC Busy */
-
-#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
-	writel((addr1234), (cmd) + nfc_base)
-
-#define nfc_cmd_readl(bitstatus, nfc_base) \
-	readl_relaxed((bitstatus) + nfc_base)
-
-#define NFC_TIME_OUT_MS		100
-#define	NFC_SRAM_BANK1_OFFSET	0x1200
-
-#endif
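As a worked illustration of the NFCADDR_CMD_* fields, this is roughly how a READ0/READSTART command word for chip select 0, five address cycles and data transfer enabled is assembled (a sketch based on the definitions above, not a line taken from the moved code; NAND_CMD_READ0 is 0x00 and NAND_CMD_READSTART is 0x30 in <linux/mtd/rawnand.h>):

	u32 nfc_addr_cmd = NFCADDR_CMD_NFCRD | NFCADDR_CMD_DATAEN |
			   NFCADDR_CMD_CSID_0 | NFCADDR_CMD_ACYCLE_5 |
			   NFCADDR_CMD_VCMD2 |
			   (NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS) |
			   (NAND_CMD_READ0 << NFCADDR_CMD_CMD1_BIT_POS);
	/* The resulting word is used as the offset added to the NFC command
	 * base when the address cycles are written, cf. nfc_cmd_addr1234_writel(). */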
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
deleted file mode 100644
index 9d4a28fa6b73..000000000000
--- a/drivers/mtd/nand/au1550nd.c
+++ /dev/null
@@ -1,518 +0,0 @@ 
-/*
- *  drivers/mtd/nand/au1550nd.c
- *
- *  Copyright (C) 2004 Embedded Edge, LLC
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/platform_device.h>
-#include <asm/io.h>
-#include <asm/mach-au1x00/au1000.h>
-#include <asm/mach-au1x00/au1550nd.h>
-
-
-struct au1550nd_ctx {
-	struct nand_chip chip;
-
-	int cs;
-	void __iomem *base;
-	void (*write_byte)(struct mtd_info *, u_char);
-};
-
-/**
- * au_read_byte -  read one byte from the chip
- * @mtd:	MTD device structure
- *
- * read function for 8bit buswidth
- */
-static u_char au_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	u_char ret = readb(this->IO_ADDR_R);
-	wmb(); /* drain writebuffer */
-	return ret;
-}
-
-/**
- * au_write_byte -  write one byte to the chip
- * @mtd:	MTD device structure
- * @byte:	data byte to write
- *
- * write function for 8bit buswidth
- */
-static void au_write_byte(struct mtd_info *mtd, u_char byte)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	writeb(byte, this->IO_ADDR_W);
-	wmb(); /* drain writebuffer */
-}
-
-/**
- * au_read_byte16 -  read one byte endianness aware from the chip
- * @mtd:	MTD device structure
- *
- * read function for 16bit buswidth with endianness conversion
- */
-static u_char au_read_byte16(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
-	wmb(); /* drain writebuffer */
-	return ret;
-}
-
-/**
- * au_write_byte16 -  write one byte endianness aware to the chip
- * @mtd:	MTD device structure
- * @byte:	data byte to write
- *
- * write function for 16bit buswidth with endianness conversion
- */
-static void au_write_byte16(struct mtd_info *mtd, u_char byte)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
-	wmb(); /* drain writebuffer */
-}
-
-/**
- * au_read_word -  read one word from the chip
- * @mtd:	MTD device structure
- *
- * read function for 16bit buswidth without endianness conversion
- */
-static u16 au_read_word(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	u16 ret = readw(this->IO_ADDR_R);
-	wmb(); /* drain writebuffer */
-	return ret;
-}
-
-/**
- * au_write_buf -  write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
- *
- * write function for 8bit buswidth
- */
-static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	for (i = 0; i < len; i++) {
-		writeb(buf[i], this->IO_ADDR_W);
-		wmb(); /* drain writebuffer */
-	}
-}
-
-/**
- * au_read_buf -  read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store data
- * @len:	number of bytes to read
- *
- * read function for 8bit buswidth
- */
-static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	for (i = 0; i < len; i++) {
-		buf[i] = readb(this->IO_ADDR_R);
-		wmb(); /* drain writebuffer */
-	}
-}
-
-/**
- * au_write_buf16 -  write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
- *
- * write function for 16bit buswidth
- */
-static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	u16 *p = (u16 *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++) {
-		writew(p[i], this->IO_ADDR_W);
-		wmb(); /* drain writebuffer */
-	}
-
-}
-
-/**
- * au_read_buf16 -  read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store data
- * @len:	number of bytes to read
- *
- * read function for 16bit buswidth
- */
-static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	u16 *p = (u16 *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++) {
-		p[i] = readw(this->IO_ADDR_R);
-		wmb(); /* drain writebuffer */
-	}
-}
-
-/* Select the chip by setting nCE to low */
-#define NAND_CTL_SETNCE		1
-/* Deselect the chip by setting nCE to high */
-#define NAND_CTL_CLRNCE		2
-/* Select the command latch by setting CLE to high */
-#define NAND_CTL_SETCLE		3
-/* Deselect the command latch by setting CLE to low */
-#define NAND_CTL_CLRCLE		4
-/* Select the address latch by setting ALE to high */
-#define NAND_CTL_SETALE		5
-/* Deselect the address latch by setting ALE to low */
-#define NAND_CTL_CLRALE		6
-
-static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
-						chip);
-
-	switch (cmd) {
-
-	case NAND_CTL_SETCLE:
-		this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
-		break;
-
-	case NAND_CTL_CLRCLE:
-		this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
-		break;
-
-	case NAND_CTL_SETALE:
-		this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
-		break;
-
-	case NAND_CTL_CLRALE:
-		this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
-		/* FIXME: Nobody knows why this is necessary,
-		 * but it works only that way */
-		udelay(1);
-		break;
-
-	case NAND_CTL_SETNCE:
-		/* assert (force assert) chip enable */
-		alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
-		break;
-
-	case NAND_CTL_CLRNCE:
-		/* deassert chip enable */
-		alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
-		break;
-	}
-
-	this->IO_ADDR_R = this->IO_ADDR_W;
-
-	wmb(); /* Drain the writebuffer */
-}
-
-int au1550_device_ready(struct mtd_info *mtd)
-{
-	return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
-}
-
-/**
- * au1550_select_chip - control -CE line
- *	Forbid driving -CE manually permitting the NAND controller to do this.
- *	Keeping -CE asserted during the whole sector reads interferes with the
- *	NOR flash and PCMCIA drivers as it causes contention on the static bus.
- *	We only have to hold -CE low for the NAND read commands since the flash
- *	chip needs it to be asserted during chip not ready time but the NAND
- *	controller keeps it released.
- *
- * @mtd:	MTD device structure
- * @chip:	chipnumber to select, -1 for deselect
- */
-static void au1550_select_chip(struct mtd_info *mtd, int chip)
-{
-}
-
-/**
- * au1550_command - Send command to NAND device
- * @mtd:	MTD device structure
- * @command:	the command to be sent
- * @column:	the column address for this command, -1 if none
- * @page_addr:	the page address for this command, -1 if none
- */
-static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
-						chip);
-	int ce_override = 0, i;
-	unsigned long flags = 0;
-
-	/* Begin command latch cycle */
-	au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
-	/*
-	 * Write out the command to the device.
-	 */
-	if (command == NAND_CMD_SEQIN) {
-		int readcmd;
-
-		if (column >= mtd->writesize) {
-			/* OOB area */
-			column -= mtd->writesize;
-			readcmd = NAND_CMD_READOOB;
-		} else if (column < 256) {
-			/* First 256 bytes --> READ0 */
-			readcmd = NAND_CMD_READ0;
-		} else {
-			column -= 256;
-			readcmd = NAND_CMD_READ1;
-		}
-		ctx->write_byte(mtd, readcmd);
-	}
-	ctx->write_byte(mtd, command);
-
-	/* Set ALE and clear CLE to start address cycle */
-	au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
-
-	if (column != -1 || page_addr != -1) {
-		au1550_hwcontrol(mtd, NAND_CTL_SETALE);
-
-		/* Serially input address */
-		if (column != -1) {
-			/* Adjust columns for 16 bit buswidth */
-			if (this->options & NAND_BUSWIDTH_16 &&
-					!nand_opcode_8bits(command))
-				column >>= 1;
-			ctx->write_byte(mtd, column);
-		}
-		if (page_addr != -1) {
-			ctx->write_byte(mtd, (u8)(page_addr & 0xff));
-
-			if (command == NAND_CMD_READ0 ||
-			    command == NAND_CMD_READ1 ||
-			    command == NAND_CMD_READOOB) {
-				/*
-				 * NAND controller will release -CE after
-				 * the last address byte is written, so we'll
-				 * have to forcibly assert it. No interrupts
-				 * are allowed while we do this as we don't
-				 * want the NOR flash or PCMCIA drivers to
-				 * steal our precious bytes of data...
-				 */
-				ce_override = 1;
-				local_irq_save(flags);
-				au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
-			}
-
-			ctx->write_byte(mtd, (u8)(page_addr >> 8));
-
-			/* One more address cycle for devices > 32MiB */
-			if (this->chipsize > (32 << 20))
-				ctx->write_byte(mtd,
-						((page_addr >> 16) & 0x0f));
-		}
-		/* Latch in address */
-		au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
-	}
-
-	/*
-	 * Program and erase have their own busy handlers.
-	 * Status and sequential in need no delay.
-	 */
-	switch (command) {
-
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		break;
-
-	case NAND_CMD_READ0:
-	case NAND_CMD_READ1:
-	case NAND_CMD_READOOB:
-		/* Check if we're really driving -CE low (just in case) */
-		if (unlikely(!ce_override))
-			break;
-
-		/* Apply a short delay always to ensure that we do wait tWB. */
-		ndelay(100);
-		/* Wait for a chip to become ready... */
-		for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
-			udelay(1);
-
-		/* Release -CE and re-enable interrupts. */
-		au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
-		local_irq_restore(flags);
-		return;
-	}
-	/* Apply this short delay always to ensure that we do wait tWB. */
-	ndelay(100);
-
-	while(!this->dev_ready(mtd));
-}
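To make the address phase above concrete, here is a small standalone sketch (plain user-space C, example values only) of how a column and page address break down into the byte cycles passed to ctx->write_byte() on a chip larger than 32MiB:

	#include <stdio.h>

	int main(void)
	{
		unsigned int column = 0x56, page_addr = 0x12345;

		printf("column byte: 0x%02x\n", column & 0xff);            /* 0x56 */
		printf("page byte 0: 0x%02x\n", page_addr & 0xff);         /* 0x45 */
		printf("page byte 1: 0x%02x\n", (page_addr >> 8) & 0xff);  /* 0x23 */
		printf("page byte 2: 0x%02x\n", (page_addr >> 16) & 0x0f); /* 0x01 */
		return 0;
	}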
-
-static int find_nand_cs(unsigned long nand_base)
-{
-	void __iomem *base =
-			(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
-	unsigned long addr, staddr, start, mask, end;
-	int i;
-
-	for (i = 0; i < 4; i++) {
-		addr = 0x1000 + (i * 0x10);			/* CSx */
-		staddr = __raw_readl(base + addr + 0x08);	/* STADDRx */
-		/* figure out the decoded range of this CS */
-		start = (staddr << 4) & 0xfffc0000;
-		mask = (staddr << 18) & 0xfffc0000;
-		end = (start | (start - 1)) & ~(start ^ mask);
-		if ((nand_base >= start) && (nand_base < end))
-			return i;
-	}
-
-	return -ENODEV;
-}
-
-static int au1550nd_probe(struct platform_device *pdev)
-{
-	struct au1550nd_platdata *pd;
-	struct au1550nd_ctx *ctx;
-	struct nand_chip *this;
-	struct mtd_info *mtd;
-	struct resource *r;
-	int ret, cs;
-
-	pd = dev_get_platdata(&pdev->dev);
-	if (!pd) {
-		dev_err(&pdev->dev, "missing platform data\n");
-		return -ENODEV;
-	}
-
-	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
-	if (!ctx)
-		return -ENOMEM;
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r) {
-		dev_err(&pdev->dev, "no NAND memory resource\n");
-		ret = -ENODEV;
-		goto out1;
-	}
-	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
-		dev_err(&pdev->dev, "cannot claim NAND memory area\n");
-		ret = -ENOMEM;
-		goto out1;
-	}
-
-	ctx->base = ioremap_nocache(r->start, 0x1000);
-	if (!ctx->base) {
-		dev_err(&pdev->dev, "cannot remap NAND memory area\n");
-		ret = -ENODEV;
-		goto out2;
-	}
-
-	this = &ctx->chip;
-	mtd = nand_to_mtd(this);
-	mtd->dev.parent = &pdev->dev;
-
-	/* figure out which CS# r->start belongs to */
-	cs = find_nand_cs(r->start);
-	if (cs < 0) {
-		dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
-		ret = -ENODEV;
-		goto out3;
-	}
-	ctx->cs = cs;
-
-	this->dev_ready = au1550_device_ready;
-	this->select_chip = au1550_select_chip;
-	this->cmdfunc = au1550_command;
-
-	/* 30 us command delay time */
-	this->chip_delay = 30;
-	this->ecc.mode = NAND_ECC_SOFT;
-	this->ecc.algo = NAND_ECC_HAMMING;
-
-	if (pd->devwidth)
-		this->options |= NAND_BUSWIDTH_16;
-
-	this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
-	ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
-	this->read_word = au_read_word;
-	this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
-	this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
-
-	ret = nand_scan(mtd, 1);
-	if (ret) {
-		dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
-		goto out3;
-	}
-
-	mtd_device_register(mtd, pd->parts, pd->num_parts);
-
-	platform_set_drvdata(pdev, ctx);
-
-	return 0;
-
-out3:
-	iounmap(ctx->base);
-out2:
-	release_mem_region(r->start, resource_size(r));
-out1:
-	kfree(ctx);
-	return ret;
-}
-
-static int au1550nd_remove(struct platform_device *pdev)
-{
-	struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
-	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-	nand_release(nand_to_mtd(&ctx->chip));
-	iounmap(ctx->base);
-	release_mem_region(r->start, 0x1000);
-	kfree(ctx);
-	return 0;
-}
-
-static struct platform_driver au1550nd_driver = {
-	.driver = {
-		.name	= "au1550-nand",
-	},
-	.probe		= au1550nd_probe,
-	.remove		= au1550nd_remove,
-};
-
-module_platform_driver(au1550nd_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Embedded Edge, LLC");
-MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/bcm47xxnflash/Makefile b/drivers/mtd/nand/bcm47xxnflash/Makefile
deleted file mode 100644
index f05b119e134b..000000000000
--- a/drivers/mtd/nand/bcm47xxnflash/Makefile
+++ /dev/null
@@ -1,4 +0,0 @@ 
-bcm47xxnflash-y				+= main.o
-bcm47xxnflash-y				+= ops_bcm4706.o
-
-obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)	+= bcm47xxnflash.o
diff --git a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
deleted file mode 100644
index c8834767ab6d..000000000000
--- a/drivers/mtd/nand/bcm47xxnflash/bcm47xxnflash.h
+++ /dev/null
@@ -1,25 +0,0 @@ 
-#ifndef __BCM47XXNFLASH_H
-#define __BCM47XXNFLASH_H
-
-#ifndef pr_fmt
-#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
-#endif
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-
-struct bcm47xxnflash {
-	struct bcma_drv_cc *cc;
-
-	struct nand_chip nand_chip;
-
-	unsigned curr_command;
-	int curr_page_addr;
-	int curr_column;
-
-	u8 id_data[8];
-};
-
-int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
-
-#endif /* __BCM47XXNFLASH_H */
diff --git a/drivers/mtd/nand/bcm47xxnflash/main.c b/drivers/mtd/nand/bcm47xxnflash/main.c
deleted file mode 100644
index fb31429b70a9..000000000000
--- a/drivers/mtd/nand/bcm47xxnflash/main.c
+++ /dev/null
@@ -1,81 +0,0 @@ 
-/*
- * BCM47XX NAND flash driver
- *
- * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include "bcm47xxnflash.h"
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/bcma/bcma.h>
-
-MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rafał Miłecki");
-
-static const char *probes[] = { "bcm47xxpart", NULL };
-
-static int bcm47xxnflash_probe(struct platform_device *pdev)
-{
-	struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
-	struct bcm47xxnflash *b47n;
-	struct mtd_info *mtd;
-	int err = 0;
-
-	b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
-	if (!b47n)
-		return -ENOMEM;
-
-	nand_set_controller_data(&b47n->nand_chip, b47n);
-	mtd = nand_to_mtd(&b47n->nand_chip);
-	mtd->dev.parent = &pdev->dev;
-	b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
-
-	if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
-		err = bcm47xxnflash_ops_bcm4706_init(b47n);
-	} else {
-		pr_err("Device not supported\n");
-		err = -ENOTSUPP;
-	}
-	if (err) {
-		pr_err("Initialization failed: %d\n", err);
-		return err;
-	}
-
-	platform_set_drvdata(pdev, b47n);
-
-	err = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
-	if (err) {
-		pr_err("Failed to register MTD device: %d\n", err);
-		return err;
-	}
-
-	return 0;
-}
-
-static int bcm47xxnflash_remove(struct platform_device *pdev)
-{
-	struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
-
-	nand_release(nand_to_mtd(&nflash->nand_chip));
-
-	return 0;
-}
-
-static struct platform_driver bcm47xxnflash_driver = {
-	.probe	= bcm47xxnflash_probe,
-	.remove = bcm47xxnflash_remove,
-	.driver = {
-		.name = "bcma_nflash",
-	},
-};
-
-module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
deleted file mode 100644
index f1da4ea88f2c..000000000000
--- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c
+++ /dev/null
@@ -1,454 +0,0 @@ 
-/*
- * BCM47XX NAND flash driver
- *
- * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include "bcm47xxnflash.h"
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/bcma/bcma.h>
-
-/* Broadcom uses 1'000'000 but it seems to be too many. Tests on a WNDR4500
- * have shown ~1000 retries as the maximum. */
-#define NFLASH_READY_RETRIES		10000
-
-#define NFLASH_SECTOR_SIZE		512
-
-#define NCTL_CMD0			0x00010000
-#define NCTL_COL			0x00020000	/* Update column with value from BCMA_CC_NFLASH_COL_ADDR */
-#define NCTL_ROW			0x00040000	/* Update row (page) with value from BCMA_CC_NFLASH_ROW_ADDR */
-#define NCTL_CMD1W			0x00080000
-#define NCTL_READ			0x00100000
-#define NCTL_WRITE			0x00200000
-#define NCTL_SPECADDR			0x01000000
-#define NCTL_READY			0x04000000
-#define NCTL_ERR			0x08000000
-#define NCTL_CSA			0x40000000
-#define NCTL_START			0x80000000
-
-/**************************************************
- * Various helpers
- **************************************************/
-
-static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
-{
-	return ((ns * 1000 * clock) / 1000000) + 1;
-}
-
-static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
-{
-	int i = 0;
-
-	bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
-	for (i = 0; i < NFLASH_READY_RETRIES; i++) {
-		if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
-			i = 0;
-			break;
-		}
-	}
-	if (i) {
-		pr_err("NFLASH control command not ready!\n");
-		return -EBUSY;
-	}
-	return 0;
-}
-
-static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
-{
-	int i;
-
-	for (i = 0; i < NFLASH_READY_RETRIES; i++) {
-		if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
-			if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
-			    BCMA_CC_NFLASH_CTL_ERR) {
-				pr_err("Error on polling\n");
-				return -EBUSY;
-			} else {
-				return 0;
-			}
-		}
-	}
-
-	pr_err("Polling timeout!\n");
-	return -EBUSY;
-}
-
-/**************************************************
- * R/W
- **************************************************/
-
-static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
-					   int len)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-
-	u32 ctlcode;
-	u32 *dest = (u32 *)buf;
-	int i;
-	int toread;
-
-	BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
-	/* Don't validate column using nand_chip->page_shift, it may be bigger
-	 * when accessing OOB */
-
-	while (len) {
-		/* We can read maximum of 0x200 bytes at once */
-		toread = min(len, 0x200);
-
-		/* Set page and column */
-		bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
-				b47n->curr_column);
-		bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
-				b47n->curr_page_addr);
-
-		/* Prepare to read */
-		ctlcode = NCTL_CSA | NCTL_CMD1W | NCTL_ROW | NCTL_COL |
-			  NCTL_CMD0;
-		ctlcode |= NAND_CMD_READSTART << 8;
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
-			return;
-		if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
-			return;
-
-		/* Eventually read some data :) */
-		for (i = 0; i < toread; i += 4, dest++) {
-			ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
-			if (i == toread - 4) /* Last read goes without that */
-				ctlcode &= ~NCTL_CSA;
-			if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
-							      ctlcode))
-				return;
-			*dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
-		}
-
-		b47n->curr_column += toread;
-		len -= toread;
-	}
-}
-
-static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
-					    const uint8_t *buf, int len)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-	struct bcma_drv_cc *cc = b47n->cc;
-
-	u32 ctlcode;
-	const u32 *data = (u32 *)buf;
-	int i;
-
-	BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
-	/* Don't validate column using nand_chip->page_shift, it may be bigger
-	 * when accessing OOB */
-
-	for (i = 0; i < len; i += 4, data++) {
-		bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
-
-		ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
-		if (i == len - 4) /* Last write goes without that */
-			ctlcode &= ~NCTL_CSA;
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
-			pr_err("%s ctl_cmd didn't work!\n", __func__);
-			return;
-		}
-	}
-
-	b47n->curr_column += len;
-}
-
-/**************************************************
- * NAND chip ops
- **************************************************/
-
-static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct mtd_info *mtd, int cmd,
-					       unsigned int ctrl)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-	u32 code = 0;
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (cmd & NAND_CTRL_CLE)
-		code = cmd | NCTL_CMD0;
-
-	/* nCS is not needed for reset command */
-	if (cmd != NAND_CMD_RESET)
-		code |= NCTL_CSA;
-
-	bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
-}
-
-/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
-static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
-						  int chip)
-{
-	return;
-}
-
-static int bcm47xxnflash_ops_bcm4706_dev_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-
-	return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
-}
-
-/*
- * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
- * For example, reading chip id is performed in a non-standard way.
- * Setting column and page is also handled differently: we use special
- * registers of the ChipCommon core. Hacking cmd_ctrl to understand and convert
- * standard commands would be much more complicated.
- */
-static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
-					      unsigned command, int column,
-					      int page_addr)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-	struct bcma_drv_cc *cc = b47n->cc;
-	u32 ctlcode;
-	int i;
-
-	if (column != -1)
-		b47n->curr_column = column;
-	if (page_addr != -1)
-		b47n->curr_page_addr = page_addr;
-
-	switch (command) {
-	case NAND_CMD_RESET:
-		nand_chip->cmd_ctrl(mtd, command, NAND_CTRL_CLE);
-
-		ndelay(100);
-		nand_wait_ready(mtd);
-		break;
-	case NAND_CMD_READID:
-		ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
-		ctlcode |= NAND_CMD_READID;
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
-			pr_err("READID error\n");
-			break;
-		}
-
-		/*
-		 * Reading is specific: the last read has to go without the
-		 * NCTL_CSA bit. We don't know how many reads the NAND
-		 * subsystem is going to perform, so cache everything.
-		 */
-		for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
-			ctlcode = NCTL_CSA | NCTL_READ;
-			if (i == ARRAY_SIZE(b47n->id_data) - 1)
-				ctlcode &= ~NCTL_CSA;
-			if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
-							      ctlcode)) {
-				pr_err("READID error\n");
-				break;
-			}
-			b47n->id_data[i] =
-				bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
-				& 0xFF;
-		}
-
-		break;
-	case NAND_CMD_STATUS:
-		ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
-			pr_err("STATUS command error\n");
-		break;
-	case NAND_CMD_READ0:
-		break;
-	case NAND_CMD_READOOB:
-		if (page_addr != -1)
-			b47n->curr_column += mtd->writesize;
-		break;
-	case NAND_CMD_ERASE1:
-		bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
-				b47n->curr_page_addr);
-		ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
-			  NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
-			pr_err("ERASE1 failed\n");
-		break;
-	case NAND_CMD_ERASE2:
-		break;
-	case NAND_CMD_SEQIN:
-		/* Set page and column */
-		bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
-				b47n->curr_column);
-		bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
-				b47n->curr_page_addr);
-
-		/* Prepare to write */
-		ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
-		ctlcode |= NAND_CMD_SEQIN;
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
-			pr_err("SEQIN failed\n");
-		break;
-	case NAND_CMD_PAGEPROG:
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
-							  NAND_CMD_PAGEPROG))
-			pr_err("PAGEPROG failed\n");
-		if (bcm47xxnflash_ops_bcm4706_poll(cc))
-			pr_err("PAGEPROG not ready\n");
-		break;
-	default:
-		pr_err("Command 0x%X unsupported\n", command);
-		break;
-	}
-	b47n->curr_command = command;
-}
-
-static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-	struct bcma_drv_cc *cc = b47n->cc;
-	u32 tmp = 0;
-
-	switch (b47n->curr_command) {
-	case NAND_CMD_READID:
-		if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
-			pr_err("Requested invalid id_data: %d\n",
-			       b47n->curr_column);
-			return 0;
-		}
-		return b47n->id_data[b47n->curr_column++];
-	case NAND_CMD_STATUS:
-		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
-			return 0;
-		return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
-	case NAND_CMD_READOOB:
-		bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
-		return tmp & 0xFF;
-	}
-
-	pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
-	return 0;
-}
-
-static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
-					       uint8_t *buf, int len)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-
-	switch (b47n->curr_command) {
-	case NAND_CMD_READ0:
-	case NAND_CMD_READOOB:
-		bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
-		return;
-	}
-
-	pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
-}
-
-static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
-						const uint8_t *buf, int len)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
-
-	switch (b47n->curr_command) {
-	case NAND_CMD_SEQIN:
-		bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
-		return;
-	}
-
-	pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
-}
-
-/**************************************************
- * Init
- **************************************************/
-
-int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
-{
-	struct nand_chip *nand_chip = (struct nand_chip *)&b47n->nand_chip;
-	int err;
-	u32 freq;
-	u16 clock;
-	u8 w0, w1, w2, w3, w4;
-
-	unsigned long chipsize; /* MiB */
-	u8 tbits, col_bits, col_size, row_bits, row_bsize;
-	u32 val;
-
-	b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
-	nand_chip->cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
-	nand_chip->dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
-	b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
-	b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
-	b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
-	b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
-
-	nand_chip->chip_delay = 50;
-	b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
-	b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
-
-	/* Enable NAND flash access */
-	bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
-		      BCMA_CC_4706_FLASHSCFG_NF1);
-
-	/* Configure wait counters */
-	if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
-		/* 400 MHz */
-		freq = 400000000 / 4;
-	} else {
-		freq = bcma_chipco_pll_read(b47n->cc, 4);
-		freq = (freq & 0xFFF) >> 3;
-		/* Fixed reference clock 25 MHz and m = 2 */
-		freq = (freq * 25000000 / 2) / 4;
-	}
-	clock = freq / 1000000;
-	w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
-	w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
-	w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
-	w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
-	w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
-	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
-			(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
-
-	/* Scan NAND */
-	err = nand_scan(nand_to_mtd(&b47n->nand_chip), 1);
-	if (err) {
-		pr_err("Could not scan NAND flash: %d\n", err);
-		goto exit;
-	}
-
-	/* Configure FLASH */
-	chipsize = b47n->nand_chip.chipsize >> 20;
-	tbits = ffs(chipsize); /* find first bit set */
-	if (!tbits || tbits != fls(chipsize)) {
-		pr_err("Invalid flash size: 0x%lX\n", chipsize);
-		err = -ENOTSUPP;
-		goto exit;
-	}
-	tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
-
-	col_bits = b47n->nand_chip.page_shift + 1;
-	col_size = (col_bits + 7) / 8;
-
-	row_bits = tbits - col_bits + 1;
-	row_bsize = (row_bits + 7) / 8;
-
-	val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
-	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
-
-exit:
-	if (err)
-		bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
-			       ~BCMA_CC_4706_FLASHSCFG_NF1);
-	return err;
-}
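The wait-counter setup in the init function boils down to a ns-to-cycle conversion plus bit packing; a standalone sketch assuming a 100 MHz controller clock (the clock value is an example, not taken from real hardware):

	#include <stdio.h>
	#include <stdint.h>

	/* Same conversion as bcm47xxnflash_ops_bcm4706_ns_to_cycle() */
	static uint8_t ns_to_cycle(uint16_t ns, uint16_t clock_mhz)
	{
		return ((ns * 1000 * clock_mhz) / 1000000) + 1;
	}

	int main(void)
	{
		uint16_t clock = 100; /* MHz, example */
		uint8_t w0 = ns_to_cycle(15, clock);   /* 2 cycles */
		uint8_t w1 = ns_to_cycle(20, clock);   /* 3 cycles */
		uint8_t w2 = ns_to_cycle(10, clock);   /* 2 cycles */
		uint8_t w3 = ns_to_cycle(10, clock);   /* 2 cycles */
		uint8_t w4 = ns_to_cycle(100, clock);  /* 11 cycles */
		uint32_t waitcnt = w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0;

		printf("WAITCNT0 = 0x%08x\n", (unsigned int)waitcnt);
		return 0;
	}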
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
deleted file mode 100644
index 5655dca6ce43..000000000000
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ /dev/null
@@ -1,860 +0,0 @@ 
-/* linux/drivers/mtd/nand/bf5xx_nand.c
- *
- * Copyright 2006-2008 Analog Devices Inc.
- *	http://blackfin.uclinux.org/
- *	Bryan Wu <bryan.wu@analog.com>
- *
- * Blackfin BF5xx on-chip NAND flash controller driver
- *
- * Derived from drivers/mtd/nand/s3c2410.c
- * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk>
- *
- * Derived from drivers/mtd/nand/cafe.c
- * Copyright © 2006 Red Hat, Inc.
- * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
- *
- * Changelog:
- *	12-Jun-2007  Bryan Wu:  Initial version
- *	18-Jul-2007  Bryan Wu:
- *		- ECC_HW and ECC_SW supported
- *		- DMA supported in ECC_HW
- *		- YAFFS tested as rootfs in both ECC_HW and ECC_SW
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/blackfin.h>
-#include <asm/dma.h>
-#include <asm/cacheflush.h>
-#include <asm/nand.h>
-#include <asm/portmux.h>
-
-#define DRV_NAME	"bf5xx-nand"
-#define DRV_VERSION	"1.2"
-#define DRV_AUTHOR	"Bryan Wu <bryan.wu@analog.com>"
-#define DRV_DESC	"BF5xx on-chip NAND Flash Controller Driver"
-
-/* NFC_STAT Masks */
-#define NBUSY       0x01  /* Not Busy */
-#define WB_FULL     0x02  /* Write Buffer Full */
-#define PG_WR_STAT  0x04  /* Page Write Pending */
-#define PG_RD_STAT  0x08  /* Page Read Pending */
-#define WB_EMPTY    0x10  /* Write Buffer Empty */
-
-/* NFC_IRQSTAT Masks */
-#define NBUSYIRQ    0x01  /* Not Busy IRQ */
-#define WB_OVF      0x02  /* Write Buffer Overflow */
-#define WB_EDGE     0x04  /* Write Buffer Edge Detect */
-#define RD_RDY      0x08  /* Read Data Ready */
-#define WR_DONE     0x10  /* Page Write Done */
-
-/* NFC_RST Masks */
-#define ECC_RST     0x01  /* ECC (and NFC counters) Reset */
-
-/* NFC_PGCTL Masks */
-#define PG_RD_START 0x01  /* Page Read Start */
-#define PG_WR_START 0x02  /* Page Write Start */
-
-#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
-static int hardware_ecc = 1;
-#else
-static int hardware_ecc;
-#endif
-
-static const unsigned short bfin_nfc_pin_req[] =
-	{P_NAND_CE,
-	 P_NAND_RB,
-	 P_NAND_D0,
-	 P_NAND_D1,
-	 P_NAND_D2,
-	 P_NAND_D3,
-	 P_NAND_D4,
-	 P_NAND_D5,
-	 P_NAND_D6,
-	 P_NAND_D7,
-	 P_NAND_WE,
-	 P_NAND_RE,
-	 P_NAND_CLE,
-	 P_NAND_ALE,
-	 0};
-
-#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-static int bootrom_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	if (section > 7)
-		return -ERANGE;
-
-	oobregion->offset = section * 8;
-	oobregion->length = 3;
-
-	return 0;
-}
-
-static int bootrom_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	if (section > 7)
-		return -ERANGE;
-
-	oobregion->offset = (section * 8) + 3;
-	oobregion->length = 5;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops bootrom_ooblayout_ops = {
-	.ecc = bootrom_ooblayout_ecc,
-	.free = bootrom_ooblayout_free,
-};
-#endif
-
-/*
- * Data structures for bf5xx nand flash controller driver
- */
-
-/* bf5xx nand info */
-struct bf5xx_nand_info {
-	/* mtd info */
-	struct nand_hw_control		controller;
-	struct nand_chip		chip;
-
-	/* platform info */
-	struct bf5xx_nand_platform	*platform;
-
-	/* device info */
-	struct device			*device;
-
-	/* DMA stuff */
-	struct completion		dma_completion;
-};
-
-/*
- * Conversion functions
- */
-static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct bf5xx_nand_info,
-			    chip);
-}
-
-static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
-{
-	return platform_get_drvdata(pdev);
-}
-
-static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
-{
-	return dev_get_platdata(&pdev->dev);
-}
-
-/*
- * struct nand_chip interface function pointers
- */
-
-/*
- * bf5xx_nand_hwcontrol
- *
- * Issue command and address cycles to the chip
- */
-static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
-				   unsigned int ctrl)
-{
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	while (bfin_read_NFC_STAT() & WB_FULL)
-		cpu_relax();
-
-	if (ctrl & NAND_CLE)
-		bfin_write_NFC_CMD(cmd);
-	else if (ctrl & NAND_ALE)
-		bfin_write_NFC_ADDR(cmd);
-	SSYNC();
-}
-
-/*
- * bf5xx_nand_devready()
- *
- * returns 0 if the nand is busy, 1 if it is ready
- */
-static int bf5xx_nand_devready(struct mtd_info *mtd)
-{
-	unsigned short val = bfin_read_NFC_STAT();
-
-	if ((val & NBUSY) == NBUSY)
-		return 1;
-	else
-		return 0;
-}
-
-/*
- * ECC functions
- * These allow the bf5xx to use the controller's ECC
- * generator block to ECC the data as it passes through
- */
-
-/*
- * ECC error correction function
- */
-static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat,
-					u_char *read_ecc, u_char *calc_ecc)
-{
-	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
-	u32 syndrome[5];
-	u32 calced, stored;
-	int i;
-	unsigned short failing_bit, failing_byte;
-	u_char data;
-
-	calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16);
-	stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16);
-
-	syndrome[0] = (calced ^ stored);
-
-	/*
-	 * syndrome 0: all zero
-	 * No error in data
-	 * No action
-	 */
-	if (!syndrome[0] || !calced || !stored)
-		return 0;
-
-	/*
-	 * syndrome 0: only one bit is one
-	 * ECC data was incorrect
-	 * No action
-	 */
-	if (hweight32(syndrome[0]) == 1) {
-		dev_err(info->device, "ECC data was incorrect!\n");
-		return -EBADMSG;
-	}
-
-	syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF);
-	syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF);
-	syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF);
-	syndrome[4] = syndrome[2] ^ syndrome[3];
-
-	for (i = 0; i < 5; i++)
-		dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]);
-
-	dev_info(info->device,
-		"calced[0x%08x], stored[0x%08x]\n",
-		calced, stored);
-
-	/*
-	 * syndrome 0: exactly 11 bits are one, each parity
-	 * and parity' pair is 1 & 0 or 0 & 1.
-	 * 1-bit correctable error
-	 * Correct the error
-	 */
-	if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) {
-		dev_info(info->device,
-			"1-bit correctable error, correct it.\n");
-		dev_info(info->device,
-			"syndrome[1] 0x%08x\n", syndrome[1]);
-
-		failing_bit = syndrome[1] & 0x7;
-		failing_byte = syndrome[1] >> 0x3;
-		data = *(dat + failing_byte);
-		data = data ^ (0x1 << failing_bit);
-		*(dat + failing_byte) = data;
-
-		return 1;
-	}
-
-	/*
-	 * syndrome 0: random data
-	 * More than 1-bit error, non-correctable error
-	 * Discard data, mark bad block
-	 */
-	dev_err(info->device,
-		"More than 1-bit error, non-correctable error.\n");
-	dev_err(info->device,
-		"Please discard data, mark bad block\n");
-
-	return -EBADMSG;
-}
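For the 1-bit-correctable branch above: syndrome[1] packs the failing bit in its low three bits and the failing byte offset in the remaining bits. A standalone sketch with a made-up syndrome value:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t syndrome1 = 0x2b;                   /* example value */
		unsigned int failing_bit = syndrome1 & 0x7;  /* 3 */
		unsigned int failing_byte = syndrome1 >> 3;  /* 5 */
		uint8_t data = 0xf0;                         /* byte read back */

		data ^= 1 << failing_bit;                    /* flip the bad bit */
		printf("byte %u corrected to 0x%02x\n", failing_byte, data);
		return 0;
	}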
-
-static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
-					u_char *read_ecc, u_char *calc_ecc)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret, bitflips = 0;
-
-	ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
-	if (ret < 0)
-		return ret;
-
-	bitflips = ret;
-
-	/* If ecc size is 512, correct second 256 bytes */
-	if (chip->ecc.size == 512) {
-		dat += 256;
-		read_ecc += 3;
-		calc_ecc += 3;
-		ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
-		if (ret < 0)
-			return ret;
-
-		bitflips += ret;
-	}
-
-	return bitflips;
-}
-
-static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	return;
-}
-
-static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
-		const u_char *dat, u_char *ecc_code)
-{
-	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	u16 ecc0, ecc1;
-	u32 code[2];
-	u8 *p;
-
-	/* first 3 bytes ECC code for 256 page size */
-	ecc0 = bfin_read_NFC_ECC0();
-	ecc1 = bfin_read_NFC_ECC1();
-
-	code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
-
-	dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
-
-	p = (u8 *) code;
-	memcpy(ecc_code, p, 3);
-
-	/* second 3 bytes ECC code for 512 ecc size */
-	if (chip->ecc.size == 512) {
-		ecc0 = bfin_read_NFC_ECC2();
-		ecc1 = bfin_read_NFC_ECC3();
-		code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
-
-		/* second 3 bytes in ecc_code for second 256
-		 * bytes of 512 page size
-		 */
-		p = (u8 *) (code + 1);
-		memcpy((ecc_code + 3), p, 3);
-		dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
-	}
-
-	return 0;
-}
-
-/*
- * PIO mode for buffer writing and reading
- */
-static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-	unsigned short val;
-
-	/*
-	 * Data reads are requested by first writing to NFC_DATA_RD
-	 * and then reading back from NFC_READ.
-	 */
-	for (i = 0; i < len; i++) {
-		while (bfin_read_NFC_STAT() & WB_FULL)
-			cpu_relax();
-
-		/* Contents do not matter */
-		bfin_write_NFC_DATA_RD(0x0000);
-		SSYNC();
-
-		while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY)
-			cpu_relax();
-
-		buf[i] = bfin_read_NFC_READ();
-
-		val = bfin_read_NFC_IRQSTAT();
-		val |= RD_RDY;
-		bfin_write_NFC_IRQSTAT(val);
-		SSYNC();
-	}
-}
-
-static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd)
-{
-	uint8_t val;
-
-	bf5xx_nand_read_buf(mtd, &val, 1);
-
-	return val;
-}
-
-static void bf5xx_nand_write_buf(struct mtd_info *mtd,
-				const uint8_t *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++) {
-		while (bfin_read_NFC_STAT() & WB_FULL)
-			cpu_relax();
-
-		bfin_write_NFC_DATA_WR(buf[i]);
-		SSYNC();
-	}
-}
-
-static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-	u16 *p = (u16 *) buf;
-	len >>= 1;
-
-	/*
-	 * Data reads are requested by first writing to NFC_DATA_RD
-	 * and then reading back from NFC_READ.
-	 */
-	bfin_write_NFC_DATA_RD(0x5555);
-
-	SSYNC();
-
-	for (i = 0; i < len; i++)
-		p[i] = bfin_read_NFC_READ();
-}
-
-static void bf5xx_nand_write_buf16(struct mtd_info *mtd,
-				const uint8_t *buf, int len)
-{
-	int i;
-	u16 *p = (u16 *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++)
-		bfin_write_NFC_DATA_WR(p[i]);
-
-	SSYNC();
-}
-
-/*
- * DMA functions for buffer writing and reading
- */
-static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
-{
-	struct bf5xx_nand_info *info = dev_id;
-
-	clear_dma_irqstat(CH_NFC);
-	disable_dma(CH_NFC);
-	complete(&info->dma_completion);
-
-	return IRQ_HANDLED;
-}
-
-static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
-				uint8_t *buf, int is_read)
-{
-	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	unsigned short val;
-
-	dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n",
-			mtd, buf, is_read);
-
-	/*
-	 * Before starting a dma transfer, be sure to invalidate/flush
-	 * the cache over the address range of your DMA buffer to
-	 * prevent cache coherency problems. Otherwise very subtle bugs
-	 * can be introduced to your driver.
-	 */
-	if (is_read)
-		invalidate_dcache_range((unsigned int)buf,
-				(unsigned int)(buf + chip->ecc.size));
-	else
-		flush_dcache_range((unsigned int)buf,
-				(unsigned int)(buf + chip->ecc.size));
-
-	/*
-	 * This register must be written before each page is
-	 * transferred to generate the correct ECC register
-	 * values.
-	 */
-	bfin_write_NFC_RST(ECC_RST);
-	SSYNC();
-	while (bfin_read_NFC_RST() & ECC_RST)
-		cpu_relax();
-
-	disable_dma(CH_NFC);
-	clear_dma_irqstat(CH_NFC);
-
-	/* setup DMA register with Blackfin DMA API */
-	set_dma_config(CH_NFC, 0x0);
-	set_dma_start_addr(CH_NFC, (unsigned long) buf);
-
-	/* The DMAs have different size on BF52x and BF54x */
-#ifdef CONFIG_BF52x
-	set_dma_x_count(CH_NFC, (chip->ecc.size >> 1));
-	set_dma_x_modify(CH_NFC, 2);
-	val = DI_EN | WDSIZE_16;
-#endif
-
-#ifdef CONFIG_BF54x
-	set_dma_x_count(CH_NFC, (chip->ecc.size >> 2));
-	set_dma_x_modify(CH_NFC, 4);
-	val = DI_EN | WDSIZE_32;
-#endif
-	/* setup write or read operation */
-	if (is_read)
-		val |= WNR;
-	set_dma_config(CH_NFC, val);
-	enable_dma(CH_NFC);
-
-	/* Start PAGE read/write operation */
-	if (is_read)
-		bfin_write_NFC_PGCTL(PG_RD_START);
-	else
-		bfin_write_NFC_PGCTL(PG_WR_START);
-	wait_for_completion(&info->dma_completion);
-}
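
Note that the two #ifdef branches in bf5xx_nand_dma_rw() above move the same number of bytes and differ only in DMA word width: for a 512-byte ECC step, BF52x is programmed for 256 16-bit transfers (size >> 1, modify 2), while BF54x uses 128 32-bit transfers (size >> 2, modify 4).
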
-
-static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
-					uint8_t *buf, int len)
-{
-	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	dev_dbg(info->device, "mtd->%p, buf->%p, int %d\n", mtd, buf, len);
-
-	if (len == chip->ecc.size)
-		bf5xx_nand_dma_rw(mtd, buf, 1);
-	else
-		bf5xx_nand_read_buf(mtd, buf, len);
-}
-
-static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
-				const uint8_t *buf, int len)
-{
-	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
-
-	if (len == chip->ecc.size)
-		bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0);
-	else
-		bf5xx_nand_write_buf(mtd, buf, len);
-}
-
-static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-		uint8_t *buf, int oob_required, int page)
-{
-	bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
-	bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
-		struct nand_chip *chip,	const uint8_t *buf, int oob_required,
-		int page)
-{
-	bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
-	bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-/*
- * System initialization functions
- */
-static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
-{
-	int ret;
-
-	/* Do not use dma */
-	if (!hardware_ecc)
-		return 0;
-
-	init_completion(&info->dma_completion);
-
-	/* Request NFC DMA channel */
-	ret = request_dma(CH_NFC, "BF5XX NFC driver");
-	if (ret < 0) {
-		dev_err(info->device, " unable to get DMA channel\n");
-		return ret;
-	}
-
-#ifdef CONFIG_BF54x
-	/* Setup DMAC1 channel mux for NFC, which is shared with SDH */
-	bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() & ~1);
-	SSYNC();
-#endif
-
-	set_dma_callback(CH_NFC, bf5xx_nand_dma_irq, info);
-
-	/* Turn off the DMA channel first */
-	disable_dma(CH_NFC);
-	return 0;
-}
-
-static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info)
-{
-	/* Free NFC DMA channel */
-	if (hardware_ecc)
-		free_dma(CH_NFC);
-}
-
-/*
- * BF5XX NFC hardware initialization
- *  - pin mux setup
- *  - clear interrupt status
- */
-static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
-{
-	int err = 0;
-	unsigned short val;
-	struct bf5xx_nand_platform *plat = info->platform;
-
-	/* setup NFC_CTL register */
-	dev_info(info->device,
-		"data_width=%d, wr_dly=%d, rd_dly=%d\n",
-		(plat->data_width ? 16 : 8),
-		plat->wr_dly, plat->rd_dly);
-
-	val = (1 << NFC_PG_SIZE_OFFSET) |
-		(plat->data_width << NFC_NWIDTH_OFFSET) |
-		(plat->rd_dly << NFC_RDDLY_OFFSET) |
-		(plat->wr_dly << NFC_WRDLY_OFFSET);
-	dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val);
-
-	bfin_write_NFC_CTL(val);
-	SSYNC();
-
-	/* clear interrupt status */
-	bfin_write_NFC_IRQMASK(0x0);
-	SSYNC();
-	val = bfin_read_NFC_IRQSTAT();
-	bfin_write_NFC_IRQSTAT(val);
-	SSYNC();
-
-	/* DMA initialization  */
-	if (bf5xx_nand_dma_init(info))
-		err = -ENXIO;
-
-	return err;
-}
-
-/*
- * Device management interface
- */
-static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
-{
-	struct mtd_info *mtd = nand_to_mtd(&info->chip);
-	struct mtd_partition *parts = info->platform->partitions;
-	int nr = info->platform->nr_partitions;
-
-	return mtd_device_register(mtd, parts, nr);
-}
-
-static int bf5xx_nand_remove(struct platform_device *pdev)
-{
-	struct bf5xx_nand_info *info = to_nand_info(pdev);
-
-	/* first thing we need to do is release all our mtds
-	 * and their partitions, then go through freeing the
-	 * resources used
-	 */
-	nand_release(nand_to_mtd(&info->chip));
-
-	peripheral_free_list(bfin_nfc_pin_req);
-	bf5xx_nand_dma_remove(info);
-
-	return 0;
-}
-
-static int bf5xx_nand_scan(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret;
-
-	ret = nand_scan_ident(mtd, 1, NULL);
-	if (ret)
-		return ret;
-
-	if (hardware_ecc) {
-		/*
-		 * for NAND with a page size > 512B, treat it as several 512B sections
-		 */
-		if (likely(mtd->writesize >= 512)) {
-			chip->ecc.size = 512;
-			chip->ecc.bytes = 6;
-			chip->ecc.strength = 2;
-		} else {
-			chip->ecc.size = 256;
-			chip->ecc.bytes = 3;
-			chip->ecc.strength = 1;
-			bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
-			SSYNC();
-		}
-	}
-
-	return	nand_scan_tail(mtd);
-}
-
-/*
- * bf5xx_nand_probe
- *
- * called by device layer when it finds a device matching
- * one our driver can handle. This code checks to see if
- * it can allocate all necessary resources then calls the
- * nand layer to look for devices
- */
-static int bf5xx_nand_probe(struct platform_device *pdev)
-{
-	struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
-	struct bf5xx_nand_info *info = NULL;
-	struct nand_chip *chip = NULL;
-	struct mtd_info *mtd = NULL;
-	int err = 0;
-
-	dev_dbg(&pdev->dev, "(%p)\n", pdev);
-
-	if (!plat) {
-		dev_err(&pdev->dev, "no platform specific information\n");
-		return -EINVAL;
-	}
-
-	if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
-		dev_err(&pdev->dev, "requesting Peripherals failed\n");
-		return -EFAULT;
-	}
-
-	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-	if (info == NULL) {
-		err = -ENOMEM;
-		goto out_err;
-	}
-
-	platform_set_drvdata(pdev, info);
-
-	nand_hw_control_init(&info->controller);
-
-	info->device     = &pdev->dev;
-	info->platform   = plat;
-
-	/* initialise chip data struct */
-	chip = &info->chip;
-	mtd = nand_to_mtd(&info->chip);
-
-	if (plat->data_width)
-		chip->options |= NAND_BUSWIDTH_16;
-
-	chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN;
-
-	chip->read_buf = (plat->data_width) ?
-		bf5xx_nand_read_buf16 : bf5xx_nand_read_buf;
-	chip->write_buf = (plat->data_width) ?
-		bf5xx_nand_write_buf16 : bf5xx_nand_write_buf;
-
-	chip->read_byte    = bf5xx_nand_read_byte;
-
-	chip->cmd_ctrl     = bf5xx_nand_hwcontrol;
-	chip->dev_ready    = bf5xx_nand_devready;
-
-	nand_set_controller_data(chip, mtd);
-	chip->controller   = &info->controller;
-
-	chip->IO_ADDR_R    = (void __iomem *) NFC_READ;
-	chip->IO_ADDR_W    = (void __iomem *) NFC_DATA_WR;
-
-	chip->chip_delay   = 0;
-
-	/* initialise mtd info data struct */
-	mtd->dev.parent = &pdev->dev;
-
-	/* initialise the hardware */
-	err = bf5xx_nand_hw_init(info);
-	if (err)
-		goto out_err;
-
-	/* setup hardware ECC data struct */
-	if (hardware_ecc) {
-#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-		mtd_set_ooblayout(mtd, &bootrom_ooblayout_ops);
-#endif
-		chip->read_buf      = bf5xx_nand_dma_read_buf;
-		chip->write_buf     = bf5xx_nand_dma_write_buf;
-		chip->ecc.calculate = bf5xx_nand_calculate_ecc;
-		chip->ecc.correct   = bf5xx_nand_correct_data;
-		chip->ecc.mode	    = NAND_ECC_HW;
-		chip->ecc.hwctl	    = bf5xx_nand_enable_hwecc;
-		chip->ecc.read_page_raw = bf5xx_nand_read_page_raw;
-		chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
-	} else {
-		chip->ecc.mode	    = NAND_ECC_SOFT;
-		chip->ecc.algo	= NAND_ECC_HAMMING;
-	}
-
-	/* scan hardware nand chip and setup mtd info data struct */
-	if (bf5xx_nand_scan(mtd)) {
-		err = -ENXIO;
-		goto out_err_nand_scan;
-	}
-
-#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
-	chip->badblockpos = 63;
-#endif
-
-	/* add NAND partition */
-	bf5xx_nand_add_partition(info);
-
-	dev_dbg(&pdev->dev, "initialised ok\n");
-	return 0;
-
-out_err_nand_scan:
-	bf5xx_nand_dma_remove(info);
-out_err:
-	peripheral_free_list(bfin_nfc_pin_req);
-
-	return err;
-}
-
-/* driver device registration */
-static struct platform_driver bf5xx_nand_driver = {
-	.probe		= bf5xx_nand_probe,
-	.remove		= bf5xx_nand_remove,
-	.driver		= {
-		.name	= DRV_NAME,
-	},
-};
-
-module_platform_driver(bf5xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR(DRV_AUTHOR);
-MODULE_DESCRIPTION(DRV_DESC);
-MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/brcmnand/Makefile b/drivers/mtd/nand/brcmnand/Makefile
deleted file mode 100644
index b28ffb59eb43..000000000000
--- a/drivers/mtd/nand/brcmnand/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@ 
-# link order matters; don't link the more generic brcmstb_nand.o before the
-# more specific iproc_nand.o, for instance
-obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= iproc_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= bcm63138_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= bcm6368_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= brcmstb_nand.o
-obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= brcmnand.o
diff --git a/drivers/mtd/nand/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/brcmnand/bcm63138_nand.c
deleted file mode 100644
index 59444b3a697d..000000000000
--- a/drivers/mtd/nand/brcmnand/bcm63138_nand.c
+++ /dev/null
@@ -1,109 +0,0 @@ 
-/*
- * Copyright © 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "brcmnand.h"
-
-struct bcm63138_nand_soc {
-	struct brcmnand_soc soc;
-	void __iomem *base;
-};
-
-#define BCM63138_NAND_INT_STATUS		0x00
-#define BCM63138_NAND_INT_EN			0x04
-
-enum {
-	BCM63138_CTLRDY		= BIT(4),
-};
-
-static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
-{
-	struct bcm63138_nand_soc *priv =
-			container_of(soc, struct bcm63138_nand_soc, soc);
-	void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
-	u32 val = brcmnand_readl(mmio);
-
-	if (val & BCM63138_CTLRDY) {
-		brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
-		return true;
-	}
-
-	return false;
-}
-
-static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
-{
-	struct bcm63138_nand_soc *priv =
-			container_of(soc, struct bcm63138_nand_soc, soc);
-	void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
-	u32 val = brcmnand_readl(mmio);
-
-	if (en)
-		val |= BCM63138_CTLRDY;
-	else
-		val &= ~BCM63138_CTLRDY;
-
-	brcmnand_writel(val, mmio);
-}
-
-static int bcm63138_nand_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct bcm63138_nand_soc *priv;
-	struct brcmnand_soc *soc;
-	struct resource *res;
-
-	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	soc = &priv->soc;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
-	priv->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
-
-	soc->ctlrdy_ack = bcm63138_nand_intc_ack;
-	soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
-
-	return brcmnand_probe(pdev, soc);
-}
-
-static const struct of_device_id bcm63138_nand_of_match[] = {
-	{ .compatible = "brcm,nand-bcm63138" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
-
-static struct platform_driver bcm63138_nand_driver = {
-	.probe			= bcm63138_nand_probe,
-	.remove			= brcmnand_remove,
-	.driver = {
-		.name		= "bcm63138_nand",
-		.pm		= &brcmnand_pm_ops,
-		.of_match_table	= bcm63138_nand_of_match,
-	}
-};
-module_platform_driver(bcm63138_nand_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Brian Norris");
-MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/brcmnand/bcm6368_nand.c
deleted file mode 100644
index 34c91b0e1e69..000000000000
--- a/drivers/mtd/nand/brcmnand/bcm6368_nand.c
+++ /dev/null
@@ -1,142 +0,0 @@ 
-/*
- * Copyright 2015 Simon Arlott
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Derived from bcm63138_nand.c:
- * Copyright © 2015 Broadcom Corporation
- *
- * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
- * Copyright 2000-2010 Broadcom Corporation
- *
- * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/flash/nandflash.c:
- * Copyright 2000-2010 Broadcom Corporation
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "brcmnand.h"
-
-struct bcm6368_nand_soc {
-	struct brcmnand_soc soc;
-	void __iomem *base;
-};
-
-#define BCM6368_NAND_INT		0x00
-#define  BCM6368_NAND_STATUS_SHIFT	0
-#define  BCM6368_NAND_STATUS_MASK	(0xfff << BCM6368_NAND_STATUS_SHIFT)
-#define  BCM6368_NAND_ENABLE_SHIFT	16
-#define  BCM6368_NAND_ENABLE_MASK	(0xffff << BCM6368_NAND_ENABLE_SHIFT)
-#define BCM6368_NAND_BASE_ADDR0	0x04
-#define BCM6368_NAND_BASE_ADDR1	0x0c
-
-enum {
-	BCM6368_NP_READ		= BIT(0),
-	BCM6368_BLOCK_ERASE	= BIT(1),
-	BCM6368_COPY_BACK	= BIT(2),
-	BCM6368_PAGE_PGM	= BIT(3),
-	BCM6368_CTRL_READY	= BIT(4),
-	BCM6368_DEV_RBPIN	= BIT(5),
-	BCM6368_ECC_ERR_UNC	= BIT(6),
-	BCM6368_ECC_ERR_CORR	= BIT(7),
-};
-
-static bool bcm6368_nand_intc_ack(struct brcmnand_soc *soc)
-{
-	struct bcm6368_nand_soc *priv =
-			container_of(soc, struct bcm6368_nand_soc, soc);
-	void __iomem *mmio = priv->base + BCM6368_NAND_INT;
-	u32 val = brcmnand_readl(mmio);
-
-	if (val & (BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT)) {
-		/* Ack interrupt */
-		val &= ~BCM6368_NAND_STATUS_MASK;
-		val |= BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT;
-		brcmnand_writel(val, mmio);
-		return true;
-	}
-
-	return false;
-}
-
-static void bcm6368_nand_intc_set(struct brcmnand_soc *soc, bool en)
-{
-	struct bcm6368_nand_soc *priv =
-			container_of(soc, struct bcm6368_nand_soc, soc);
-	void __iomem *mmio = priv->base + BCM6368_NAND_INT;
-	u32 val = brcmnand_readl(mmio);
-
-	/* Don't ack any interrupts */
-	val &= ~BCM6368_NAND_STATUS_MASK;
-
-	if (en)
-		val |= BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT;
-	else
-		val &= ~(BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT);
-
-	brcmnand_writel(val, mmio);
-}
-
-static int bcm6368_nand_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct bcm6368_nand_soc *priv;
-	struct brcmnand_soc *soc;
-	struct resource *res;
-
-	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	soc = &priv->soc;
-
-	res = platform_get_resource_byname(pdev,
-		IORESOURCE_MEM, "nand-int-base");
-	priv->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(priv->base))
-		return PTR_ERR(priv->base);
-
-	soc->ctlrdy_ack = bcm6368_nand_intc_ack;
-	soc->ctlrdy_set_enabled = bcm6368_nand_intc_set;
-
-	/* Disable and ack all interrupts  */
-	brcmnand_writel(0, priv->base + BCM6368_NAND_INT);
-	brcmnand_writel(BCM6368_NAND_STATUS_MASK,
-			priv->base + BCM6368_NAND_INT);
-
-	return brcmnand_probe(pdev, soc);
-}
-
-static const struct of_device_id bcm6368_nand_of_match[] = {
-	{ .compatible = "brcm,nand-bcm6368" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
-
-static struct platform_driver bcm6368_nand_driver = {
-	.probe			= bcm6368_nand_probe,
-	.remove			= brcmnand_remove,
-	.driver = {
-		.name		= "bcm6368_nand",
-		.pm		= &brcmnand_pm_ops,
-		.of_match_table	= bcm6368_nand_of_match,
-	}
-};
-module_platform_driver(bcm6368_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Simon Arlott");
-MODULE_DESCRIPTION("NAND driver for BCM6368");
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
deleted file mode 100644
index 98453816a0a2..000000000000
--- a/drivers/mtd/nand/brcmnand/brcmnand.c
+++ /dev/null
@@ -1,2561 +0,0 @@ 
-/*
- * Copyright © 2010-2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/dma-mapping.h>
-#include <linux/ioport.h>
-#include <linux/bug.h>
-#include <linux/kernel.h>
-#include <linux/bitops.h>
-#include <linux/mm.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-#include <linux/log2.h>
-
-#include "brcmnand.h"
-
-/*
- * This flag controls if WP stays on between erase/write commands to mitigate
- * flash corruption due to power glitches. Values:
- * 0: NAND_WP is not used or not available
- * 1: NAND_WP is set by default, cleared for erase/write operations
- * 2: NAND_WP is always cleared
- */
-static int wp_on = 1;
-module_param(wp_on, int, 0444);
-
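
Since wp_on is an ordinary module parameter, it can be tuned without rebuilding: assuming the usual module name, "brcmnand.wp_on=2" on the kernel command line when the driver is built in, or "modprobe brcmnand wp_on=2" when built as a module; the 0444 permission only exposes the current value read-only under /sys/module/brcmnand/parameters/wp_on.
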
-/***********************************************************************
- * Definitions
- ***********************************************************************/
-
-#define DRV_NAME			"brcmnand"
-
-#define CMD_NULL			0x00
-#define CMD_PAGE_READ			0x01
-#define CMD_SPARE_AREA_READ		0x02
-#define CMD_STATUS_READ			0x03
-#define CMD_PROGRAM_PAGE		0x04
-#define CMD_PROGRAM_SPARE_AREA		0x05
-#define CMD_COPY_BACK			0x06
-#define CMD_DEVICE_ID_READ		0x07
-#define CMD_BLOCK_ERASE			0x08
-#define CMD_FLASH_RESET			0x09
-#define CMD_BLOCKS_LOCK			0x0a
-#define CMD_BLOCKS_LOCK_DOWN		0x0b
-#define CMD_BLOCKS_UNLOCK		0x0c
-#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
-#define CMD_PARAMETER_READ		0x0e
-#define CMD_PARAMETER_CHANGE_COL	0x0f
-#define CMD_LOW_LEVEL_OP		0x10
-
-struct brcm_nand_dma_desc {
-	u32 next_desc;
-	u32 next_desc_ext;
-	u32 cmd_irq;
-	u32 dram_addr;
-	u32 dram_addr_ext;
-	u32 tfr_len;
-	u32 total_len;
-	u32 flash_addr;
-	u32 flash_addr_ext;
-	u32 cs;
-	u32 pad2[5];
-	u32 status_valid;
-} __packed;
-
-/* Bitfields for brcm_nand_dma_desc::status_valid */
-#define FLASH_DMA_ECC_ERROR	(1 << 8)
-#define FLASH_DMA_CORR_ERROR	(1 << 9)
-
-/* 512B flash cache in the NAND controller HW */
-#define FC_SHIFT		9U
-#define FC_BYTES		512U
-#define FC_WORDS		(FC_BYTES >> 2)
-
-#define BRCMNAND_MIN_PAGESIZE	512
-#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
-#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)
-
-/* Controller feature flags */
-enum {
-	BRCMNAND_HAS_1K_SECTORS			= BIT(0),
-	BRCMNAND_HAS_PREFETCH			= BIT(1),
-	BRCMNAND_HAS_CACHE_MODE			= BIT(2),
-	BRCMNAND_HAS_WP				= BIT(3),
-};
-
-struct brcmnand_controller {
-	struct device		*dev;
-	struct nand_hw_control	controller;
-	void __iomem		*nand_base;
-	void __iomem		*nand_fc; /* flash cache */
-	void __iomem		*flash_dma_base;
-	unsigned int		irq;
-	unsigned int		dma_irq;
-	int			nand_version;
-
-	/* Some SoCs provide custom interrupt status register(s) */
-	struct brcmnand_soc	*soc;
-
-	/* Some SoCs have a gateable clock for the controller */
-	struct clk		*clk;
-
-	int			cmd_pending;
-	bool			dma_pending;
-	struct completion	done;
-	struct completion	dma_done;
-
-	/* List of NAND hosts (one for each chip-select) */
-	struct list_head host_list;
-
-	struct brcm_nand_dma_desc *dma_desc;
-	dma_addr_t		dma_pa;
-
-	/* in-memory cache of the FLASH_CACHE, used only for some commands */
-	u8			flash_cache[FC_BYTES];
-
-	/* Controller revision details */
-	const u16		*reg_offsets;
-	unsigned int		reg_spacing; /* between CS1, CS2, ... regs */
-	const u8		*cs_offsets; /* within each chip-select */
-	const u8		*cs0_offsets; /* within CS0, if different */
-	unsigned int		max_block_size;
-	const unsigned int	*block_sizes;
-	unsigned int		max_page_size;
-	const unsigned int	*page_sizes;
-	unsigned int		max_oob;
-	u32			features;
-
-	/* for low-power standby/resume only */
-	u32			nand_cs_nand_select;
-	u32			nand_cs_nand_xor;
-	u32			corr_stat_threshold;
-	u32			flash_dma_mode;
-};
-
-struct brcmnand_cfg {
-	u64			device_size;
-	unsigned int		block_size;
-	unsigned int		page_size;
-	unsigned int		spare_area_size;
-	unsigned int		device_width;
-	unsigned int		col_adr_bytes;
-	unsigned int		blk_adr_bytes;
-	unsigned int		ful_adr_bytes;
-	unsigned int		sector_size_1k;
-	unsigned int		ecc_level;
-	/* use for low-power standby/resume only */
-	u32			acc_control;
-	u32			config;
-	u32			config_ext;
-	u32			timing_1;
-	u32			timing_2;
-};
-
-struct brcmnand_host {
-	struct list_head	node;
-
-	struct nand_chip	chip;
-	struct platform_device	*pdev;
-	int			cs;
-
-	unsigned int		last_cmd;
-	unsigned int		last_byte;
-	u64			last_addr;
-	struct brcmnand_cfg	hwcfg;
-	struct brcmnand_controller *ctrl;
-};
-
-enum brcmnand_reg {
-	BRCMNAND_CMD_START = 0,
-	BRCMNAND_CMD_EXT_ADDRESS,
-	BRCMNAND_CMD_ADDRESS,
-	BRCMNAND_INTFC_STATUS,
-	BRCMNAND_CS_SELECT,
-	BRCMNAND_CS_XOR,
-	BRCMNAND_LL_OP,
-	BRCMNAND_CS0_BASE,
-	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
-	BRCMNAND_CORR_THRESHOLD,
-	BRCMNAND_CORR_THRESHOLD_EXT,
-	BRCMNAND_UNCORR_COUNT,
-	BRCMNAND_CORR_COUNT,
-	BRCMNAND_CORR_EXT_ADDR,
-	BRCMNAND_CORR_ADDR,
-	BRCMNAND_UNCORR_EXT_ADDR,
-	BRCMNAND_UNCORR_ADDR,
-	BRCMNAND_SEMAPHORE,
-	BRCMNAND_ID,
-	BRCMNAND_ID_EXT,
-	BRCMNAND_LL_RDATA,
-	BRCMNAND_OOB_READ_BASE,
-	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
-	BRCMNAND_OOB_WRITE_BASE,
-	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
-	BRCMNAND_FC_BASE,
-};
-
-/* BRCMNAND v4.0 */
-static const u16 brcmnand_regs_v40[] = {
-	[BRCMNAND_CMD_START]		=  0x04,
-	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
-	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
-	[BRCMNAND_INTFC_STATUS]		=  0x6c,
-	[BRCMNAND_CS_SELECT]		=  0x14,
-	[BRCMNAND_CS_XOR]		=  0x18,
-	[BRCMNAND_LL_OP]		= 0x178,
-	[BRCMNAND_CS0_BASE]		=  0x40,
-	[BRCMNAND_CS1_BASE]		=  0xd0,
-	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
-	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
-	[BRCMNAND_UNCORR_COUNT]		=     0,
-	[BRCMNAND_CORR_COUNT]		=     0,
-	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
-	[BRCMNAND_CORR_ADDR]		=  0x74,
-	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
-	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
-	[BRCMNAND_SEMAPHORE]		=  0x58,
-	[BRCMNAND_ID]			=  0x60,
-	[BRCMNAND_ID_EXT]		=  0x64,
-	[BRCMNAND_LL_RDATA]		= 0x17c,
-	[BRCMNAND_OOB_READ_BASE]	=  0x20,
-	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
-	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
-	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
-	[BRCMNAND_FC_BASE]		= 0x200,
-};
-
-/* BRCMNAND v5.0 */
-static const u16 brcmnand_regs_v50[] = {
-	[BRCMNAND_CMD_START]		=  0x04,
-	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
-	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
-	[BRCMNAND_INTFC_STATUS]		=  0x6c,
-	[BRCMNAND_CS_SELECT]		=  0x14,
-	[BRCMNAND_CS_XOR]		=  0x18,
-	[BRCMNAND_LL_OP]		= 0x178,
-	[BRCMNAND_CS0_BASE]		=  0x40,
-	[BRCMNAND_CS1_BASE]		=  0xd0,
-	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
-	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
-	[BRCMNAND_UNCORR_COUNT]		=     0,
-	[BRCMNAND_CORR_COUNT]		=     0,
-	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
-	[BRCMNAND_CORR_ADDR]		=  0x74,
-	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
-	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
-	[BRCMNAND_SEMAPHORE]		=  0x58,
-	[BRCMNAND_ID]			=  0x60,
-	[BRCMNAND_ID_EXT]		=  0x64,
-	[BRCMNAND_LL_RDATA]		= 0x17c,
-	[BRCMNAND_OOB_READ_BASE]	=  0x20,
-	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
-	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
-	[BRCMNAND_OOB_WRITE_10_BASE]	= 0x140,
-	[BRCMNAND_FC_BASE]		= 0x200,
-};
-
-/* BRCMNAND v6.0 - v7.1 */
-static const u16 brcmnand_regs_v60[] = {
-	[BRCMNAND_CMD_START]		=  0x04,
-	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
-	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
-	[BRCMNAND_INTFC_STATUS]		=  0x14,
-	[BRCMNAND_CS_SELECT]		=  0x18,
-	[BRCMNAND_CS_XOR]		=  0x1c,
-	[BRCMNAND_LL_OP]		=  0x20,
-	[BRCMNAND_CS0_BASE]		=  0x50,
-	[BRCMNAND_CS1_BASE]		=     0,
-	[BRCMNAND_CORR_THRESHOLD]	=  0xc0,
-	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xc4,
-	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
-	[BRCMNAND_CORR_COUNT]		= 0x100,
-	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
-	[BRCMNAND_CORR_ADDR]		= 0x110,
-	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
-	[BRCMNAND_UNCORR_ADDR]		= 0x118,
-	[BRCMNAND_SEMAPHORE]		= 0x150,
-	[BRCMNAND_ID]			= 0x194,
-	[BRCMNAND_ID_EXT]		= 0x198,
-	[BRCMNAND_LL_RDATA]		= 0x19c,
-	[BRCMNAND_OOB_READ_BASE]	= 0x200,
-	[BRCMNAND_OOB_READ_10_BASE]	=     0,
-	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
-	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
-	[BRCMNAND_FC_BASE]		= 0x400,
-};
-
-/* BRCMNAND v7.1 */
-static const u16 brcmnand_regs_v71[] = {
-	[BRCMNAND_CMD_START]		=  0x04,
-	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
-	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
-	[BRCMNAND_INTFC_STATUS]		=  0x14,
-	[BRCMNAND_CS_SELECT]		=  0x18,
-	[BRCMNAND_CS_XOR]		=  0x1c,
-	[BRCMNAND_LL_OP]		=  0x20,
-	[BRCMNAND_CS0_BASE]		=  0x50,
-	[BRCMNAND_CS1_BASE]		=     0,
-	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
-	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
-	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
-	[BRCMNAND_CORR_COUNT]		= 0x100,
-	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
-	[BRCMNAND_CORR_ADDR]		= 0x110,
-	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
-	[BRCMNAND_UNCORR_ADDR]		= 0x118,
-	[BRCMNAND_SEMAPHORE]		= 0x150,
-	[BRCMNAND_ID]			= 0x194,
-	[BRCMNAND_ID_EXT]		= 0x198,
-	[BRCMNAND_LL_RDATA]		= 0x19c,
-	[BRCMNAND_OOB_READ_BASE]	= 0x200,
-	[BRCMNAND_OOB_READ_10_BASE]	=     0,
-	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
-	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
-	[BRCMNAND_FC_BASE]		= 0x400,
-};
-
-/* BRCMNAND v7.2 */
-static const u16 brcmnand_regs_v72[] = {
-	[BRCMNAND_CMD_START]		=  0x04,
-	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
-	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
-	[BRCMNAND_INTFC_STATUS]		=  0x14,
-	[BRCMNAND_CS_SELECT]		=  0x18,
-	[BRCMNAND_CS_XOR]		=  0x1c,
-	[BRCMNAND_LL_OP]		=  0x20,
-	[BRCMNAND_CS0_BASE]		=  0x50,
-	[BRCMNAND_CS1_BASE]		=     0,
-	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
-	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
-	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
-	[BRCMNAND_CORR_COUNT]		= 0x100,
-	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
-	[BRCMNAND_CORR_ADDR]		= 0x110,
-	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
-	[BRCMNAND_UNCORR_ADDR]		= 0x118,
-	[BRCMNAND_SEMAPHORE]		= 0x150,
-	[BRCMNAND_ID]			= 0x194,
-	[BRCMNAND_ID_EXT]		= 0x198,
-	[BRCMNAND_LL_RDATA]		= 0x19c,
-	[BRCMNAND_OOB_READ_BASE]	= 0x200,
-	[BRCMNAND_OOB_READ_10_BASE]	=     0,
-	[BRCMNAND_OOB_WRITE_BASE]	= 0x400,
-	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
-	[BRCMNAND_FC_BASE]		= 0x600,
-};
-
-enum brcmnand_cs_reg {
-	BRCMNAND_CS_CFG_EXT = 0,
-	BRCMNAND_CS_CFG,
-	BRCMNAND_CS_ACC_CONTROL,
-	BRCMNAND_CS_TIMING1,
-	BRCMNAND_CS_TIMING2,
-};
-
-/* Per chip-select offsets for v7.1 */
-static const u8 brcmnand_cs_offsets_v71[] = {
-	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
-	[BRCMNAND_CS_CFG_EXT]		= 0x04,
-	[BRCMNAND_CS_CFG]		= 0x08,
-	[BRCMNAND_CS_TIMING1]		= 0x0c,
-	[BRCMNAND_CS_TIMING2]		= 0x10,
-};
-
-/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
-static const u8 brcmnand_cs_offsets[] = {
-	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
-	[BRCMNAND_CS_CFG_EXT]		= 0x04,
-	[BRCMNAND_CS_CFG]		= 0x04,
-	[BRCMNAND_CS_TIMING1]		= 0x08,
-	[BRCMNAND_CS_TIMING2]		= 0x0c,
-};
-
-/* Per chip-select offset for <= v5.0 on CS0 only */
-static const u8 brcmnand_cs_offsets_cs0[] = {
-	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
-	[BRCMNAND_CS_CFG_EXT]		= 0x08,
-	[BRCMNAND_CS_CFG]		= 0x08,
-	[BRCMNAND_CS_TIMING1]		= 0x10,
-	[BRCMNAND_CS_TIMING2]		= 0x14,
-};
-
-/*
- * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
- * one config register, but once the bitfields overflowed, newer controllers
- * (v7.1 and newer) added a CFG_EXT register and shuffled a few fields around.
- */
-enum {
-	CFG_BLK_ADR_BYTES_SHIFT		= 8,
-	CFG_COL_ADR_BYTES_SHIFT		= 12,
-	CFG_FUL_ADR_BYTES_SHIFT		= 16,
-	CFG_BUS_WIDTH_SHIFT		= 23,
-	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
-	CFG_DEVICE_SIZE_SHIFT		= 24,
-
-	/* Only for pre-v7.1 (with no CFG_EXT register) */
-	CFG_PAGE_SIZE_SHIFT		= 20,
-	CFG_BLK_SIZE_SHIFT		= 28,
-
-	/* Only for v7.1+ (with CFG_EXT register) */
-	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
-	CFG_EXT_BLK_SIZE_SHIFT		= 4,
-};
-
-/* BRCMNAND_INTFC_STATUS */
-enum {
-	INTFC_FLASH_STATUS		= GENMASK(7, 0),
-
-	INTFC_ERASED			= BIT(27),
-	INTFC_OOB_VALID			= BIT(28),
-	INTFC_CACHE_VALID		= BIT(29),
-	INTFC_FLASH_READY		= BIT(30),
-	INTFC_CTLR_READY		= BIT(31),
-};
-
-static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
-{
-	return brcmnand_readl(ctrl->nand_base + offs);
-}
-
-static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
-				 u32 val)
-{
-	brcmnand_writel(val, ctrl->nand_base + offs);
-}
-
-static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
-{
-	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
-	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
-	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
-
-	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
-
-	/* Only support v4.0+? */
-	if (ctrl->nand_version < 0x0400) {
-		dev_err(ctrl->dev, "version %#x not supported\n",
-			ctrl->nand_version);
-		return -ENODEV;
-	}
-
-	/* Register offsets */
-	if (ctrl->nand_version >= 0x0702)
-		ctrl->reg_offsets = brcmnand_regs_v72;
-	else if (ctrl->nand_version >= 0x0701)
-		ctrl->reg_offsets = brcmnand_regs_v71;
-	else if (ctrl->nand_version >= 0x0600)
-		ctrl->reg_offsets = brcmnand_regs_v60;
-	else if (ctrl->nand_version >= 0x0500)
-		ctrl->reg_offsets = brcmnand_regs_v50;
-	else if (ctrl->nand_version >= 0x0400)
-		ctrl->reg_offsets = brcmnand_regs_v40;
-
-	/* Chip-select stride */
-	if (ctrl->nand_version >= 0x0701)
-		ctrl->reg_spacing = 0x14;
-	else
-		ctrl->reg_spacing = 0x10;
-
-	/* Per chip-select registers */
-	if (ctrl->nand_version >= 0x0701) {
-		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
-	} else {
-		ctrl->cs_offsets = brcmnand_cs_offsets;
-
-		/* v5.0 and earlier has a different CS0 offset layout */
-		if (ctrl->nand_version <= 0x0500)
-			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
-	}
-
-	/* Page / block sizes */
-	if (ctrl->nand_version >= 0x0701) {
-		/* >= v7.1 use nice power-of-2 values! */
-		ctrl->max_page_size = 16 * 1024;
-		ctrl->max_block_size = 2 * 1024 * 1024;
-	} else {
-		ctrl->page_sizes = page_sizes;
-		if (ctrl->nand_version >= 0x0600)
-			ctrl->block_sizes = block_sizes_v6;
-		else
-			ctrl->block_sizes = block_sizes_v4;
-
-		if (ctrl->nand_version < 0x0400) {
-			ctrl->max_page_size = 4096;
-			ctrl->max_block_size = 512 * 1024;
-		}
-	}
-
-	/* Maximum spare area sector size (per 512B) */
-	if (ctrl->nand_version >= 0x0702)
-		ctrl->max_oob = 128;
-	else if (ctrl->nand_version >= 0x0600)
-		ctrl->max_oob = 64;
-	else if (ctrl->nand_version >= 0x0500)
-		ctrl->max_oob = 32;
-	else
-		ctrl->max_oob = 16;
-
-	/* v6.0 and newer (except v6.1) have prefetch support */
-	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
-		ctrl->features |= BRCMNAND_HAS_PREFETCH;
-
-	/*
-	 * v6.x has cache mode, but it's implemented differently. Ignore it for
-	 * now.
-	 */
-	if (ctrl->nand_version >= 0x0700)
-		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
-
-	if (ctrl->nand_version >= 0x0500)
-		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
-
-	if (ctrl->nand_version >= 0x0700)
-		ctrl->features |= BRCMNAND_HAS_WP;
-	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
-		ctrl->features |= BRCMNAND_HAS_WP;
-
-	return 0;
-}
-
-static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
-		enum brcmnand_reg reg)
-{
-	u16 offs = ctrl->reg_offsets[reg];
-
-	if (offs)
-		return nand_readreg(ctrl, offs);
-	else
-		return 0;
-}
-
-static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
-				      enum brcmnand_reg reg, u32 val)
-{
-	u16 offs = ctrl->reg_offsets[reg];
-
-	if (offs)
-		nand_writereg(ctrl, offs, val);
-}
-
-static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
-				    enum brcmnand_reg reg, u32 mask, unsigned
-				    int shift, u32 val)
-{
-	u32 tmp = brcmnand_read_reg(ctrl, reg);
-
-	tmp &= ~mask;
-	tmp |= val << shift;
-	brcmnand_write_reg(ctrl, reg, tmp);
-}
-
-static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
-{
-	return __raw_readl(ctrl->nand_fc + word * 4);
-}
-
-static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
-				     int word, u32 val)
-{
-	__raw_writel(val, ctrl->nand_fc + word * 4);
-}
-
-static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
-				     enum brcmnand_cs_reg reg)
-{
-	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
-	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
-	u8 cs_offs;
-
-	if (cs == 0 && ctrl->cs0_offsets)
-		cs_offs = ctrl->cs0_offsets[reg];
-	else
-		cs_offs = ctrl->cs_offsets[reg];
-
-	if (cs && offs_cs1)
-		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
-
-	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
-}
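
A concrete example of the lookup above: on a v7.1+ controller (CS0_BASE at 0x50, no separate CS1_BASE, reg_spacing 0x14), ACC_CONTROL for CS2 resolves to 0x50 + 2 * 0x14 + 0x00 = 0x78 and TIMING2 to 0x50 + 2 * 0x14 + 0x10 = 0x88; on a v4.0 controller, where CS1_BASE is 0xd0 and the spacing is 0x10, the CFG register of CS2 lands at 0xd0 + (2 - 1) * 0x10 + 0x04 = 0xe4.
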
-
-static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
-{
-	if (ctrl->nand_version < 0x0600)
-		return 1;
-	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
-}
-
-static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	unsigned int shift = 0, bits;
-	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
-	int cs = host->cs;
-
-	if (ctrl->nand_version >= 0x0702)
-		bits = 7;
-	else if (ctrl->nand_version >= 0x0600)
-		bits = 6;
-	else if (ctrl->nand_version >= 0x0500)
-		bits = 5;
-	else
-		bits = 4;
-
-	if (ctrl->nand_version >= 0x0702) {
-		if (cs >= 4)
-			reg = BRCMNAND_CORR_THRESHOLD_EXT;
-		shift = (cs % 4) * bits;
-	} else if (ctrl->nand_version >= 0x0600) {
-		if (cs >= 5)
-			reg = BRCMNAND_CORR_THRESHOLD_EXT;
-		shift = (cs % 5) * bits;
-	}
-	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
-}
-
-static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
-{
-	if (ctrl->nand_version < 0x0602)
-		return 24;
-	return 0;
-}
-
-/***********************************************************************
- * NAND ACC CONTROL bitfield
- *
- * Some bits have remained constant throughout hardware revision, while
- * others have shifted around.
- ***********************************************************************/
-
-/* Constant for all versions (where supported) */
-enum {
-	/* See BRCMNAND_HAS_CACHE_MODE */
-	ACC_CONTROL_CACHE_MODE				= BIT(22),
-
-	/* See BRCMNAND_HAS_PREFETCH */
-	ACC_CONTROL_PREFETCH				= BIT(23),
-
-	ACC_CONTROL_PAGE_HIT				= BIT(24),
-	ACC_CONTROL_WR_PREEMPT				= BIT(25),
-	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
-	ACC_CONTROL_RD_ERASED				= BIT(27),
-	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
-	ACC_CONTROL_WR_ECC				= BIT(30),
-	ACC_CONTROL_RD_ECC				= BIT(31),
-};
-
-static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
-{
-	if (ctrl->nand_version >= 0x0702)
-		return GENMASK(7, 0);
-	else if (ctrl->nand_version >= 0x0600)
-		return GENMASK(6, 0);
-	else
-		return GENMASK(5, 0);
-}
-
-#define NAND_ACC_CONTROL_ECC_SHIFT	16
-#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13
-
-static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
-{
-	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
-
-	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
-
-	/* v7.2 includes additional ECC levels */
-	if (ctrl->nand_version >= 0x0702)
-		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
-
-	return mask;
-}
-
-static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
-	u32 acc_control = nand_readreg(ctrl, offs);
-	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
-
-	if (en) {
-		acc_control |= ecc_flags; /* enable RD/WR ECC */
-		acc_control |= host->hwcfg.ecc_level
-			       << NAND_ACC_CONTROL_ECC_SHIFT;
-	} else {
-		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
-		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
-	}
-
-	nand_writereg(ctrl, offs, acc_control);
-}
-
-static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
-{
-	if (ctrl->nand_version >= 0x0702)
-		return 9;
-	else if (ctrl->nand_version >= 0x0600)
-		return 7;
-	else if (ctrl->nand_version >= 0x0500)
-		return 6;
-	else
-		return -1;
-}
-
-static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	int shift = brcmnand_sector_1k_shift(ctrl);
-	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
-						  BRCMNAND_CS_ACC_CONTROL);
-
-	if (shift < 0)
-		return 0;
-
-	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
-}
-
-static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	int shift = brcmnand_sector_1k_shift(ctrl);
-	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
-						  BRCMNAND_CS_ACC_CONTROL);
-	u32 tmp;
-
-	if (shift < 0)
-		return;
-
-	tmp = nand_readreg(ctrl, acc_control_offs);
-	tmp &= ~(1 << shift);
-	tmp |= (!!val) << shift;
-	nand_writereg(ctrl, acc_control_offs, tmp);
-}
-
-/***********************************************************************
- * CS_NAND_SELECT
- ***********************************************************************/
-
-enum {
-	CS_SELECT_NAND_WP			= BIT(29),
-	CS_SELECT_AUTO_DEVICE_ID_CFG		= BIT(30),
-};
-
-static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
-{
-	u32 val = en ? CS_SELECT_NAND_WP : 0;
-
-	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
-}
-
-/***********************************************************************
- * Flash DMA
- ***********************************************************************/
-
-enum flash_dma_reg {
-	FLASH_DMA_REVISION		= 0x00,
-	FLASH_DMA_FIRST_DESC		= 0x04,
-	FLASH_DMA_FIRST_DESC_EXT	= 0x08,
-	FLASH_DMA_CTRL			= 0x0c,
-	FLASH_DMA_MODE			= 0x10,
-	FLASH_DMA_STATUS		= 0x14,
-	FLASH_DMA_INTERRUPT_DESC	= 0x18,
-	FLASH_DMA_INTERRUPT_DESC_EXT	= 0x1c,
-	FLASH_DMA_ERROR_STATUS		= 0x20,
-	FLASH_DMA_CURRENT_DESC		= 0x24,
-	FLASH_DMA_CURRENT_DESC_EXT	= 0x28,
-};
-
-static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
-{
-	return ctrl->flash_dma_base;
-}
-
-static inline bool flash_dma_buf_ok(const void *buf)
-{
-	return buf && !is_vmalloc_addr(buf) &&
-		likely(IS_ALIGNED((uintptr_t)buf, 4));
-}
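
In practice this means a kmalloc()'d buffer (physically contiguous and at least word-aligned) qualifies for flash DMA, while a vmalloc()'d or misaligned buffer does not, so the read/write paths can fall back to transferring through the flash cache instead.
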
-
-static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
-				    u32 val)
-{
-	brcmnand_writel(val, ctrl->flash_dma_base + offs);
-}
-
-static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
-{
-	return brcmnand_readl(ctrl->flash_dma_base + offs);
-}
-
-/* Low-level operation types: command, address, write, or read */
-enum brcmnand_llop_type {
-	LL_OP_CMD,
-	LL_OP_ADDR,
-	LL_OP_WR,
-	LL_OP_RD,
-};
-
-/***********************************************************************
- * Internal support functions
- ***********************************************************************/
-
-static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
-				  struct brcmnand_cfg *cfg)
-{
-	if (ctrl->nand_version <= 0x0701)
-		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
-			cfg->ecc_level == 15;
-	else
-		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
-			cfg->ecc_level == 15) ||
-			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
-}
-
-/*
- * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
- * the layout/configuration.
- * Returns -ERRCODE on failure.
- */
-static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
-					  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_cfg *cfg = &host->hwcfg;
-	int sas = cfg->spare_area_size << cfg->sector_size_1k;
-	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
-
-	if (section >= sectors)
-		return -ERANGE;
-
-	oobregion->offset = (section * sas) + 6;
-	oobregion->length = 3;
-
-	return 0;
-}
-
-static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
-					   struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_cfg *cfg = &host->hwcfg;
-	int sas = cfg->spare_area_size << cfg->sector_size_1k;
-	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
-
-	if (section >= sectors * 2)
-		return -ERANGE;
-
-	oobregion->offset = (section / 2) * sas;
-
-	if (section & 1) {
-		oobregion->offset += 9;
-		oobregion->length = 7;
-	} else {
-		oobregion->length = 6;
-
-		/* First sector of each page may have BBI */
-		if (!section) {
-			/*
-			 * Small-page NAND uses byte 6 for BBI while large-page
-			 * NAND uses byte 0.
-			 */
-			if (cfg->page_size > 512)
-				oobregion->offset++;
-			oobregion->length--;
-		}
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
-	.ecc = brcmnand_hamming_ooblayout_ecc,
-	.free = brcmnand_hamming_ooblayout_free,
-};
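
The net effect of the two callbacks above is easiest to see with concrete numbers. The following stand-alone sketch (hypothetical but typical geometry: 2048-byte pages, 512-byte sectors, 16 bytes of spare area per sector, large-page BBI in byte 0) reproduces the same arithmetic and prints the resulting OOB regions:

#include <stdio.h>

int main(void)
{
	int sas = 16;		/* spare area bytes per 512-byte sector */
	int sectors = 4;	/* 2048-byte page / 512-byte sectors */
	int section;

	for (section = 0; section < sectors; section++)
		printf("ecc : bytes %2d-%2d\n",
		       section * sas + 6, section * sas + 8);

	for (section = 0; section < sectors * 2; section++) {
		int offset = (section / 2) * sas;
		int length;

		if (section & 1) {
			offset += 9;
			length = 7;
		} else {
			length = 6;
			if (!section) {	/* byte 0 holds the bad block marker */
				offset++;
				length--;
			}
		}
		printf("free: bytes %2d-%2d\n", offset, offset + length - 1);
	}
	return 0;
}
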
-
-static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
-				      struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_cfg *cfg = &host->hwcfg;
-	int sas = cfg->spare_area_size << cfg->sector_size_1k;
-	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
-
-	if (section >= sectors)
-		return -ERANGE;
-
-	oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
-	oobregion->length = chip->ecc.bytes;
-
-	return 0;
-}
-
-static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
-					  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_cfg *cfg = &host->hwcfg;
-	int sas = cfg->spare_area_size << cfg->sector_size_1k;
-	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
-
-	if (section >= sectors)
-		return -ERANGE;
-
-	if (sas <= chip->ecc.bytes)
-		return 0;
-
-	oobregion->offset = section * sas;
-	oobregion->length = sas - chip->ecc.bytes;
-
-	if (!section) {
-		oobregion->offset++;
-		oobregion->length--;
-	}
-
-	return 0;
-}
-
-static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
-					  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_cfg *cfg = &host->hwcfg;
-	int sas = cfg->spare_area_size << cfg->sector_size_1k;
-
-	if (section > 1 || sas - chip->ecc.bytes < 6 ||
-	    (section && sas - chip->ecc.bytes == 6))
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->offset = 0;
-		oobregion->length = 5;
-	} else {
-		oobregion->offset = 6;
-		oobregion->length = sas - chip->ecc.bytes - 6;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
-	.ecc = brcmnand_bch_ooblayout_ecc,
-	.free = brcmnand_bch_ooblayout_free_lp,
-};
-
-static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
-	.ecc = brcmnand_bch_ooblayout_ecc,
-	.free = brcmnand_bch_ooblayout_free_sp,
-};
-
-static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
-{
-	struct brcmnand_cfg *p = &host->hwcfg;
-	struct mtd_info *mtd = nand_to_mtd(&host->chip);
-	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
-	unsigned int ecc_level = p->ecc_level;
-	int sas = p->spare_area_size << p->sector_size_1k;
-	int sectors = p->page_size / (512 << p->sector_size_1k);
-
-	if (p->sector_size_1k)
-		ecc_level <<= 1;
-
-	if (is_hamming_ecc(host->ctrl, p)) {
-		ecc->bytes = 3 * sectors;
-		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
-		return 0;
-	}
-
-	/*
-	 * CONTROLLER_VERSION:
-	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
-	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
-	 * But we will just be conservative.
-	 */
-	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
-	if (p->page_size == 512)
-		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
-	else
-		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
-
-	if (ecc->bytes >= sas) {
-		dev_err(&host->pdev->dev,
-			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
-			ecc->bytes, sas);
-		return -EINVAL;
-	}
-
-	return 0;
-}
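
To make the conservative ECC_REQ rule used above concrete, here is a small stand-alone sketch (hypothetical BCH-8 configuration with 1 KiB sectors, not driver code) mirroring the DIV_ROUND_UP(ecc_level * 14, 8) computation, including the doubling applied when sector_size_1k is set:

#include <stdio.h>

/* same rounding helper the kernel provides */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int ecc_level = 8;		/* hypothetical BCH-8 per 512 bytes */
	unsigned int sector_size_1k = 1;	/* 1 KiB ECC sectors */

	if (sector_size_1k)
		ecc_level <<= 1;		/* the level is specified per 512 bytes */

	/* >= v5.0 rule: 14 parity bits per correctable bit, rounded up to bytes */
	printf("ECC bytes per sector: %u\n", DIV_ROUND_UP(ecc_level * 14, 8));

	return 0;	/* prints 28 for this configuration */
}
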
-
-static void brcmnand_wp(struct mtd_info *mtd, int wp)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-
-	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
-		static int old_wp = -1;
-
-		if (old_wp != wp) {
-			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
-			old_wp = wp;
-		}
-		brcmnand_set_wp(ctrl, wp);
-	}
-}
-
-/* Helper functions for reading and writing OOB registers */
-static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
-{
-	u16 offset0, offset10, reg_offs;
-
-	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
-	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
-
-	if (offs >= ctrl->max_oob)
-		return 0x77;
-
-	if (offs >= 16 && offset10)
-		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
-	else
-		reg_offs = offset0 + (offs & ~0x03);
-
-	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
-}
-
-static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
-				 u32 data)
-{
-	u16 offset0, offset10, reg_offs;
-
-	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
-	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
-
-	if (offs >= ctrl->max_oob)
-		return;
-
-	if (offs >= 16 && offset10)
-		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
-	else
-		reg_offs = offset0 + (offs & ~0x03);
-
-	nand_writereg(ctrl, reg_offs, data);
-}
-
-/*
- * read_oob_from_regs - read data from OOB registers
- * @ctrl: NAND controller
- * @i: sub-page sector index
- * @oob: buffer to read to
- * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
- * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
- */
-static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
-			      int sas, int sector_1k)
-{
-	int tbytes = sas << sector_1k;
-	int j;
-
-	/* Adjust OOB values for 1K sector size */
-	if (sector_1k && (i & 0x01))
-		tbytes = max(0, tbytes - (int)ctrl->max_oob);
-	tbytes = min_t(int, tbytes, ctrl->max_oob);
-
-	for (j = 0; j < tbytes; j++)
-		oob[j] = oob_reg_read(ctrl, j);
-	return tbytes;
-}
-
-/*
- * write_oob_to_regs - write data to OOB registers
- * @i: sub-page sector index
- * @oob: buffer to write from
- * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
- * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
- */
-static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
-			     const u8 *oob, int sas, int sector_1k)
-{
-	int tbytes = sas << sector_1k;
-	int j;
-
-	/* Adjust OOB values for 1K sector size */
-	if (sector_1k && (i & 0x01))
-		tbytes = max(0, tbytes - (int)ctrl->max_oob);
-	tbytes = min_t(int, tbytes, ctrl->max_oob);
-
-	for (j = 0; j < tbytes; j += 4)
-		oob_reg_write(ctrl, j,
-				(oob[j + 0] << 24) |
-				(oob[j + 1] << 16) |
-				(oob[j + 2] <<  8) |
-				(oob[j + 3] <<  0));
-	return tbytes;
-}
-
-static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
-{
-	struct brcmnand_controller *ctrl = data;
-
-	/* Discard all NAND_CTLRDY interrupts during DMA */
-	if (ctrl->dma_pending)
-		return IRQ_HANDLED;
-
-	complete(&ctrl->done);
-	return IRQ_HANDLED;
-}
-
-/* Handle SoC-specific interrupt hardware */
-static irqreturn_t brcmnand_irq(int irq, void *data)
-{
-	struct brcmnand_controller *ctrl = data;
-
-	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
-		return brcmnand_ctlrdy_irq(irq, data);
-
-	return IRQ_NONE;
-}
-
-static irqreturn_t brcmnand_dma_irq(int irq, void *data)
-{
-	struct brcmnand_controller *ctrl = data;
-
-	complete(&ctrl->dma_done);
-
-	return IRQ_HANDLED;
-}
-
-static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	u32 intfc;
-
-	dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
-		brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
-	BUG_ON(ctrl->cmd_pending != 0);
-	ctrl->cmd_pending = cmd;
-
-	intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
-	WARN_ON(!(intfc & INTFC_CTLR_READY));
-
-	mb(); /* flush previous writes */
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
-			   cmd << brcmnand_cmd_shift(ctrl));
-}
-
-/***********************************************************************
- * NAND MTD API: read/program/erase
- ***********************************************************************/
-
-static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
-	unsigned int ctrl)
-{
-	/* intentionally left blank */
-}
-
-static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-	unsigned long timeo = msecs_to_jiffies(100);
-
-	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
-	if (ctrl->cmd_pending &&
-			wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
-		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
-					>> brcmnand_cmd_shift(ctrl);
-
-		dev_err_ratelimited(ctrl->dev,
-			"timeout waiting for command %#02x\n", cmd);
-		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
-			brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
-	}
-	ctrl->cmd_pending = 0;
-	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
-				 INTFC_FLASH_STATUS;
-}
-
-enum {
-	LLOP_RE				= BIT(16),
-	LLOP_WE				= BIT(17),
-	LLOP_ALE			= BIT(18),
-	LLOP_CLE			= BIT(19),
-	LLOP_RETURN_IDLE		= BIT(31),
-
-	LLOP_DATA_MASK			= GENMASK(15, 0),
-};
-
-static int brcmnand_low_level_op(struct brcmnand_host *host,
-				 enum brcmnand_llop_type type, u32 data,
-				 bool last_op)
-{
-	struct mtd_info *mtd = nand_to_mtd(&host->chip);
-	struct nand_chip *chip = &host->chip;
-	struct brcmnand_controller *ctrl = host->ctrl;
-	u32 tmp;
-
-	tmp = data & LLOP_DATA_MASK;
-	switch (type) {
-	case LL_OP_CMD:
-		tmp |= LLOP_WE | LLOP_CLE;
-		break;
-	case LL_OP_ADDR:
-		/* WE | ALE */
-		tmp |= LLOP_WE | LLOP_ALE;
-		break;
-	case LL_OP_WR:
-		/* WE */
-		tmp |= LLOP_WE;
-		break;
-	case LL_OP_RD:
-		/* RE */
-		tmp |= LLOP_RE;
-		break;
-	}
-	if (last_op)
-		/* RETURN_IDLE */
-		tmp |= LLOP_RETURN_IDLE;
-
-	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
-
-	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
-
-	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
-	return brcmnand_waitfunc(mtd, chip);
-}
-
-static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
-			     int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-	u64 addr = (u64)page_addr << chip->page_shift;
-	int native_cmd = 0;
-
-	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
-			command == NAND_CMD_RNDOUT)
-		addr = (u64)column;
-	/* Avoid propagating a negative, don't-care address */
-	else if (page_addr < 0)
-		addr = 0;
-
-	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
-		(unsigned long long)addr);
-
-	host->last_cmd = command;
-	host->last_byte = 0;
-	host->last_addr = addr;
-
-	switch (command) {
-	case NAND_CMD_RESET:
-		native_cmd = CMD_FLASH_RESET;
-		break;
-	case NAND_CMD_STATUS:
-		native_cmd = CMD_STATUS_READ;
-		break;
-	case NAND_CMD_READID:
-		native_cmd = CMD_DEVICE_ID_READ;
-		break;
-	case NAND_CMD_READOOB:
-		native_cmd = CMD_SPARE_AREA_READ;
-		break;
-	case NAND_CMD_ERASE1:
-		native_cmd = CMD_BLOCK_ERASE;
-		brcmnand_wp(mtd, 0);
-		break;
-	case NAND_CMD_PARAM:
-		native_cmd = CMD_PARAMETER_READ;
-		break;
-	case NAND_CMD_SET_FEATURES:
-	case NAND_CMD_GET_FEATURES:
-		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
-		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
-		break;
-	case NAND_CMD_RNDOUT:
-		native_cmd = CMD_PARAMETER_CHANGE_COL;
-		addr &= ~((u64)(FC_BYTES - 1));
-		/*
-		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
-		 * NB: hwcfg.sector_size_1k may not be initialized yet
-		 */
-		if (brcmnand_get_sector_size_1k(host)) {
-			host->hwcfg.sector_size_1k =
-				brcmnand_get_sector_size_1k(host);
-			brcmnand_set_sector_size_1k(host, 0);
-		}
-		break;
-	}
-
-	if (!native_cmd)
-		return;
-
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-		(host->cs << 16) | ((addr >> 32) & 0xffff));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
-
-	brcmnand_send_cmd(host, native_cmd);
-	brcmnand_waitfunc(mtd, chip);
-
-	if (native_cmd == CMD_PARAMETER_READ ||
-			native_cmd == CMD_PARAMETER_CHANGE_COL) {
-		/* Copy flash cache word-wise */
-		u32 *flash_cache = (u32 *)ctrl->flash_cache;
-		int i;
-
-		brcmnand_soc_data_bus_prepare(ctrl->soc, true);
-
-		/*
-		 * Must cache the FLASH_CACHE now, since changes in
-		 * SECTOR_SIZE_1K may invalidate it
-		 */
-		for (i = 0; i < FC_WORDS; i++)
-			/*
-			 * Flash cache is big endian for parameter pages, at
-			 * least on STB SoCs
-			 */
-			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
-
-		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
-
-		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
-		if (host->hwcfg.sector_size_1k)
-			brcmnand_set_sector_size_1k(host,
-						    host->hwcfg.sector_size_1k);
-	}
-
-	/* Re-enabling protection is necessary only after erase */
-	if (command == NAND_CMD_ERASE1)
-		brcmnand_wp(mtd, 1);
-}
-
-static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-	uint8_t ret = 0;
-	int addr, offs;
-
-	switch (host->last_cmd) {
-	case NAND_CMD_READID:
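-		/*
-		 * For illustration (added, not in the original code): the
-		 * eight ID bytes are stored MSB-first across two 32-bit
-		 * registers, so byte N is extracted with a right shift of
-		 * 24 - 8*N from BRCMNAND_ID (N < 4) or 56 - 8*N from
-		 * BRCMNAND_ID_EXT (4 <= N < 8); e.g. byte 5 uses a shift of
-		 * 56 - 40 = 16.
-		 */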
-		if (host->last_byte < 4)
-			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
-				(24 - (host->last_byte << 3));
-		else if (host->last_byte < 8)
-			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
-				(56 - (host->last_byte << 3));
-		break;
-
-	case NAND_CMD_READOOB:
-		ret = oob_reg_read(ctrl, host->last_byte);
-		break;
-
-	case NAND_CMD_STATUS:
-		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
-					INTFC_FLASH_STATUS;
-		if (wp_on) /* hide WP status */
-			ret |= NAND_STATUS_WP;
-		break;
-
-	case NAND_CMD_PARAM:
-	case NAND_CMD_RNDOUT:
-		addr = host->last_addr + host->last_byte;
-		offs = addr & (FC_BYTES - 1);
-
-		/* At FC_BYTES boundary, switch to next column */
-		if (host->last_byte > 0 && offs == 0)
-			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
-
-		ret = ctrl->flash_cache[offs];
-		break;
-	case NAND_CMD_GET_FEATURES:
-		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
-			ret = 0;
-		} else {
-			bool last = host->last_byte ==
-				ONFI_SUBFEATURE_PARAM_LEN - 1;
-			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
-			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
-		}
-	}
-
-	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
-	host->last_byte++;
-
-	return ret;
-}
-
-static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++, buf++)
-		*buf = brcmnand_read_byte(mtd);
-}
-
-static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
-				   int len)
-{
-	int i;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-
-	switch (host->last_cmd) {
-	case NAND_CMD_SET_FEATURES:
-		for (i = 0; i < len; i++)
-			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
-						  (i + 1) == len);
-		break;
-	default:
-		BUG();
-		break;
-	}
-}
-
-/**
- * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
- * following ahead of time:
- *  - Is this descriptor the beginning or end of a linked list?
- *  - What is the (DMA) address of the next descriptor in the linked list?
- */
-static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
-				  struct brcm_nand_dma_desc *desc, u64 addr,
-				  dma_addr_t buf, u32 len, u8 dma_cmd,
-				  bool begin, bool end,
-				  dma_addr_t next_desc)
-{
-	memset(desc, 0, sizeof(*desc));
-	/* Descriptors are written in native byte order (wordwise) */
-	desc->next_desc = lower_32_bits(next_desc);
-	desc->next_desc_ext = upper_32_bits(next_desc);
-	desc->cmd_irq = (dma_cmd << 24) |
-		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
-		(!!begin) | ((!!end) << 1); /* head, tail */
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	desc->cmd_irq |= 0x01 << 12;
-#endif
-	desc->dram_addr = lower_32_bits(buf);
-	desc->dram_addr_ext = upper_32_bits(buf);
-	desc->tfr_len = len;
-	desc->total_len = len;
-	desc->flash_addr = lower_32_bits(addr);
-	desc->flash_addr_ext = upper_32_bits(addr);
-	desc->cs = host->cs;
-	desc->status_valid = 0x01;
-	return 0;
-}
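-
-/*
- * Illustrative sketch (added, not taken from the original driver): a
- * two-descriptor chain would be built by filling the first descriptor with
- * begin=true, end=false and next_desc set to the DMA address of the second
- * descriptor, and the second one with begin=false, end=true and next_desc=0,
- * so only the tail descriptor requests the completion IRQ and engine stop.
- */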
-
-/**
- * Kick the FLASH_DMA engine, with a given DMA descriptor
- */
-static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	unsigned long timeo = msecs_to_jiffies(100);
-
-	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
-	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
-	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
-	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
-
-	/* Start FLASH_DMA engine */
-	ctrl->dma_pending = true;
-	mb(); /* flush previous writes */
-	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
-
-	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
-		dev_err(ctrl->dev,
-				"timeout waiting for DMA; status %#x, error status %#x\n",
-				flash_dma_readl(ctrl, FLASH_DMA_STATUS),
-				flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
-	}
-	ctrl->dma_pending = false;
-	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
-}
-
-static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
-			      u32 len, u8 dma_cmd)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	dma_addr_t buf_pa;
-	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
-
-	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
-	if (dma_mapping_error(ctrl->dev, buf_pa)) {
-		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
-		return -ENOMEM;
-	}
-
-	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
-				   dma_cmd, true, true, 0);
-
-	brcmnand_dma_run(host, ctrl->dma_pa);
-
-	dma_unmap_single(ctrl->dev, buf_pa, len, dir);
-
-	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
-		return -EBADMSG;
-	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
-		return -EUCLEAN;
-
-	return 0;
-}
-
-/*
- * Assumes proper CS is already set
- */
-static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
-				u64 addr, unsigned int trans, u32 *buf,
-				u8 *oob, u64 *err_addr)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-	int i, j, ret = 0;
-
-	/* Clear error addresses */
-	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
-	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
-	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
-	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
-
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-			(host->cs << 16) | ((addr >> 32) & 0xffff));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
-
-	for (i = 0; i < trans; i++, addr += FC_BYTES) {
-		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
-				   lower_32_bits(addr));
-		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
-		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
-		brcmnand_send_cmd(host, CMD_PAGE_READ);
-		brcmnand_waitfunc(mtd, chip);
-
-		if (likely(buf)) {
-			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
-
-			for (j = 0; j < FC_WORDS; j++, buf++)
-				*buf = brcmnand_read_fc(ctrl, j);
-
-			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
-		}
-
-		if (oob)
-			oob += read_oob_from_regs(ctrl, i, oob,
-					mtd->oobsize / trans,
-					host->hwcfg.sector_size_1k);
-
-		if (!ret) {
-			*err_addr = brcmnand_read_reg(ctrl,
-					BRCMNAND_UNCORR_ADDR) |
-				((u64)(brcmnand_read_reg(ctrl,
-						BRCMNAND_UNCORR_EXT_ADDR)
-					& 0xffff) << 32);
-			if (*err_addr)
-				ret = -EBADMSG;
-		}
-
-		if (!ret) {
-			*err_addr = brcmnand_read_reg(ctrl,
-					BRCMNAND_CORR_ADDR) |
-				((u64)(brcmnand_read_reg(ctrl,
-						BRCMNAND_CORR_EXT_ADDR)
-					& 0xffff) << 32);
-			if (*err_addr)
-				ret = -EUCLEAN;
-		}
-	}
-
-	return ret;
-}
-
-/*
- * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
- * error
- *
- * Because the HW ECC signals an ECC error if an erased page has even a single
- * bitflip, we must check each ECC error to see if it is actually an erased
- * page with bitflips, not a truly corrupted page.
- *
- * On a real error, return a negative error code (-EBADMSG for ECC error), and
- * buf will contain raw data.
- * Otherwise, buf is filled with 0xffs and the maximum number of
- * bitflips-per-ECC-sector is returned to the caller.
- */
-static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
-		  struct nand_chip *chip, void *buf, u64 addr)
-{
-	int i, sas;
-	void *oob = chip->oob_poi;
-	int bitflips = 0;
-	int page = addr >> chip->page_shift;
-	int ret;
-
-	if (!buf) {
-		buf = chip->buffers->databuf;
-		/* Invalidate page cache */
-		chip->pagebuf = -1;
-	}
-
-	sas = mtd->oobsize / chip->ecc.steps;
-
-	/* read without ecc for verification */
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-	ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
-		ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
-						  oob, sas, NULL, 0,
-						  chip->ecc.strength);
-		if (ret < 0)
-			return ret;
-
-		bitflips = max(bitflips, ret);
-	}
-
-	return bitflips;
-}
-
-static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
-			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-	u64 err_addr = 0;
-	int err;
-	bool retry = true;
-
-	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
-
-try_dmaread:
-	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
-
-	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
-		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
-					     CMD_PAGE_READ);
-		if (err) {
-			if (mtd_is_bitflip_or_eccerr(err))
-				err_addr = addr;
-			else
-				return -EIO;
-		}
-	} else {
-		if (oob)
-			memset(oob, 0x99, mtd->oobsize);
-
-		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
-					       oob, &err_addr);
-	}
-
-	if (mtd_is_eccerr(err)) {
-		/*
-		 * On controller versions 7.0 and 7.1, a DMA read issued after
-		 * a PIO read that reported an uncorrectable error can capture
-		 * that stale error; the error is cleared only by a subsequent
-		 * DMA read, so retry once to clear a possible false error
-		 * reported for the current DMA read.
-		 */
-		if ((ctrl->nand_version == 0x0700) ||
-		    (ctrl->nand_version == 0x0701)) {
-			if (retry) {
-				retry = false;
-				goto try_dmaread;
-			}
-		}
-
-		/*
-		 * Controller version 7.2 has a HW encoder to detect erased-page
-		 * bitflips; apply SW verification for older controllers only.
-		 */
-		if (ctrl->nand_version < 0x0702) {
-			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
-							      addr);
-			/* erased page bitflips corrected */
-			if (err > 0)
-				return err;
-		}
-
-		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
-			(unsigned long long)err_addr);
-		mtd->ecc_stats.failed++;
-		/* NAND layer expects zero on ECC errors */
-		return 0;
-	}
-
-	if (mtd_is_bitflip(err)) {
-		unsigned int corrected = brcmnand_count_corrected(ctrl);
-
-		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
-			(unsigned long long)err_addr);
-		mtd->ecc_stats.corrected += corrected;
-		/* Always exceed the software-imposed threshold */
-		return max(mtd->bitflip_threshold, corrected);
-	}
-
-	return 0;
-}
-
-static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			      uint8_t *buf, int oob_required, int page)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
-
-	return brcmnand_read(mtd, chip, host->last_addr,
-			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
-}
-
-static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				  uint8_t *buf, int oob_required, int page)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
-	int ret;
-
-	brcmnand_set_ecc_enabled(host, 0);
-	ret = brcmnand_read(mtd, chip, host->last_addr,
-			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
-	brcmnand_set_ecc_enabled(host, 1);
-	return ret;
-}
-
-static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			     int page)
-{
-	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
-			mtd->writesize >> FC_SHIFT,
-			NULL, (u8 *)chip->oob_poi);
-}
-
-static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				 int page)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-
-	brcmnand_set_ecc_enabled(host, 0);
-	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
-		mtd->writesize >> FC_SHIFT,
-		NULL, (u8 *)chip->oob_poi);
-	brcmnand_set_ecc_enabled(host, 1);
-	return 0;
-}
-
-static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
-			  u64 addr, const u32 *buf, u8 *oob)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	struct brcmnand_controller *ctrl = host->ctrl;
-	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
-	int status, ret = 0;
-
-	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
-
-	if (unlikely((unsigned long)buf & 0x03)) {
-		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
-		buf = (u32 *)((unsigned long)buf & ~0x03);
-	}
-
-	brcmnand_wp(mtd, 0);
-
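-	/*
-	 * Note added for clarity (not in the original source): pre-fill the
-	 * OOB holding registers with 0xff, presumably so that spare-area
-	 * bytes the caller does not provide are programmed as erased (0xff).
-	 */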
-	for (i = 0; i < ctrl->max_oob; i += 4)
-		oob_reg_write(ctrl, i, 0xffffffff);
-
-	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
-		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
-					mtd->writesize, CMD_PROGRAM_PAGE))
-			ret = -EIO;
-		goto out;
-	}
-
-	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
-			(host->cs << 16) | ((addr >> 32) & 0xffff));
-	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
-
-	for (i = 0; i < trans; i++, addr += FC_BYTES) {
-		/* full address MUST be set before populating FC */
-		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
-				   lower_32_bits(addr));
-		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
-
-		if (buf) {
-			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
-
-			for (j = 0; j < FC_WORDS; j++, buf++)
-				brcmnand_write_fc(ctrl, j, *buf);
-
-			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
-		} else if (oob) {
-			for (j = 0; j < FC_WORDS; j++)
-				brcmnand_write_fc(ctrl, j, 0xffffffff);
-		}
-
-		if (oob) {
-			oob += write_oob_to_regs(ctrl, i, oob,
-					mtd->oobsize / trans,
-					host->hwcfg.sector_size_1k);
-		}
-
-		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
-		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
-		status = brcmnand_waitfunc(mtd, chip);
-
-		if (status & NAND_STATUS_FAIL) {
-			dev_info(ctrl->dev, "program failed at %llx\n",
-				(unsigned long long)addr);
-			ret = -EIO;
-			goto out;
-		}
-	}
-out:
-	brcmnand_wp(mtd, 1);
-	return ret;
-}
-
-static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			       const uint8_t *buf, int oob_required, int page)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	void *oob = oob_required ? chip->oob_poi : NULL;
-
-	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
-	return 0;
-}
-
-static int brcmnand_write_page_raw(struct mtd_info *mtd,
-				   struct nand_chip *chip, const uint8_t *buf,
-				   int oob_required, int page)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	void *oob = oob_required ? chip->oob_poi : NULL;
-
-	brcmnand_set_ecc_enabled(host, 0);
-	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
-	brcmnand_set_ecc_enabled(host, 1);
-	return 0;
-}
-
-static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
-				  int page)
-{
-	return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
-				  NULL, chip->oob_poi);
-}
-
-static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				  int page)
-{
-	struct brcmnand_host *host = nand_get_controller_data(chip);
-	int ret;
-
-	brcmnand_set_ecc_enabled(host, 0);
-	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
-				 (u8 *)chip->oob_poi);
-	brcmnand_set_ecc_enabled(host, 1);
-
-	return ret;
-}
-
-/***********************************************************************
- * Per-CS setup (1 NAND device)
- ***********************************************************************/
-
-static int brcmnand_set_cfg(struct brcmnand_host *host,
-			    struct brcmnand_cfg *cfg)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	struct nand_chip *chip = &host->chip;
-	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
-	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
-			BRCMNAND_CS_CFG_EXT);
-	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
-			BRCMNAND_CS_ACC_CONTROL);
-	u8 block_size = 0, page_size = 0, device_size = 0;
-	u32 tmp;
-
-	if (ctrl->block_sizes) {
-		int i, found;
-
-		for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
-			if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
-				block_size = i;
-				found = 1;
-			}
-		if (!found) {
-			dev_warn(ctrl->dev, "invalid block size %u\n",
-					cfg->block_size);
-			return -EINVAL;
-		}
-	} else {
-		block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
-	}
-
-	if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
-				cfg->block_size > ctrl->max_block_size)) {
-		dev_warn(ctrl->dev, "invalid block size %u\n",
-				cfg->block_size);
-		block_size = 0;
-	}
-
-	if (ctrl->page_sizes) {
-		int i, found;
-
-		for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
-			if (ctrl->page_sizes[i] == cfg->page_size) {
-				page_size = i;
-				found = 1;
-			}
-		if (!found) {
-			dev_warn(ctrl->dev, "invalid page size %u\n",
-					cfg->page_size);
-			return -EINVAL;
-		}
-	} else {
-		page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
-	}
-
-	if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
-				cfg->page_size > ctrl->max_page_size)) {
-		dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
-		return -EINVAL;
-	}
-
-	if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
-		dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
-			(unsigned long long)cfg->device_size);
-		return -EINVAL;
-	}
-	device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
-
-	tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
-		(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
-		(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
-		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
-		(device_size << CFG_DEVICE_SIZE_SHIFT);
-	if (cfg_offs == cfg_ext_offs) {
-		tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
-		       (block_size << CFG_BLK_SIZE_SHIFT);
-		nand_writereg(ctrl, cfg_offs, tmp);
-	} else {
-		nand_writereg(ctrl, cfg_offs, tmp);
-		tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
-		      (block_size << CFG_EXT_BLK_SIZE_SHIFT);
-		nand_writereg(ctrl, cfg_ext_offs, tmp);
-	}
-
-	tmp = nand_readreg(ctrl, acc_control_offs);
-	tmp &= ~brcmnand_ecc_level_mask(ctrl);
-	tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
-	tmp &= ~brcmnand_spare_area_mask(ctrl);
-	tmp |= cfg->spare_area_size;
-	nand_writereg(ctrl, acc_control_offs, tmp);
-
-	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
-
-	/* threshold = ceil(BCH-level * 0.75) */
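-	/*
-	 * Worked example (added, not in the original source): with BCH-8
-	 * (chip->ecc.strength == 8), DIV_ROUND_UP(8 * 3, 4) == 6, so the
-	 * corrected-error reporting threshold is programmed to 6.
-	 */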
-	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
-
-	return 0;
-}
-
-static void brcmnand_print_cfg(struct brcmnand_host *host,
-			       char *buf, struct brcmnand_cfg *cfg)
-{
-	buf += sprintf(buf,
-		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
-		(unsigned long long)cfg->device_size >> 20,
-		cfg->block_size >> 10,
-		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
-		cfg->page_size >= 1024 ? "KiB" : "B",
-		cfg->spare_area_size, cfg->device_width);
-
-	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
-	if (is_hamming_ecc(host->ctrl, cfg))
-		sprintf(buf, ", Hamming ECC");
-	else if (cfg->sector_size_1k)
-		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
-	else
-		sprintf(buf, ", BCH-%u", cfg->ecc_level);
-}
-
-/*
- * Minimum number of bytes to address a page. Calculated as:
- *     roundup(log2(size / page-size) / 8)
- *
- * NB: the following does not "round up" for non-power-of-2 'size'; but this is
- *     OK because many other things will break if 'size' is irregular...
- */
-static inline int get_blk_adr_bytes(u64 size, u32 writesize)
-{
-	return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
-}
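-
-/*
- * Worked example (added, not in the original source): for a hypothetical
- * 1GiB device with 2KiB pages, ilog2(size) - ilog2(writesize) is
- * 30 - 11 = 19 page-address bits, ALIGN(19, 8) = 24, so the function
- * returns 3 address bytes.
- */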
-
-static int brcmnand_setup_dev(struct brcmnand_host *host)
-{
-	struct mtd_info *mtd = nand_to_mtd(&host->chip);
-	struct nand_chip *chip = &host->chip;
-	struct brcmnand_controller *ctrl = host->ctrl;
-	struct brcmnand_cfg *cfg = &host->hwcfg;
-	char msg[128];
-	u32 offs, tmp, oob_sector;
-	int ret;
-
-	memset(cfg, 0, sizeof(*cfg));
-
-	ret = of_property_read_u32(nand_get_flash_node(chip),
-				   "brcm,nand-oob-sector-size",
-				   &oob_sector);
-	if (ret) {
-		/* Use detected size */
-		cfg->spare_area_size = mtd->oobsize /
-					(mtd->writesize >> FC_SHIFT);
-	} else {
-		cfg->spare_area_size = oob_sector;
-	}
-	if (cfg->spare_area_size > ctrl->max_oob)
-		cfg->spare_area_size = ctrl->max_oob;
-	/*
-	 * Set oobsize to be consistent with the controller's spare_area_size,
-	 * as the rest is inaccessible.
-	 */
-	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
-
-	cfg->device_size = mtd->size;
-	cfg->block_size = mtd->erasesize;
-	cfg->page_size = mtd->writesize;
-	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
-	cfg->col_adr_bytes = 2;
-	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
-
-	if (chip->ecc.mode != NAND_ECC_HW) {
-		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
-			chip->ecc.mode);
-		return -EINVAL;
-	}
-
-	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
-		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
-			/* Default to Hamming for 1-bit ECC, if unspecified */
-			chip->ecc.algo = NAND_ECC_HAMMING;
-		else
-			/* Otherwise, BCH */
-			chip->ecc.algo = NAND_ECC_BCH;
-	}
-
-	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
-						   chip->ecc.size != 512)) {
-		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
-			chip->ecc.strength, chip->ecc.size);
-		return -EINVAL;
-	}
-
-	switch (chip->ecc.size) {
-	case 512:
-		if (chip->ecc.algo == NAND_ECC_HAMMING)
-			cfg->ecc_level = 15;
-		else
-			cfg->ecc_level = chip->ecc.strength;
-		cfg->sector_size_1k = 0;
-		break;
-	case 1024:
-		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
-			dev_err(ctrl->dev, "1KB sectors not supported\n");
-			return -EINVAL;
-		}
-		if (chip->ecc.strength & 0x1) {
-			dev_err(ctrl->dev,
-				"odd ECC not supported with 1KB sectors\n");
-			return -EINVAL;
-		}
-
-		cfg->ecc_level = chip->ecc.strength >> 1;
-		cfg->sector_size_1k = 1;
-		break;
-	default:
-		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
-			chip->ecc.size);
-		return -EINVAL;
-	}
-
-	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
-	if (mtd->writesize > 512)
-		cfg->ful_adr_bytes += cfg->col_adr_bytes;
-	else
-		cfg->ful_adr_bytes += 1;
-
-	ret = brcmnand_set_cfg(host, cfg);
-	if (ret)
-		return ret;
-
-	brcmnand_set_ecc_enabled(host, 1);
-
-	brcmnand_print_cfg(host, msg, cfg);
-	dev_info(ctrl->dev, "detected %s\n", msg);
-
-	/* Configure ACC_CONTROL */
-	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
-	tmp = nand_readreg(ctrl, offs);
-	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
-	tmp &= ~ACC_CONTROL_RD_ERASED;
-
-	/* We need to turn on Read from erased pages protected by ECC */
-	if (ctrl->nand_version >= 0x0702)
-		tmp |= ACC_CONTROL_RD_ERASED;
-	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
-	if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
-		/*
-		 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
-		 * errors
-		 */
-		if (has_flash_dma(ctrl))
-			tmp &= ~ACC_CONTROL_PREFETCH;
-		else
-			tmp |= ACC_CONTROL_PREFETCH;
-	}
-	nand_writereg(ctrl, offs, tmp);
-
-	return 0;
-}
-
-static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	struct platform_device *pdev = host->pdev;
-	struct mtd_info *mtd;
-	struct nand_chip *chip;
-	int ret;
-	u16 cfg_offs;
-
-	ret = of_property_read_u32(dn, "reg", &host->cs);
-	if (ret) {
-		dev_err(&pdev->dev, "can't get chip-select\n");
-		return -ENXIO;
-	}
-
-	mtd = nand_to_mtd(&host->chip);
-	chip = &host->chip;
-
-	nand_set_flash_node(chip, dn);
-	nand_set_controller_data(chip, host);
-	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
-				   host->cs);
-	mtd->owner = THIS_MODULE;
-	mtd->dev.parent = &pdev->dev;
-
-	chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
-	chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
-
-	chip->cmd_ctrl = brcmnand_cmd_ctrl;
-	chip->cmdfunc = brcmnand_cmdfunc;
-	chip->waitfunc = brcmnand_waitfunc;
-	chip->read_byte = brcmnand_read_byte;
-	chip->read_buf = brcmnand_read_buf;
-	chip->write_buf = brcmnand_write_buf;
-
-	chip->ecc.mode = NAND_ECC_HW;
-	chip->ecc.read_page = brcmnand_read_page;
-	chip->ecc.write_page = brcmnand_write_page;
-	chip->ecc.read_page_raw = brcmnand_read_page_raw;
-	chip->ecc.write_page_raw = brcmnand_write_page_raw;
-	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
-	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
-	chip->ecc.read_oob = brcmnand_read_oob;
-	chip->ecc.write_oob = brcmnand_write_oob;
-
-	chip->controller = &ctrl->controller;
-
-	/*
-	 * The bootloader might have configured 16-bit mode, but the
-	 * NAND READID command only works in 8-bit mode. We force
-	 * 8-bit mode here to ensure that NAND READID commands work.
-	 */
-	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
-	nand_writereg(ctrl, cfg_offs,
-		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
-
-	if (nand_scan_ident(mtd, 1, NULL))
-		return -ENXIO;
-
-	chip->options |= NAND_NO_SUBPAGE_WRITE;
-	/*
-	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
-	 * to/from, and have nand_base pass us a bounce buffer instead, as
-	 * needed.
-	 */
-	chip->options |= NAND_USE_BOUNCE_BUFFER;
-
-	if (chip->bbt_options & NAND_BBT_USE_FLASH)
-		chip->bbt_options |= NAND_BBT_NO_OOB;
-
-	if (brcmnand_setup_dev(host))
-		return -ENXIO;
-
-	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
-	/* only use our internal HW threshold */
-	mtd->bitflip_threshold = 1;
-
-	ret = brcmstb_choose_ecc_layout(host);
-	if (ret)
-		return ret;
-
-	if (nand_scan_tail(mtd))
-		return -ENXIO;
-
-	return mtd_device_register(mtd, NULL, 0);
-}
-
-static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
-					    int restore)
-{
-	struct brcmnand_controller *ctrl = host->ctrl;
-	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
-	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
-			BRCMNAND_CS_CFG_EXT);
-	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
-			BRCMNAND_CS_ACC_CONTROL);
-	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
-	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
-
-	if (restore) {
-		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
-		if (cfg_offs != cfg_ext_offs)
-			nand_writereg(ctrl, cfg_ext_offs,
-				      host->hwcfg.config_ext);
-		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
-		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
-		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
-	} else {
-		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
-		if (cfg_offs != cfg_ext_offs)
-			host->hwcfg.config_ext =
-				nand_readreg(ctrl, cfg_ext_offs);
-		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
-		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
-		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
-	}
-}
-
-static int brcmnand_suspend(struct device *dev)
-{
-	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
-	struct brcmnand_host *host;
-
-	list_for_each_entry(host, &ctrl->host_list, node)
-		brcmnand_save_restore_cs_config(host, 0);
-
-	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
-	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
-	ctrl->corr_stat_threshold =
-		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
-
-	if (has_flash_dma(ctrl))
-		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
-
-	return 0;
-}
-
-static int brcmnand_resume(struct device *dev)
-{
-	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
-	struct brcmnand_host *host;
-
-	if (has_flash_dma(ctrl)) {
-		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
-		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
-	}
-
-	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
-	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
-	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
-			ctrl->corr_stat_threshold);
-	if (ctrl->soc) {
-		/* Clear/re-enable interrupt */
-		ctrl->soc->ctlrdy_ack(ctrl->soc);
-		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
-	}
-
-	list_for_each_entry(host, &ctrl->host_list, node) {
-		struct nand_chip *chip = &host->chip;
-		struct mtd_info *mtd = nand_to_mtd(chip);
-
-		brcmnand_save_restore_cs_config(host, 1);
-
-		/* Reset the chip, required by some chips after power-up */
-		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-	}
-
-	return 0;
-}
-
-const struct dev_pm_ops brcmnand_pm_ops = {
-	.suspend		= brcmnand_suspend,
-	.resume			= brcmnand_resume,
-};
-EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
-
-static const struct of_device_id brcmnand_of_match[] = {
-	{ .compatible = "brcm,brcmnand-v4.0" },
-	{ .compatible = "brcm,brcmnand-v5.0" },
-	{ .compatible = "brcm,brcmnand-v6.0" },
-	{ .compatible = "brcm,brcmnand-v6.1" },
-	{ .compatible = "brcm,brcmnand-v6.2" },
-	{ .compatible = "brcm,brcmnand-v7.0" },
-	{ .compatible = "brcm,brcmnand-v7.1" },
-	{ .compatible = "brcm,brcmnand-v7.2" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, brcmnand_of_match);
-
-/***********************************************************************
- * Platform driver setup (per controller)
- ***********************************************************************/
-
-int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *dn = dev->of_node, *child;
-	struct brcmnand_controller *ctrl;
-	struct resource *res;
-	int ret;
-
-	/* We only support device-tree instantiation */
-	if (!dn)
-		return -ENODEV;
-
-	if (!of_match_node(brcmnand_of_match, dn))
-		return -ENODEV;
-
-	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
-	if (!ctrl)
-		return -ENOMEM;
-
-	dev_set_drvdata(dev, ctrl);
-	ctrl->dev = dev;
-
-	init_completion(&ctrl->done);
-	init_completion(&ctrl->dma_done);
-	nand_hw_control_init(&ctrl->controller);
-	INIT_LIST_HEAD(&ctrl->host_list);
-
-	/* NAND register range */
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ctrl->nand_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(ctrl->nand_base))
-		return PTR_ERR(ctrl->nand_base);
-
-	/* Enable clock before using NAND registers */
-	ctrl->clk = devm_clk_get(dev, "nand");
-	if (!IS_ERR(ctrl->clk)) {
-		ret = clk_prepare_enable(ctrl->clk);
-		if (ret)
-			return ret;
-	} else {
-		ret = PTR_ERR(ctrl->clk);
-		if (ret == -EPROBE_DEFER)
-			return ret;
-
-		ctrl->clk = NULL;
-	}
-
-	/* Initialize NAND revision */
-	ret = brcmnand_revision_init(ctrl);
-	if (ret)
-		goto err;
-
-	/*
-	 * Most chips have this cache at a fixed offset within the 'nand' block.
-	 * Some must specify this region separately.
-	 */
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
-	if (res) {
-		ctrl->nand_fc = devm_ioremap_resource(dev, res);
-		if (IS_ERR(ctrl->nand_fc)) {
-			ret = PTR_ERR(ctrl->nand_fc);
-			goto err;
-		}
-	} else {
-		ctrl->nand_fc = ctrl->nand_base +
-				ctrl->reg_offsets[BRCMNAND_FC_BASE];
-	}
-
-	/* FLASH_DMA */
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
-	if (res) {
-		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
-		if (IS_ERR(ctrl->flash_dma_base)) {
-			ret = PTR_ERR(ctrl->flash_dma_base);
-			goto err;
-		}
-
-		flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
-		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
-
-		/* Allocate descriptor(s) */
-		ctrl->dma_desc = dmam_alloc_coherent(dev,
-						     sizeof(*ctrl->dma_desc),
-						     &ctrl->dma_pa, GFP_KERNEL);
-		if (!ctrl->dma_desc) {
-			ret = -ENOMEM;
-			goto err;
-		}
-
-		ctrl->dma_irq = platform_get_irq(pdev, 1);
-		if ((int)ctrl->dma_irq < 0) {
-			dev_err(dev, "missing FLASH_DMA IRQ\n");
-			ret = -ENODEV;
-			goto err;
-		}
-
-		ret = devm_request_irq(dev, ctrl->dma_irq,
-				brcmnand_dma_irq, 0, DRV_NAME,
-				ctrl);
-		if (ret < 0) {
-			dev_err(dev, "can't allocate IRQ %d: error %d\n",
-					ctrl->dma_irq, ret);
-			goto err;
-		}
-
-		dev_info(dev, "enabling FLASH_DMA\n");
-	}
-
-	/* Disable automatic device ID config, direct addressing */
-	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
-			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
-	/* Disable XOR addressing */
-	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
-
-	if (ctrl->features & BRCMNAND_HAS_WP) {
-		/* Permanently disable write protection */
-		if (wp_on == 2)
-			brcmnand_set_wp(ctrl, false);
-	} else {
-		wp_on = 0;
-	}
-
-	/* IRQ */
-	ctrl->irq = platform_get_irq(pdev, 0);
-	if ((int)ctrl->irq < 0) {
-		dev_err(dev, "no IRQ defined\n");
-		ret = -ENODEV;
-		goto err;
-	}
-
-	/*
-	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
-	 * interesting ways
-	 */
-	if (soc) {
-		ctrl->soc = soc;
-
-		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
-				       DRV_NAME, ctrl);
-
-		/* Enable interrupt */
-		ctrl->soc->ctlrdy_ack(ctrl->soc);
-		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
-	} else {
-		/* Use standard interrupt infrastructure */
-		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
-				       DRV_NAME, ctrl);
-	}
-	if (ret < 0) {
-		dev_err(dev, "can't allocate IRQ %d: error %d\n",
-			ctrl->irq, ret);
-		goto err;
-	}
-
-	for_each_available_child_of_node(dn, child) {
-		if (of_device_is_compatible(child, "brcm,nandcs")) {
-			struct brcmnand_host *host;
-
-			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-			if (!host) {
-				of_node_put(child);
-				ret = -ENOMEM;
-				goto err;
-			}
-			host->pdev = pdev;
-			host->ctrl = ctrl;
-
-			ret = brcmnand_init_cs(host, child);
-			if (ret) {
-				devm_kfree(dev, host);
-				continue; /* Try all chip-selects */
-			}
-
-			list_add_tail(&host->node, &ctrl->host_list);
-		}
-	}
-
-	/* No chip-selects could initialize properly */
-	if (list_empty(&ctrl->host_list)) {
-		ret = -ENODEV;
-		goto err;
-	}
-
-	return 0;
-
-err:
-	clk_disable_unprepare(ctrl->clk);
-	return ret;
-
-}
-EXPORT_SYMBOL_GPL(brcmnand_probe);
-
-int brcmnand_remove(struct platform_device *pdev)
-{
-	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
-	struct brcmnand_host *host;
-
-	list_for_each_entry(host, &ctrl->host_list, node)
-		nand_release(nand_to_mtd(&host->chip));
-
-	clk_disable_unprepare(ctrl->clk);
-
-	dev_set_drvdata(&pdev->dev, NULL);
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(brcmnand_remove);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Kevin Cernekee");
-MODULE_AUTHOR("Brian Norris");
-MODULE_DESCRIPTION("NAND driver for Broadcom chips");
-MODULE_ALIAS("platform:brcmnand");
diff --git a/drivers/mtd/nand/brcmnand/brcmnand.h b/drivers/mtd/nand/brcmnand/brcmnand.h
deleted file mode 100644
index 5c44cd4aba87..000000000000
--- a/drivers/mtd/nand/brcmnand/brcmnand.h
+++ /dev/null
@@ -1,74 +0,0 @@ 
-/*
- * Copyright © 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __BRCMNAND_H__
-#define __BRCMNAND_H__
-
-#include <linux/types.h>
-#include <linux/io.h>
-
-struct platform_device;
-struct dev_pm_ops;
-
-struct brcmnand_soc {
-	bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
-	void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
-	void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
-				 bool is_param);
-};
-
-static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
-						 bool is_param)
-{
-	if (soc && soc->prepare_data_bus)
-		soc->prepare_data_bus(soc, true, is_param);
-}
-
-static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
-						   bool is_param)
-{
-	if (soc && soc->prepare_data_bus)
-		soc->prepare_data_bus(soc, false, is_param);
-}
-
-static inline u32 brcmnand_readl(void __iomem *addr)
-{
-	/*
-	 * MIPS endianness is configured by boot strap, which also reverses all
-	 * bus endianness (i.e., big-endian CPU + big endian bus ==> native
-	 * endian I/O).
-	 *
-	 * Other architectures (e.g., ARM) either do not support big endian, or
-	 * else leave I/O in little endian mode.
-	 */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
-		return __raw_readl(addr);
-	else
-		return readl_relaxed(addr);
-}
-
-static inline void brcmnand_writel(u32 val, void __iomem *addr)
-{
-	/* See brcmnand_readl() comments */
-	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
-		__raw_writel(val, addr);
-	else
-		writel_relaxed(val, addr);
-}
-
-int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
-int brcmnand_remove(struct platform_device *pdev);
-
-extern const struct dev_pm_ops brcmnand_pm_ops;
-
-#endif /* __BRCMNAND_H__ */
diff --git a/drivers/mtd/nand/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/brcmnand/brcmstb_nand.c
deleted file mode 100644
index 5c271077ac87..000000000000
--- a/drivers/mtd/nand/brcmnand/brcmstb_nand.c
+++ /dev/null
@@ -1,44 +0,0 @@ 
-/*
- * Copyright © 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include "brcmnand.h"
-
-static const struct of_device_id brcmstb_nand_of_match[] = {
-	{ .compatible = "brcm,brcmnand" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
-
-static int brcmstb_nand_probe(struct platform_device *pdev)
-{
-	return brcmnand_probe(pdev, NULL);
-}
-
-static struct platform_driver brcmstb_nand_driver = {
-	.probe			= brcmstb_nand_probe,
-	.remove			= brcmnand_remove,
-	.driver = {
-		.name		= "brcmstb_nand",
-		.pm		= &brcmnand_pm_ops,
-		.of_match_table = brcmstb_nand_of_match,
-	}
-};
-module_platform_driver(brcmstb_nand_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Brian Norris");
-MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
diff --git a/drivers/mtd/nand/brcmnand/iproc_nand.c b/drivers/mtd/nand/brcmnand/iproc_nand.c
deleted file mode 100644
index 4c6ae113664d..000000000000
--- a/drivers/mtd/nand/brcmnand/iproc_nand.c
+++ /dev/null
@@ -1,160 +0,0 @@ 
-/*
- * Copyright © 2015 Broadcom Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/device.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "brcmnand.h"
-
-struct iproc_nand_soc {
-	struct brcmnand_soc soc;
-
-	void __iomem *idm_base;
-	void __iomem *ext_base;
-	spinlock_t idm_lock;
-};
-
-#define IPROC_NAND_CTLR_READY_OFFSET       0x10
-#define IPROC_NAND_CTLR_READY              BIT(0)
-
-#define IPROC_NAND_IO_CTRL_OFFSET          0x00
-#define IPROC_NAND_APB_LE_MODE             BIT(24)
-#define IPROC_NAND_INT_CTRL_READ_ENABLE    BIT(6)
-
-static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
-{
-	struct iproc_nand_soc *priv =
-			container_of(soc, struct iproc_nand_soc, soc);
-	void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
-	u32 val = brcmnand_readl(mmio);
-
-	if (val & IPROC_NAND_CTLR_READY) {
-		brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
-		return true;
-	}
-
-	return false;
-}
-
-static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
-{
-	struct iproc_nand_soc *priv =
-			container_of(soc, struct iproc_nand_soc, soc);
-	void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
-	u32 val;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->idm_lock, flags);
-
-	val = brcmnand_readl(mmio);
-
-	if (en)
-		val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
-	else
-		val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
-
-	brcmnand_writel(val, mmio);
-
-	spin_unlock_irqrestore(&priv->idm_lock, flags);
-}
-
-static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
-				  bool is_param)
-{
-	struct iproc_nand_soc *priv =
-			container_of(soc, struct iproc_nand_soc, soc);
-	void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
-	u32 val;
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->idm_lock, flags);
-
-	val = brcmnand_readl(mmio);
-
-	/*
-	 * In the case of BE, or when dealing with NAND data, always configure
-	 * the APB bus to LE mode before accessing the FIFO and back to BE mode
-	 * after the access is done
-	 */
-	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
-		if (prepare)
-			val |= IPROC_NAND_APB_LE_MODE;
-		else
-			val &= ~IPROC_NAND_APB_LE_MODE;
-	} else { /* when in LE accessing the parameter page, keep APB in BE */
-		val &= ~IPROC_NAND_APB_LE_MODE;
-	}
-
-	brcmnand_writel(val, mmio);
-
-	spin_unlock_irqrestore(&priv->idm_lock, flags);
-}
-
-static int iproc_nand_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct iproc_nand_soc *priv;
-	struct brcmnand_soc *soc;
-	struct resource *res;
-
-	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-	soc = &priv->soc;
-
-	spin_lock_init(&priv->idm_lock);
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
-	priv->idm_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(priv->idm_base))
-		return PTR_ERR(priv->idm_base);
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
-	priv->ext_base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(priv->ext_base))
-		return PTR_ERR(priv->ext_base);
-
-	soc->ctlrdy_ack = iproc_nand_intc_ack;
-	soc->ctlrdy_set_enabled = iproc_nand_intc_set;
-	soc->prepare_data_bus = iproc_nand_apb_access;
-
-	return brcmnand_probe(pdev, soc);
-}
-
-static const struct of_device_id iproc_nand_of_match[] = {
-	{ .compatible = "brcm,nand-iproc" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
-
-static struct platform_driver iproc_nand_driver = {
-	.probe			= iproc_nand_probe,
-	.remove			= brcmnand_remove,
-	.driver = {
-		.name		= "iproc_nand",
-		.pm		= &brcmnand_pm_ops,
-		.of_match_table	= iproc_nand_of_match,
-	}
-};
-module_platform_driver(iproc_nand_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Brian Norris");
-MODULE_AUTHOR("Ray Jui");
-MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
deleted file mode 100644
index 93880171740a..000000000000
--- a/drivers/mtd/nand/cafe_nand.c
+++ /dev/null
@@ -1,898 +0,0 @@ 
-/*
- * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
- *
- * The data sheet for this device can be found at:
- *    http://wiki.laptop.org/go/Datasheets 
- *
- * Copyright © 2006 Red Hat, Inc.
- * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
- */
-
-#define DEBUG
-
-#include <linux/device.h>
-#undef DEBUG
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/rslib.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <asm/io.h>
-
-#define CAFE_NAND_CTRL1		0x00
-#define CAFE_NAND_CTRL2		0x04
-#define CAFE_NAND_CTRL3		0x08
-#define CAFE_NAND_STATUS	0x0c
-#define CAFE_NAND_IRQ		0x10
-#define CAFE_NAND_IRQ_MASK	0x14
-#define CAFE_NAND_DATA_LEN	0x18
-#define CAFE_NAND_ADDR1		0x1c
-#define CAFE_NAND_ADDR2		0x20
-#define CAFE_NAND_TIMING1	0x24
-#define CAFE_NAND_TIMING2	0x28
-#define CAFE_NAND_TIMING3	0x2c
-#define CAFE_NAND_NONMEM	0x30
-#define CAFE_NAND_ECC_RESULT	0x3C
-#define CAFE_NAND_DMA_CTRL	0x40
-#define CAFE_NAND_DMA_ADDR0	0x44
-#define CAFE_NAND_DMA_ADDR1	0x48
-#define CAFE_NAND_ECC_SYN01	0x50
-#define CAFE_NAND_ECC_SYN23	0x54
-#define CAFE_NAND_ECC_SYN45	0x58
-#define CAFE_NAND_ECC_SYN67	0x5c
-#define CAFE_NAND_READ_DATA	0x1000
-#define CAFE_NAND_WRITE_DATA	0x2000
-
-#define CAFE_GLOBAL_CTRL	0x3004
-#define CAFE_GLOBAL_IRQ		0x3008
-#define CAFE_GLOBAL_IRQ_MASK	0x300c
-#define CAFE_NAND_RESET		0x3034
-
-/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
-#define CTRL1_CHIPSELECT	(1<<19)
-
-struct cafe_priv {
-	struct nand_chip nand;
-	struct pci_dev *pdev;
-	void __iomem *mmio;
-	struct rs_control *rs;
-	uint32_t ctl1;
-	uint32_t ctl2;
-	int datalen;
-	int nr_data;
-	int data_pos;
-	int page_addr;
-	dma_addr_t dmaaddr;
-	unsigned char *dmabuf;
-};
-
-static int usedma = 1;
-module_param(usedma, int, 0644);
-
-static int skipbbt = 0;
-module_param(skipbbt, int, 0644);
-
-static int debug = 0;
-module_param(debug, int, 0644);
-
-static int regdebug = 0;
-module_param(regdebug, int, 0644);
-
-static int checkecc = 1;
-module_param(checkecc, int, 0644);
-
-static unsigned int numtimings;
-static int timing[3];
-module_param_array(timing, int, &numtimings, 0644);
-
-static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
-
-/* Hrm. Why isn't this already conditional on something in the struct device? */
-#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
-
-/* Make it easier to switch to PIO if we need to */
-#define cafe_readl(cafe, addr)			readl((cafe)->mmio + CAFE_##addr)
-#define cafe_writel(cafe, datum, addr)		writel(datum, (cafe)->mmio + CAFE_##addr)
-
-static int cafe_device_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-	int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
-	uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
-
-	cafe_writel(cafe, irqs, NAND_IRQ);
-
-	cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
-		result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
-		cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
-
-	return result;
-}
-
-
-static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-
-	if (usedma)
-		memcpy(cafe->dmabuf + cafe->datalen, buf, len);
-	else
-		memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
-
-	cafe->datalen += len;
-
-	cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
-		len, cafe->datalen);
-}
-
-static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-
-	if (usedma)
-		memcpy(buf, cafe->dmabuf + cafe->datalen, len);
-	else
-		memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
-
-	cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
-		  len, cafe->datalen);
-	cafe->datalen += len;
-}
-
-static uint8_t cafe_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-	uint8_t d;
-
-	cafe_read_buf(mtd, &d, 1);
-	cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
-
-	return d;
-}
-
-static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
-			      int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-	int adrbytes = 0;
-	uint32_t ctl1;
-	uint32_t doneint = 0x80000000;
-
-	cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
-		command, column, page_addr);
-
-	if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
-		/* Second half of a command we already calculated */
-		cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
-		ctl1 = cafe->ctl1;
-		cafe->ctl2 &= ~(1<<30);
-		cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
-			  cafe->ctl1, cafe->nr_data);
-		goto do_command;
-	}
-	/* Reset ECC engine */
-	cafe_writel(cafe, 0, NAND_CTRL2);
-
-	/* Emulate NAND_CMD_READOOB on large-page chips */
-	if (mtd->writesize > 512 &&
-	    command == NAND_CMD_READOOB) {
-		column += mtd->writesize;
-		command = NAND_CMD_READ0;
-	}
-
-	/* FIXME: Do we need to send read command before sending data
-	   for small-page chips, to position the buffer correctly? */
-
-	if (column != -1) {
-		cafe_writel(cafe, column, NAND_ADDR1);
-		adrbytes = 2;
-		if (page_addr != -1)
-			goto write_adr2;
-	} else if (page_addr != -1) {
-		cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
-		page_addr >>= 16;
-	write_adr2:
-		cafe_writel(cafe, page_addr, NAND_ADDR2);
-		adrbytes += 2;
-		if (mtd->size > mtd->writesize << 16)
-			adrbytes++;
-	}
-
-	cafe->data_pos = cafe->datalen = 0;
-
-	/* Set command valid bit, mask in the chip select bit  */
-	ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
-
-	/* Set RD or WR bits as appropriate */
-	if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
-		ctl1 |= (1<<26); /* rd */
-		/* Always 5 bytes, for now */
-		cafe->datalen = 4;
-		/* And one address cycle -- even for STATUS, since the controller doesn't work without one */
-		adrbytes = 1;
-	} else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
-		   command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
-		ctl1 |= 1<<26; /* rd */
-		/* For now, assume just read to end of page */
-		cafe->datalen = mtd->writesize + mtd->oobsize - column;
-	} else if (command == NAND_CMD_SEQIN)
-		ctl1 |= 1<<25; /* wr */
-
-	/* Set number of address bytes */
-	if (adrbytes)
-		ctl1 |= ((adrbytes-1)|8) << 27;
-
-	if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
-		/* Ignore the first command of a pair; the hardware
-		   deals with them both at once, later */
-		cafe->ctl1 = ctl1;
-		cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
-			  cafe->ctl1, cafe->datalen);
-		return;
-	}
-	/* RNDOUT and READ0 commands need a following byte */
-	if (command == NAND_CMD_RNDOUT)
-		cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
-	else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
-		cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
-
- do_command:
-	cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
-		cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
-
-	/* NB: The datasheet lies -- we really should be subtracting 1 here */
-	cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
-	cafe_writel(cafe, 0x90000000, NAND_IRQ);
-	if (usedma && (ctl1 & (3<<25))) {
-		uint32_t dmactl = 0xc0000000 + cafe->datalen;
-		/* If WR or RD bits set, set up DMA */
-		if (ctl1 & (1<<26)) {
-			/* It's a read */
-			dmactl |= (1<<29);
-			/* ... so it's done when the DMA is done, not just
-			   the command. */
-			doneint = 0x10000000;
-		}
-		cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
-	}
-	cafe->datalen = 0;
-
-	if (unlikely(regdebug)) {
-		int i;
-		printk("About to write command %08x to register 0\n", ctl1);
-		for (i=4; i< 0x5c; i+=4)
-			printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
-	}
-
-	cafe_writel(cafe, ctl1, NAND_CTRL1);
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
-	ndelay(100);
-
-	if (1) {
-		int c;
-		uint32_t irqs;
-
-		for (c = 500000; c != 0; c--) {
-			irqs = cafe_readl(cafe, NAND_IRQ);
-			if (irqs & doneint)
-				break;
-			udelay(1);
-			if (!(c % 100000))
-				cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
-			cpu_relax();
-		}
-		cafe_writel(cafe, doneint, NAND_IRQ);
-		cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
-			     command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
-	}
-
-	WARN_ON(cafe->ctl2 & (1<<30));
-
-	switch (command) {
-
-	case NAND_CMD_CACHEDPROG:
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_RNDIN:
-	case NAND_CMD_STATUS:
-	case NAND_CMD_RNDOUT:
-		cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
-		return;
-	}
-	nand_wait_ready(mtd);
-	cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
-}
-
-static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-
-	cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
-
-	/* Mask the appropriate bit into the stored value of ctl1
-	   which will be used by cafe_nand_cmdfunc() */
-	if (chipnr)
-		cafe->ctl1 |= CTRL1_CHIPSELECT;
-	else
-		cafe->ctl1 &= ~CTRL1_CHIPSELECT;
-}
-
-static irqreturn_t cafe_nand_interrupt(int irq, void *id)
-{
-	struct mtd_info *mtd = id;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-	uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
-	cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
-	if (!irqs)
-		return IRQ_NONE;
-
-	cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
-	return IRQ_HANDLED;
-}
-
-static void cafe_nand_bug(struct mtd_info *mtd)
-{
-	BUG();
-}
-
-static int cafe_nand_write_oob(struct mtd_info *mtd,
-			       struct nand_chip *chip, int page)
-{
-	int status = 0;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-/* Don't use -- use nand_read_oob_std for now */
-static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			      int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-/**
- * cafe_nand_read_page - [REPLACEABLE] hardware ecc syndrome based page read
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @oob_required:	caller expects OOB data read to chip->oob_poi
- * @page:	page number to read
- *
- * The hw generator calculates the error syndrome automatically. Therefore
- * we need a special oob layout and handling.
- */
-static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			       uint8_t *buf, int oob_required, int page)
-{
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-	unsigned int max_bitflips = 0;
-
-	cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
-		     cafe_readl(cafe, NAND_ECC_RESULT),
-		     cafe_readl(cafe, NAND_ECC_SYN01));
-
-	chip->read_buf(mtd, buf, mtd->writesize);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
-		unsigned short syn[8], pat[4];
-		int pos[4];
-		u8 *oob = chip->oob_poi;
-		int i, n;
-
-		for (i=0; i<8; i+=2) {
-			uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
-			syn[i] = cafe->rs->index_of[tmp & 0xfff];
-			syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
-		}
-
-		n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
-		                pat);
-
-		for (i = 0; i < n; i++) {
-			int p = pos[i];
-
-			/* The 12-bit symbols are mapped to bytes here */
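-			/*
-			 * Illustrative note (added, not in the original): two
-			 * 12-bit symbols pack into three data bytes, so e.g. a
-			 * correction at position p == 2 XORs pat[i] >> 8 into
-			 * buf[2] and the low 8 bits of pat[i] into buf[3].
-			 */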
-
-			if (p > 1374) {
-				/* out of range */
-				n = -1374;
-			} else if (p == 0) {
-				/* high four bits do not correspond to data */
-				if (pat[i] > 0xff)
-					n = -2048;
-				else
-					buf[0] ^= pat[i];
-			} else if (p == 1365) {
-				buf[2047] ^= pat[i] >> 4;
-				oob[0] ^= pat[i] << 4;
-			} else if (p > 1365) {
-				if ((p & 1) == 1) {
-					oob[3*p/2 - 2048] ^= pat[i] >> 4;
-					oob[3*p/2 - 2047] ^= pat[i] << 4;
-				} else {
-					oob[3*p/2 - 2049] ^= pat[i] >> 8;
-					oob[3*p/2 - 2048] ^= pat[i];
-				}
-			} else if ((p & 1) == 1) {
-				buf[3*p/2] ^= pat[i] >> 4;
-				buf[3*p/2 + 1] ^= pat[i] << 4;
-			} else {
-				buf[3*p/2 - 1] ^= pat[i] >> 8;
-				buf[3*p/2] ^= pat[i];
-			}
-		}
-
-		if (n < 0) {
-			dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
-				cafe_readl(cafe, NAND_ADDR2) * 2048);
-			for (i = 0; i < 0x5c; i += 4)
-				printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
-			mtd->ecc_stats.failed++;
-		} else {
-			dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
-			mtd->ecc_stats.corrected += n;
-			max_bitflips = max_t(unsigned int, max_bitflips, n);
-		}
-	}
-
-	return max_bitflips;
-}
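
The correction loop above maps each 12-bit Reed-Solomon symbol position to one or two byte offsets in the 2048-byte data buffer or the OOB area. As a hedged illustration, the standalone user-space sketch below (not driver code; the offset arithmetic is copied from the branches above) prints which bytes a correction at symbol position p would touch:

#include <stdio.h>

/*
 * Illustrative only: mirrors the symbol-position-to-byte mapping used by
 * cafe_nand_read_page() above.  "data" offsets index the 2048-byte page
 * buffer, "oob" offsets index chip->oob_poi.
 */
static void cafe_symbol_bytes(int p)
{
	if (p > 1374)
		printf("p=%4d: out of range\n", p);
	else if (p == 0)
		printf("p=%4d: data[0] (low 8 bits of the symbol only)\n", p);
	else if (p == 1365)
		printf("p=%4d: data[2047] and oob[0]\n", p);
	else if (p > 1365 && (p & 1))
		printf("p=%4d: oob[%d] and oob[%d]\n", p,
		       3 * p / 2 - 2048, 3 * p / 2 - 2047);
	else if (p > 1365)
		printf("p=%4d: oob[%d] and oob[%d]\n", p,
		       3 * p / 2 - 2049, 3 * p / 2 - 2048);
	else if (p & 1)
		printf("p=%4d: data[%d] and data[%d]\n", p,
		       3 * p / 2, 3 * p / 2 + 1);
	else
		printf("p=%4d: data[%d] and data[%d]\n", p,
		       3 * p / 2 - 1, 3 * p / 2);
}

int main(void)
{
	int samples[] = { 0, 1, 2, 1364, 1365, 1366, 1367 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		cafe_symbol_bytes(samples[i]);
	return 0;
}
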
-
-static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
-			      struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 0;
-	oobregion->length = chip->ecc.total;
-
-	return 0;
-}
-
-static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
-			       struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = chip->ecc.total;
-	oobregion->length = mtd->oobsize - chip->ecc.total;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
-	.ecc = cafe_ooblayout_ecc,
-	.free = cafe_ooblayout_free,
-};
-
-/* Ick. The BBT code really ought to be able to work this bit out
-   for itself from the above, at least for the 2KiB case */
-static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
-static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
-
-static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
-static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
-
-
-static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	14,
-	.len = 4,
-	.veroffs = 18,
-	.maxblocks = 4,
-	.pattern = cafe_bbt_pattern_2048
-};
-
-static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	14,
-	.len = 4,
-	.veroffs = 18,
-	.maxblocks = 4,
-	.pattern = cafe_mirror_pattern_2048
-};
-
-static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	14,
-	.len = 1,
-	.veroffs = 15,
-	.maxblocks = 4,
-	.pattern = cafe_bbt_pattern_512
-};
-
-static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	14,
-	.len = 1,
-	.veroffs = 15,
-	.maxblocks = 4,
-	.pattern = cafe_mirror_pattern_512
-};
-
-
-static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
-					  struct nand_chip *chip,
-					  const uint8_t *buf, int oob_required,
-					  int page)
-{
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-
-	chip->write_buf(mtd, buf, mtd->writesize);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	/* Set up ECC autogeneration */
-	cafe->ctl2 |= (1<<30);
-
-	return 0;
-}
-
-static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
-{
-	return 0;
-}
-
-/* F_2[X]/(X**6+X+1)  */
-static unsigned short gf64_mul(u8 a, u8 b)
-{
-	u8 c;
-	unsigned int i;
-
-	c = 0;
-	for (i = 0; i < 6; i++) {
-		if (a & 1)
-			c ^= b;
-		a >>= 1;
-		b <<= 1;
-		if ((b & 0x40) != 0)
-			b ^= 0x43;
-	}
-
-	return c;
-}
-
-/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X]  */
-static u16 gf4096_mul(u16 a, u16 b)
-{
-	u8 ah, al, bh, bl, ch, cl;
-
-	ah = a >> 6;
-	al = a & 0x3f;
-	bh = b >> 6;
-	bl = b & 0x3f;
-
-	ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
-	cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
-
-	return (ch << 6) ^ cl;
-}
-
-static int cafe_mul(int x)
-{
-	if (x == 0)
-		return 1;
-	return gf4096_mul(x, 0xe01);
-}
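
gf64_mul() multiplies in GF(2^6) modulo X^6 + X + 1, and gf4096_mul() builds GF(2^12) as a quadratic extension of GF(64). A hedged, standalone user-space sanity check (not driver code; the two helpers are copied from above) of two identities that follow directly from those definitions:

#include <assert.h>
#include <stdio.h>

typedef unsigned char u8;
typedef unsigned short u16;

/* Copies of the helpers above, for a user-space sanity check. */
static unsigned short gf64_mul(u8 a, u8 b)
{
	u8 c = 0;
	unsigned int i;

	for (i = 0; i < 6; i++) {
		if (a & 1)
			c ^= b;
		a >>= 1;
		b <<= 1;
		if ((b & 0x40) != 0)
			b ^= 0x43;
	}
	return c;
}

static u16 gf4096_mul(u16 a, u16 b)
{
	u8 ah = a >> 6, al = a & 0x3f, bh = b >> 6, bl = b & 0x3f;
	u8 ch, cl;

	ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
	cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
	return (ch << 6) ^ cl;
}

int main(void)
{
	/* X^5 * X = X^6 = X + 1 in F_2[X]/(X^6 + X + 1) */
	assert(gf64_mul(0x20, 0x02) == 0x03);
	/* 0x001 (al = 1, ah = 0) acts as the multiplicative identity */
	assert(gf4096_mul(0x001, 0xabc) == 0xabc);
	printf("GF(64)/GF(4096) sanity checks passed\n");
	return 0;
}
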
-
-static int cafe_nand_probe(struct pci_dev *pdev,
-				     const struct pci_device_id *ent)
-{
-	struct mtd_info *mtd;
-	struct cafe_priv *cafe;
-	uint32_t ctrl;
-	int err = 0;
-	int old_dma;
-	struct nand_buffers *nbuf;
-
-	/* Very old versions shared the same PCI ident for all three
-	   functions on the chip. Verify the class too... */
-	if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
-		return -ENODEV;
-
-	err = pci_enable_device(pdev);
-	if (err)
-		return err;
-
-	pci_set_master(pdev);
-
-	cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
-	if (!cafe)
-		return  -ENOMEM;
-
-	mtd = nand_to_mtd(&cafe->nand);
-	mtd->dev.parent = &pdev->dev;
-	nand_set_controller_data(&cafe->nand, cafe);
-
-	cafe->pdev = pdev;
-	cafe->mmio = pci_iomap(pdev, 0, 0);
-	if (!cafe->mmio) {
-		dev_warn(&pdev->dev, "failed to iomap\n");
-		err = -ENOMEM;
-		goto out_free_mtd;
-	}
-
-	cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
-	if (!cafe->rs) {
-		err = -ENOMEM;
-		goto out_ior;
-	}
-
-	cafe->nand.cmdfunc = cafe_nand_cmdfunc;
-	cafe->nand.dev_ready = cafe_device_ready;
-	cafe->nand.read_byte = cafe_read_byte;
-	cafe->nand.read_buf = cafe_read_buf;
-	cafe->nand.write_buf = cafe_write_buf;
-	cafe->nand.select_chip = cafe_select_chip;
-
-	cafe->nand.chip_delay = 0;
-
-	/* Enable the following for a flash based bad block table */
-	cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
-	cafe->nand.options = NAND_OWN_BUFFERS;
-
-	if (skipbbt) {
-		cafe->nand.options |= NAND_SKIP_BBTSCAN;
-		cafe->nand.block_bad = cafe_nand_block_bad;
-	}
-
-	if (numtimings && numtimings != 3) {
-		dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
-	}
-
-	if (numtimings == 3) {
-		cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
-			     timing[0], timing[1], timing[2]);
-	} else {
-		timing[0] = cafe_readl(cafe, NAND_TIMING1);
-		timing[1] = cafe_readl(cafe, NAND_TIMING2);
-		timing[2] = cafe_readl(cafe, NAND_TIMING3);
-
-		if (timing[0] | timing[1] | timing[2]) {
-			cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
-				     timing[0], timing[1], timing[2]);
-		} else {
-			dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
-			timing[0] = timing[1] = timing[2] = 0xffffffff;
-		}
-	}
-
-	/* Start off by resetting the NAND controller completely */
-	cafe_writel(cafe, 1, NAND_RESET);
-	cafe_writel(cafe, 0, NAND_RESET);
-
-	cafe_writel(cafe, timing[0], NAND_TIMING1);
-	cafe_writel(cafe, timing[1], NAND_TIMING2);
-	cafe_writel(cafe, timing[2], NAND_TIMING3);
-
-	cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
-	err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
-			  "CAFE NAND", mtd);
-	if (err) {
-		dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
-		goto out_ior;
-	}
-
-	/* Disable master reset, enable NAND clock */
-	ctrl = cafe_readl(cafe, GLOBAL_CTRL);
-	ctrl &= 0xffffeff0;
-	ctrl |= 0x00007000;
-	cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
-	cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
-	cafe_writel(cafe, 0, NAND_DMA_CTRL);
-
-	cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
-	cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
-
-	/* Enable NAND IRQ in global IRQ mask register */
-	cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
-	cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
-		cafe_readl(cafe, GLOBAL_CTRL),
-		cafe_readl(cafe, GLOBAL_IRQ_MASK));
-
-	/* Do not use the DMA for the nand_scan_ident() */
-	old_dma = usedma;
-	usedma = 0;
-
-	/* Scan to find existence of the device */
-	if (nand_scan_ident(mtd, 2, NULL)) {
-		err = -ENXIO;
-		goto out_irq;
-	}
-
-	cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
-				2112 + sizeof(struct nand_buffers) +
-				mtd->writesize + mtd->oobsize,
-				&cafe->dmaaddr, GFP_KERNEL);
-	if (!cafe->dmabuf) {
-		err = -ENOMEM;
-		goto out_irq;
-	}
-	cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
-
-	/* Set up DMA address */
-	cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
-	if (sizeof(cafe->dmaaddr) > 4)
-		/* Shift in two parts to shut the compiler up */
-		cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
-	else
-		cafe_writel(cafe, 0, NAND_DMA_ADDR1);
-
-	cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
-		cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
-
-	/* this driver does not need the @ecccalc and @ecccode */
-	nbuf->ecccalc = NULL;
-	nbuf->ecccode = NULL;
-	nbuf->databuf = (uint8_t *)(nbuf + 1);
-
-	/* Restore the DMA flag */
-	usedma = old_dma;
-
-	cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
-	if (mtd->writesize == 2048)
-		cafe->ctl2 |= 1<<29; /* 2KiB page size */
-
-	/* Set up ECC according to the type of chip we found */
-	mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
-	if (mtd->writesize == 2048) {
-		cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
-		cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
-	} else if (mtd->writesize == 512) {
-		cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
-		cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
-	} else {
-		printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
-		       mtd->writesize);
-		goto out_free_dma;
-	}
-	cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
-	cafe->nand.ecc.size = mtd->writesize;
-	cafe->nand.ecc.bytes = 14;
-	cafe->nand.ecc.strength = 4;
-	cafe->nand.ecc.hwctl  = (void *)cafe_nand_bug;
-	cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
-	cafe->nand.ecc.correct  = (void *)cafe_nand_bug;
-	cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
-	cafe->nand.ecc.write_oob = cafe_nand_write_oob;
-	cafe->nand.ecc.read_page = cafe_nand_read_page;
-	cafe->nand.ecc.read_oob = cafe_nand_read_oob;
-
-	err = nand_scan_tail(mtd);
-	if (err)
-		goto out_free_dma;
-
-	pci_set_drvdata(pdev, mtd);
-
-	mtd->name = "cafe_nand";
-	mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
-
-	goto out;
-
- out_free_dma:
-	dma_free_coherent(&cafe->pdev->dev,
-			2112 + sizeof(struct nand_buffers) +
-			mtd->writesize + mtd->oobsize,
-			cafe->dmabuf, cafe->dmaaddr);
- out_irq:
-	/* Disable NAND IRQ in global IRQ mask register */
-	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
-	free_irq(pdev->irq, mtd);
- out_ior:
-	pci_iounmap(pdev, cafe->mmio);
- out_free_mtd:
-	kfree(cafe);
- out:
-	return err;
-}
-
-static void cafe_nand_remove(struct pci_dev *pdev)
-{
-	struct mtd_info *mtd = pci_get_drvdata(pdev);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-
-	/* Disable NAND IRQ in global IRQ mask register */
-	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
-	free_irq(pdev->irq, mtd);
-	nand_release(mtd);
-	free_rs(cafe->rs);
-	pci_iounmap(pdev, cafe->mmio);
-	dma_free_coherent(&cafe->pdev->dev,
-			2112 + sizeof(struct nand_buffers) +
-			mtd->writesize + mtd->oobsize,
-			cafe->dmabuf, cafe->dmaaddr);
-	kfree(cafe);
-}
-
-static const struct pci_device_id cafe_nand_tbl[] = {
-	{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ }
-};
-
-MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
-
-static int cafe_nand_resume(struct pci_dev *pdev)
-{
-	uint32_t ctrl;
-	struct mtd_info *mtd = pci_get_drvdata(pdev);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct cafe_priv *cafe = nand_get_controller_data(chip);
-
-	/* Start off by resetting the NAND controller completely */
-	cafe_writel(cafe, 1, NAND_RESET);
-	cafe_writel(cafe, 0, NAND_RESET);
-	cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
-
-	/* Restore timing configuration */
-	cafe_writel(cafe, timing[0], NAND_TIMING1);
-	cafe_writel(cafe, timing[1], NAND_TIMING2);
-	cafe_writel(cafe, timing[2], NAND_TIMING3);
-
-	/* Disable master reset, enable NAND clock */
-	ctrl = cafe_readl(cafe, GLOBAL_CTRL);
-	ctrl &= 0xffffeff0;
-	ctrl |= 0x00007000;
-	cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
-	cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
-	cafe_writel(cafe, 0, NAND_DMA_CTRL);
-	cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
-	cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
-
-	/* Set up DMA address */
-	cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
-	if (sizeof(cafe->dmaaddr) > 4)
-	/* Shift in two parts to shut the compiler up */
-		cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
-	else
-		cafe_writel(cafe, 0, NAND_DMA_ADDR1);
-
-	/* Enable NAND IRQ in global IRQ mask register */
-	cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
-	return 0;
-}
-
-static struct pci_driver cafe_nand_pci_driver = {
-	.name = "CAFÉ NAND",
-	.id_table = cafe_nand_tbl,
-	.probe = cafe_nand_probe,
-	.remove = cafe_nand_remove,
-	.resume = cafe_nand_resume,
-};
-
-module_pci_driver(cafe_nand_pci_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
diff --git a/drivers/mtd/nand/cmx270_nand.c b/drivers/mtd/nand/cmx270_nand.c
deleted file mode 100644
index 2efe6a56557f..000000000000
--- a/drivers/mtd/nand/cmx270_nand.c
+++ /dev/null
@@ -1,246 +0,0 @@ 
-/*
- *  linux/drivers/mtd/nand/cmx270_nand.c
- *
- *  Copyright (C) 2006 Compulab, Ltd.
- *  Mike Rapoport <mike@compulab.co.il>
- *
- *  Derived from drivers/mtd/nand/h1910.c
- *       Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
- *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash device found on the
- *   CM-X270 board.
- */
-
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/slab.h>
-#include <linux/gpio.h>
-#include <linux/module.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mach-types.h>
-
-#include <mach/pxa2xx-regs.h>
-
-#define GPIO_NAND_CS	(11)
-#define GPIO_NAND_RB	(89)
-
-/* MTD structure for CM-X270 board */
-static struct mtd_info *cmx270_nand_mtd;
-
-/* remapped IO address of the device */
-static void __iomem *cmx270_nand_io;
-
-/*
- * Define static partitions for flash device
- */
-static struct mtd_partition partition_info[] = {
-	[0] = {
-		.name	= "cmx270-0",
-		.offset	= 0,
-		.size	= MTDPART_SIZ_FULL
-	}
-};
-#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
-
-static u_char cmx270_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	return (readl(this->IO_ADDR_R) >> 16);
-}
-
-static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	for (i=0; i<len; i++)
-		writel((*buf++ << 16), this->IO_ADDR_W);
-}
-
-static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	for (i=0; i<len; i++)
-		*buf++ = readl(this->IO_ADDR_R) >> 16;
-}
-
-static inline void nand_cs_on(void)
-{
-	gpio_set_value(GPIO_NAND_CS, 0);
-}
-
-static void nand_cs_off(void)
-{
-	dsb();
-
-	gpio_set_value(GPIO_NAND_CS, 1);
-}
-
-/*
- *	hardware specific access to control-lines
- */
-static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
-			     unsigned int ctrl)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
-
-	dsb();
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if ( ctrl & NAND_ALE )
-			nandaddr |=  (1 << 3);
-		else
-			nandaddr &= ~(1 << 3);
-		if ( ctrl & NAND_CLE )
-			nandaddr |=  (1 << 2);
-		else
-			nandaddr &= ~(1 << 2);
-		if ( ctrl & NAND_NCE )
-			nand_cs_on();
-		else
-			nand_cs_off();
-	}
-
-	dsb();
-	this->IO_ADDR_W = (void __iomem*)nandaddr;
-	if (dat != NAND_CMD_NONE)
-		writel((dat << 16), this->IO_ADDR_W);
-
-	dsb();
-}
-
-/*
- *	read device ready pin
- */
-static int cmx270_device_ready(struct mtd_info *mtd)
-{
-	dsb();
-
-	return (gpio_get_value(GPIO_NAND_RB));
-}
-
-/*
- * Main initialization routine
- */
-static int __init cmx270_init(void)
-{
-	struct nand_chip *this;
-	int ret;
-
-	if (!(machine_is_armcore() && cpu_is_pxa27x()))
-		return -ENODEV;
-
-	ret = gpio_request(GPIO_NAND_CS, "NAND CS");
-	if (ret) {
-		pr_warning("CM-X270: failed to request NAND CS gpio\n");
-		return ret;
-	}
-
-	gpio_direction_output(GPIO_NAND_CS, 1);
-
-	ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
-	if (ret) {
-		pr_warning("CM-X270: failed to request NAND R/B gpio\n");
-		goto err_gpio_request;
-	}
-
-	gpio_direction_input(GPIO_NAND_RB);
-
-	/* Allocate memory for MTD device structure and private data */
-	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!this) {
-		ret = -ENOMEM;
-		goto err_kzalloc;
-	}
-
-	cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
-	if (!cmx270_nand_io) {
-		pr_debug("Unable to ioremap NAND device\n");
-		ret = -EINVAL;
-		goto err_ioremap;
-	}
-
-	cmx270_nand_mtd = nand_to_mtd(this);
-
-	/* Link the private data with the MTD structure */
-	cmx270_nand_mtd->owner = THIS_MODULE;
-
-	/* insert callbacks */
-	this->IO_ADDR_R = cmx270_nand_io;
-	this->IO_ADDR_W = cmx270_nand_io;
-	this->cmd_ctrl = cmx270_hwcontrol;
-	this->dev_ready = cmx270_device_ready;
-
-	/* 20 us command delay time */
-	this->chip_delay = 20;
-	this->ecc.mode = NAND_ECC_SOFT;
-	this->ecc.algo = NAND_ECC_HAMMING;
-
-	/* read/write functions */
-	this->read_byte = cmx270_read_byte;
-	this->read_buf = cmx270_read_buf;
-	this->write_buf = cmx270_write_buf;
-
-	/* Scan to find existence of the device */
-	if (nand_scan (cmx270_nand_mtd, 1)) {
-		pr_notice("No NAND device\n");
-		ret = -ENXIO;
-		goto err_scan;
-	}
-
-	/* Register the partitions */
-	ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
-					partition_info, NUM_PARTITIONS);
-	if (ret)
-		goto err_scan;
-
-	/* Return happy */
-	return 0;
-
-err_scan:
-	iounmap(cmx270_nand_io);
-err_ioremap:
-	kfree(this);
-err_kzalloc:
-	gpio_free(GPIO_NAND_RB);
-err_gpio_request:
-	gpio_free(GPIO_NAND_CS);
-
-	return ret;
-
-}
-module_init(cmx270_init);
-
-/*
- * Clean up routine
- */
-static void __exit cmx270_cleanup(void)
-{
-	/* Release resources, unregister device */
-	nand_release(cmx270_nand_mtd);
-
-	gpio_free(GPIO_NAND_RB);
-	gpio_free(GPIO_NAND_CS);
-
-	iounmap(cmx270_nand_io);
-
-	kfree(mtd_to_nand(cmx270_nand_mtd));
-}
-module_exit(cmx270_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
-MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
deleted file mode 100644
index 8fafb4b4488d..000000000000
--- a/drivers/mtd/nand/cs553x_nand.c
+++ /dev/null
@@ -1,358 +0,0 @@ 
-/*
- * drivers/mtd/nand/cs553x_nand.c
- *
- * (C) 2005, 2006 Red Hat Inc.
- *
- * Author: David Woodhouse <dwmw2@infradead.org>
- *	   Tom Sylla <tom.sylla@amd.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  Overview:
- *   This is a device driver for the NAND flash controller found on
- *   the AMD CS5535/CS5536 companion chipsets for the Geode processor.
- *   mtd-id for command line partitioning is cs553x_nand_cs[0-3]
- *   where 0-3 reflects the chip select for NAND.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/msr.h>
-#include <asm/io.h>
-
-#define NR_CS553X_CONTROLLERS	4
-
-#define MSR_DIVIL_GLD_CAP	0x51400000	/* DIVIL capabilities */
-#define CAP_CS5535		0x2df000ULL
-#define CAP_CS5536		0x5df500ULL
-
-/* NAND Timing MSRs */
-#define MSR_NANDF_DATA		0x5140001b	/* NAND Flash Data Timing MSR */
-#define MSR_NANDF_CTL		0x5140001c	/* NAND Flash Control Timing */
-#define MSR_NANDF_RSVD		0x5140001d	/* Reserved */
-
-/* NAND BAR MSRs */
-#define MSR_DIVIL_LBAR_FLSH0	0x51400010	/* Flash Chip Select 0 */
-#define MSR_DIVIL_LBAR_FLSH1	0x51400011	/* Flash Chip Select 1 */
-#define MSR_DIVIL_LBAR_FLSH2	0x51400012	/* Flash Chip Select 2 */
-#define MSR_DIVIL_LBAR_FLSH3	0x51400013	/* Flash Chip Select 3 */
-	/* Each made up of... */
-#define FLSH_LBAR_EN		(1ULL<<32)
-#define FLSH_NOR_NAND		(1ULL<<33)	/* 1 for NAND */
-#define FLSH_MEM_IO		(1ULL<<34)	/* 1 for MMIO */
-	/* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
-	/* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
-
-/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
-#define MSR_DIVIL_BALL_OPTS	0x51400015
-#define PIN_OPT_IDE		(1<<0)	/* 0 for flash, 1 for IDE */
-
-/* Registers within the NAND flash controller BAR -- memory mapped */
-#define MM_NAND_DATA		0x00	/* 0 to 0x7ff, in fact */
-#define MM_NAND_CTL		0x800	/* Any even address 0x800-0x80e */
-#define MM_NAND_IO		0x801	/* Any odd address 0x801-0x80f */
-#define MM_NAND_STS		0x810
-#define MM_NAND_ECC_LSB		0x811
-#define MM_NAND_ECC_MSB		0x812
-#define MM_NAND_ECC_COL		0x813
-#define MM_NAND_LAC		0x814
-#define MM_NAND_ECC_CTL		0x815
-
-/* Registers within the NAND flash controller BAR -- I/O mapped */
-#define IO_NAND_DATA		0x00	/* 0 to 3, in fact */
-#define IO_NAND_CTL		0x04
-#define IO_NAND_IO		0x05
-#define IO_NAND_STS		0x06
-#define IO_NAND_ECC_CTL		0x08
-#define IO_NAND_ECC_LSB		0x09
-#define IO_NAND_ECC_MSB		0x0a
-#define IO_NAND_ECC_COL		0x0b
-#define IO_NAND_LAC		0x0c
-
-#define CS_NAND_CTL_DIST_EN	(1<<4)	/* Enable NAND Distract interrupt */
-#define CS_NAND_CTL_RDY_INT_MASK	(1<<3)	/* Enable RDY/BUSY# interrupt */
-#define CS_NAND_CTL_ALE		(1<<2)
-#define CS_NAND_CTL_CLE		(1<<1)
-#define CS_NAND_CTL_CE		(1<<0)	/* Keep low; 1 to reset */
-
-#define CS_NAND_STS_FLASH_RDY	(1<<3)
-#define CS_NAND_CTLR_BUSY	(1<<2)
-#define CS_NAND_CMD_COMP	(1<<1)
-#define CS_NAND_DIST_ST		(1<<0)
-
-#define CS_NAND_ECC_PARITY	(1<<2)
-#define CS_NAND_ECC_CLRECC	(1<<1)
-#define CS_NAND_ECC_ENECC	(1<<0)
-
-static void cs553x_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	while (unlikely(len > 0x800)) {
-		memcpy_fromio(buf, this->IO_ADDR_R, 0x800);
-		buf += 0x800;
-		len -= 0x800;
-	}
-	memcpy_fromio(buf, this->IO_ADDR_R, len);
-}
-
-static void cs553x_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	while (unlikely(len > 0x800)) {
-		memcpy_toio(this->IO_ADDR_R, buf, 0x800);
-		buf += 0x800;
-		len -= 0x800;
-	}
-	memcpy_toio(this->IO_ADDR_R, buf, len);
-}
-
-static unsigned char cs553x_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	return readb(this->IO_ADDR_R);
-}
-
-static void cs553x_write_byte(struct mtd_info *mtd, u_char byte)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int i = 100000;
-
-	while (i && readb(this->IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
-		udelay(1);
-		i--;
-	}
-	writeb(byte, this->IO_ADDR_W + 0x801);
-}
-
-static void cs553x_hwcontrol(struct mtd_info *mtd, int cmd,
-			     unsigned int ctrl)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	void __iomem *mmio_base = this->IO_ADDR_R;
-	if (ctrl & NAND_CTRL_CHANGE) {
-		unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
-		writeb(ctl, mmio_base + MM_NAND_CTL);
-	}
-	if (cmd != NAND_CMD_NONE)
-		cs553x_write_byte(mtd, cmd);
-}
-
-static int cs553x_device_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	void __iomem *mmio_base = this->IO_ADDR_R;
-	unsigned char foo = readb(mmio_base + MM_NAND_STS);
-
-	return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
-}
-
-static void cs_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	void __iomem *mmio_base = this->IO_ADDR_R;
-
-	writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
-}
-
-static int cs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
-{
-	uint32_t ecc;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	void __iomem *mmio_base = this->IO_ADDR_R;
-
-	ecc = readl(mmio_base + MM_NAND_STS);
-
-	ecc_code[1] = ecc >> 8;
-	ecc_code[0] = ecc >> 16;
-	ecc_code[2] = ecc >> 24;
-	return 0;
-}
-
-static struct mtd_info *cs553x_mtd[4];
-
-static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
-{
-	int err = 0;
-	struct nand_chip *this;
-	struct mtd_info *new_mtd;
-
-	printk(KERN_NOTICE "Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n", cs, mmio?"MM":"P", adr);
-
-	if (!mmio) {
-		printk(KERN_NOTICE "PIO mode not yet implemented for CS553X NAND controller\n");
-		return -ENXIO;
-	}
-
-	/* Allocate memory for MTD device structure and private data */
-	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!this) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	new_mtd = nand_to_mtd(this);
-
-	/* Link the private data with the MTD structure */
-	new_mtd->owner = THIS_MODULE;
-
-	/* map physical address */
-	this->IO_ADDR_R = this->IO_ADDR_W = ioremap(adr, 4096);
-	if (!this->IO_ADDR_R) {
-		printk(KERN_WARNING "ioremap cs553x NAND @0x%08lx failed\n", adr);
-		err = -EIO;
-		goto out_mtd;
-	}
-
-	this->cmd_ctrl = cs553x_hwcontrol;
-	this->dev_ready = cs553x_device_ready;
-	this->read_byte = cs553x_read_byte;
-	this->read_buf = cs553x_read_buf;
-	this->write_buf = cs553x_write_buf;
-
-	this->chip_delay = 0;
-
-	this->ecc.mode = NAND_ECC_HW;
-	this->ecc.size = 256;
-	this->ecc.bytes = 3;
-	this->ecc.hwctl  = cs_enable_hwecc;
-	this->ecc.calculate = cs_calculate_ecc;
-	this->ecc.correct  = nand_correct_data;
-	this->ecc.strength = 1;
-
-	/* Enable the following for a flash based bad block table */
-	this->bbt_options = NAND_BBT_USE_FLASH;
-
-	new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
-	if (!new_mtd->name) {
-		err = -ENOMEM;
-		goto out_ior;
-	}
-
-	/* Scan to find existence of the device */
-	if (nand_scan(new_mtd, 1)) {
-		err = -ENXIO;
-		goto out_free;
-	}
-
-	cs553x_mtd[cs] = new_mtd;
-	goto out;
-
-out_free:
-	kfree(new_mtd->name);
-out_ior:
-	iounmap(this->IO_ADDR_R);
-out_mtd:
-	kfree(this);
-out:
-	return err;
-}
-
-static int is_geode(void)
-{
-	/* These are the CPUs which will have a CS553[56] companion chip */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
-	    boot_cpu_data.x86 == 5 &&
-	    boot_cpu_data.x86_model == 10)
-		return 1; /* Geode LX */
-
-	if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
-	     boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
-	    boot_cpu_data.x86 == 5 &&
-	    boot_cpu_data.x86_model == 5)
-		return 1; /* Geode GX (née GX2) */
-
-	return 0;
-}
-
-static int __init cs553x_init(void)
-{
-	int err = -ENXIO;
-	int i;
-	uint64_t val;
-
-	/* If the CPU isn't a Geode GX or LX, abort */
-	if (!is_geode())
-		return -ENXIO;
-
-	/* If it doesn't have the CS553[56], abort */
-	rdmsrl(MSR_DIVIL_GLD_CAP, val);
-	val &= ~0xFFULL;
-	if (val != CAP_CS5535 && val != CAP_CS5536)
-		return -ENXIO;
-
-	/* If it doesn't have the NAND controller enabled, abort */
-	rdmsrl(MSR_DIVIL_BALL_OPTS, val);
-	if (val & PIN_OPT_IDE) {
-		printk(KERN_INFO "CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
-		return -ENXIO;
-	}
-
-	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-		rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
-
-		if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
-			err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
-	}
-
-	/* Register all devices together here. This means we can easily hack it to
-	   do mtdconcat etc. if we want to. */
-	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-		if (cs553x_mtd[i]) {
-			/* If any devices registered, return success. Else the last error. */
-			mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
-						  NULL, 0);
-			err = 0;
-		}
-	}
-
-	return err;
-}
-
-module_init(cs553x_init);
-
-static void __exit cs553x_cleanup(void)
-{
-	int i;
-
-	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
-		struct mtd_info *mtd = cs553x_mtd[i];
-		struct nand_chip *this;
-		void __iomem *mmio_base;
-
-		if (!mtd)
-			continue;
-
-		this = mtd_to_nand(mtd);
-		mmio_base = this->IO_ADDR_R;
-
-		/* Release resources, unregister device */
-		nand_release(mtd);
-		kfree(mtd->name);
-		cs553x_mtd[i] = NULL;
-
-		/* unmap physical address */
-		iounmap(mmio_base);
-
-		/* Free the MTD device structure */
-		kfree(this);
-	}
-}
-
-module_exit(cs553x_cleanup);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
deleted file mode 100644
index fcc533261c06..000000000000
--- a/drivers/mtd/nand/davinci_nand.c
+++ /dev/null
@@ -1,862 +0,0 @@ 
-/*
- * davinci_nand.c - NAND Flash Driver for DaVinci family chips
- *
- * Copyright © 2006 Texas Instruments.
- *
- * Port to 2.6.23 Copyright © 2008 by:
- *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
- *   Troy Kisky <troy.kisky@boundarydevices.com>
- *   Dirk Behme <Dirk.Behme@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/slab.h>
-#include <linux/of_device.h>
-#include <linux/of.h>
-
-#include <linux/platform_data/mtd-davinci.h>
-#include <linux/platform_data/mtd-davinci-aemif.h>
-
-/*
- * This is a device driver for the NAND flash controller found on the
- * various DaVinci family chips.  It handles up to four SoC chipselects,
- * and some flavors of secondary chipselect (e.g. based on A12) as used
- * with multichip packages.
- *
- * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
- * available on chips like the DM355 and OMAP-L137 and needed with the
- * more error-prone MLC NAND chips.
- *
- * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
- * outputs in a "wire-AND" configuration, with no per-chip signals.
- */
-struct davinci_nand_info {
-	struct nand_chip	chip;
-
-	struct device		*dev;
-	struct clk		*clk;
-
-	bool			is_readmode;
-
-	void __iomem		*base;
-	void __iomem		*vaddr;
-
-	uint32_t		ioaddr;
-	uint32_t		current_cs;
-
-	uint32_t		mask_chipsel;
-	uint32_t		mask_ale;
-	uint32_t		mask_cle;
-
-	uint32_t		core_chipsel;
-
-	struct davinci_aemif_timing	*timing;
-};
-
-static DEFINE_SPINLOCK(davinci_nand_lock);
-static bool ecc4_busy;
-
-static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
-}
-
-static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
-		int offset)
-{
-	return __raw_readl(info->base + offset);
-}
-
-static inline void davinci_nand_writel(struct davinci_nand_info *info,
-		int offset, unsigned long value)
-{
-	__raw_writel(value, info->base + offset);
-}
-
-/*----------------------------------------------------------------------*/
-
-/*
- * Access to hardware control lines:  ALE, CLE, secondary chipselect.
- */
-
-static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
-				   unsigned int ctrl)
-{
-	struct davinci_nand_info	*info = to_davinci_nand(mtd);
-	uint32_t			addr = info->current_cs;
-	struct nand_chip		*nand = mtd_to_nand(mtd);
-
-	/* Did the control lines change? */
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
-			addr |= info->mask_cle;
-		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
-			addr |= info->mask_ale;
-
-		nand->IO_ADDR_W = (void __iomem __force *)addr;
-	}
-
-	if (cmd != NAND_CMD_NONE)
-		iowrite8(cmd, nand->IO_ADDR_W);
-}
-
-static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct davinci_nand_info	*info = to_davinci_nand(mtd);
-	uint32_t			addr = info->ioaddr;
-
-	/* maybe kick in a second chipselect */
-	if (chip > 0)
-		addr |= info->mask_chipsel;
-	info->current_cs = addr;
-
-	info->chip.IO_ADDR_W = (void __iomem __force *)addr;
-	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
-}
-
-/*----------------------------------------------------------------------*/
-
-/*
- * 1-bit hardware ECC ... context maintained for each core chipselect
- */
-
-static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
-{
-	struct davinci_nand_info *info = to_davinci_nand(mtd);
-
-	return davinci_nand_readl(info, NANDF1ECC_OFFSET
-			+ 4 * info->core_chipsel);
-}
-
-static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
-{
-	struct davinci_nand_info *info;
-	uint32_t nandcfr;
-	unsigned long flags;
-
-	info = to_davinci_nand(mtd);
-
-	/* Reset ECC hardware */
-	nand_davinci_readecc_1bit(mtd);
-
-	spin_lock_irqsave(&davinci_nand_lock, flags);
-
-	/* Restart ECC hardware */
-	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
-	nandcfr |= BIT(8 + info->core_chipsel);
-	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
-
-	spin_unlock_irqrestore(&davinci_nand_lock, flags);
-}
-
-/*
- * Read hardware ECC value and pack into three bytes
- */
-static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
-				      const u_char *dat, u_char *ecc_code)
-{
-	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
-	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
-
-	/* invert so that erased block ecc is correct */
-	ecc24 = ~ecc24;
-	ecc_code[0] = (u_char)(ecc24);
-	ecc_code[1] = (u_char)(ecc24 >> 8);
-	ecc_code[2] = (u_char)(ecc24 >> 16);
-
-	return 0;
-}
-
-static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
-				     u_char *read_ecc, u_char *calc_ecc)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
-					  (read_ecc[2] << 16);
-	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
-					  (calc_ecc[2] << 16);
-	uint32_t diff = eccCalc ^ eccNand;
-
-	if (diff) {
-		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
-			/* Correctable error */
-			if ((diff >> (12 + 3)) < chip->ecc.size) {
-				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
-				return 1;
-			} else {
-				return -EBADMSG;
-			}
-		} else if (!(diff & (diff - 1))) {
-			/* Single bit ECC error in the ECC itself,
-			 * nothing to fix */
-			return 1;
-		} else {
-			/* Uncorrectable error */
-			return -EBADMSG;
-		}
-
-	}
-	return 0;
-}
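
In the correctable case above, the two 12-bit halves of diff are bitwise complements of each other and the upper half encodes (byte offset << 3) | bit, which is how the driver locates the bad bit. A hedged, standalone sketch (synthetic diff value only; it does not model the parity generation itself) exercising just that branch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Build a "diff" value of the shape the correctable-error branch in
 * nand_davinci_correct_1bit() expects: upper 12 bits = (byte << 3) | bit,
 * lower 12 bits = bitwise complement of the upper 12 bits.
 */
static uint32_t make_diff(unsigned int byte, unsigned int bit)
{
	uint32_t hi = (byte << 3) | bit;

	return (hi << 12) | (~hi & 0xfff);
}

int main(void)
{
	uint8_t data[512];
	uint32_t diff = make_diff(5, 3);

	memset(data, 0, sizeof(data));
	data[5] = 1 << 3;			/* pretend bit 3 of byte 5 flipped */

	/* Same checks/arithmetic as the driver branch */
	assert((((diff >> 12) ^ diff) & 0xfff) == 0xfff);
	assert((diff >> (12 + 3)) < 512);	/* chip->ecc.size */
	data[diff >> (12 + 3)] ^= 1 << ((diff >> 12) & 7);

	assert(data[5] == 0);			/* error undone */
	printf("corrected byte %u, bit %u\n",
	       (unsigned int)(diff >> 15), (unsigned int)((diff >> 12) & 7));
	return 0;
}
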
-
-/*----------------------------------------------------------------------*/
-
-/*
- * 4-bit hardware ECC ... context maintained over entire AEMIF
- *
- * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
- * since that forces use of a problematic "infix OOB" layout.
- * Among other things, it trashes manufacturer bad block markers.
- * Also, and specific to this hardware, it ECC-protects the "prepad"
- * in the OOB ... while having ECC protection for parts of OOB would
- * seem useful, the current MTD stack sometimes wants to update the
- * OOB without recomputing ECC.
- */
-
-static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
-{
-	struct davinci_nand_info *info = to_davinci_nand(mtd);
-	unsigned long flags;
-	u32 val;
-
-	spin_lock_irqsave(&davinci_nand_lock, flags);
-
-	/* Start 4-bit ECC calculation for read/write */
-	val = davinci_nand_readl(info, NANDFCR_OFFSET);
-	val &= ~(0x03 << 4);
-	val |= (info->core_chipsel << 4) | BIT(12);
-	davinci_nand_writel(info, NANDFCR_OFFSET, val);
-
-	info->is_readmode = (mode == NAND_ECC_READ);
-
-	spin_unlock_irqrestore(&davinci_nand_lock, flags);
-}
-
-/* Read raw ECC code after writing to NAND. */
-static void
-nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
-{
-	const u32 mask = 0x03ff03ff;
-
-	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
-	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
-	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
-	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
-}
-
-/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
-static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
-		const u_char *dat, u_char *ecc_code)
-{
-	struct davinci_nand_info *info = to_davinci_nand(mtd);
-	u32 raw_ecc[4], *p;
-	unsigned i;
-
-	/* After a read, terminate ECC calculation by a dummy read
-	 * of some 4-bit ECC register.  ECC covers everything that
-	 * was read; correct() just uses the hardware state, so
-	 * ecc_code is not needed.
-	 */
-	if (info->is_readmode) {
-		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
-		return 0;
-	}
-
-	/* Pack eight raw 10-bit ecc values into ten bytes, making
-	 * two passes which each convert four values (in upper and
-	 * lower halves of two 32-bit words) into five bytes.  The
-	 * ROM boot loader uses this same packing scheme.
-	 */
-	nand_davinci_readecc_4bit(info, raw_ecc);
-	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
-		*ecc_code++ =   p[0]        & 0xff;
-		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
-		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
-		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
-		*ecc_code++ =  (p[1] >> 18) & 0xff;
-	}
-
-	return 0;
-}
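
To illustrate the packing scheme described above and its inverse in nand_davinci_correct_4bit() further down, here is a hedged, standalone user-space sketch (not driver code): it packs eight sample 10-bit values with the same expressions as the loop above, unpacks them with the same expressions as the correction path (building the little-endian 16-bit view explicitly instead of type punning), and checks the round trip:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Eight arbitrary 10-bit sample values */
	static const uint16_t val[8] = {
		0x3ff, 0x155, 0x2aa, 0x001, 0x123, 0x3c0, 0x07f, 0x200
	};
	uint32_t raw_ecc[4];
	uint8_t ecc_code[10], *p_out = ecc_code;
	uint16_t ecc16[5], ecc10[8];
	uint32_t *p;
	unsigned int i;

	/* Two 10-bit values per 32-bit word, as read from the 4-bit ECC regs */
	for (i = 0; i < 4; i++)
		raw_ecc[i] = val[2 * i] | ((uint32_t)val[2 * i + 1] << 16);

	/* Packing loop, same expressions as nand_davinci_calculate_4bit() */
	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
		*p_out++ =   p[0]        & 0xff;
		*p_out++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
		*p_out++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
		*p_out++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
		*p_out++ =  (p[1] >> 18) & 0xff;
	}

	/* Little-endian 16-bit view of the ten bytes (driver type-puns this) */
	for (i = 0; i < 5; i++)
		ecc16[i] = ecc_code[2 * i] | ((uint16_t)ecc_code[2 * i + 1] << 8);

	/* Unpacking, same expressions as nand_davinci_correct_4bit() */
	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;

	for (i = 0; i < 8; i++)
		assert(ecc10[i] == val[i]);
	printf("10-bit ECC pack/unpack round trip OK\n");
	return 0;
}
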
-
-/* Correct up to 4 bits in data we just read, using state left in the
- * hardware plus the ecc_code computed when it was first written.
- */
-static int nand_davinci_correct_4bit(struct mtd_info *mtd,
-		u_char *data, u_char *ecc_code, u_char *null)
-{
-	int i;
-	struct davinci_nand_info *info = to_davinci_nand(mtd);
-	unsigned short ecc10[8];
-	unsigned short *ecc16;
-	u32 syndrome[4];
-	u32 ecc_state;
-	unsigned num_errors, corrected;
-	unsigned long timeo;
-
-	/* Unpack ten bytes into eight 10 bit values.  We know we're
-	 * little-endian, and use type punning for less shifting/masking.
-	 */
-	if (WARN_ON(0x01 & (unsigned) ecc_code))
-		return -EINVAL;
-	ecc16 = (unsigned short *)ecc_code;
-
-	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
-	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
-	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
-	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
-	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
-	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
-	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
-	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;
-
-	/* Tell ECC controller about the expected ECC codes. */
-	for (i = 7; i >= 0; i--)
-		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
-
-	/* Allow time for syndrome calculation ... then read it.
-	 * A syndrome of all zeroes means no detected errors.
-	 */
-	davinci_nand_readl(info, NANDFSR_OFFSET);
-	nand_davinci_readecc_4bit(info, syndrome);
-	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
-		return 0;
-
-	/*
-	 * Clear any previous address calculation by doing a dummy read of an
-	 * error address register.
-	 */
-	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
-
-	/* Start address calculation, and wait for it to complete.
-	 * We _could_ start reading more data while this is working,
-	 * to speed up the overall page read.
-	 */
-	davinci_nand_writel(info, NANDFCR_OFFSET,
-			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
-
-	/*
-	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
-	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
-	 * begin trying to poll for the state, you may fall right out of your
-	 * loop without any of the correction calculations having taken place.
-	 * The recommendation from the hardware team is to initially delay as
-	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
-	 * correction state.
-	 */
-	timeo = jiffies + usecs_to_jiffies(100);
-	do {
-		ecc_state = (davinci_nand_readl(info,
-				NANDFSR_OFFSET) >> 8) & 0x0f;
-		cpu_relax();
-	} while ((ecc_state < 4) && time_before(jiffies, timeo));
-
-	for (;;) {
-		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
-
-		switch ((fsr >> 8) & 0x0f) {
-		case 0:		/* no error, should not happen */
-			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
-			return 0;
-		case 1:		/* five or more errors detected */
-			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
-			return -EBADMSG;
-		case 2:		/* error addresses computed */
-		case 3:
-			num_errors = 1 + ((fsr >> 16) & 0x03);
-			goto correct;
-		default:	/* still working on it */
-			cpu_relax();
-			continue;
-		}
-	}
-
-correct:
-	/* correct each error */
-	for (i = 0, corrected = 0; i < num_errors; i++) {
-		int error_address, error_value;
-
-		if (i > 1) {
-			error_address = davinci_nand_readl(info,
-						NAND_ERR_ADD2_OFFSET);
-			error_value = davinci_nand_readl(info,
-						NAND_ERR_ERRVAL2_OFFSET);
-		} else {
-			error_address = davinci_nand_readl(info,
-						NAND_ERR_ADD1_OFFSET);
-			error_value = davinci_nand_readl(info,
-						NAND_ERR_ERRVAL1_OFFSET);
-		}
-
-		if (i & 1) {
-			error_address >>= 16;
-			error_value >>= 16;
-		}
-		error_address &= 0x3ff;
-		error_address = (512 + 7) - error_address;
-
-		if (error_address < 512) {
-			data[error_address] ^= error_value;
-			corrected++;
-		}
-	}
-
-	return corrected;
-}
-
-/*----------------------------------------------------------------------*/
-
-/*
- * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
- * how these chips are normally wired.  This translates to both 8 and 16
- * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
- *
- * For now we assume that configuration, or any other one which ignores
- * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
- * and have that transparently morphed into multiple NAND operations.
- */
-static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
-		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
-	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
-		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
-	else
-		ioread8_rep(chip->IO_ADDR_R, buf, len);
-}
-
-static void nand_davinci_write_buf(struct mtd_info *mtd,
-		const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
-		iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
-	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
-		iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
-	else
-		iowrite8_rep(chip->IO_ADDR_R, buf, len);
-}
-
-/*
- * Check hardware register for wait status. Returns 1 if device is ready,
- * 0 if it is still busy.
- */
-static int nand_davinci_dev_ready(struct mtd_info *mtd)
-{
-	struct davinci_nand_info *info = to_davinci_nand(mtd);
-
-	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
-}
-
-/*----------------------------------------------------------------------*/
-
-/* An ECC layout for using 4-bit ECC with small-page flash, storing
- * ten ECC bytes plus the manufacturer's bad block marker byte, and
- * not overlapping the default BBT markers.
- */
-static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
-				      struct mtd_oob_region *oobregion)
-{
-	if (section > 2)
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->offset = 0;
-		oobregion->length = 5;
-	} else if (section == 1) {
-		oobregion->offset = 6;
-		oobregion->length = 2;
-	} else {
-		oobregion->offset = 13;
-		oobregion->length = 3;
-	}
-
-	return 0;
-}
-
-static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
-				       struct mtd_oob_region *oobregion)
-{
-	if (section > 1)
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->offset = 8;
-		oobregion->length = 5;
-	} else {
-		oobregion->offset = 16;
-		oobregion->length = mtd->oobsize - 16;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
-	.ecc = hwecc4_ooblayout_small_ecc,
-	.free = hwecc4_ooblayout_small_free,
-};
-
-#if defined(CONFIG_OF)
-static const struct of_device_id davinci_nand_of_match[] = {
-	{.compatible = "ti,davinci-nand", },
-	{.compatible = "ti,keystone-nand", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
-
-static struct davinci_nand_pdata
-	*nand_davinci_get_pdata(struct platform_device *pdev)
-{
-	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
-		struct davinci_nand_pdata *pdata;
-		const char *mode;
-		u32 prop;
-
-		pdata =  devm_kzalloc(&pdev->dev,
-				sizeof(struct davinci_nand_pdata),
-				GFP_KERNEL);
-		pdev->dev.platform_data = pdata;
-		if (!pdata)
-			return ERR_PTR(-ENOMEM);
-		if (!of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-chipselect", &prop))
-			pdev->id = prop;
-		else
-			return ERR_PTR(-EINVAL);
-
-		if (!of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-mask-ale", &prop))
-			pdata->mask_ale = prop;
-		if (!of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-mask-cle", &prop))
-			pdata->mask_cle = prop;
-		if (!of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-mask-chipsel", &prop))
-			pdata->mask_chipsel = prop;
-		if (!of_property_read_string(pdev->dev.of_node,
-			"ti,davinci-ecc-mode", &mode)) {
-			if (!strncmp("none", mode, 4))
-				pdata->ecc_mode = NAND_ECC_NONE;
-			if (!strncmp("soft", mode, 4))
-				pdata->ecc_mode = NAND_ECC_SOFT;
-			if (!strncmp("hw", mode, 2))
-				pdata->ecc_mode = NAND_ECC_HW;
-		}
-		if (!of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-ecc-bits", &prop))
-			pdata->ecc_bits = prop;
-
-		if (!of_property_read_u32(pdev->dev.of_node,
-			"ti,davinci-nand-buswidth", &prop) && prop == 16)
-			pdata->options |= NAND_BUSWIDTH_16;
-
-		if (of_property_read_bool(pdev->dev.of_node,
-			"ti,davinci-nand-use-bbt"))
-			pdata->bbt_options = NAND_BBT_USE_FLASH;
-
-		if (of_device_is_compatible(pdev->dev.of_node,
-					    "ti,keystone-nand")) {
-			pdata->options |= NAND_NO_SUBPAGE_WRITE;
-		}
-	}
-
-	return dev_get_platdata(&pdev->dev);
-}
-#else
-static struct davinci_nand_pdata
-	*nand_davinci_get_pdata(struct platform_device *pdev)
-{
-	return dev_get_platdata(&pdev->dev);
-}
-#endif
-
-static int nand_davinci_probe(struct platform_device *pdev)
-{
-	struct davinci_nand_pdata	*pdata;
-	struct davinci_nand_info	*info;
-	struct resource			*res1;
-	struct resource			*res2;
-	void __iomem			*vaddr;
-	void __iomem			*base;
-	int				ret;
-	uint32_t			val;
-	struct mtd_info			*mtd;
-
-	pdata = nand_davinci_get_pdata(pdev);
-	if (IS_ERR(pdata))
-		return PTR_ERR(pdata);
-
-	/* insist on board-specific configuration */
-	if (!pdata)
-		return -ENODEV;
-
-	/* which external chipselect will we be managing? */
-	if (pdev->id < 0 || pdev->id > 3)
-		return -ENODEV;
-
-	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, info);
-
-	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	if (!res1 || !res2) {
-		dev_err(&pdev->dev, "resource missing\n");
-		return -EINVAL;
-	}
-
-	vaddr = devm_ioremap_resource(&pdev->dev, res1);
-	if (IS_ERR(vaddr))
-		return PTR_ERR(vaddr);
-
-	/*
-	 * This register range is used to set up NAND settings. When the TI
-	 * AEMIF driver is used, the same memory address range has already been
-	 * requested by AEMIF, so we cannot request it twice; just ioremap it.
-	 * The AEMIF and NAND drivers do not use the same registers in this range.
-	 */
-	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
-	if (!base) {
-		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
-		return -EADDRNOTAVAIL;
-	}
-
-	info->dev		= &pdev->dev;
-	info->base		= base;
-	info->vaddr		= vaddr;
-
-	mtd			= nand_to_mtd(&info->chip);
-	mtd->dev.parent		= &pdev->dev;
-	nand_set_flash_node(&info->chip, pdev->dev.of_node);
-
-	info->chip.IO_ADDR_R	= vaddr;
-	info->chip.IO_ADDR_W	= vaddr;
-	info->chip.chip_delay	= 0;
-	info->chip.select_chip	= nand_davinci_select_chip;
-
-	/* options such as NAND_BBT_USE_FLASH */
-	info->chip.bbt_options	= pdata->bbt_options;
-	/* options such as 16-bit widths */
-	info->chip.options	= pdata->options;
-	info->chip.bbt_td	= pdata->bbt_td;
-	info->chip.bbt_md	= pdata->bbt_md;
-	info->timing		= pdata->timing;
-
-	info->ioaddr		= (uint32_t __force) vaddr;
-
-	info->current_cs	= info->ioaddr;
-	info->core_chipsel	= pdev->id;
-	info->mask_chipsel	= pdata->mask_chipsel;
-
-	/* use nandboot-capable ALE/CLE masks by default */
-	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
-	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;
-
-	/* Set address of hardware control function */
-	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
-	info->chip.dev_ready	= nand_davinci_dev_ready;
-
-	/* Speed up buffer I/O */
-	info->chip.read_buf     = nand_davinci_read_buf;
-	info->chip.write_buf    = nand_davinci_write_buf;
-
-	/* Use board-specific ECC config */
-	info->chip.ecc.mode	= pdata->ecc_mode;
-
-	ret = -EINVAL;
-
-	info->clk = devm_clk_get(&pdev->dev, "aemif");
-	if (IS_ERR(info->clk)) {
-		ret = PTR_ERR(info->clk);
-		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
-		return ret;
-	}
-
-	ret = clk_prepare_enable(info->clk);
-	if (ret < 0) {
-		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
-			ret);
-		goto err_clk_enable;
-	}
-
-	spin_lock_irq(&davinci_nand_lock);
-
-	/* put CSxNAND into NAND mode */
-	val = davinci_nand_readl(info, NANDFCR_OFFSET);
-	val |= BIT(info->core_chipsel);
-	davinci_nand_writel(info, NANDFCR_OFFSET, val);
-
-	spin_unlock_irq(&davinci_nand_lock);
-
-	/* Scan to find existence of the device(s) */
-	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
-	if (ret < 0) {
-		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
-		goto err;
-	}
-
-	switch (info->chip.ecc.mode) {
-	case NAND_ECC_NONE:
-		pdata->ecc_bits = 0;
-		break;
-	case NAND_ECC_SOFT:
-		pdata->ecc_bits = 0;
-		/*
-		 * This driver expects Hamming based ECC when ecc_mode is set
-		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
-		 * avoid adding an extra ->ecc_algo field to
-		 * davinci_nand_pdata.
-		 */
-		info->chip.ecc.algo = NAND_ECC_HAMMING;
-		break;
-	case NAND_ECC_HW:
-		if (pdata->ecc_bits == 4) {
-			/* No sanity checks:  CPUs must support this,
-			 * and the chips may not use NAND_BUSWIDTH_16.
-			 */
-
-			/* No sharing 4-bit hardware between chipselects yet */
-			spin_lock_irq(&davinci_nand_lock);
-			if (ecc4_busy)
-				ret = -EBUSY;
-			else
-				ecc4_busy = true;
-			spin_unlock_irq(&davinci_nand_lock);
-
-			if (ret == -EBUSY)
-				return ret;
-
-			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
-			info->chip.ecc.correct = nand_davinci_correct_4bit;
-			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
-			info->chip.ecc.bytes = 10;
-			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
-		} else {
-			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
-			info->chip.ecc.correct = nand_davinci_correct_1bit;
-			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
-			info->chip.ecc.bytes = 3;
-		}
-		info->chip.ecc.size = 512;
-		info->chip.ecc.strength = pdata->ecc_bits;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
-	 * is OK, but it allocates 6 bytes when only 3 are needed (for
-	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
-	 * usable:  10 bytes are needed, not 6.
-	 */
-	if (pdata->ecc_bits == 4) {
-		int	chunks = mtd->writesize / 512;
-
-		if (!chunks || mtd->oobsize < 16) {
-			dev_dbg(&pdev->dev, "too small\n");
-			ret = -EINVAL;
-			goto err;
-		}
-
-		/* For small page chips, preserve the manufacturer's
-		 * badblock marking data ... and make sure a flash BBT
-		 * marker fits in the free bytes.
-		 */
-		if (chunks == 1) {
-			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
-		} else if (chunks == 4 || chunks == 8) {
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
-		} else {
-			ret = -EIO;
-			goto err;
-		}
-	}
-
-	ret = nand_scan_tail(mtd);
-	if (ret < 0)
-		goto err;
-
-	if (pdata->parts)
-		ret = mtd_device_parse_register(mtd, NULL, NULL,
-					pdata->parts, pdata->nr_parts);
-	else
-		ret = mtd_device_register(mtd, NULL, 0);
-	if (ret < 0)
-		goto err;
-
-	val = davinci_nand_readl(info, NRCSR_OFFSET);
-	dev_info(&pdev->dev, "controller rev. %d.%d\n",
-	       (val >> 8) & 0xff, val & 0xff);
-
-	return 0;
-
-err:
-	clk_disable_unprepare(info->clk);
-
-err_clk_enable:
-	spin_lock_irq(&davinci_nand_lock);
-	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
-		ecc4_busy = false;
-	spin_unlock_irq(&davinci_nand_lock);
-	return ret;
-}
-
-static int nand_davinci_remove(struct platform_device *pdev)
-{
-	struct davinci_nand_info *info = platform_get_drvdata(pdev);
-
-	spin_lock_irq(&davinci_nand_lock);
-	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
-		ecc4_busy = false;
-	spin_unlock_irq(&davinci_nand_lock);
-
-	nand_release(nand_to_mtd(&info->chip));
-
-	clk_disable_unprepare(info->clk);
-
-	return 0;
-}
-
-static struct platform_driver nand_davinci_driver = {
-	.probe		= nand_davinci_probe,
-	.remove		= nand_davinci_remove,
-	.driver		= {
-		.name	= "davinci_nand",
-		.of_match_table = of_match_ptr(davinci_nand_of_match),
-	},
-};
-MODULE_ALIAS("platform:davinci_nand");
-
-module_platform_driver(nand_davinci_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Texas Instruments");
-MODULE_DESCRIPTION("Davinci NAND flash driver");
-
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
deleted file mode 100644
index 0476ae8776d9..000000000000
--- a/drivers/mtd/nand/denali.c
+++ /dev/null
@@ -1,1663 +0,0 @@ 
-/*
- * NAND Flash Controller Device Driver
- * Copyright © 2009-2010, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/module.h>
-
-#include "denali.h"
-
-MODULE_LICENSE("GPL");
-
-/*
- * We define a module parameter that allows the user to override
- * the hardware and decide what timing mode should be used.
- */
-#define NAND_DEFAULT_TIMINGS	-1
-
-static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
-module_param(onfi_timing_mode, int, S_IRUGO);
-MODULE_PARM_DESC(onfi_timing_mode,
-	   "Overrides default ONFI setting. -1 indicates use default timings");
-
-#define DENALI_NAND_NAME    "denali-nand"
-
-/*
- * We define a macro here that combines all interrupts this driver uses into
- * a single constant value, for convenience.
- */
-#define DENALI_IRQ_ALL	(INTR_STATUS__DMA_CMD_COMP | \
-			INTR_STATUS__ECC_TRANSACTION_DONE | \
-			INTR_STATUS__ECC_ERR | \
-			INTR_STATUS__PROGRAM_FAIL | \
-			INTR_STATUS__LOAD_COMP | \
-			INTR_STATUS__PROGRAM_COMP | \
-			INTR_STATUS__TIME_OUT | \
-			INTR_STATUS__ERASE_FAIL | \
-			INTR_STATUS__RST_COMP | \
-			INTR_STATUS__ERASE_COMP)
-
-/*
- * indicates whether the internal value for the flash bank is valid
- */
-#define CHIP_SELECT_INVALID	-1
-
-#define SUPPORT_8BITECC		1
-
-/*
- * This macro divides two integers and rounds fractional values up
- * to the nearest integer value.
- */
-#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
-
-/*
- * this macro allows us to convert from an MTD structure to our own
- * device context (denali) structure.
- */
-static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
-}
-
-/*
- * These constants are defined by the driver to enable common driver
- * configuration options.
- */
-#define SPARE_ACCESS		0x41
-#define MAIN_ACCESS		0x42
-#define MAIN_SPARE_ACCESS	0x43
-#define PIPELINE_ACCESS		0x2000
-
-#define DENALI_READ	0
-#define DENALI_WRITE	0x100
-
-/* types of device accesses. We can issue commands and get status */
-#define COMMAND_CYCLE	0
-#define ADDR_CYCLE	1
-#define STATUS_CYCLE	2
-
-/*
- * this is a helper macro that allows us to
- * format the bank into the proper bits for the controller
- */
-#define BANK(x) ((x) << 24)
-
-/* forward declarations */
-static void clear_interrupts(struct denali_nand_info *denali);
-static uint32_t wait_for_irq(struct denali_nand_info *denali,
-							uint32_t irq_mask);
-static void denali_irq_enable(struct denali_nand_info *denali,
-							uint32_t int_mask);
-static uint32_t read_interrupt_status(struct denali_nand_info *denali);
-
-/*
- * Certain operations for the denali NAND controller use an indexed mode to
- * read/write data. The operation is performed by writing the address value
- * of the command to the device memory followed by the data. This function
- * abstracts this common operation.
- */
-static void index_addr(struct denali_nand_info *denali,
-				uint32_t address, uint32_t data)
-{
-	iowrite32(address, denali->flash_mem);
-	iowrite32(data, denali->flash_mem + 0x10);
-}
-
-/* Perform an indexed read of the device */
-static void index_addr_read_data(struct denali_nand_info *denali,
-				 uint32_t address, uint32_t *pdata)
-{
-	iowrite32(address, denali->flash_mem);
-	*pdata = ioread32(denali->flash_mem + 0x10);
-}
-
-/*
- * We need to buffer some data for some of the NAND core routines.
- * The helpers below manage that buffering.
- */
-static void reset_buf(struct denali_nand_info *denali)
-{
-	denali->buf.head = denali->buf.tail = 0;
-}
-
-static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
-{
-	denali->buf.buf[denali->buf.tail++] = byte;
-}
-
-/* reads the status of the device */
-static void read_status(struct denali_nand_info *denali)
-{
-	uint32_t cmd;
-
-	/* initialize the data buffer to store status */
-	reset_buf(denali);
-
-	cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
-	if (cmd)
-		write_byte_to_buf(denali, NAND_STATUS_WP);
-	else
-		write_byte_to_buf(denali, 0);
-}
-
-/* resets a specific device connected to the core */
-static void reset_bank(struct denali_nand_info *denali)
-{
-	uint32_t irq_status;
-	uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;
-
-	clear_interrupts(denali);
-
-	iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
-
-	irq_status = wait_for_irq(denali, irq_mask);
-
-	if (irq_status & INTR_STATUS__TIME_OUT)
-		dev_err(denali->dev, "reset bank failed.\n");
-}
-
-/* Reset the flash controller */
-static uint16_t denali_nand_reset(struct denali_nand_info *denali)
-{
-	int i;
-
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-		__FILE__, __LINE__, __func__);
-
-	for (i = 0; i < denali->max_banks; i++)
-		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
-		denali->flash_reg + INTR_STATUS(i));
-
-	for (i = 0; i < denali->max_banks; i++) {
-		iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
-		while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
-			(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
-			cpu_relax();
-		if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
-			INTR_STATUS__TIME_OUT)
-			dev_dbg(denali->dev,
-			"NAND Reset operation timed out on bank %d\n", i);
-	}
-
-	for (i = 0; i < denali->max_banks; i++)
-		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
-			  denali->flash_reg + INTR_STATUS(i));
-
-	return PASS;
-}
-
-/*
- * this routine calculates the ONFI timing values for a given mode and
- * programs the clocking register accordingly. The mode is determined by
- * the get_onfi_nand_para routine.
- */
-static void nand_onfi_timing_set(struct denali_nand_info *denali,
-								uint16_t mode)
-{
-	uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
-	uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
-	uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
-	uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
-	uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
-	uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
-	uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
-	uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
-	uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
-	uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
-	uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
-	uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
-
-	uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
-	uint16_t dv_window = 0;
-	uint16_t en_lo, en_hi;
-	uint16_t acc_clks;
-	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
-
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-		__FILE__, __LINE__, __func__);
-
-	en_lo = CEIL_DIV(Trp[mode], CLK_X);
-	en_hi = CEIL_DIV(Treh[mode], CLK_X);
-#if ONFI_BLOOM_TIME
-	if ((en_hi * CLK_X) < (Treh[mode] + 2))
-		en_hi++;
-#endif
-
-	if ((en_lo + en_hi) * CLK_X < Trc[mode])
-		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
-
-	if ((en_lo + en_hi) < CLK_MULTI)
-		en_lo += CLK_MULTI - en_lo - en_hi;
-
-	while (dv_window < 8) {
-		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
-
-		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
-
-		data_invalid = data_invalid_rhoh < data_invalid_rloh ?
-					data_invalid_rhoh : data_invalid_rloh;
-
-		dv_window = data_invalid - Trea[mode];
-
-		if (dv_window < 8)
-			en_lo++;
-	}
-
-	acc_clks = CEIL_DIV(Trea[mode], CLK_X);
-
-	while (acc_clks * CLK_X - Trea[mode] < 3)
-		acc_clks++;
-
-	if (data_invalid - acc_clks * CLK_X < 2)
-		dev_warn(denali->dev, "%s, Line %d: Warning!\n",
-			 __FILE__, __LINE__);
-
-	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
-	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
-	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
-	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
-	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
-	if (cs_cnt == 0)
-		cs_cnt = 1;
-
-	if (Tcea[mode]) {
-		while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
-			cs_cnt++;
-	}
-
-#if MODE5_WORKAROUND
-	if (mode == 5)
-		acc_clks = 5;
-#endif
-
-	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
-	if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
-		ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
-		acc_clks = 6;
-
-	iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
-	iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
-	iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
-	iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
-	iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
-	iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
-	iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
-	iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
-}
-
-/* queries the NAND device to see what ONFI modes it supports. */
-static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
-{
-	int i;
-
-	/*
-	 * we don't need to do a reset here because the driver has
-	 * already reset all the banks
-	 */
-	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
-		ONFI_TIMING_MODE__VALUE))
-		return FAIL;
-
-	for (i = 5; i > 0; i--) {
-		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
-			(0x01 << i))
-			break;
-	}
-
-	nand_onfi_timing_set(denali, i);
-
-	/*
-	 * By now, all the ONFI devices we know support the page cache
-	 * rw feature. So here we enable the pipeline_rw_ahead feature
-	 */
-	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
-	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE);  */
-
-	return PASS;
-}
-
-static void get_samsung_nand_para(struct denali_nand_info *denali,
-							uint8_t device_id)
-{
-	if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
-		/* Set timing register values according to datasheet */
-		iowrite32(5, denali->flash_reg + ACC_CLKS);
-		iowrite32(20, denali->flash_reg + RE_2_WE);
-		iowrite32(12, denali->flash_reg + WE_2_RE);
-		iowrite32(14, denali->flash_reg + ADDR_2_DATA);
-		iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
-		iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
-		iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
-	}
-}
-
-static void get_toshiba_nand_para(struct denali_nand_info *denali)
-{
-	uint32_t tmp;
-
-	/*
-	 * Workaround for a controller bug that reports a wrong
-	 * spare area size for certain Toshiba NAND devices
-	 */
-	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
-		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
-		iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
-		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
-			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
-		iowrite32(tmp,
-				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
-#if SUPPORT_15BITECC
-		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
-		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
-#endif
-	}
-}
-
-static void get_hynix_nand_para(struct denali_nand_info *denali,
-							uint8_t device_id)
-{
-	uint32_t main_size, spare_size;
-
-	switch (device_id) {
-	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
-	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
-		iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
-		iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
-		iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
-		main_size = 4096 *
-			ioread32(denali->flash_reg + DEVICES_CONNECTED);
-		spare_size = 224 *
-			ioread32(denali->flash_reg + DEVICES_CONNECTED);
-		iowrite32(main_size,
-				denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
-		iowrite32(spare_size,
-				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
-		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
-#if SUPPORT_15BITECC
-		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
-		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
-#endif
-		break;
-	default:
-		dev_warn(denali->dev,
-			 "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
-			 "Will use default parameter values instead.\n",
-			 device_id);
-	}
-}
-
-/*
- * determines how many NAND chips are connected to the controller. Note for
- * Intel CE4100 devices we don't support more than one device.
- */
-static void find_valid_banks(struct denali_nand_info *denali)
-{
-	uint32_t id[denali->max_banks];
-	int i;
-
-	denali->total_used_banks = 1;
-	for (i = 0; i < denali->max_banks; i++) {
-		index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
-		index_addr(denali, MODE_11 | (i << 24) | 1, 0);
-		index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
-
-		dev_dbg(denali->dev,
-			"Return 1st ID for bank[%d]: %x\n", i, id[i]);
-
-		if (i == 0) {
-			if (!(id[i] & 0x0ff))
-				break; /* WTF? */
-		} else {
-			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
-				denali->total_used_banks++;
-			else
-				break;
-		}
-	}
-
-	if (denali->platform == INTEL_CE4100) {
-		/*
-		 * Platform limitations of the CE4100 device limit
-		 * users to a single chip solution for NAND.
-		 * Multichip support is not enabled.
-		 */
-		if (denali->total_used_banks != 1) {
-			dev_err(denali->dev,
-				"Sorry, Intel CE4100 only supports a single NAND device.\n");
-			BUG();
-		}
-	}
-	dev_dbg(denali->dev,
-		"denali->total_used_banks: %d\n", denali->total_used_banks);
-}
-
-/*
- * Use the configuration feature register to determine the maximum number of
- * banks that the hardware supports.
- */
-static void detect_max_banks(struct denali_nand_info *denali)
-{
-	uint32_t features = ioread32(denali->flash_reg + FEATURES);
-	/*
-	 * Read the revision register, so we can calculate the max_banks
-	 * properly: the encoding changed from rev 5.0 to 5.1
-	 */
-	u32 revision = MAKE_COMPARABLE_REVISION(
-				ioread32(denali->flash_reg + REVISION));
-
-	if (revision < REVISION_5_1)
-		denali->max_banks = 2 << (features & FEATURES__N_BANKS);
-	else
-		denali->max_banks = 1 << (features & FEATURES__N_BANKS);
-}
-
-static void detect_partition_feature(struct denali_nand_info *denali)
-{
-	/*
-	 * On the MRST platform, denali->fwblks is the number of
-	 * blocks occupied by the firmware. The FW lives in a protected
-	 * partition that the MTD driver has no permission to access,
-	 * so let the driver know how many blocks it must not touch.
-	 */
-	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
-		if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
-			PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
-			denali->fwblks =
-			    ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
-			      MIN_MAX_BANK__MIN_VALUE) *
-			     denali->blksperchip)
-			    +
-			    (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
-			    MIN_BLK_ADDR__VALUE);
-		} else {
-			denali->fwblks = SPECTRA_START_BLOCK;
-		}
-	} else {
-		denali->fwblks = SPECTRA_START_BLOCK;
-	}
-}
-
-static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
-{
-	uint16_t status = PASS;
-	uint32_t id_bytes[8], addr;
-	uint8_t maf_id, device_id;
-	int i;
-
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-			__FILE__, __LINE__, __func__);
-
-	/*
-	 * Use the READ ID method to get the device ID and other params.
-	 * For some NAND chips, the controller can't report the correct
-	 * device ID by reading from the DEVICE_ID register
-	 */
-	addr = MODE_11 | BANK(denali->flash_bank);
-	index_addr(denali, addr | 0, 0x90);
-	index_addr(denali, addr | 1, 0);
-	for (i = 0; i < 8; i++)
-		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
-	maf_id = id_bytes[0];
-	device_id = id_bytes[1];
-
-	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
-		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
-		if (FAIL == get_onfi_nand_para(denali))
-			return FAIL;
-	} else if (maf_id == 0xEC) { /* Samsung NAND */
-		get_samsung_nand_para(denali, device_id);
-	} else if (maf_id == 0x98) { /* Toshiba NAND */
-		get_toshiba_nand_para(denali);
-	} else if (maf_id == 0xAD) { /* Hynix NAND */
-		get_hynix_nand_para(denali, device_id);
-	}
-
-	dev_info(denali->dev,
-			"Dump timing register values:\n"
-			"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
-			"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
-			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
-			ioread32(denali->flash_reg + ACC_CLKS),
-			ioread32(denali->flash_reg + RE_2_WE),
-			ioread32(denali->flash_reg + RE_2_RE),
-			ioread32(denali->flash_reg + WE_2_RE),
-			ioread32(denali->flash_reg + ADDR_2_DATA),
-			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
-			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
-			ioread32(denali->flash_reg + CS_SETUP_CNT));
-
-	find_valid_banks(denali);
-
-	detect_partition_feature(denali);
-
-	/*
-	 * If the user specified to override the default timings
-	 * with a specific ONFI mode, we apply those changes here.
-	 */
-	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
-		nand_onfi_timing_set(denali, onfi_timing_mode);
-
-	return status;
-}
-
-static void denali_set_intr_modes(struct denali_nand_info *denali,
-					uint16_t INT_ENABLE)
-{
-	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
-		__FILE__, __LINE__, __func__);
-
-	if (INT_ENABLE)
-		iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
-	else
-		iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
-}
-
-/*
- * validation function to verify that the controlling software is making
- * a valid request
- */
-static inline bool is_flash_bank_valid(int flash_bank)
-{
-	return flash_bank >= 0 && flash_bank < 4;
-}
-
-static void denali_irq_init(struct denali_nand_info *denali)
-{
-	uint32_t int_mask;
-	int i;
-
-	/* Disable global interrupts */
-	denali_set_intr_modes(denali, false);
-
-	int_mask = DENALI_IRQ_ALL;
-
-	/* Clear all status bits */
-	for (i = 0; i < denali->max_banks; ++i)
-		iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
-
-	denali_irq_enable(denali, int_mask);
-}
-
-static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
-{
-	denali_set_intr_modes(denali, false);
-	free_irq(irqnum, denali);
-}
-
-static void denali_irq_enable(struct denali_nand_info *denali,
-							uint32_t int_mask)
-{
-	int i;
-
-	for (i = 0; i < denali->max_banks; ++i)
-		iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
-}
-
-/*
- * This function only returns when an interrupt that this driver cares about
- * occurs. This is to reduce the overhead of servicing interrupts
- */
-static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
-{
-	return read_interrupt_status(denali) & DENALI_IRQ_ALL;
-}
-
-/* Interrupts are cleared by writing a 1 to the appropriate status bit */
-static inline void clear_interrupt(struct denali_nand_info *denali,
-							uint32_t irq_mask)
-{
-	uint32_t intr_status_reg;
-
-	intr_status_reg = INTR_STATUS(denali->flash_bank);
-
-	iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
-}
-
-static void clear_interrupts(struct denali_nand_info *denali)
-{
-	uint32_t status;
-
-	spin_lock_irq(&denali->irq_lock);
-
-	status = read_interrupt_status(denali);
-	clear_interrupt(denali, status);
-
-	denali->irq_status = 0x0;
-	spin_unlock_irq(&denali->irq_lock);
-}
-
-static uint32_t read_interrupt_status(struct denali_nand_info *denali)
-{
-	uint32_t intr_status_reg;
-
-	intr_status_reg = INTR_STATUS(denali->flash_bank);
-
-	return ioread32(denali->flash_reg + intr_status_reg);
-}
-
-/*
- * This is the interrupt service routine. It handles all interrupts
- * sent to this device. Note that on CE4100, this is a shared interrupt.
- */
-static irqreturn_t denali_isr(int irq, void *dev_id)
-{
-	struct denali_nand_info *denali = dev_id;
-	uint32_t irq_status;
-	irqreturn_t result = IRQ_NONE;
-
-	spin_lock(&denali->irq_lock);
-
-	/* check to see if a valid NAND chip has been selected. */
-	if (is_flash_bank_valid(denali->flash_bank)) {
-		/*
-		 * check to see if controller generated the interrupt,
-		 * since this is a shared interrupt
-		 */
-		irq_status = denali_irq_detected(denali);
-		if (irq_status != 0) {
-			/* handle interrupt */
-			/* first acknowledge it */
-			clear_interrupt(denali, irq_status);
-			/*
-			 * store the status in the device context for someone
-			 * to read
-			 */
-			denali->irq_status |= irq_status;
-			/* notify anyone who cares that it happened */
-			complete(&denali->complete);
-			/* tell the OS that we've handled this */
-			result = IRQ_HANDLED;
-		}
-	}
-	spin_unlock(&denali->irq_lock);
-	return result;
-}
-#define BANK(x) ((x) << 24)
-
-static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
-{
-	unsigned long comp_res;
-	uint32_t intr_status;
-	unsigned long timeout = msecs_to_jiffies(1000);
-
-	do {
-		comp_res =
-			wait_for_completion_timeout(&denali->complete, timeout);
-		spin_lock_irq(&denali->irq_lock);
-		intr_status = denali->irq_status;
-
-		if (intr_status & irq_mask) {
-			denali->irq_status &= ~irq_mask;
-			spin_unlock_irq(&denali->irq_lock);
-			/* our interrupt was detected */
-			break;
-		}
-
-		/*
-		 * these are not the interrupts you are looking for -
-		 * need to wait again
-		 */
-		spin_unlock_irq(&denali->irq_lock);
-	} while (comp_res != 0);
-
-	if (comp_res == 0) {
-		/* timeout */
-		pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
-				intr_status, irq_mask);
-
-		intr_status = 0;
-	}
-	return intr_status;
-}
-
-/*
- * This helper function sets up the registers controlling ECC and whether
- * the spare area will be transferred.
- */
-static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
-				bool transfer_spare)
-{
-	int ecc_en_flag, transfer_spare_flag;
-
-	/* set ECC, transfer spare bits if needed */
-	ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
-	transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
-
-	/* Enable spare area/ECC per user's request. */
-	iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
-	iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
-}
-
-/*
- * sends a pipeline command operation to the controller. See the Denali NAND
- * controller's user guide for more information (section 4.2.3.6).
- */
-static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
-				    bool ecc_en, bool transfer_spare,
-				    int access_type, int op)
-{
-	int status = PASS;
-	uint32_t page_count = 1;
-	uint32_t addr, cmd, irq_status, irq_mask;
-
-	if (op == DENALI_READ)
-		irq_mask = INTR_STATUS__LOAD_COMP;
-	else if (op == DENALI_WRITE)
-		irq_mask = 0;
-	else
-		BUG();
-
-	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
-
-	clear_interrupts(denali);
-
-	addr = BANK(denali->flash_bank) | denali->page;
-
-	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
-		cmd = MODE_01 | addr;
-		iowrite32(cmd, denali->flash_mem);
-	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
-		/* read spare area */
-		cmd = MODE_10 | addr;
-		index_addr(denali, cmd, access_type);
-
-		cmd = MODE_01 | addr;
-		iowrite32(cmd, denali->flash_mem);
-	} else if (op == DENALI_READ) {
-		/* setup page read request for access type */
-		cmd = MODE_10 | addr;
-		index_addr(denali, cmd, access_type);
-
-		/*
-		 * page 33 of the NAND controller spec indicates we should not
-		 * use the pipeline commands in Spare area only mode.
-		 * So we don't.
-		 */
-		if (access_type == SPARE_ACCESS) {
-			cmd = MODE_01 | addr;
-			iowrite32(cmd, denali->flash_mem);
-		} else {
-			index_addr(denali, cmd,
-					PIPELINE_ACCESS | op | page_count);
-
-			/*
-			 * wait for the command to be accepted;
-			 * the status0 bit can always be used as the
-			 * mask is identical for each bank.
-			 */
-			irq_status = wait_for_irq(denali, irq_mask);
-
-			if (irq_status == 0) {
-				dev_err(denali->dev,
-					"cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n",
-					cmd, denali->page, addr);
-				status = FAIL;
-			} else {
-				cmd = MODE_01 | addr;
-				iowrite32(cmd, denali->flash_mem);
-			}
-		}
-	}
-	return status;
-}
-
-/* helper function that simply writes a buffer to the flash */
-static int write_data_to_flash_mem(struct denali_nand_info *denali,
-				   const uint8_t *buf, int len)
-{
-	uint32_t *buf32;
-	int i;
-
-	/*
-	 * verify that the len is a multiple of 4.
-	 * see comment in read_data_from_flash_mem()
-	 */
-	BUG_ON((len % 4) != 0);
-
-	/* write the data to the flash memory */
-	buf32 = (uint32_t *)buf;
-	for (i = 0; i < len / 4; i++)
-		iowrite32(*buf32++, denali->flash_mem + 0x10);
-	return i * 4; /* intent is to return the number of bytes written */
-}
-
-/* helper function that simply reads a buffer from the flash */
-static int read_data_from_flash_mem(struct denali_nand_info *denali,
-				    uint8_t *buf, int len)
-{
-	uint32_t *buf32;
-	int i;
-
-	/*
-	 * we assume that len will be a multiple of 4, if not it would be nice
-	 * to know about it ASAP rather than have random failures...
-	 * This assumption is based on the fact that this function is designed
-	 * to be used to read flash pages, which are typically multiples of 4.
-	 */
-	BUG_ON((len % 4) != 0);
-
-	/* transfer the data from the flash */
-	buf32 = (uint32_t *)buf;
-	for (i = 0; i < len / 4; i++)
-		*buf32++ = ioread32(denali->flash_mem + 0x10);
-	return i * 4; /* intent is to return the number of bytes read */
-}
-
-/* writes OOB data to the device */
-static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	uint32_t irq_status;
-	uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
-						INTR_STATUS__PROGRAM_FAIL;
-	int status = 0;
-
-	denali->page = page;
-
-	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
-							DENALI_WRITE) == PASS) {
-		write_data_to_flash_mem(denali, buf, mtd->oobsize);
-
-		/* wait for operation to complete */
-		irq_status = wait_for_irq(denali, irq_mask);
-
-		if (irq_status == 0) {
-			dev_err(denali->dev, "OOB write failed\n");
-			status = -EIO;
-		}
-	} else {
-		dev_err(denali->dev, "unable to send pipeline command\n");
-		status = -EIO;
-	}
-	return status;
-}
-
-/* reads OOB data from the device */
-static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
-	uint32_t irq_status, addr, cmd;
-
-	denali->page = page;
-
-	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
-							DENALI_READ) == PASS) {
-		read_data_from_flash_mem(denali, buf, mtd->oobsize);
-
-		/*
-		 * wait for the command to be accepted;
-		 * the status0 bit can always be used as the
-		 * mask is identical for each bank.
-		 */
-		irq_status = wait_for_irq(denali, irq_mask);
-
-		if (irq_status == 0)
-			dev_err(denali->dev, "page on OOB timeout %d\n",
-					denali->page);
-
-		/*
-		 * We set the device back to MAIN_ACCESS here as I observed
-		 * instability with the controller if you do a block erase
-		 * and the last transaction was a SPARE_ACCESS. Block erase
-		 * is reliable (according to the MTD test infrastructure)
-		 * if you are in MAIN_ACCESS.
-		 */
-		addr = BANK(denali->flash_bank) | denali->page;
-		cmd = MODE_10 | addr;
-		index_addr(denali, cmd, MAIN_ACCESS);
-	}
-}
-
-/*
- * this function examines buffers to see if they contain data that
- * indicate that the buffer is part of an erased region of flash.
- */
-static bool is_erased(uint8_t *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		if (buf[i] != 0xFF)
-			return false;
-	return true;
-}
-#define ECC_SECTOR_SIZE 512
-
-#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
-#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
-#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
-#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
-#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
-#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
-
-static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
-		       uint32_t irq_status, unsigned int *max_bitflips)
-{
-	bool check_erased_page = false;
-	unsigned int bitflips = 0;
-
-	if (irq_status & INTR_STATUS__ECC_ERR) {
-		/* read the ECC errors and correct them where possible */
-		uint32_t err_address, err_correction_info, err_byte,
-			 err_sector, err_device, err_correction_value;
-		denali_set_intr_modes(denali, false);
-
-		do {
-			err_address = ioread32(denali->flash_reg +
-						ECC_ERROR_ADDRESS);
-			err_sector = ECC_SECTOR(err_address);
-			err_byte = ECC_BYTE(err_address);
-
-			err_correction_info = ioread32(denali->flash_reg +
-						ERR_CORRECTION_INFO);
-			err_correction_value =
-				ECC_CORRECTION_VALUE(err_correction_info);
-			err_device = ECC_ERR_DEVICE(err_correction_info);
-
-			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
-				/*
-				 * If err_byte is larger than ECC_SECTOR_SIZE,
-				 * the error happened in the OOB area, so we
-				 * ignore it; there is no need to correct it.
-				 * err_device identifies which NAND device the
-				 * error bits belong to when more than one
-				 * NAND is connected.
-				 */
-				if (err_byte < ECC_SECTOR_SIZE) {
-					struct mtd_info *mtd =
-						nand_to_mtd(&denali->nand);
-					int offset;
-
-					offset = (err_sector *
-							ECC_SECTOR_SIZE +
-							err_byte) *
-							denali->devnum +
-							err_device;
-					/* correct the ECC error */
-					buf[offset] ^= err_correction_value;
-					mtd->ecc_stats.corrected++;
-					bitflips++;
-				}
-			} else {
-				/*
-				 * if the error is not correctable, we need to
-				 * check whether the page is erased; if so, it
-				 * is not a real ECC error
-				 */
-				check_erased_page = true;
-			}
-		} while (!ECC_LAST_ERR(err_correction_info));
-		/*
-		 * Once all ECC errors have been handled, the controller
-		 * triggers an ECC_TRANSACTION_DONE interrupt, so just
-		 * wait for that interrupt here
-		 */
-		while (!(read_interrupt_status(denali) &
-				INTR_STATUS__ECC_TRANSACTION_DONE))
-			cpu_relax();
-		clear_interrupts(denali);
-		denali_set_intr_modes(denali, true);
-	}
-	*max_bitflips = bitflips;
-	return check_erased_page;
-}
-
-/* programs the controller to either enable/disable DMA transfers */
-static void denali_enable_dma(struct denali_nand_info *denali, bool en)
-{
-	iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
-	ioread32(denali->flash_reg + DMA_ENABLE);
-}
-
-/* sets up the HW to perform the data DMA */
-static void denali_setup_dma(struct denali_nand_info *denali, int op)
-{
-	uint32_t mode;
-	const int page_count = 1;
-	uint32_t addr = denali->buf.dma_buf;
-
-	mode = MODE_10 | BANK(denali->flash_bank);
-
-	/* DMA is a four step process */
-
-	/* 1. setup transfer type and # of pages */
-	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
-
-	/* 2. set memory high address bits 23:8 */
-	index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
-
-	/* 3. set memory low address bits 23:8 */
-	index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
-
-	/* 4. interrupt when complete, burst len = 64 bytes */
-	index_addr(denali, mode | 0x14000, 0x2400);
-}
-
-/*
- * writes a page. user specifies type, and this function handles the
- * configuration details.
- */
-static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			const uint8_t *buf, bool raw_xfer)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	dma_addr_t addr = denali->buf.dma_buf;
-	size_t size = mtd->writesize + mtd->oobsize;
-	uint32_t irq_status;
-	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
-						INTR_STATUS__PROGRAM_FAIL;
-
-	/*
-	 * if it is a raw xfer, we want to disable ecc and send the spare area.
-	 * !raw_xfer - enable ecc
-	 * raw_xfer - transfer spare
-	 */
-	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
-
-	/* copy buffer into DMA buffer */
-	memcpy(denali->buf.buf, buf, mtd->writesize);
-
-	if (raw_xfer) {
-		/* transfer the data to the spare area */
-		memcpy(denali->buf.buf + mtd->writesize,
-			chip->oob_poi,
-			mtd->oobsize);
-	}
-
-	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
-
-	clear_interrupts(denali);
-	denali_enable_dma(denali, true);
-
-	denali_setup_dma(denali, DENALI_WRITE);
-
-	/* wait for operation to complete */
-	irq_status = wait_for_irq(denali, irq_mask);
-
-	if (irq_status == 0) {
-		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
-			raw_xfer);
-		denali->status = NAND_STATUS_FAIL;
-	}
-
-	denali_enable_dma(denali, false);
-	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
-
-	return 0;
-}
-
-/* NAND core entry points */
-
-/*
- * this is the callback that the NAND core calls to write a page. Since
- * writing a page with ECC or without is similar, all the work is done
- * by write_page above.
- */
-static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf, int oob_required, int page)
-{
-	/*
-	 * for regular page writes, we let HW handle all the ECC
-	 * data written to the device.
-	 */
-	return write_page(mtd, chip, buf, false);
-}
-
-/*
- * This is the callback that the NAND core calls to write a page without ECC.
- * raw access is similar to ECC page writes, so all the work is done in the
- * write_page() function above.
- */
-static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				 const uint8_t *buf, int oob_required,
-				 int page)
-{
-	/*
-	 * for raw page writes, we want to disable ECC and simply write
-	 * whatever data is in the buffer.
-	 */
-	return write_page(mtd, chip, buf, true);
-}
-
-static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			    int page)
-{
-	return write_oob_data(mtd, chip->oob_poi, page);
-}
-
-static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			   int page)
-{
-	read_oob_data(mtd, chip->oob_poi, page);
-
-	return 0;
-}
-
-static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			    uint8_t *buf, int oob_required, int page)
-{
-	unsigned int max_bitflips;
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-
-	dma_addr_t addr = denali->buf.dma_buf;
-	size_t size = mtd->writesize + mtd->oobsize;
-
-	uint32_t irq_status;
-	uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
-			    INTR_STATUS__ECC_ERR;
-	bool check_erased_page = false;
-
-	if (page != denali->page) {
-		dev_err(denali->dev,
-			"IN %s: page %d is not equal to denali->page %d",
-			__func__, page, denali->page);
-		BUG();
-	}
-
-	setup_ecc_for_xfer(denali, true, false);
-
-	denali_enable_dma(denali, true);
-	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
-
-	clear_interrupts(denali);
-	denali_setup_dma(denali, DENALI_READ);
-
-	/* wait for operation to complete */
-	irq_status = wait_for_irq(denali, irq_mask);
-
-	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
-
-	memcpy(buf, denali->buf.buf, mtd->writesize);
-
-	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
-	denali_enable_dma(denali, false);
-
-	if (check_erased_page) {
-		read_oob_data(mtd, chip->oob_poi, denali->page);
-
-		/* check ECC failures that may have occurred on erased pages */
-		if (check_erased_page) {
-			if (!is_erased(buf, mtd->writesize))
-				mtd->ecc_stats.failed++;
-			if (!is_erased(buf, mtd->oobsize))
-				mtd->ecc_stats.failed++;
-		}
-	}
-	return max_bitflips;
-}
-
-static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	dma_addr_t addr = denali->buf.dma_buf;
-	size_t size = mtd->writesize + mtd->oobsize;
-	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
-
-	if (page != denali->page) {
-		dev_err(denali->dev,
-			"IN %s: page %d is not equal to denali->page %d",
-			__func__, page, denali->page);
-		BUG();
-	}
-
-	setup_ecc_for_xfer(denali, false, true);
-	denali_enable_dma(denali, true);
-
-	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
-
-	clear_interrupts(denali);
-	denali_setup_dma(denali, DENALI_READ);
-
-	/* wait for operation to complete */
-	wait_for_irq(denali, irq_mask);
-
-	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
-
-	denali_enable_dma(denali, false);
-
-	memcpy(buf, denali->buf.buf, mtd->writesize);
-	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
-
-	return 0;
-}
-
-static uint8_t denali_read_byte(struct mtd_info *mtd)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	uint8_t result = 0xff;
-
-	if (denali->buf.head < denali->buf.tail)
-		result = denali->buf.buf[denali->buf.head++];
-
-	return result;
-}
-
-static void denali_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-
-	spin_lock_irq(&denali->irq_lock);
-	denali->flash_bank = chip;
-	spin_unlock_irq(&denali->irq_lock);
-}
-
-static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	int status = denali->status;
-
-	denali->status = 0;
-
-	return status;
-}
-
-static int denali_erase(struct mtd_info *mtd, int page)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-
-	uint32_t cmd, irq_status;
-
-	clear_interrupts(denali);
-
-	/* setup page read request for access type */
-	cmd = MODE_10 | BANK(denali->flash_bank) | page;
-	index_addr(denali, cmd, 0x1);
-
-	/* wait for erase to complete or failure to occur */
-	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
-					INTR_STATUS__ERASE_FAIL);
-
-	return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
-}
-
-static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
-			   int page)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	uint32_t addr, id;
-	int i;
-
-	switch (cmd) {
-	case NAND_CMD_PAGEPROG:
-		break;
-	case NAND_CMD_STATUS:
-		read_status(denali);
-		break;
-	case NAND_CMD_READID:
-	case NAND_CMD_PARAM:
-		reset_buf(denali);
-		/*
-		 * the manufacturer ID read from the register is sometimes
-		 * wrong, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
-		 * so send the READID cmd to the NAND instead
-		 */
-		addr = MODE_11 | BANK(denali->flash_bank);
-		index_addr(denali, addr | 0, 0x90);
-		index_addr(denali, addr | 1, col);
-		for (i = 0; i < 8; i++) {
-			index_addr_read_data(denali, addr | 2, &id);
-			write_byte_to_buf(denali, id);
-		}
-		break;
-	case NAND_CMD_READ0:
-	case NAND_CMD_SEQIN:
-		denali->page = page;
-		break;
-	case NAND_CMD_RESET:
-		reset_bank(denali);
-		break;
-	case NAND_CMD_READOOB:
-		/* TODO: Read OOB data */
-		break;
-	default:
-		pr_err(": unsupported command received 0x%x\n", cmd);
-		break;
-	}
-}
-/* end NAND core entry points */
-
-/* Initialization code to bring the device up to a known good state */
-static void denali_hw_init(struct denali_nand_info *denali)
-{
-	/*
-	 * tell the driver how many bytes the controller will skip before
-	 * writing the ECC code in the OOB area; this register may already
-	 * have been set by firmware, so read the value back.
-	 * If this value is 0, just leave it alone.
-	 */
-	denali->bbtskipbytes = ioread32(denali->flash_reg +
-						SPARE_AREA_SKIP_BYTES);
-	detect_max_banks(denali);
-	denali_nand_reset(denali);
-	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
-	iowrite32(CHIP_EN_DONT_CARE__FLAG,
-			denali->flash_reg + CHIP_ENABLE_DONT_CARE);
-
-	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
-
-	/* these registers should be set to known values at init time */
-	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
-	iowrite32(1, denali->flash_reg + ECC_ENABLE);
-	denali_nand_timing_set(denali);
-	denali_irq_init(denali);
-}
-
-/*
- * Although the controller spec says SLC ECC is forced to be 4-bit,
- * the denali controller in MRST only supports 15-bit and 8-bit ECC
- * correction
- */
-#define ECC_8BITS	14
-#define ECC_15BITS	26
-
-static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
-				struct mtd_oob_region *oobregion)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = denali->bbtskipbytes;
-	oobregion->length = chip->ecc.total;
-
-	return 0;
-}
-
-static int denali_ooblayout_free(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct denali_nand_info *denali = mtd_to_denali(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
-	oobregion->length = mtd->oobsize - oobregion->offset;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
-	.ecc = denali_ooblayout_ecc,
-	.free = denali_ooblayout_free,
-};
-
-static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs =	8,
-	.len = 4,
-	.veroffs = 12,
-	.maxblocks = 4,
-	.pattern = bbt_pattern,
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs =	8,
-	.len = 4,
-	.veroffs = 12,
-	.maxblocks = 4,
-	.pattern = mirror_pattern,
-};
-
-/* initialize driver data structures */
-static void denali_drv_init(struct denali_nand_info *denali)
-{
-	denali->idx = 0;
-
-	/* setup interrupt handler */
-	/*
-	 * the completion object will be used to notify
-	 * the waiter that the interrupt has been handled
-	 */
-	init_completion(&denali->complete);
-
-	/*
-	 * the spinlock will be used to synchronize the ISR with any
-	 * code that might access shared data (the interrupt status)
-	 */
-	spin_lock_init(&denali->irq_lock);
-
-	/* indicate that MTD has not selected a valid bank yet */
-	denali->flash_bank = CHIP_SELECT_INVALID;
-
-	/* initialize our irq_status variable to indicate no interrupts */
-	denali->irq_status = 0;
-}
-
-int denali_init(struct denali_nand_info *denali)
-{
-	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
-	int ret;
-
-	if (denali->platform == INTEL_CE4100) {
-		/*
-		 * Due to a silicon limitation, we can only support
-		 * ONFI timing mode 1 and below.
-		 */
-		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
-			pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
-			return -EINVAL;
-		}
-	}
-
-	/* allocate a temporary buffer for nand_scan_ident() */
-	denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
-					GFP_DMA | GFP_KERNEL);
-	if (!denali->buf.buf)
-		return -ENOMEM;
-
-	mtd->dev.parent = denali->dev;
-	denali_hw_init(denali);
-	denali_drv_init(denali);
-
-	/*
-	 * denali_isr is registered only after all the hardware
-	 * initialization has finished
-	 */
-	if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
-			DENALI_NAND_NAME, denali)) {
-		pr_err("Spectra: Unable to allocate IRQ\n");
-		return -ENODEV;
-	}
-
-	/* now that our ISR is registered, we can enable interrupts */
-	denali_set_intr_modes(denali, true);
-	mtd->name = "denali-nand";
-
-	/* register the driver with the NAND core subsystem */
-	denali->nand.select_chip = denali_select_chip;
-	denali->nand.cmdfunc = denali_cmdfunc;
-	denali->nand.read_byte = denali_read_byte;
-	denali->nand.waitfunc = denali_waitfunc;
-
-	/*
-	 * scan for NAND devices attached to the controller;
-	 * this is the first stage of a two-step process to register
-	 * with the NAND subsystem
-	 */
-	if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
-		ret = -ENXIO;
-		goto failed_req_irq;
-	}
-
-	/* allocate the right size buffer now */
-	devm_kfree(denali->dev, denali->buf.buf);
-	denali->buf.buf = devm_kzalloc(denali->dev,
-			     mtd->writesize + mtd->oobsize,
-			     GFP_KERNEL);
-	if (!denali->buf.buf) {
-		ret = -ENOMEM;
-		goto failed_req_irq;
-	}
-
-	/* Is 32-bit DMA supported? */
-	ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
-	if (ret) {
-		pr_err("Spectra: no usable DMA configuration\n");
-		goto failed_req_irq;
-	}
-
-	denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
-			     mtd->writesize + mtd->oobsize,
-			     DMA_BIDIRECTIONAL);
-	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
-		dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
-		ret = -EIO;
-		goto failed_req_irq;
-	}
-
-	/*
-	 * support for multiple NAND devices:
-	 * MTD knows nothing about multiple NANDs, so we must tell it
-	 * the real page size and anything else necessary
-	 */
-	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
-	denali->nand.chipsize <<= (denali->devnum - 1);
-	denali->nand.page_shift += (denali->devnum - 1);
-	denali->nand.pagemask = (denali->nand.chipsize >>
-						denali->nand.page_shift) - 1;
-	denali->nand.bbt_erase_shift += (denali->devnum - 1);
-	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
-	denali->nand.chip_shift += (denali->devnum - 1);
-	mtd->writesize <<= (denali->devnum - 1);
-	mtd->oobsize <<= (denali->devnum - 1);
-	mtd->erasesize <<= (denali->devnum - 1);
-	mtd->size = denali->nand.numchips * denali->nand.chipsize;
-	denali->bbtskipbytes *= denali->devnum;
-
-	/*
-	 * second stage of the NAND scan
-	 * this stage requires information regarding ECC and
-	 * bad block management.
-	 */
-
-	/* Bad block management */
-	denali->nand.bbt_td = &bbt_main_descr;
-	denali->nand.bbt_md = &bbt_mirror_descr;
-
-	/* skip the scan for now until we have OOB read and write support */
-	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
-	denali->nand.options |= NAND_SKIP_BBTSCAN;
-	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
-
-	/* no subpage writes on denali */
-	denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
-
-	/*
-	 * The Denali controller only supports 15-bit and 8-bit ECC in MRST,
-	 * so let the controller do 15-bit ECC for MLC and 8-bit ECC for
-	 * SLC where possible.
-	 */
-	if (!nand_is_slc(&denali->nand) &&
-			(mtd->oobsize > (denali->bbtskipbytes +
-			ECC_15BITS * (mtd->writesize /
-			ECC_SECTOR_SIZE)))) {
-		/* if the MLC OOB size is large enough, use 15-bit ECC */
-		denali->nand.ecc.strength = 15;
-		denali->nand.ecc.bytes = ECC_15BITS;
-		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
-	} else if (mtd->oobsize < (denali->bbtskipbytes +
-			ECC_8BITS * (mtd->writesize /
-			ECC_SECTOR_SIZE))) {
-		pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
-		goto failed_req_irq;
-	} else {
-		denali->nand.ecc.strength = 8;
-		denali->nand.ecc.bytes = ECC_8BITS;
-		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
-	}
-
-	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
-	denali->nand.ecc.bytes *= denali->devnum;
-	denali->nand.ecc.strength *= denali->devnum;
-
-	/*
-	 * Let the driver know the total number of blocks and how many
-	 * blocks each NAND chip contains. blksperchip helps the driver
-	 * work out how many blocks are taken by the FW.
-	 */
-	denali->totalblks = mtd->size >> denali->nand.phys_erase_shift;
-	denali->blksperchip = denali->totalblks / denali->nand.numchips;
-
-	/* override the default read operations */
-	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
-	denali->nand.ecc.read_page = denali_read_page;
-	denali->nand.ecc.read_page_raw = denali_read_page_raw;
-	denali->nand.ecc.write_page = denali_write_page;
-	denali->nand.ecc.write_page_raw = denali_write_page_raw;
-	denali->nand.ecc.read_oob = denali_read_oob;
-	denali->nand.ecc.write_oob = denali_write_oob;
-	denali->nand.erase = denali_erase;
-
-	if (nand_scan_tail(mtd)) {
-		ret = -ENXIO;
-		goto failed_req_irq;
-	}
-
-	ret = mtd_device_register(mtd, NULL, 0);
-	if (ret) {
-		dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
-				ret);
-		goto failed_req_irq;
-	}
-	return 0;
-
-failed_req_irq:
-	denali_irq_cleanup(denali->irq, denali);
-
-	return ret;
-}
-EXPORT_SYMBOL(denali_init);
-
-/* driver exit point */
-void denali_remove(struct denali_nand_info *denali)
-{
-	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
-	/*
-	 * Pre-compute DMA buffer size to avoid any problems in case
-	 * nand_release() ever changes in a way that mtd->writesize and
-	 * mtd->oobsize are not reliable after this call.
-	 */
-	int bufsize = mtd->writesize + mtd->oobsize;
-
-	nand_release(mtd);
-	denali_irq_cleanup(denali->irq, denali);
-	dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
-			 DMA_BIDIRECTIONAL);
-}
-EXPORT_SYMBOL(denali_remove);
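For reviewers following the ECC correction path in the removed denali.c above: handle_ecc() maps each reported (sector, byte, device) triple onto an offset into the interleaved page buffer before flipping the corrupted byte. Below is a minimal standalone sketch of that arithmetic, using made-up example values rather than data from a real device:

#include <stdio.h>

#define ECC_SECTOR_SIZE 512

/*
 * Sketch of the offset computation in handle_ecc(): when devnum NAND
 * devices are wired in parallel, byte err_byte of ECC sector err_sector
 * on device err_device lands at this offset in the interleaved buffer.
 */
static int ecc_error_offset(int err_sector, int err_byte, int err_device,
			    int devnum)
{
	return (err_sector * ECC_SECTOR_SIZE + err_byte) * devnum + err_device;
}

int main(void)
{
	/* hypothetical error report: sector 2, byte 17, device 1 of 2 */
	printf("buffer offset: %d\n", ecc_error_offset(2, 17, 1, 2));
	return 0;
}
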
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
deleted file mode 100644
index 37618b532317..000000000000
--- a/drivers/mtd/nand/denali.h
+++ /dev/null
@@ -1,484 +0,0 @@ 
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef __DENALI_H__
-#define __DENALI_H__
-
-#include <linux/mtd/rawnand.h>
-
-#define DEVICE_RESET				0x0
-#define     DEVICE_RESET__BANK0				0x0001
-#define     DEVICE_RESET__BANK1				0x0002
-#define     DEVICE_RESET__BANK2				0x0004
-#define     DEVICE_RESET__BANK3				0x0008
-
-#define TRANSFER_SPARE_REG			0x10
-#define     TRANSFER_SPARE_REG__FLAG			0x0001
-
-#define LOAD_WAIT_CNT				0x20
-#define     LOAD_WAIT_CNT__VALUE			0xffff
-
-#define PROGRAM_WAIT_CNT			0x30
-#define     PROGRAM_WAIT_CNT__VALUE			0xffff
-
-#define ERASE_WAIT_CNT				0x40
-#define     ERASE_WAIT_CNT__VALUE			0xffff
-
-#define INT_MON_CYCCNT				0x50
-#define     INT_MON_CYCCNT__VALUE			0xffff
-
-#define RB_PIN_ENABLED				0x60
-#define     RB_PIN_ENABLED__BANK0			0x0001
-#define     RB_PIN_ENABLED__BANK1			0x0002
-#define     RB_PIN_ENABLED__BANK2			0x0004
-#define     RB_PIN_ENABLED__BANK3			0x0008
-
-#define MULTIPLANE_OPERATION			0x70
-#define     MULTIPLANE_OPERATION__FLAG			0x0001
-
-#define MULTIPLANE_READ_ENABLE			0x80
-#define     MULTIPLANE_READ_ENABLE__FLAG		0x0001
-
-#define COPYBACK_DISABLE			0x90
-#define     COPYBACK_DISABLE__FLAG			0x0001
-
-#define CACHE_WRITE_ENABLE			0xa0
-#define     CACHE_WRITE_ENABLE__FLAG			0x0001
-
-#define CACHE_READ_ENABLE			0xb0
-#define     CACHE_READ_ENABLE__FLAG			0x0001
-
-#define PREFETCH_MODE				0xc0
-#define     PREFETCH_MODE__PREFETCH_EN			0x0001
-#define     PREFETCH_MODE__PREFETCH_BURST_LENGTH	0xfff0
-
-#define CHIP_ENABLE_DONT_CARE			0xd0
-#define     CHIP_EN_DONT_CARE__FLAG			0x01
-
-#define ECC_ENABLE				0xe0
-#define     ECC_ENABLE__FLAG				0x0001
-
-#define GLOBAL_INT_ENABLE			0xf0
-#define     GLOBAL_INT_EN_FLAG				0x01
-
-#define WE_2_RE					0x100
-#define     WE_2_RE__VALUE				0x003f
-
-#define ADDR_2_DATA				0x110
-#define     ADDR_2_DATA__VALUE				0x003f
-
-#define RE_2_WE					0x120
-#define     RE_2_WE__VALUE				0x003f
-
-#define ACC_CLKS				0x130
-#define     ACC_CLKS__VALUE				0x000f
-
-#define NUMBER_OF_PLANES			0x140
-#define     NUMBER_OF_PLANES__VALUE			0x0007
-
-#define PAGES_PER_BLOCK				0x150
-#define     PAGES_PER_BLOCK__VALUE			0xffff
-
-#define DEVICE_WIDTH				0x160
-#define     DEVICE_WIDTH__VALUE				0x0003
-
-#define DEVICE_MAIN_AREA_SIZE			0x170
-#define     DEVICE_MAIN_AREA_SIZE__VALUE		0xffff
-
-#define DEVICE_SPARE_AREA_SIZE			0x180
-#define     DEVICE_SPARE_AREA_SIZE__VALUE		0xffff
-
-#define TWO_ROW_ADDR_CYCLES			0x190
-#define     TWO_ROW_ADDR_CYCLES__FLAG			0x0001
-
-#define MULTIPLANE_ADDR_RESTRICT		0x1a0
-#define     MULTIPLANE_ADDR_RESTRICT__FLAG		0x0001
-
-#define ECC_CORRECTION				0x1b0
-#define     ECC_CORRECTION__VALUE			0x001f
-
-#define READ_MODE				0x1c0
-#define     READ_MODE__VALUE				0x000f
-
-#define WRITE_MODE				0x1d0
-#define     WRITE_MODE__VALUE				0x000f
-
-#define COPYBACK_MODE				0x1e0
-#define     COPYBACK_MODE__VALUE			0x000f
-
-#define RDWR_EN_LO_CNT				0x1f0
-#define     RDWR_EN_LO_CNT__VALUE			0x001f
-
-#define RDWR_EN_HI_CNT				0x200
-#define     RDWR_EN_HI_CNT__VALUE			0x001f
-
-#define MAX_RD_DELAY				0x210
-#define     MAX_RD_DELAY__VALUE				0x000f
-
-#define CS_SETUP_CNT				0x220
-#define     CS_SETUP_CNT__VALUE				0x001f
-
-#define SPARE_AREA_SKIP_BYTES			0x230
-#define     SPARE_AREA_SKIP_BYTES__VALUE		0x003f
-
-#define SPARE_AREA_MARKER			0x240
-#define     SPARE_AREA_MARKER__VALUE			0xffff
-
-#define DEVICES_CONNECTED			0x250
-#define     DEVICES_CONNECTED__VALUE			0x0007
-
-#define DIE_MASK				0x260
-#define     DIE_MASK__VALUE				0x00ff
-
-#define FIRST_BLOCK_OF_NEXT_PLANE		0x270
-#define     FIRST_BLOCK_OF_NEXT_PLANE__VALUE		0xffff
-
-#define WRITE_PROTECT				0x280
-#define     WRITE_PROTECT__FLAG				0x0001
-
-#define RE_2_RE					0x290
-#define     RE_2_RE__VALUE				0x003f
-
-#define MANUFACTURER_ID				0x300
-#define     MANUFACTURER_ID__VALUE			0x00ff
-
-#define DEVICE_ID				0x310
-#define     DEVICE_ID__VALUE				0x00ff
-
-#define DEVICE_PARAM_0				0x320
-#define     DEVICE_PARAM_0__VALUE			0x00ff
-
-#define DEVICE_PARAM_1				0x330
-#define     DEVICE_PARAM_1__VALUE			0x00ff
-
-#define DEVICE_PARAM_2				0x340
-#define     DEVICE_PARAM_2__VALUE			0x00ff
-
-#define LOGICAL_PAGE_DATA_SIZE			0x350
-#define     LOGICAL_PAGE_DATA_SIZE__VALUE		0xffff
-
-#define LOGICAL_PAGE_SPARE_SIZE			0x360
-#define     LOGICAL_PAGE_SPARE_SIZE__VALUE		0xffff
-
-#define REVISION				0x370
-#define     REVISION__VALUE				0xffff
-#define MAKE_COMPARABLE_REVISION(x)		swab16((x) & REVISION__VALUE)
-#define REVISION_5_1				0x00000501
-
-#define ONFI_DEVICE_FEATURES			0x380
-#define     ONFI_DEVICE_FEATURES__VALUE			0x003f
-
-#define ONFI_OPTIONAL_COMMANDS			0x390
-#define     ONFI_OPTIONAL_COMMANDS__VALUE		0x003f
-
-#define ONFI_TIMING_MODE			0x3a0
-#define     ONFI_TIMING_MODE__VALUE			0x003f
-
-#define ONFI_PGM_CACHE_TIMING_MODE		0x3b0
-#define     ONFI_PGM_CACHE_TIMING_MODE__VALUE		0x003f
-
-#define ONFI_DEVICE_NO_OF_LUNS			0x3c0
-#define     ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS		0x00ff
-#define     ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE		0x0100
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L	0x3d0
-#define     ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE	0xffff
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U	0x3e0
-#define     ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE	0xffff
-
-#define FEATURES					0x3f0
-#define     FEATURES__N_BANKS				0x0003
-#define     FEATURES__ECC_MAX_ERR			0x003c
-#define     FEATURES__DMA				0x0040
-#define     FEATURES__CMD_DMA				0x0080
-#define     FEATURES__PARTITION				0x0100
-#define     FEATURES__XDMA_SIDEBAND			0x0200
-#define     FEATURES__GPREG				0x0400
-#define     FEATURES__INDEX_ADDR			0x0800
-
-#define TRANSFER_MODE				0x400
-#define     TRANSFER_MODE__VALUE			0x0003
-
-#define INTR_STATUS(__bank)	(0x410 + ((__bank) * 0x50))
-#define INTR_EN(__bank)		(0x420 + ((__bank) * 0x50))
-
-#define     INTR_STATUS__ECC_TRANSACTION_DONE		0x0001
-#define     INTR_STATUS__ECC_ERR			0x0002
-#define     INTR_STATUS__DMA_CMD_COMP			0x0004
-#define     INTR_STATUS__TIME_OUT			0x0008
-#define     INTR_STATUS__PROGRAM_FAIL			0x0010
-#define     INTR_STATUS__ERASE_FAIL			0x0020
-#define     INTR_STATUS__LOAD_COMP			0x0040
-#define     INTR_STATUS__PROGRAM_COMP			0x0080
-#define     INTR_STATUS__ERASE_COMP			0x0100
-#define     INTR_STATUS__PIPE_CPYBCK_CMD_COMP		0x0200
-#define     INTR_STATUS__LOCKED_BLK			0x0400
-#define     INTR_STATUS__UNSUP_CMD			0x0800
-#define     INTR_STATUS__INT_ACT			0x1000
-#define     INTR_STATUS__RST_COMP			0x2000
-#define     INTR_STATUS__PIPE_CMD_ERR			0x4000
-#define     INTR_STATUS__PAGE_XFER_INC			0x8000
-
-#define     INTR_EN__ECC_TRANSACTION_DONE		0x0001
-#define     INTR_EN__ECC_ERR				0x0002
-#define     INTR_EN__DMA_CMD_COMP			0x0004
-#define     INTR_EN__TIME_OUT				0x0008
-#define     INTR_EN__PROGRAM_FAIL			0x0010
-#define     INTR_EN__ERASE_FAIL				0x0020
-#define     INTR_EN__LOAD_COMP				0x0040
-#define     INTR_EN__PROGRAM_COMP			0x0080
-#define     INTR_EN__ERASE_COMP				0x0100
-#define     INTR_EN__PIPE_CPYBCK_CMD_COMP		0x0200
-#define     INTR_EN__LOCKED_BLK				0x0400
-#define     INTR_EN__UNSUP_CMD				0x0800
-#define     INTR_EN__INT_ACT				0x1000
-#define     INTR_EN__RST_COMP				0x2000
-#define     INTR_EN__PIPE_CMD_ERR			0x4000
-#define     INTR_EN__PAGE_XFER_INC			0x8000
-
-#define PAGE_CNT(__bank)	(0x430 + ((__bank) * 0x50))
-#define ERR_PAGE_ADDR(__bank)	(0x440 + ((__bank) * 0x50))
-#define ERR_BLOCK_ADDR(__bank)	(0x450 + ((__bank) * 0x50))
-
-#define DATA_INTR				0x550
-#define     DATA_INTR__WRITE_SPACE_AV			0x0001
-#define     DATA_INTR__READ_DATA_AV			0x0002
-
-#define DATA_INTR_EN				0x560
-#define     DATA_INTR_EN__WRITE_SPACE_AV		0x0001
-#define     DATA_INTR_EN__READ_DATA_AV			0x0002
-
-#define GPREG_0					0x570
-#define     GPREG_0__VALUE				0xffff
-
-#define GPREG_1					0x580
-#define     GPREG_1__VALUE				0xffff
-
-#define GPREG_2					0x590
-#define     GPREG_2__VALUE				0xffff
-
-#define GPREG_3					0x5a0
-#define     GPREG_3__VALUE				0xffff
-
-#define ECC_THRESHOLD				0x600
-#define     ECC_THRESHOLD__VALUE			0x03ff
-
-#define ECC_ERROR_BLOCK_ADDRESS			0x610
-#define     ECC_ERROR_BLOCK_ADDRESS__VALUE		0xffff
-
-#define ECC_ERROR_PAGE_ADDRESS			0x620
-#define     ECC_ERROR_PAGE_ADDRESS__VALUE		0x0fff
-#define     ECC_ERROR_PAGE_ADDRESS__BANK		0xf000
-
-#define ECC_ERROR_ADDRESS			0x630
-#define     ECC_ERROR_ADDRESS__OFFSET			0x0fff
-#define     ECC_ERROR_ADDRESS__SECTOR_NR		0xf000
-
-#define ERR_CORRECTION_INFO			0x640
-#define     ERR_CORRECTION_INFO__BYTEMASK		0x00ff
-#define     ERR_CORRECTION_INFO__DEVICE_NR		0x0f00
-#define     ERR_CORRECTION_INFO__ERROR_TYPE		0x4000
-#define     ERR_CORRECTION_INFO__LAST_ERR_INFO		0x8000
-
-#define DMA_ENABLE				0x700
-#define     DMA_ENABLE__FLAG				0x0001
-
-#define IGNORE_ECC_DONE				0x710
-#define     IGNORE_ECC_DONE__FLAG			0x0001
-
-#define DMA_INTR				0x720
-#define     DMA_INTR__TARGET_ERROR			0x0001
-#define     DMA_INTR__DESC_COMP_CHANNEL0		0x0002
-#define     DMA_INTR__DESC_COMP_CHANNEL1		0x0004
-#define     DMA_INTR__DESC_COMP_CHANNEL2		0x0008
-#define     DMA_INTR__DESC_COMP_CHANNEL3		0x0010
-#define     DMA_INTR__MEMCOPY_DESC_COMP		0x0020
-
-#define DMA_INTR_EN				0x730
-#define     DMA_INTR_EN__TARGET_ERROR			0x0001
-#define     DMA_INTR_EN__DESC_COMP_CHANNEL0		0x0002
-#define     DMA_INTR_EN__DESC_COMP_CHANNEL1		0x0004
-#define     DMA_INTR_EN__DESC_COMP_CHANNEL2		0x0008
-#define     DMA_INTR_EN__DESC_COMP_CHANNEL3		0x0010
-#define     DMA_INTR_EN__MEMCOPY_DESC_COMP		0x0020
-
-#define TARGET_ERR_ADDR_LO			0x740
-#define     TARGET_ERR_ADDR_LO__VALUE			0xffff
-
-#define TARGET_ERR_ADDR_HI			0x750
-#define     TARGET_ERR_ADDR_HI__VALUE			0xffff
-
-#define CHNL_ACTIVE				0x760
-#define     CHNL_ACTIVE__CHANNEL0			0x0001
-#define     CHNL_ACTIVE__CHANNEL1			0x0002
-#define     CHNL_ACTIVE__CHANNEL2			0x0004
-#define     CHNL_ACTIVE__CHANNEL3			0x0008
-
-#define ACTIVE_SRC_ID				0x800
-#define     ACTIVE_SRC_ID__VALUE			0x00ff
-
-#define PTN_INTR					0x810
-#define     PTN_INTR__CONFIG_ERROR			0x0001
-#define     PTN_INTR__ACCESS_ERROR_BANK0		0x0002
-#define     PTN_INTR__ACCESS_ERROR_BANK1		0x0004
-#define     PTN_INTR__ACCESS_ERROR_BANK2		0x0008
-#define     PTN_INTR__ACCESS_ERROR_BANK3		0x0010
-#define     PTN_INTR__REG_ACCESS_ERROR			0x0020
-
-#define PTN_INTR_EN				0x820
-#define     PTN_INTR_EN__CONFIG_ERROR			0x0001
-#define     PTN_INTR_EN__ACCESS_ERROR_BANK0		0x0002
-#define     PTN_INTR_EN__ACCESS_ERROR_BANK1		0x0004
-#define     PTN_INTR_EN__ACCESS_ERROR_BANK2		0x0008
-#define     PTN_INTR_EN__ACCESS_ERROR_BANK3		0x0010
-#define     PTN_INTR_EN__REG_ACCESS_ERROR		0x0020
-
-#define PERM_SRC_ID(__bank)	(0x830 + ((__bank) * 0x40))
-#define     PERM_SRC_ID__SRCID				0x00ff
-#define     PERM_SRC_ID__DIRECT_ACCESS_ACTIVE		0x0800
-#define     PERM_SRC_ID__WRITE_ACTIVE			0x2000
-#define     PERM_SRC_ID__READ_ACTIVE			0x4000
-#define     PERM_SRC_ID__PARTITION_VALID		0x8000
-
-#define MIN_BLK_ADDR(__bank)	(0x840 + ((__bank) * 0x40))
-#define     MIN_BLK_ADDR__VALUE				0xffff
-
-#define MAX_BLK_ADDR(__bank)	(0x850 + ((__bank) * 0x40))
-#define     MAX_BLK_ADDR__VALUE				0xffff
-
-#define MIN_MAX_BANK(__bank)	(0x860 + ((__bank) * 0x40))
-#define     MIN_MAX_BANK__MIN_VALUE			0x0003
-#define     MIN_MAX_BANK__MAX_VALUE			0x000c
-
-
-/* ffsdefs.h */
-#define CLEAR 0                 /*use this to clear a field instead of "fail"*/
-#define SET   1                 /*use this to set a field instead of "pass"*/
-#define FAIL 1                  /*failed flag*/
-#define PASS 0                  /*success flag*/
-#define ERR -1                  /*error flag*/
-
-/* lld.h */
-#define GOOD_BLOCK 0
-#define DEFECTIVE_BLOCK 1
-#define READ_ERROR 2
-
-#define CLK_X  5
-#define CLK_MULTI 4
-
-/* spectraswconfig.h */
-#define CMD_DMA 0
-
-#define SPECTRA_PARTITION_ID    0
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK     3
-#define NUM_FREE_BLOCKS_GATE    30
-
-/* KBV - Updated to LNW scratch register address */
-#define SCRATCH_REG_ADDR    CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
-#define SCRATCH_REG_SIZE    64
-
-#define GLOB_HWCTL_DEFAULT_BLKS    2048
-
-#define SUPPORT_15BITECC        1
-#define SUPPORT_8BITECC         1
-
-#define CUSTOM_CONF_PARAMS      0
-
-#define ONFI_BLOOM_TIME         1
-#define MODE5_WORKAROUND        0
-
-
-#define MODE_00    0x00000000
-#define MODE_01    0x04000000
-#define MODE_10    0x08000000
-#define MODE_11    0x0C000000
-
-
-#define DATA_TRANSFER_MODE              0
-#define PROTECTION_PER_BLOCK            1
-#define LOAD_WAIT_COUNT                 2
-#define PROGRAM_WAIT_COUNT              3
-#define ERASE_WAIT_COUNT                4
-#define INT_MONITOR_CYCLE_COUNT         5
-#define READ_BUSY_PIN_ENABLED           6
-#define MULTIPLANE_OPERATION_SUPPORT    7
-#define PRE_FETCH_MODE                  8
-#define CE_DONT_CARE_SUPPORT            9
-#define COPYBACK_SUPPORT                10
-#define CACHE_WRITE_SUPPORT             11
-#define CACHE_READ_SUPPORT              12
-#define NUM_PAGES_IN_BLOCK              13
-#define ECC_ENABLE_SELECT               14
-#define WRITE_ENABLE_2_READ_ENABLE      15
-#define ADDRESS_2_DATA                  16
-#define READ_ENABLE_2_WRITE_ENABLE      17
-#define TWO_ROW_ADDRESS_CYCLES          18
-#define MULTIPLANE_ADDRESS_RESTRICT     19
-#define ACC_CLOCKS                      20
-#define READ_WRITE_ENABLE_LOW_COUNT     21
-#define READ_WRITE_ENABLE_HIGH_COUNT    22
-
-#define ECC_SECTOR_SIZE     512
-
-struct nand_buf {
-	int head;
-	int tail;
-	uint8_t *buf;
-	dma_addr_t dma_buf;
-};
-
-#define INTEL_CE4100	1
-#define INTEL_MRST	2
-#define DT		3
-
-struct denali_nand_info {
-	struct nand_chip nand;
-	int flash_bank; /* currently selected chip */
-	int status;
-	int platform;
-	struct nand_buf buf;
-	struct device *dev;
-	int total_used_banks;
-	uint32_t block;  /* stored for future use */
-	uint16_t page;
-	void __iomem *flash_reg;  /* Mapped io reg base address */
-	void __iomem *flash_mem;  /* Mapped io reg base address */
-
-	/* elements used by ISR */
-	struct completion complete;
-	spinlock_t irq_lock;
-	uint32_t irq_status;
-	int irq_debug_array[32];
-	int idx;
-	int irq;
-
-	uint32_t devnum;	/* represent how many nands connected */
-	uint32_t fwblks; /* represent how many blocks FW used */
-	uint32_t totalblks;
-	uint32_t blksperchip;
-	uint32_t bbtskipbytes;
-	uint32_t max_banks;
-};
-
-extern int denali_init(struct denali_nand_info *denali);
-extern void denali_remove(struct denali_nand_info *denali);
-
-#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c
deleted file mode 100644
index 0cb1e8d9fbfc..000000000000
--- a/drivers/mtd/nand/denali_dt.c
+++ /dev/null
@@ -1,131 +0,0 @@ 
-/*
- * NAND Flash Controller Device Driver for DT
- *
- * Copyright © 2011, Picochip.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-
-#include "denali.h"
-
-struct denali_dt {
-	struct denali_nand_info	denali;
-	struct clk		*clk;
-};
-
-static const struct of_device_id denali_nand_dt_ids[] = {
-		{ .compatible = "denali,denali-nand-dt" },
-		{ /* sentinel */ }
-	};
-
-MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
-
-static u64 denali_dma_mask;
-
-static int denali_dt_probe(struct platform_device *ofdev)
-{
-	struct resource *denali_reg, *nand_data;
-	struct denali_dt *dt;
-	struct denali_nand_info *denali;
-	int ret;
-	const struct of_device_id *of_id;
-
-	of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
-	if (of_id) {
-		ofdev->id_entry = of_id->data;
-	} else {
-		pr_err("Failed to find the right device id.\n");
-		return -ENOMEM;
-	}
-
-	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
-	if (!dt)
-		return -ENOMEM;
-	denali = &dt->denali;
-
-	denali->platform = DT;
-	denali->dev = &ofdev->dev;
-	denali->irq = platform_get_irq(ofdev, 0);
-	if (denali->irq < 0) {
-		dev_err(&ofdev->dev, "no irq defined\n");
-		return denali->irq;
-	}
-
-	denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
-	denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg);
-	if (IS_ERR(denali->flash_reg))
-		return PTR_ERR(denali->flash_reg);
-
-	nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
-	denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data);
-	if (IS_ERR(denali->flash_mem))
-		return PTR_ERR(denali->flash_mem);
-
-	if (!of_property_read_u32(ofdev->dev.of_node,
-		"dma-mask", (u32 *)&denali_dma_mask)) {
-		denali->dev->dma_mask = &denali_dma_mask;
-	} else {
-		denali->dev->dma_mask = NULL;
-	}
-
-	dt->clk = devm_clk_get(&ofdev->dev, NULL);
-	if (IS_ERR(dt->clk)) {
-		dev_err(&ofdev->dev, "no clk available\n");
-		return PTR_ERR(dt->clk);
-	}
-	clk_prepare_enable(dt->clk);
-
-	ret = denali_init(denali);
-	if (ret)
-		goto out_disable_clk;
-
-	platform_set_drvdata(ofdev, dt);
-	return 0;
-
-out_disable_clk:
-	clk_disable_unprepare(dt->clk);
-
-	return ret;
-}
-
-static int denali_dt_remove(struct platform_device *ofdev)
-{
-	struct denali_dt *dt = platform_get_drvdata(ofdev);
-
-	denali_remove(&dt->denali);
-	clk_disable(dt->clk);
-
-	return 0;
-}
-
-static struct platform_driver denali_dt_driver = {
-	.probe		= denali_dt_probe,
-	.remove		= denali_dt_remove,
-	.driver		= {
-		.name	= "denali-nand-dt",
-		.of_match_table	= denali_nand_dt_ids,
-	},
-};
-
-module_platform_driver(denali_dt_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jamie Iles");
-MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
deleted file mode 100644
index de31514df282..000000000000
--- a/drivers/mtd/nand/denali_pci.c
+++ /dev/null
@@ -1,121 +0,0 @@ 
-/*
- * NAND Flash Controller Device Driver
- * Copyright © 2009-2010, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-
-#include "denali.h"
-
-#define DENALI_NAND_NAME    "denali-nand-pci"
-
-/* List of platforms this NAND controller has be integrated into */
-static const struct pci_device_id denali_pci_ids[] = {
-	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
-	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
-	{ /* end: all zeroes */ }
-};
-MODULE_DEVICE_TABLE(pci, denali_pci_ids);
-
-static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
-	int ret;
-	resource_size_t csr_base, mem_base;
-	unsigned long csr_len, mem_len;
-	struct denali_nand_info *denali;
-
-	denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
-	if (!denali)
-		return -ENOMEM;
-
-	ret = pcim_enable_device(dev);
-	if (ret) {
-		dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
-		return ret;
-	}
-
-	if (id->driver_data == INTEL_CE4100) {
-		denali->platform = INTEL_CE4100;
-		mem_base = pci_resource_start(dev, 0);
-		mem_len = pci_resource_len(dev, 1);
-		csr_base = pci_resource_start(dev, 1);
-		csr_len = pci_resource_len(dev, 1);
-	} else {
-		denali->platform = INTEL_MRST;
-		csr_base = pci_resource_start(dev, 0);
-		csr_len = pci_resource_len(dev, 0);
-		mem_base = pci_resource_start(dev, 1);
-		mem_len = pci_resource_len(dev, 1);
-		if (!mem_len) {
-			mem_base = csr_base + csr_len;
-			mem_len = csr_len;
-		}
-	}
-
-	pci_set_master(dev);
-	denali->dev = &dev->dev;
-	denali->irq = dev->irq;
-
-	ret = pci_request_regions(dev, DENALI_NAND_NAME);
-	if (ret) {
-		dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
-		return ret;
-	}
-
-	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
-	if (!denali->flash_reg) {
-		dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
-		return -ENOMEM;
-	}
-
-	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
-	if (!denali->flash_mem) {
-		dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
-		ret = -ENOMEM;
-		goto failed_remap_reg;
-	}
-
-	ret = denali_init(denali);
-	if (ret)
-		goto failed_remap_mem;
-
-	pci_set_drvdata(dev, denali);
-
-	return 0;
-
-failed_remap_mem:
-	iounmap(denali->flash_mem);
-failed_remap_reg:
-	iounmap(denali->flash_reg);
-	return ret;
-}
-
-/* driver exit point */
-static void denali_pci_remove(struct pci_dev *dev)
-{
-	struct denali_nand_info *denali = pci_get_drvdata(dev);
-
-	denali_remove(denali);
-	iounmap(denali->flash_reg);
-	iounmap(denali->flash_mem);
-}
-
-static struct pci_driver denali_pci_driver = {
-	.name = DENALI_NAND_NAME,
-	.id_table = denali_pci_ids,
-	.probe = denali_pci_probe,
-	.remove = denali_pci_remove,
-};
-
-module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
deleted file mode 100644
index c3aa53caab5c..000000000000
--- a/drivers/mtd/nand/diskonchip.c
+++ /dev/null
@@ -1,1712 +0,0 @@ 
-/*
- * drivers/mtd/nand/diskonchip.c
- *
- * (C) 2003 Red Hat, Inc.
- * (C) 2004 Dan Brown <dan_brown@ieee.org>
- * (C) 2004 Kalev Lember <kalev@smartlink.ee>
- *
- * Author: David Woodhouse <dwmw2@infradead.org>
- * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
- * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
- *
- * Error correction code lifted from the old docecc code
- * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
- * Copyright (C) 2000 Netgem S.A.
- * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
- *
- * Interface to generic NAND code for M-Systems DiskOnChip devices
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/rslib.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/doc2000.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/inftl.h>
-#include <linux/module.h>
-
-/* Where to look for the devices? */
-#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
-#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
-#endif
-
-static unsigned long doc_locations[] __initdata = {
-#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
-#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
-	0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
-	0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
-	0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
-	0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
-	0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
-#else
-	0xc8000, 0xca000, 0xcc000, 0xce000,
-	0xd0000, 0xd2000, 0xd4000, 0xd6000,
-	0xd8000, 0xda000, 0xdc000, 0xde000,
-	0xe0000, 0xe2000, 0xe4000, 0xe6000,
-	0xe8000, 0xea000, 0xec000, 0xee000,
-#endif
-#endif
-	0xffffffff };
-
-static struct mtd_info *doclist = NULL;
-
-struct doc_priv {
-	void __iomem *virtadr;
-	unsigned long physadr;
-	u_char ChipID;
-	u_char CDSNControl;
-	int chips_per_floor;	/* The number of chips detected on each floor */
-	int curfloor;
-	int curchip;
-	int mh0_page;
-	int mh1_page;
-	struct mtd_info *nextdoc;
-
-	/* Handle the last stage of initialization (BBT scan, partitioning) */
-	int (*late_init)(struct mtd_info *mtd);
-};
-
-/* This is the ecc value computed by the HW ecc generator upon writing an empty
-   page, one with all 0xff for data. */
-static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
-
-#define INFTL_BBT_RESERVED_BLOCKS 4
-
-#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
-#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
-#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
-
-static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
-			      unsigned int bitmask);
-static void doc200x_select_chip(struct mtd_info *mtd, int chip);
-
-static int debug = 0;
-module_param(debug, int, 0);
-
-static int try_dword = 1;
-module_param(try_dword, int, 0);
-
-static int no_ecc_failures = 0;
-module_param(no_ecc_failures, int, 0);
-
-static int no_autopart = 0;
-module_param(no_autopart, int, 0);
-
-static int show_firmware_partition = 0;
-module_param(show_firmware_partition, int, 0);
-
-#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
-static int inftl_bbt_write = 1;
-#else
-static int inftl_bbt_write = 0;
-#endif
-module_param(inftl_bbt_write, int, 0);
-
-static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
-module_param(doc_config_location, ulong, 0);
-MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
-
-/* Sector size for HW ECC */
-#define SECTOR_SIZE 512
-/* The sector bytes are packed into NB_DATA 10 bit words */
-#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
-/* Number of roots */
-#define NROOTS 4
-/* First consective root */
-#define FCR 510
-/* Number of symbols */
-#define NN 1023
-
-/* the Reed Solomon control structure */
-static struct rs_control *rs_decoder;
-
-/*
- * The HW decoder in the DoC ASIC's provides us a error syndrome,
- * which we must convert to a standard syndrome usable by the generic
- * Reed-Solomon library code.
- *
- * Fabrice Bellard figured this out in the old docecc code. I added
- * some comments, improved a minor bit and converted it to make use
- * of the generic Reed-Solomon library. tglx
- */
-static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
-{
-	int i, j, nerr, errpos[8];
-	uint8_t parity;
-	uint16_t ds[4], s[5], tmp, errval[8], syn[4];
-
-	memset(syn, 0, sizeof(syn));
-	/* Convert the ecc bytes into words */
-	ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
-	ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
-	ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
-	ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
-	parity = ecc[1];
-
-	/* Initialize the syndrome buffer */
-	for (i = 0; i < NROOTS; i++)
-		s[i] = ds[0];
-	/*
-	 *  Evaluate
-	 *  s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
-	 *  where x = alpha^(FCR + i)
-	 */
-	for (j = 1; j < NROOTS; j++) {
-		if (ds[j] == 0)
-			continue;
-		tmp = rs->index_of[ds[j]];
-		for (i = 0; i < NROOTS; i++)
-			s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
-	}
-
-	/* Calc syn[i] = s[i] / alpha^(v + i) */
-	for (i = 0; i < NROOTS; i++) {
-		if (s[i])
-			syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
-	}
-	/* Call the decoder library */
-	nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
-
-	/* Incorrectable errors ? */
-	if (nerr < 0)
-		return nerr;
-
-	/*
-	 * Correct the errors. The bitpositions are a bit of magic,
-	 * but they are given by the design of the de/encoder circuit
-	 * in the DoC ASIC's.
-	 */
-	for (i = 0; i < nerr; i++) {
-		int index, bitpos, pos = 1015 - errpos[i];
-		uint8_t val;
-		if (pos >= NB_DATA && pos < 1019)
-			continue;
-		if (pos < NB_DATA) {
-			/* extract bit position (MSB first) */
-			pos = 10 * (NB_DATA - 1 - pos) - 6;
-			/* now correct the following 10 bits. At most two bytes
-			   can be modified since pos is even */
-			index = (pos >> 3) ^ 1;
-			bitpos = pos & 7;
-			if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
-				val = (uint8_t) (errval[i] >> (2 + bitpos));
-				parity ^= val;
-				if (index < SECTOR_SIZE)
-					data[index] ^= val;
-			}
-			index = ((pos >> 3) + 1) ^ 1;
-			bitpos = (bitpos + 10) & 7;
-			if (bitpos == 0)
-				bitpos = 8;
-			if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
-				val = (uint8_t) (errval[i] << (8 - bitpos));
-				parity ^= val;
-				if (index < SECTOR_SIZE)
-					data[index] ^= val;
-			}
-		}
-	}
-	/* If the parity is wrong, no rescue possible */
-	return parity ? -EBADMSG : nerr;
-}
-
-static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
-{
-	volatile char dummy;
-	int i;
-
-	for (i = 0; i < cycles; i++) {
-		if (DoC_is_Millennium(doc))
-			dummy = ReadDOC(doc->virtadr, NOP);
-		else if (DoC_is_MillenniumPlus(doc))
-			dummy = ReadDOC(doc->virtadr, Mplus_NOP);
-		else
-			dummy = ReadDOC(doc->virtadr, DOCStatus);
-	}
-
-}
-
-#define CDSN_CTRL_FR_B_MASK	(CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
-
-/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
-static int _DoC_WaitReady(struct doc_priv *doc)
-{
-	void __iomem *docptr = doc->virtadr;
-	unsigned long timeo = jiffies + (HZ * 10);
-
-	if (debug)
-		printk("_DoC_WaitReady...\n");
-	/* Out-of-line routine to wait for chip response */
-	if (DoC_is_MillenniumPlus(doc)) {
-		while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
-			if (time_after(jiffies, timeo)) {
-				printk("_DoC_WaitReady timed out.\n");
-				return -EIO;
-			}
-			udelay(1);
-			cond_resched();
-		}
-	} else {
-		while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
-			if (time_after(jiffies, timeo)) {
-				printk("_DoC_WaitReady timed out.\n");
-				return -EIO;
-			}
-			udelay(1);
-			cond_resched();
-		}
-	}
-
-	return 0;
-}
-
-static inline int DoC_WaitReady(struct doc_priv *doc)
-{
-	void __iomem *docptr = doc->virtadr;
-	int ret = 0;
-
-	if (DoC_is_MillenniumPlus(doc)) {
-		DoC_Delay(doc, 4);
-
-		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
-			/* Call the out-of-line routine to wait */
-			ret = _DoC_WaitReady(doc);
-	} else {
-		DoC_Delay(doc, 4);
-
-		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
-			/* Call the out-of-line routine to wait */
-			ret = _DoC_WaitReady(doc);
-		DoC_Delay(doc, 2);
-	}
-
-	if (debug)
-		printk("DoC_WaitReady OK\n");
-	return ret;
-}
-
-static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	if (debug)
-		printk("write_byte %02x\n", datum);
-	WriteDOC(datum, docptr, CDSNSlowIO);
-	WriteDOC(datum, docptr, 2k_CDSN_IO);
-}
-
-static u_char doc2000_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	u_char ret;
-
-	ReadDOC(docptr, CDSNSlowIO);
-	DoC_Delay(doc, 2);
-	ret = ReadDOC(docptr, 2k_CDSN_IO);
-	if (debug)
-		printk("read_byte returns %02x\n", ret);
-	return ret;
-}
-
-static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-	if (debug)
-		printk("writebuf of %d bytes: ", len);
-	for (i = 0; i < len; i++) {
-		WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
-		if (debug && i < 16)
-			printk("%02x ", buf[i]);
-	}
-	if (debug)
-		printk("\n");
-}
-
-static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	if (debug)
-		printk("readbuf of %d bytes: ", len);
-
-	for (i = 0; i < len; i++) {
-		buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
-	}
-}
-
-static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	if (debug)
-		printk("readbuf_dword of %d bytes: ", len);
-
-	if (unlikely((((unsigned long)buf) | len) & 3)) {
-		for (i = 0; i < len; i++) {
-			*(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
-		}
-	} else {
-		for (i = 0; i < len; i += 4) {
-			*(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
-		}
-	}
-}
-
-static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	uint16_t ret;
-
-	doc200x_select_chip(mtd, nr);
-	doc200x_hwcontrol(mtd, NAND_CMD_READID,
-			  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-	doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
-	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
-	/* We can't use dev_ready here, but at least we wait for the
-	 * command to complete
-	 */
-	udelay(50);
-
-	ret = this->read_byte(mtd) << 8;
-	ret |= this->read_byte(mtd);
-
-	if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
-		/* First chip probe. See if we get same results by 32-bit access */
-		union {
-			uint32_t dword;
-			uint8_t byte[4];
-		} ident;
-		void __iomem *docptr = doc->virtadr;
-
-		doc200x_hwcontrol(mtd, NAND_CMD_READID,
-				  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-		doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
-		doc200x_hwcontrol(mtd, NAND_CMD_NONE,
-				  NAND_NCE | NAND_CTRL_CHANGE);
-
-		udelay(50);
-
-		ident.dword = readl(docptr + DoC_2k_CDSN_IO);
-		if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
-			printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
-			this->read_buf = &doc2000_readbuf_dword;
-		}
-	}
-
-	return ret;
-}
-
-static void __init doc2000_count_chips(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	uint16_t mfrid;
-	int i;
-
-	/* Max 4 chips per floor on DiskOnChip 2000 */
-	doc->chips_per_floor = 4;
-
-	/* Find out what the first chip is */
-	mfrid = doc200x_ident_chip(mtd, 0);
-
-	/* Find how many chips in each floor. */
-	for (i = 1; i < 4; i++) {
-		if (doc200x_ident_chip(mtd, i) != mfrid)
-			break;
-	}
-	doc->chips_per_floor = i;
-	printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
-}
-
-static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
-{
-	struct doc_priv *doc = nand_get_controller_data(this);
-
-	int status;
-
-	DoC_WaitReady(doc);
-	this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
-	DoC_WaitReady(doc);
-	status = (int)this->read_byte(mtd);
-
-	return status;
-}
-
-static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	WriteDOC(datum, docptr, CDSNSlowIO);
-	WriteDOC(datum, docptr, Mil_CDSN_IO);
-	WriteDOC(datum, docptr, WritePipeTerm);
-}
-
-static u_char doc2001_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	//ReadDOC(docptr, CDSNSlowIO);
-	/* 11.4.5 -- delay twice to allow extended length cycle */
-	DoC_Delay(doc, 2);
-	ReadDOC(docptr, ReadPipeInit);
-	//return ReadDOC(docptr, Mil_CDSN_IO);
-	return ReadDOC(docptr, LastDataRead);
-}
-
-static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	for (i = 0; i < len; i++)
-		WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
-	/* Terminate write pipeline */
-	WriteDOC(0x00, docptr, WritePipeTerm);
-}
-
-static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	/* Start read pipeline */
-	ReadDOC(docptr, ReadPipeInit);
-
-	for (i = 0; i < len - 1; i++)
-		buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
-
-	/* Terminate read pipeline */
-	buf[i] = ReadDOC(docptr, LastDataRead);
-}
-
-static u_char doc2001plus_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	u_char ret;
-
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ret = ReadDOC(docptr, Mplus_LastDataRead);
-	if (debug)
-		printk("read_byte returns %02x\n", ret);
-	return ret;
-}
-
-static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	if (debug)
-		printk("writebuf of %d bytes: ", len);
-	for (i = 0; i < len; i++) {
-		WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
-		if (debug && i < 16)
-			printk("%02x ", buf[i]);
-	}
-	if (debug)
-		printk("\n");
-}
-
-static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	if (debug)
-		printk("readbuf of %d bytes: ", len);
-
-	/* Start read pipeline */
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-	ReadDOC(docptr, Mplus_ReadPipeInit);
-
-	for (i = 0; i < len - 2; i++) {
-		buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
-		if (debug && i < 16)
-			printk("%02x ", buf[i]);
-	}
-
-	/* Terminate read pipeline */
-	buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
-	if (debug && i < 16)
-		printk("%02x ", buf[len - 2]);
-	buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
-	if (debug && i < 16)
-		printk("%02x ", buf[len - 1]);
-	if (debug)
-		printk("\n");
-}
-
-static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int floor = 0;
-
-	if (debug)
-		printk("select chip (%d)\n", chip);
-
-	if (chip == -1) {
-		/* Disable flash internally */
-		WriteDOC(0, docptr, Mplus_FlashSelect);
-		return;
-	}
-
-	floor = chip / doc->chips_per_floor;
-	chip -= (floor * doc->chips_per_floor);
-
-	/* Assert ChipEnable and deassert WriteProtect */
-	WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
-	this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
-	doc->curchip = chip;
-	doc->curfloor = floor;
-}
-
-static void doc200x_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int floor = 0;
-
-	if (debug)
-		printk("select chip (%d)\n", chip);
-
-	if (chip == -1)
-		return;
-
-	floor = chip / doc->chips_per_floor;
-	chip -= (floor * doc->chips_per_floor);
-
-	/* 11.4.4 -- deassert CE before changing chip */
-	doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-
-	WriteDOC(floor, docptr, FloorSelect);
-	WriteDOC(chip, docptr, CDSNDeviceSelect);
-
-	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
-	doc->curchip = chip;
-	doc->curfloor = floor;
-}
-
-#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
-
-static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
-			      unsigned int ctrl)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		doc->CDSNControl &= ~CDSN_CTRL_MSK;
-		doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
-		if (debug)
-			printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
-		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
-		/* 11.4.3 -- 4 NOPs after CSDNControl write */
-		DoC_Delay(doc, 4);
-	}
-	if (cmd != NAND_CMD_NONE) {
-		if (DoC_is_2000(doc))
-			doc2000_write_byte(mtd, cmd);
-		else
-			doc2001_write_byte(mtd, cmd);
-	}
-}
-
-static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	/*
-	 * Must terminate write pipeline before sending any commands
-	 * to the device.
-	 */
-	if (command == NAND_CMD_PAGEPROG) {
-		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
-	}
-
-	/*
-	 * Write out the command to the device.
-	 */
-	if (command == NAND_CMD_SEQIN) {
-		int readcmd;
-
-		if (column >= mtd->writesize) {
-			/* OOB area */
-			column -= mtd->writesize;
-			readcmd = NAND_CMD_READOOB;
-		} else if (column < 256) {
-			/* First 256 bytes --> READ0 */
-			readcmd = NAND_CMD_READ0;
-		} else {
-			column -= 256;
-			readcmd = NAND_CMD_READ1;
-		}
-		WriteDOC(readcmd, docptr, Mplus_FlashCmd);
-	}
-	WriteDOC(command, docptr, Mplus_FlashCmd);
-	WriteDOC(0, docptr, Mplus_WritePipeTerm);
-	WriteDOC(0, docptr, Mplus_WritePipeTerm);
-
-	if (column != -1 || page_addr != -1) {
-		/* Serially input address */
-		if (column != -1) {
-			/* Adjust columns for 16 bit buswidth */
-			if (this->options & NAND_BUSWIDTH_16 &&
-					!nand_opcode_8bits(command))
-				column >>= 1;
-			WriteDOC(column, docptr, Mplus_FlashAddress);
-		}
-		if (page_addr != -1) {
-			WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
-			WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
-			/* One more address cycle for higher density devices */
-			if (this->chipsize & 0x0c000000) {
-				WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
-				printk("high density\n");
-			}
-		}
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		/* deassert ALE */
-		if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
-		    command == NAND_CMD_READOOB || command == NAND_CMD_READID)
-			WriteDOC(0, docptr, Mplus_FlashControl);
-	}
-
-	/*
-	 * program and erase have their own busy handlers
-	 * status and sequential in needs no delay
-	 */
-	switch (command) {
-
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		if (this->dev_ready)
-			break;
-		udelay(this->chip_delay);
-		WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		WriteDOC(0, docptr, Mplus_WritePipeTerm);
-		while (!(this->read_byte(mtd) & 0x40)) ;
-		return;
-
-		/* This applies to read commands */
-	default:
-		/*
-		 * If we don't have access to the busy pin, we apply the given
-		 * command delay
-		 */
-		if (!this->dev_ready) {
-			udelay(this->chip_delay);
-			return;
-		}
-	}
-
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
-	ndelay(100);
-	/* wait until command is processed */
-	while (!this->dev_ready(mtd)) ;
-}
-
-static int doc200x_dev_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	if (DoC_is_MillenniumPlus(doc)) {
-		/* 11.4.2 -- must NOP four times before checking FR/B# */
-		DoC_Delay(doc, 4);
-		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
-			if (debug)
-				printk("not ready\n");
-			return 0;
-		}
-		if (debug)
-			printk("was ready\n");
-		return 1;
-	} else {
-		/* 11.4.2 -- must NOP four times before checking FR/B# */
-		DoC_Delay(doc, 4);
-		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
-			if (debug)
-				printk("not ready\n");
-			return 0;
-		}
-		/* 11.4.2 -- Must NOP twice if it's ready */
-		DoC_Delay(doc, 2);
-		if (debug)
-			printk("was ready\n");
-		return 1;
-	}
-}
-
-static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs)
-{
-	/* This is our last resort if we couldn't find or create a BBT.  Just
-	   pretend all blocks are good. */
-	return 0;
-}
-
-static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	/* Prime the ECC engine */
-	switch (mode) {
-	case NAND_ECC_READ:
-		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
-		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
-		break;
-	case NAND_ECC_WRITE:
-		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
-		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
-		break;
-	}
-}
-
-static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-
-	/* Prime the ECC engine */
-	switch (mode) {
-	case NAND_ECC_READ:
-		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-		WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
-		break;
-	case NAND_ECC_WRITE:
-		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
-		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
-		break;
-	}
-}
-
-/* This code is only called on write */
-static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-	int emptymatch = 1;
-
-	/* flush the pipeline */
-	if (DoC_is_2000(doc)) {
-		WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
-		WriteDOC(0, docptr, 2k_CDSN_IO);
-		WriteDOC(0, docptr, 2k_CDSN_IO);
-		WriteDOC(0, docptr, 2k_CDSN_IO);
-		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
-	} else if (DoC_is_MillenniumPlus(doc)) {
-		WriteDOC(0, docptr, Mplus_NOP);
-		WriteDOC(0, docptr, Mplus_NOP);
-		WriteDOC(0, docptr, Mplus_NOP);
-	} else {
-		WriteDOC(0, docptr, NOP);
-		WriteDOC(0, docptr, NOP);
-		WriteDOC(0, docptr, NOP);
-	}
-
-	for (i = 0; i < 6; i++) {
-		if (DoC_is_MillenniumPlus(doc))
-			ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
-		else
-			ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
-		if (ecc_code[i] != empty_write_ecc[i])
-			emptymatch = 0;
-	}
-	if (DoC_is_MillenniumPlus(doc))
-		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-	else
-		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-#if 0
-	/* If emptymatch=1, we might have an all-0xff data buffer.  Check. */
-	if (emptymatch) {
-		/* Note: this somewhat expensive test should not be triggered
-		   often.  It could be optimized away by examining the data in
-		   the writebuf routine, and remembering the result. */
-		for (i = 0; i < 512; i++) {
-			if (dat[i] == 0xff)
-				continue;
-			emptymatch = 0;
-			break;
-		}
-	}
-	/* If emptymatch still =1, we do have an all-0xff data buffer.
-	   Return all-0xff ecc value instead of the computed one, so
-	   it'll look just like a freshly-erased page. */
-	if (emptymatch)
-		memset(ecc_code, 0xff, 6);
-#endif
-	return 0;
-}
-
-static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
-				u_char *read_ecc, u_char *isnull)
-{
-	int i, ret = 0;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	void __iomem *docptr = doc->virtadr;
-	uint8_t calc_ecc[6];
-	volatile u_char dummy;
-
-	/* flush the pipeline */
-	if (DoC_is_2000(doc)) {
-		dummy = ReadDOC(docptr, 2k_ECCStatus);
-		dummy = ReadDOC(docptr, 2k_ECCStatus);
-		dummy = ReadDOC(docptr, 2k_ECCStatus);
-	} else if (DoC_is_MillenniumPlus(doc)) {
-		dummy = ReadDOC(docptr, Mplus_ECCConf);
-		dummy = ReadDOC(docptr, Mplus_ECCConf);
-		dummy = ReadDOC(docptr, Mplus_ECCConf);
-	} else {
-		dummy = ReadDOC(docptr, ECCConf);
-		dummy = ReadDOC(docptr, ECCConf);
-		dummy = ReadDOC(docptr, ECCConf);
-	}
-
-	/* Error occurred ? */
-	if (dummy & 0x80) {
-		for (i = 0; i < 6; i++) {
-			if (DoC_is_MillenniumPlus(doc))
-				calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
-			else
-				calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
-		}
-
-		ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
-		if (ret > 0)
-			printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
-	}
-	if (DoC_is_MillenniumPlus(doc))
-		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
-	else
-		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
-	if (no_ecc_failures && mtd_is_eccerr(ret)) {
-		printk(KERN_ERR "suppressing ECC failure\n");
-		ret = 0;
-	}
-	return ret;
-}
-
-//u_char mydatabuf[528];
-
-static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 0;
-	oobregion->length = 6;
-
-	return 0;
-}
-
-static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	if (section > 1)
-		return -ERANGE;
-
-	/*
-	 * The strange out-of-order free bytes definition is a (possibly
-	 * unneeded) attempt to retain compatibility.  It used to read:
-	 *	.oobfree = { {8, 8} }
-	 * Since that leaves two bytes unusable, it was changed.  But the
-	 * following scheme might affect existing jffs2 installs by moving the
-	 * cleanmarker:
-	 *	.oobfree = { {6, 10} }
-	 * jffs2 seems to handle the above gracefully, but the current scheme
-	 * seems safer. The only problem with it is that any code retrieving
-	 * free bytes position must be able to handle out-of-order segments.
-	 */
-	if (!section) {
-		oobregion->offset = 8;
-		oobregion->length = 8;
-	} else {
-		oobregion->offset = 6;
-		oobregion->length = 2;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
-	.ecc = doc200x_ooblayout_ecc,
-	.free = doc200x_ooblayout_free,
-};
-
-/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
-   On successful return, buf will contain a copy of the media header for
-   further processing.  id is the string to scan for, and will presumably be
-   either "ANAND" or "BNAND".  If findmirror=1, also look for the mirror media
-   header.  The page #s of the found media headers are placed in mh0_page and
-   mh1_page in the DOC private structure. */
-static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	unsigned offs;
-	int ret;
-	size_t retlen;
-
-	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
-		ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
-		if (retlen != mtd->writesize)
-			continue;
-		if (ret) {
-			printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
-		}
-		if (memcmp(buf, id, 6))
-			continue;
-		printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
-		if (doc->mh0_page == -1) {
-			doc->mh0_page = offs >> this->page_shift;
-			if (!findmirror)
-				return 1;
-			continue;
-		}
-		doc->mh1_page = offs >> this->page_shift;
-		return 2;
-	}
-	if (doc->mh0_page == -1) {
-		printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
-		return 0;
-	}
-	/* Only one mediaheader was found.  We want buf to contain a
-	   mediaheader on return, so we'll have to re-read the one we found. */
-	offs = doc->mh0_page << this->page_shift;
-	ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
-	if (retlen != mtd->writesize) {
-		/* Insanity.  Give up. */
-		printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
-		return 0;
-	}
-	return 1;
-}
-
-static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	int ret = 0;
-	u_char *buf;
-	struct NFTLMediaHeader *mh;
-	const unsigned psize = 1 << this->page_shift;
-	int numparts = 0;
-	unsigned blocks, maxblocks;
-	int offs, numheaders;
-
-	buf = kmalloc(mtd->writesize, GFP_KERNEL);
-	if (!buf) {
-		return 0;
-	}
-	if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
-		goto out;
-	mh = (struct NFTLMediaHeader *)buf;
-
-	le16_to_cpus(&mh->NumEraseUnits);
-	le16_to_cpus(&mh->FirstPhysicalEUN);
-	le32_to_cpus(&mh->FormattedSize);
-
-	printk(KERN_INFO "    DataOrgID        = %s\n"
-			 "    NumEraseUnits    = %d\n"
-			 "    FirstPhysicalEUN = %d\n"
-			 "    FormattedSize    = %d\n"
-			 "    UnitSizeFactor   = %d\n",
-		mh->DataOrgID, mh->NumEraseUnits,
-		mh->FirstPhysicalEUN, mh->FormattedSize,
-		mh->UnitSizeFactor);
-
-	blocks = mtd->size >> this->phys_erase_shift;
-	maxblocks = min(32768U, mtd->erasesize - psize);
-
-	if (mh->UnitSizeFactor == 0x00) {
-		/* Auto-determine UnitSizeFactor.  The constraints are:
-		   - There can be at most 32768 virtual blocks.
-		   - There can be at most (virtual block size - page size)
-		   virtual blocks (because MediaHeader+BBT must fit in 1).
-		 */
-		mh->UnitSizeFactor = 0xff;
-		while (blocks > maxblocks) {
-			blocks >>= 1;
-			maxblocks = min(32768U, (maxblocks << 1) + psize);
-			mh->UnitSizeFactor--;
-		}
-		printk(KERN_WARNING "UnitSizeFactor=0x00 detected.  Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
-	}
-
-	/* NOTE: The lines below modify internal variables of the NAND and MTD
-	   layers; variables with have already been configured by nand_scan.
-	   Unfortunately, we didn't know before this point what these values
-	   should be.  Thus, this code is somewhat dependent on the exact
-	   implementation of the NAND layer.  */
-	if (mh->UnitSizeFactor != 0xff) {
-		this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
-		mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
-		printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
-		blocks = mtd->size >> this->bbt_erase_shift;
-		maxblocks = min(32768U, mtd->erasesize - psize);
-	}
-
-	if (blocks > maxblocks) {
-		printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size.  Aborting.\n", mh->UnitSizeFactor);
-		goto out;
-	}
-
-	/* Skip past the media headers. */
-	offs = max(doc->mh0_page, doc->mh1_page);
-	offs <<= this->page_shift;
-	offs += mtd->erasesize;
-
-	if (show_firmware_partition == 1) {
-		parts[0].name = " DiskOnChip Firmware / Media Header partition";
-		parts[0].offset = 0;
-		parts[0].size = offs;
-		numparts = 1;
-	}
-
-	parts[numparts].name = " DiskOnChip BDTL partition";
-	parts[numparts].offset = offs;
-	parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
-
-	offs += parts[numparts].size;
-	numparts++;
-
-	if (offs < mtd->size) {
-		parts[numparts].name = " DiskOnChip Remainder partition";
-		parts[numparts].offset = offs;
-		parts[numparts].size = mtd->size - offs;
-		numparts++;
-	}
-
-	ret = numparts;
- out:
-	kfree(buf);
-	return ret;
-}
-
-/* This is a stripped-down copy of the code in inftlmount.c */
-static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	int ret = 0;
-	u_char *buf;
-	struct INFTLMediaHeader *mh;
-	struct INFTLPartition *ip;
-	int numparts = 0;
-	int blocks;
-	int vshift, lastvunit = 0;
-	int i;
-	int end = mtd->size;
-
-	if (inftl_bbt_write)
-		end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
-
-	buf = kmalloc(mtd->writesize, GFP_KERNEL);
-	if (!buf) {
-		return 0;
-	}
-
-	if (!find_media_headers(mtd, buf, "BNAND", 0))
-		goto out;
-	doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
-	mh = (struct INFTLMediaHeader *)buf;
-
-	le32_to_cpus(&mh->NoOfBootImageBlocks);
-	le32_to_cpus(&mh->NoOfBinaryPartitions);
-	le32_to_cpus(&mh->NoOfBDTLPartitions);
-	le32_to_cpus(&mh->BlockMultiplierBits);
-	le32_to_cpus(&mh->FormatFlags);
-	le32_to_cpus(&mh->PercentUsed);
-
-	printk(KERN_INFO "    bootRecordID          = %s\n"
-			 "    NoOfBootImageBlocks   = %d\n"
-			 "    NoOfBinaryPartitions  = %d\n"
-			 "    NoOfBDTLPartitions    = %d\n"
-			 "    BlockMultiplerBits    = %d\n"
-			 "    FormatFlgs            = %d\n"
-			 "    OsakVersion           = %d.%d.%d.%d\n"
-			 "    PercentUsed           = %d\n",
-		mh->bootRecordID, mh->NoOfBootImageBlocks,
-		mh->NoOfBinaryPartitions,
-		mh->NoOfBDTLPartitions,
-		mh->BlockMultiplierBits, mh->FormatFlags,
-		((unsigned char *) &mh->OsakVersion)[0] & 0xf,
-		((unsigned char *) &mh->OsakVersion)[1] & 0xf,
-		((unsigned char *) &mh->OsakVersion)[2] & 0xf,
-		((unsigned char *) &mh->OsakVersion)[3] & 0xf,
-		mh->PercentUsed);
-
-	vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
-
-	blocks = mtd->size >> vshift;
-	if (blocks > 32768) {
-		printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size.  Aborting.\n", mh->BlockMultiplierBits);
-		goto out;
-	}
-
-	blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
-	if (inftl_bbt_write && (blocks > mtd->erasesize)) {
-		printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported.  FIX ME!\n");
-		goto out;
-	}
-
-	/* Scan the partitions */
-	for (i = 0; (i < 4); i++) {
-		ip = &(mh->Partitions[i]);
-		le32_to_cpus(&ip->virtualUnits);
-		le32_to_cpus(&ip->firstUnit);
-		le32_to_cpus(&ip->lastUnit);
-		le32_to_cpus(&ip->flags);
-		le32_to_cpus(&ip->spareUnits);
-		le32_to_cpus(&ip->Reserved0);
-
-		printk(KERN_INFO	"    PARTITION[%d] ->\n"
-			"        virtualUnits    = %d\n"
-			"        firstUnit       = %d\n"
-			"        lastUnit        = %d\n"
-			"        flags           = 0x%x\n"
-			"        spareUnits      = %d\n",
-			i, ip->virtualUnits, ip->firstUnit,
-			ip->lastUnit, ip->flags,
-			ip->spareUnits);
-
-		if ((show_firmware_partition == 1) &&
-		    (i == 0) && (ip->firstUnit > 0)) {
-			parts[0].name = " DiskOnChip IPL / Media Header partition";
-			parts[0].offset = 0;
-			parts[0].size = mtd->erasesize * ip->firstUnit;
-			numparts = 1;
-		}
-
-		if (ip->flags & INFTL_BINARY)
-			parts[numparts].name = " DiskOnChip BDK partition";
-		else
-			parts[numparts].name = " DiskOnChip BDTL partition";
-		parts[numparts].offset = ip->firstUnit << vshift;
-		parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
-		numparts++;
-		if (ip->lastUnit > lastvunit)
-			lastvunit = ip->lastUnit;
-		if (ip->flags & INFTL_LAST)
-			break;
-	}
-	lastvunit++;
-	if ((lastvunit << vshift) < end) {
-		parts[numparts].name = " DiskOnChip Remainder partition";
-		parts[numparts].offset = lastvunit << vshift;
-		parts[numparts].size = end - parts[numparts].offset;
-		numparts++;
-	}
-	ret = numparts;
- out:
-	kfree(buf);
-	return ret;
-}
-
-static int __init nftl_scan_bbt(struct mtd_info *mtd)
-{
-	int ret, numparts;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	struct mtd_partition parts[2];
-
-	memset((char *)parts, 0, sizeof(parts));
-	/* On NFTL, we have to find the media headers before we can read the
-	   BBTs, since they're stored in the media header eraseblocks. */
-	numparts = nftl_partscan(mtd, parts);
-	if (!numparts)
-		return -EIO;
-	this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
-				NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
-				NAND_BBT_VERSION;
-	this->bbt_td->veroffs = 7;
-	this->bbt_td->pages[0] = doc->mh0_page + 1;
-	if (doc->mh1_page != -1) {
-		this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
-					NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
-					NAND_BBT_VERSION;
-		this->bbt_md->veroffs = 7;
-		this->bbt_md->pages[0] = doc->mh1_page + 1;
-	} else {
-		this->bbt_md = NULL;
-	}
-
-	ret = this->scan_bbt(mtd);
-	if (ret)
-		return ret;
-
-	return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
-}
-
-static int __init inftl_scan_bbt(struct mtd_info *mtd)
-{
-	int ret, numparts;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-	struct mtd_partition parts[5];
-
-	if (this->numchips > doc->chips_per_floor) {
-		printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
-		return -EIO;
-	}
-
-	if (DoC_is_MillenniumPlus(doc)) {
-		this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
-		if (inftl_bbt_write)
-			this->bbt_td->options |= NAND_BBT_WRITE;
-		this->bbt_td->pages[0] = 2;
-		this->bbt_md = NULL;
-	} else {
-		this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
-		if (inftl_bbt_write)
-			this->bbt_td->options |= NAND_BBT_WRITE;
-		this->bbt_td->offs = 8;
-		this->bbt_td->len = 8;
-		this->bbt_td->veroffs = 7;
-		this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
-		this->bbt_td->reserved_block_code = 0x01;
-		this->bbt_td->pattern = "MSYS_BBT";
-
-		this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
-		if (inftl_bbt_write)
-			this->bbt_md->options |= NAND_BBT_WRITE;
-		this->bbt_md->offs = 8;
-		this->bbt_md->len = 8;
-		this->bbt_md->veroffs = 7;
-		this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
-		this->bbt_md->reserved_block_code = 0x01;
-		this->bbt_md->pattern = "TBB_SYSM";
-	}
-
-	ret = this->scan_bbt(mtd);
-	if (ret)
-		return ret;
-
-	memset((char *)parts, 0, sizeof(parts));
-	numparts = inftl_partscan(mtd, parts);
-	/* At least for now, require the INFTL Media Header.  We could probably
-	   do without it for non-INFTL use, since all it gives us is
-	   autopartitioning, but I want to give it more thought. */
-	if (!numparts)
-		return -EIO;
-	return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
-}
-
-static inline int __init doc2000_init(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-
-	this->read_byte = doc2000_read_byte;
-	this->write_buf = doc2000_writebuf;
-	this->read_buf = doc2000_readbuf;
-	doc->late_init = nftl_scan_bbt;
-
-	doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
-	doc2000_count_chips(mtd);
-	mtd->name = "DiskOnChip 2000 (NFTL Model)";
-	return (4 * doc->chips_per_floor);
-}
-
-static inline int __init doc2001_init(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-
-	this->read_byte = doc2001_read_byte;
-	this->write_buf = doc2001_writebuf;
-	this->read_buf = doc2001_readbuf;
-
-	ReadDOC(doc->virtadr, ChipID);
-	ReadDOC(doc->virtadr, ChipID);
-	ReadDOC(doc->virtadr, ChipID);
-	if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
-		/* It's not a Millennium; it's one of the newer
-		   DiskOnChip 2000 units with a similar ASIC.
-		   Treat it like a Millennium, except that it
-		   can have multiple chips. */
-		doc2000_count_chips(mtd);
-		mtd->name = "DiskOnChip 2000 (INFTL Model)";
-		doc->late_init = inftl_scan_bbt;
-		return (4 * doc->chips_per_floor);
-	} else {
-		/* Bog-standard Millennium */
-		doc->chips_per_floor = 1;
-		mtd->name = "DiskOnChip Millennium";
-		doc->late_init = nftl_scan_bbt;
-		return 1;
-	}
-}
-
-static inline int __init doc2001plus_init(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct doc_priv *doc = nand_get_controller_data(this);
-
-	this->read_byte = doc2001plus_read_byte;
-	this->write_buf = doc2001plus_writebuf;
-	this->read_buf = doc2001plus_readbuf;
-	doc->late_init = inftl_scan_bbt;
-	this->cmd_ctrl = NULL;
-	this->select_chip = doc2001plus_select_chip;
-	this->cmdfunc = doc2001plus_command;
-	this->ecc.hwctl = doc2001plus_enable_hwecc;
-
-	doc->chips_per_floor = 1;
-	mtd->name = "DiskOnChip Millennium Plus";
-
-	return 1;
-}
-
-static int __init doc_probe(unsigned long physadr)
-{
-	unsigned char ChipID;
-	struct mtd_info *mtd;
-	struct nand_chip *nand;
-	struct doc_priv *doc;
-	void __iomem *virtadr;
-	unsigned char save_control;
-	unsigned char tmp, tmpb, tmpc;
-	int reg, len, numchips;
-	int ret = 0;
-
-	if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
-		return -EBUSY;
-	virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
-	if (!virtadr) {
-		printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
-		ret = -EIO;
-		goto error_ioremap;
-	}
-
-	/* It's not possible to cleanly detect the DiskOnChip - the
-	 * bootup procedure will put the device into reset mode, and
-	 * it's not possible to talk to it without actually writing
-	 * to the DOCControl register. So we store the current contents
-	 * of the DOCControl register's location, in case we later decide
-	 * that it's not a DiskOnChip, and want to put it back how we
-	 * found it.
-	 */
-	save_control = ReadDOC(virtadr, DOCControl);
-
-	/* Reset the DiskOnChip ASIC */
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
-
-	/* Enable the DiskOnChip ASIC */
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
-	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
-
-	ChipID = ReadDOC(virtadr, ChipID);
-
-	switch (ChipID) {
-	case DOC_ChipID_Doc2k:
-		reg = DoC_2k_ECCStatus;
-		break;
-	case DOC_ChipID_DocMil:
-		reg = DoC_ECCConf;
-		break;
-	case DOC_ChipID_DocMilPlus16:
-	case DOC_ChipID_DocMilPlus32:
-	case 0:
-		/* Possible Millennium Plus, need to do more checks */
-		/* Possibly release from power down mode */
-		for (tmp = 0; (tmp < 4); tmp++)
-			ReadDOC(virtadr, Mplus_Power);
-
-		/* Reset the Millennium Plus ASIC */
-		tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
-		WriteDOC(tmp, virtadr, Mplus_DOCControl);
-		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
-
-		mdelay(1);
-		/* Enable the Millennium Plus ASIC */
-		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
-		WriteDOC(tmp, virtadr, Mplus_DOCControl);
-		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
-		mdelay(1);
-
-		ChipID = ReadDOC(virtadr, ChipID);
-
-		switch (ChipID) {
-		case DOC_ChipID_DocMilPlus16:
-			reg = DoC_Mplus_Toggle;
-			break;
-		case DOC_ChipID_DocMilPlus32:
-			printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
-		default:
-			ret = -ENODEV;
-			goto notfound;
-		}
-		break;
-
-	default:
-		ret = -ENODEV;
-		goto notfound;
-	}
-	/* Check the TOGGLE bit in the ECC register */
-	tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
-	tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
-	tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
-	if ((tmp == tmpb) || (tmp != tmpc)) {
-		printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
-		ret = -ENODEV;
-		goto notfound;
-	}
-
-	for (mtd = doclist; mtd; mtd = doc->nextdoc) {
-		unsigned char oldval;
-		unsigned char newval;
-		nand = mtd_to_nand(mtd);
-		doc = nand_get_controller_data(nand);
-		/* Use the alias resolution register to determine if this is
-		   in fact the same DOC aliased to a new address.  If writes
-		   to one chip's alias resolution register change the value on
-		   the other chip, they're the same chip. */
-		if (ChipID == DOC_ChipID_DocMilPlus16) {
-			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
-			newval = ReadDOC(virtadr, Mplus_AliasResolution);
-		} else {
-			oldval = ReadDOC(doc->virtadr, AliasResolution);
-			newval = ReadDOC(virtadr, AliasResolution);
-		}
-		if (oldval != newval)
-			continue;
-		if (ChipID == DOC_ChipID_DocMilPlus16) {
-			WriteDOC(~newval, virtadr, Mplus_AliasResolution);
-			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
-			WriteDOC(newval, virtadr, Mplus_AliasResolution);	// restore it
-		} else {
-			WriteDOC(~newval, virtadr, AliasResolution);
-			oldval = ReadDOC(doc->virtadr, AliasResolution);
-			WriteDOC(newval, virtadr, AliasResolution);	// restore it
-		}
-		newval = ~newval;
-		if (oldval == newval) {
-			printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
-			goto notfound;
-		}
-	}
-
-	printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
-
-	len = sizeof(struct nand_chip) + sizeof(struct doc_priv) +
-	      (2 * sizeof(struct nand_bbt_descr));
-	nand = kzalloc(len, GFP_KERNEL);
-	if (!nand) {
-		ret = -ENOMEM;
-		goto fail;
-	}
-
-	mtd			= nand_to_mtd(nand);
-	doc			= (struct doc_priv *) (nand + 1);
-	nand->bbt_td		= (struct nand_bbt_descr *) (doc + 1);
-	nand->bbt_md		= nand->bbt_td + 1;
-
-	mtd->owner		= THIS_MODULE;
-	mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
-
-	nand_set_controller_data(nand, doc);
-	nand->select_chip	= doc200x_select_chip;
-	nand->cmd_ctrl		= doc200x_hwcontrol;
-	nand->dev_ready		= doc200x_dev_ready;
-	nand->waitfunc		= doc200x_wait;
-	nand->block_bad		= doc200x_block_bad;
-	nand->ecc.hwctl		= doc200x_enable_hwecc;
-	nand->ecc.calculate	= doc200x_calculate_ecc;
-	nand->ecc.correct	= doc200x_correct_data;
-
-	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
-	nand->ecc.size		= 512;
-	nand->ecc.bytes		= 6;
-	nand->ecc.strength	= 2;
-	nand->ecc.options	= NAND_ECC_GENERIC_ERASED_CHECK;
-	nand->bbt_options	= NAND_BBT_USE_FLASH;
-	/* Skip the automatic BBT scan so we can run it manually */
-	nand->options		|= NAND_SKIP_BBTSCAN;
-
-	doc->physadr		= physadr;
-	doc->virtadr		= virtadr;
-	doc->ChipID		= ChipID;
-	doc->curfloor		= -1;
-	doc->curchip		= -1;
-	doc->mh0_page		= -1;
-	doc->mh1_page		= -1;
-	doc->nextdoc		= doclist;
-
-	if (ChipID == DOC_ChipID_Doc2k)
-		numchips = doc2000_init(mtd);
-	else if (ChipID == DOC_ChipID_DocMilPlus16)
-		numchips = doc2001plus_init(mtd);
-	else
-		numchips = doc2001_init(mtd);
-
-	if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) {
-		/* DBB note: I believe nand_release is necessary here, as
-		   buffers may have been allocated in nand_base.  Check with
-		   Thomas. FIX ME! */
-		/* nand_release will call mtd_device_unregister, but we
-		   haven't yet added it.  This is handled without incident by
-		   mtd_device_unregister, as far as I can tell. */
-		nand_release(mtd);
-		kfree(nand);
-		goto fail;
-	}
-
-	/* Success! */
-	doclist = mtd;
-	return 0;
-
- notfound:
-	/* Put back the contents of the DOCControl register, in case it's not
-	   actually a DiskOnChip.  */
-	WriteDOC(save_control, virtadr, DOCControl);
- fail:
-	iounmap(virtadr);
-
-error_ioremap:
-	release_mem_region(physadr, DOC_IOREMAP_LEN);
-
-	return ret;
-}
-
-static void release_nanddoc(void)
-{
-	struct mtd_info *mtd, *nextmtd;
-	struct nand_chip *nand;
-	struct doc_priv *doc;
-
-	for (mtd = doclist; mtd; mtd = nextmtd) {
-		nand = mtd_to_nand(mtd);
-		doc = nand_get_controller_data(nand);
-
-		nextmtd = doc->nextdoc;
-		nand_release(mtd);
-		iounmap(doc->virtadr);
-		release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
-		kfree(nand);
-	}
-}
-
-static int __init init_nanddoc(void)
-{
-	int i, ret = 0;
-
-	/* We could create the decoder on demand, if memory is a concern.
-	 * This way we have it handy, if an error happens
-	 *
-	 * Symbolsize is 10 (bits)
-	 * Primitive polynomial is x^10+x^3+1
-	 * first consecutive root is 510
-	 * primitive element to generate roots = 1
-	 * generator polynomial degree = 4
-	 */
-	rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
-	if (!rs_decoder) {
-		printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
-		return -ENOMEM;
-	}
-
-	if (doc_config_location) {
-		printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
-		ret = doc_probe(doc_config_location);
-		if (ret < 0)
-			goto outerr;
-	} else {
-		for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
-			doc_probe(doc_locations[i]);
-		}
-	}
-	/* No banner message any more. Print a message if no DiskOnChip
-	   found, so the user knows we at least tried. */
-	if (!doclist) {
-		printk(KERN_INFO "No valid DiskOnChip devices found\n");
-		ret = -ENODEV;
-		goto outerr;
-	}
-	return 0;
- outerr:
-	free_rs(rs_decoder);
-	return ret;
-}
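
/*
 * Reader's note on the init_rs() parameters above (a worked check, not
 * driver code): the primitive polynomial x^10 + x^3 + 1 corresponds to
 * bits 10, 3 and 0, i.e.
 *
 *   (1 << 10) | (1 << 3) | (1 << 0) = 0x400 + 0x8 + 0x1 = 0x409
 *
 * which is exactly the second argument of init_rs(10, 0x409, FCR, 1, NROOTS).
 */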
-
-static void __exit cleanup_nanddoc(void)
-{
-	/* Cleanup the nand/DoC resources */
-	release_nanddoc();
-
-	/* Free the reed solomon resources */
-	if (rs_decoder) {
-		free_rs(rs_decoder);
-	}
-}
-
-module_init(init_nanddoc);
-module_exit(cleanup_nanddoc);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
-MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
deleted file mode 100644
index e038130b7206..000000000000
--- a/drivers/mtd/nand/docg4.c
+++ /dev/null
@@ -1,1410 +0,0 @@
-/*
- *  Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
- *
- * mtd nand driver for M-Systems DiskOnChip G4
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Tested on the Palm Treo 680.  The G4 is also present on Toshiba Portege, Asus
- * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
- * Should work on these as well.  Let me know!
- *
- * TODO:
- *
- *  Mechanism for management of password-protected areas
- *
- *  Hamming ecc when reading oob only
- *
- *  According to the M-Sys documentation, this device is also available in a
- *  "dual-die" configuration having a 256MB capacity, but no mechanism for
- *  detecting this variant is documented.  Currently this driver assumes 128MB
- *  capacity.
- *
- *  Support for multiple cascaded devices ("floors").  Not sure which gadgets
- *  contain multiple G4s in a cascaded configuration, if any.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/export.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-#include <linux/bitops.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/bch.h>
-#include <linux/bitrev.h>
-#include <linux/jiffies.h>
-
-/*
- * In "reliable mode" consecutive 2k pages are used in parallel (in some
- * fashion) to store the same data.  The data can be read back from the
- * even-numbered pages in the normal manner; odd-numbered pages will appear to
- * contain junk.  Systems that boot from the docg4 typically write the secondary
- * program loader (SPL) code in this mode.  The SPL is loaded by the initial
- * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
- * to the reset vector address).  This module parameter enables you to use this
- * driver to write the SPL.  When in this mode, no more than 2k of data can be
- * written at a time, because the addresses do not increment in the normal
- * manner, and the starting offset must be within an even-numbered 2k region;
- * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
- * 0x1a00, ...  Reliable mode is a special case and should not be used unless
- * you know what you're doing.
- */
-static bool reliable_mode;
-module_param(reliable_mode, bool, 0);
-MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
-
-/*
- * You'll want to ignore badblocks if you're reading a partition that contains
- * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
- * it does not use mtd nand's method for marking bad blocks (using oob area).
- * This will also skip the check of the "page written" flag.
- */
-static bool ignore_badblocks;
-module_param(ignore_badblocks, bool, 0);
-MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
-
-struct docg4_priv {
-	struct mtd_info	*mtd;
-	struct device *dev;
-	void __iomem *virtadr;
-	int status;
-	struct {
-		unsigned int command;
-		int column;
-		int page;
-	} last_command;
-	uint8_t oob_buf[16];
-	uint8_t ecc_buf[7];
-	int oob_page;
-	struct bch_control *bch;
-};
-
-/*
- * Defines prefixed with DOCG4 are unique to the diskonchip G4.  All others are
- * shared with other diskonchip devices (P3, G3 at least).
- *
- * Functions with names prefixed with docg4_ are mtd / nand interface functions
- * (though they may also be called internally).  All others are internal.
- */
-
-#define DOC_IOSPACE_DATA		0x0800
-
-/* register offsets */
-#define DOC_CHIPID			0x1000
-#define DOC_DEVICESELECT		0x100a
-#define DOC_ASICMODE			0x100c
-#define DOC_DATAEND			0x101e
-#define DOC_NOP				0x103e
-
-#define DOC_FLASHSEQUENCE		0x1032
-#define DOC_FLASHCOMMAND		0x1034
-#define DOC_FLASHADDRESS		0x1036
-#define DOC_FLASHCONTROL		0x1038
-#define DOC_ECCCONF0			0x1040
-#define DOC_ECCCONF1			0x1042
-#define DOC_HAMMINGPARITY		0x1046
-#define DOC_BCH_SYNDROM(idx)		(0x1048 + idx)
-
-#define DOC_ASICMODECONFIRM		0x1072
-#define DOC_CHIPID_INV			0x1074
-#define DOC_POWERMODE			0x107c
-
-#define DOCG4_MYSTERY_REG		0x1050
-
-/* apparently used only to write oob bytes 6 and 7 */
-#define DOCG4_OOB_6_7			0x1052
-
-/* DOC_FLASHSEQUENCE register commands */
-#define DOC_SEQ_RESET			0x00
-#define DOCG4_SEQ_PAGE_READ		0x03
-#define DOCG4_SEQ_FLUSH			0x29
-#define DOCG4_SEQ_PAGEWRITE		0x16
-#define DOCG4_SEQ_PAGEPROG		0x1e
-#define DOCG4_SEQ_BLOCKERASE		0x24
-#define DOCG4_SEQ_SETMODE		0x45
-
-/* DOC_FLASHCOMMAND register commands */
-#define DOCG4_CMD_PAGE_READ             0x00
-#define DOC_CMD_ERASECYCLE2		0xd0
-#define DOCG4_CMD_FLUSH                 0x70
-#define DOCG4_CMD_READ2                 0x30
-#define DOC_CMD_PROG_BLOCK_ADDR		0x60
-#define DOCG4_CMD_PAGEWRITE		0x80
-#define DOC_CMD_PROG_CYCLE2		0x10
-#define DOCG4_CMD_FAST_MODE		0xa3 /* functionality guessed */
-#define DOC_CMD_RELIABLE_MODE		0x22
-#define DOC_CMD_RESET			0xff
-
-/* DOC_POWERMODE register bits */
-#define DOC_POWERDOWN_READY		0x80
-
-/* DOC_FLASHCONTROL register bits */
-#define DOC_CTRL_CE			0x10
-#define DOC_CTRL_UNKNOWN		0x40
-#define DOC_CTRL_FLASHREADY		0x01
-
-/* DOC_ECCCONF0 register bits */
-#define DOC_ECCCONF0_READ_MODE		0x8000
-#define DOC_ECCCONF0_UNKNOWN		0x2000
-#define DOC_ECCCONF0_ECC_ENABLE	        0x1000
-#define DOC_ECCCONF0_DATA_BYTES_MASK	0x07ff
-
-/* DOC_ECCCONF1 register bits */
-#define DOC_ECCCONF1_BCH_SYNDROM_ERR	0x80
-#define DOC_ECCCONF1_ECC_ENABLE         0x07
-#define DOC_ECCCONF1_PAGE_IS_WRITTEN	0x20
-
-/* DOC_ASICMODE register bits */
-#define DOC_ASICMODE_RESET		0x00
-#define DOC_ASICMODE_NORMAL		0x01
-#define DOC_ASICMODE_POWERDOWN		0x02
-#define DOC_ASICMODE_MDWREN		0x04
-#define DOC_ASICMODE_BDETCT_RESET	0x08
-#define DOC_ASICMODE_RSTIN_RESET	0x10
-#define DOC_ASICMODE_RAM_WE		0x20
-
-/* good status values read after read/write/erase operations */
-#define DOCG4_PROGSTATUS_GOOD          0x51
-#define DOCG4_PROGSTATUS_GOOD_2        0xe0
-
-/*
- * On read operations (page and oob-only), the first byte read from I/O reg is a
- * status.  On error, it reads 0x73; otherwise, it reads either 0x71 (first read
- * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
- */
-#define DOCG4_READ_ERROR           0x02 /* bit 1 indicates read error */
-
-/* anatomy of the device */
-#define DOCG4_CHIP_SIZE        0x8000000
-#define DOCG4_PAGE_SIZE        0x200
-#define DOCG4_PAGES_PER_BLOCK  0x200
-#define DOCG4_BLOCK_SIZE       (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
-#define DOCG4_NUMBLOCKS        (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
-#define DOCG4_OOB_SIZE         0x10
-#define DOCG4_CHIP_SHIFT       27    /* log_2(DOCG4_CHIP_SIZE) */
-#define DOCG4_PAGE_SHIFT       9     /* log_2(DOCG4_PAGE_SIZE) */
-#define DOCG4_ERASE_SHIFT      18    /* log_2(DOCG4_BLOCK_SIZE) */
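
/*
 * Quick consistency check of the geometry above (reader's note): a block is
 * 0x200 pages * 0x200 bytes = 0x40000 bytes (256 KiB), hence
 * DOCG4_ERASE_SHIFT = 18, and the 0x8000000-byte (128 MiB) chip holds
 * 0x8000000 / 0x40000 = 512 blocks.
 */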
-
-/* all but the last byte is included in ecc calculation */
-#define DOCG4_BCH_SIZE         (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
-
-#define DOCG4_USERDATA_LEN     520 /* 512 byte page plus 8 oob avail to user */
-
-/* expected values from the ID registers */
-#define DOCG4_IDREG1_VALUE     0x0400
-#define DOCG4_IDREG2_VALUE     0xfbff
-
-/* primitive polynomial used to build the Galois field used by hw ecc gen */
-#define DOCG4_PRIMITIVE_POLY   0x4443
-
-#define DOCG4_M                14  /* Galois field is of order 2^14 */
-#define DOCG4_T                4   /* BCH alg corrects up to 4 bit errors */
-
-#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
-#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
-
-/*
- * Bytes 0, 1 are used as badblock marker.
- * Bytes 2 - 6 are available to the user.
- * Byte 7 is hamming ecc for first 7 oob bytes only.
- * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
- * Byte 15 (the last) is used by the driver as a "page written" flag.
- */
-static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section,
-			       struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 7;
-	oobregion->length = 9;
-
-	return 0;
-}
-
-static int docg4_ooblayout_free(struct mtd_info *mtd, int section,
-				struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 2;
-	oobregion->length = 5;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops docg4_ooblayout_ops = {
-	.ecc = docg4_ooblayout_ecc,
-	.free = docg4_ooblayout_free,
-};
-
-/*
- * The device has a nop register which M-Sys claims is for the purpose of
- * inserting precise delays.  But beware; at least some operations fail if the
- * nop writes are replaced with a generic delay!
- */
-static inline void write_nop(void __iomem *docptr)
-{
-	writew(0, docptr + DOC_NOP);
-}
-
-static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	uint16_t *p = (uint16_t *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++)
-		p[i] = readw(nand->IO_ADDR_R);
-}
-
-static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	uint16_t *p = (uint16_t *) buf;
-	len >>= 1;
-
-	for (i = 0; i < len; i++)
-		writew(p[i], nand->IO_ADDR_W);
-}
-
-static int poll_status(struct docg4_priv *doc)
-{
-	/*
-	 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
-	 * register.  Operations known to take a long time (e.g., block erase)
-	 * should sleep for a while before calling this.
-	 */
-
-	uint16_t flash_status;
-	unsigned long timeo;
-	void __iomem *docptr = doc->virtadr;
-
-	dev_dbg(doc->dev, "%s...\n", __func__);
-
-	/* hardware quirk requires reading twice initially */
-	flash_status = readw(docptr + DOC_FLASHCONTROL);
-
-	timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
-	do {
-		cpu_relax();
-		flash_status = readb(docptr + DOC_FLASHCONTROL);
-	} while (!(flash_status & DOC_CTRL_FLASHREADY) &&
-		 time_before(jiffies, timeo));
-
-	if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
-		dev_err(doc->dev, "%s: timed out!\n", __func__);
-		return NAND_STATUS_FAIL;
-	}
-
-	return 0;
-}
-
-
-static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
-{
-
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	int status = NAND_STATUS_WP;       /* inverse logic?? */
-	dev_dbg(doc->dev, "%s...\n", __func__);
-
-	/* report any previously unreported error */
-	if (doc->status) {
-		status |= doc->status;
-		doc->status = 0;
-		return status;
-	}
-
-	status |= poll_status(doc);
-	return status;
-}
-
-static void docg4_select_chip(struct mtd_info *mtd, int chip)
-{
-	/*
-	 * Select among multiple cascaded chips ("floors").  Multiple floors are
-	 * not yet supported, so the only valid non-negative value is 0.
-	 */
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-
-	dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
-
-	if (chip < 0)
-		return;		/* deselected */
-
-	if (chip > 0)
-		dev_warn(doc->dev, "multiple floors currently unsupported\n");
-
-	writew(0, docptr + DOC_DEVICESELECT);
-}
-
-static void reset(struct mtd_info *mtd)
-{
-	/* full device reset */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-
-	writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
-	       docptr + DOC_ASICMODE);
-	writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
-	       docptr + DOC_ASICMODECONFIRM);
-	write_nop(docptr);
-
-	writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
-	       docptr + DOC_ASICMODE);
-	writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
-	       docptr + DOC_ASICMODECONFIRM);
-
-	writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
-
-	poll_status(doc);
-}
-
-static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
-{
-	/* read the 7 hw-generated ecc bytes */
-
-	int i;
-	for (i = 0; i < 7; i++) { /* hw quirk; read twice */
-		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
-		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
-	}
-}
-
-static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
-{
-	/*
-	 * Called after a page read when hardware reports bitflips.
-	 * Up to four bitflips can be corrected.
-	 */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	int i, numerrs, errpos[4];
-	const uint8_t blank_read_hwecc[8] = {
-		0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
-
-	read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
-
-	/* check if read error is due to a blank page */
-	if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
-		return 0;	/* yes */
-
-	/* skip additional check of "written flag" if ignore_badblocks */
-	if (ignore_badblocks == false) {
-
-		/*
-		 * If the hw ecc bytes are not those of a blank page, there's
-		 * still a chance that the page is blank, but was read with
-		 * errors.  Check the "written flag" in last oob byte, which
-		 * is set to zero when a page is written.  If more than half
-		 * the bits are set, assume a blank page.  Unfortunately, the
-		 * bit flip(s) are not reported in stats.
-		 */
-
-		if (nand->oob_poi[15]) {
-			int bit, numsetbits = 0;
-			unsigned long written_flag = nand->oob_poi[15];
-			for_each_set_bit(bit, &written_flag, 8)
-				numsetbits++;
-			if (numsetbits > 4) { /* assume blank */
-				dev_warn(doc->dev,
-					 "error(s) in blank page "
-					 "at offset %08x\n",
-					 page * DOCG4_PAGE_SIZE);
-				return 0;
-			}
-		}
-	}
-
-	/*
-	 * The hardware ecc unit produces oob_ecc ^ calc_ecc.  The kernel's bch
-	 * algorithm is used to decode this.  However the hw operates on page
-	 * data in a bit order that is the reverse of that of the bch alg,
-	 * requiring that the bits be reversed on the result.  Thanks to Ivan
-	 * Djelic for his analysis!
-	 */
-	for (i = 0; i < 7; i++)
-		doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
-
-	numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
-			     doc->ecc_buf, NULL, errpos);
-
-	if (numerrs == -EBADMSG) {
-		dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
-			 page * DOCG4_PAGE_SIZE);
-		return -EBADMSG;
-	}
-
-	BUG_ON(numerrs < 0);	/* -EINVAL, or anything other than -EBADMSG */
-
-	/* undo last step in BCH alg (modulo mirroring not needed) */
-	for (i = 0; i < numerrs; i++)
-		errpos[i] = (errpos[i] & ~7) | (7 - (errpos[i] & 7));
-
-	/* fix the errors */
-	for (i = 0; i < numerrs; i++) {
-
-		/* ignore if error within oob ecc bytes */
-		if (errpos[i] > DOCG4_USERDATA_LEN * 8)
-			continue;
-
-		/* if error within oob area preceding ecc bytes... */
-		if (errpos[i] > DOCG4_PAGE_SIZE * 8)
-			change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
-				   (unsigned long *)nand->oob_poi);
-
-		else    /* error in page data */
-			change_bit(errpos[i], (unsigned long *)buf);
-	}
-
-	dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
-		   numerrs, page * DOCG4_PAGE_SIZE);
-
-	return numerrs;
-}
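
/*
 * Worked example of the bit-order fixup in correct_data() above
 * (illustration only): if decode_bch() reports an error at bit position 10
 * (byte 1, bit 2 in the bch library's ordering), the adjustment keeps the
 * byte offset and mirrors the bit index within that byte:
 *
 *   (10 & ~7) | (7 - (10 & 7)) = 8 | 5 = 13
 *
 * so change_bit() flips byte 1, bit 5 of the buffer, i.e. the bit the
 * hardware ECC unit saw at that position in its reversed per-byte bit order.
 */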
-
-static uint8_t docg4_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-
-	dev_dbg(doc->dev, "%s\n", __func__);
-
-	if (doc->last_command.command == NAND_CMD_STATUS) {
-		int status;
-
-		/*
-		 * Previous nand command was status request, so nand
-		 * infrastructure code expects to read the status here.  If an
-		 * error occurred in a previous operation, report it.
-		 */
-		doc->last_command.command = 0;
-
-		if (doc->status) {
-			status = doc->status;
-			doc->status = 0;
-		}
-
-		/* why is NAND_STATUS_WP inverse logic?? */
-		else
-			status = NAND_STATUS_WP | NAND_STATUS_READY;
-
-		return status;
-	}
-
-	dev_warn(doc->dev, "unexpected call to read_byte()\n");
-
-	return 0;
-}
-
-static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
-{
-	/* write the four address bytes packed in docg4_addr to the device */
-
-	void __iomem *docptr = doc->virtadr;
-	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
-	docg4_addr >>= 8;
-	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
-	docg4_addr >>= 8;
-	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
-	docg4_addr >>= 8;
-	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
-}
-
-static int read_progstatus(struct docg4_priv *doc)
-{
-	/*
-	 * This apparently checks the status of programming.  Done after an
-	 * erasure, and after page data is written.  On error, the status is
-	 * saved, to be later retrieved by the nand infrastructure code.
-	 */
-	void __iomem *docptr = doc->virtadr;
-
-	/* status is read from the I/O reg */
-	uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
-	uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
-	uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
-
-	dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
-	      __func__, status1, status2, status3);
-
-	if (status1 != DOCG4_PROGSTATUS_GOOD
-	    || status2 != DOCG4_PROGSTATUS_GOOD_2
-	    || status3 != DOCG4_PROGSTATUS_GOOD_2) {
-		doc->status = NAND_STATUS_FAIL;
-		dev_warn(doc->dev, "read_progstatus failed: "
-			 "%02x, %02x, %02x\n", status1, status2, status3);
-		return -EIO;
-	}
-	return 0;
-}
-
-static int pageprog(struct mtd_info *mtd)
-{
-	/*
-	 * Final step in writing a page.  Writes the contents of its
-	 * internal buffer out to the flash array, or some such.
-	 */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	int retval = 0;
-
-	dev_dbg(doc->dev, "docg4: %s\n", __func__);
-
-	writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
-	writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	/* Just busy-wait; usleep_range() slows things down noticeably. */
-	poll_status(doc);
-
-	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
-	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
-	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	retval = read_progstatus(doc);
-	writew(0, docptr + DOC_DATAEND);
-	write_nop(docptr);
-	poll_status(doc);
-	write_nop(docptr);
-
-	return retval;
-}
-
-static void sequence_reset(struct mtd_info *mtd)
-{
-	/* common starting sequence for all operations */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-
-	writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
-	writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
-	writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-	write_nop(docptr);
-	poll_status(doc);
-	write_nop(docptr);
-}
-
-static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
-{
-	/* first step in reading a page */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-
-	dev_dbg(doc->dev,
-	      "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
-
-	sequence_reset(mtd);
-
-	writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
-	writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-
-	write_addr(doc, docg4_addr);
-
-	write_nop(docptr);
-	writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	poll_status(doc);
-}
-
-static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
-{
-	/* first step in writing a page */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-
-	dev_dbg(doc->dev,
-	      "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
-	sequence_reset(mtd);
-
-	if (unlikely(reliable_mode)) {
-		writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
-		writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
-		writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
-		write_nop(docptr);
-	}
-
-	writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
-	writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-	write_addr(doc, docg4_addr);
-	write_nop(docptr);
-	write_nop(docptr);
-	poll_status(doc);
-}
-
-static uint32_t mtd_to_docg4_address(int page, int column)
-{
-	/*
-	 * Convert mtd address to format used by the device, 32 bit packed.
-	 *
-	 * Some notes on G4 addressing... The M-Sys documentation on this device
-	 * claims that pages are 2K in length, and indeed, the format of the
-	 * address used by the device reflects that.  But within each page are
-	 * four 512 byte "sub-pages", each with its own oob data that is
-	 * read/written immediately after the 512 bytes of page data.  This oob
-	 * data contains the ecc bytes for the preceding 512 bytes.
-	 *
-	 * Rather than tell the mtd nand infrastructure that page size is 2k,
-	 * with four sub-pages each, we engage in a little subterfuge and tell
-	 * the infrastructure code that pages are 512 bytes in size.  This is
-	 * done because during the course of reverse-engineering the device, I
-	 * never observed an instance where an entire 2K "page" was read or
-	 * written as a unit.  Each "sub-page" is always addressed individually,
-	 * its data read/written, and ecc handled before the next "sub-page" is
-	 * addressed.
-	 *
-	 * This requires us to convert addresses passed by the mtd nand
-	 * infrastructure code to those used by the device.
-	 *
-	 * The address that is written to the device consists of four bytes: the
-	 * first two are the 2k page number, and the second two are the index
-	 * into the page.  The index is in terms of 16-bit half-words and
-	 * includes the preceding oob data, so e.g., the index into the second
-	 * "sub-page" is 0x108, and the full device address of the start of mtd
-	 * page 0x201 is 0x00800108.
-	 */
-	int g4_page = page / 4;	                      /* device's 2K page */
-	int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
-	return (g4_page << 16) | g4_index;	      /* pack */
-}
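
/*
 * Worked example of the conversion above (illustration only), using the
 * case quoted in the comment: mtd page 0x201, column 0 gives
 *
 *   g4_page  = 0x201 / 4            = 0x80
 *   g4_index = (0x201 % 4) * 0x108  = 0x108
 *   packed   = (0x80 << 16) | 0x108 = 0x00800108
 *
 * which matches the full device address cited for the start of mtd page
 * 0x201.
 */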
-
-static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
-			  int page_addr)
-{
-	/* handle standard nand commands */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
-
-	dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
-	      __func__, command, page_addr, column);
-
-	/*
-	 * Save the command and its arguments.  This enables emulation of
-	 * standard flash devices, and also some optimizations.
-	 */
-	doc->last_command.command = command;
-	doc->last_command.column = column;
-	doc->last_command.page = page_addr;
-
-	switch (command) {
-
-	case NAND_CMD_RESET:
-		reset(mtd);
-		break;
-
-	case NAND_CMD_READ0:
-		read_page_prologue(mtd, g4_addr);
-		break;
-
-	case NAND_CMD_STATUS:
-		/* next call to read_byte() will expect a status */
-		break;
-
-	case NAND_CMD_SEQIN:
-		if (unlikely(reliable_mode)) {
-			uint16_t g4_page = g4_addr >> 16;
-
-			/* writes to odd-numbered 2k pages are invalid */
-			if (g4_page & 0x01)
-				dev_warn(doc->dev,
-					 "invalid reliable mode address\n");
-		}
-
-		write_page_prologue(mtd, g4_addr);
-
-		/* hack for deferred write of oob bytes */
-		if (doc->oob_page == page_addr)
-			memcpy(nand->oob_poi, doc->oob_buf, 16);
-		break;
-
-	case NAND_CMD_PAGEPROG:
-		pageprog(mtd);
-		break;
-
-	/* we don't expect these, based on review of nand_base.c */
-	case NAND_CMD_READOOB:
-	case NAND_CMD_READID:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-		dev_warn(doc->dev, "docg4_command: "
-			 "unexpected nand command 0x%x\n", command);
-		break;
-
-	}
-}
-
-static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
-		     uint8_t *buf, int page, bool use_ecc)
-{
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	uint16_t status, edc_err, *buf16;
-	int bits_corrected = 0;
-
-	dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
-
-	writew(DOC_ECCCONF0_READ_MODE |
-	       DOC_ECCCONF0_ECC_ENABLE |
-	       DOC_ECCCONF0_UNKNOWN |
-	       DOCG4_BCH_SIZE,
-	       docptr + DOC_ECCCONF0);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	/* the 1st byte from the I/O reg is a status; the rest is page data */
-	status = readw(docptr + DOC_IOSPACE_DATA);
-	if (status & DOCG4_READ_ERROR) {
-		dev_err(doc->dev,
-			"docg4_read_page: bad status: 0x%02x\n", status);
-		writew(0, docptr + DOC_DATAEND);
-		return -EIO;
-	}
-
-	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
-
-	docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
-
-	/* this device always reads oob after page data */
-	/* first 14 oob bytes read from I/O reg */
-	docg4_read_buf(mtd, nand->oob_poi, 14);
-
-	/* last 2 read from another reg */
-	buf16 = (uint16_t *)(nand->oob_poi + 14);
-	*buf16 = readw(docptr + DOCG4_MYSTERY_REG);
-
-	write_nop(docptr);
-
-	if (likely(use_ecc == true)) {
-
-		/* read the register that tells us if bitflip(s) detected  */
-		edc_err = readw(docptr + DOC_ECCCONF1);
-		edc_err = readw(docptr + DOC_ECCCONF1);
-		dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
-
-		/* If bitflips are reported, attempt to correct with ecc */
-		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
-			bits_corrected = correct_data(mtd, buf, page);
-			if (bits_corrected == -EBADMSG)
-				mtd->ecc_stats.failed++;
-			else
-				mtd->ecc_stats.corrected += bits_corrected;
-		}
-	}
-
-	writew(0, docptr + DOC_DATAEND);
-	if (bits_corrected == -EBADMSG)	  /* uncorrectable errors */
-		return 0;
-	return bits_corrected;
-}
-
-
-static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-			       uint8_t *buf, int oob_required, int page)
-{
-	return read_page(mtd, nand, buf, page, false);
-}
-
-static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
-			   uint8_t *buf, int oob_required, int page)
-{
-	return read_page(mtd, nand, buf, page, true);
-}
-
-static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
-			  int page)
-{
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	uint16_t status;
-
-	dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
-
-	docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
-
-	writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	/* the 1st byte from the I/O reg is a status; the rest is oob data */
-	status = readw(docptr + DOC_IOSPACE_DATA);
-	if (status & DOCG4_READ_ERROR) {
-		dev_warn(doc->dev,
-			 "docg4_read_oob failed: status = 0x%02x\n", status);
-		return -EIO;
-	}
-
-	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
-
-	docg4_read_buf(mtd, nand->oob_poi, 16);
-
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	writew(0, docptr + DOC_DATAEND);
-	write_nop(docptr);
-
-	return 0;
-}
-
-static int docg4_erase_block(struct mtd_info *mtd, int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	uint16_t g4_page;
-
-	dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
-
-	sequence_reset(mtd);
-
-	writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
-	writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-
-	/* only 2 bytes of address are written to specify erase block */
-	g4_page = (uint16_t)(page / 4);  /* to g4's 2k page addressing */
-	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
-	g4_page >>= 8;
-	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
-	write_nop(docptr);
-
-	/* start the erasure */
-	writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	usleep_range(500, 1000); /* erasure is long; take a snooze */
-	poll_status(doc);
-	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
-	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
-	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-	write_nop(docptr);
-
-	read_progstatus(doc);
-
-	writew(0, docptr + DOC_DATAEND);
-	write_nop(docptr);
-	poll_status(doc);
-	write_nop(docptr);
-
-	return nand->waitfunc(mtd, nand);
-}
-
-static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
-		       const uint8_t *buf, bool use_ecc)
-{
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	uint8_t ecc_buf[8];
-
-	dev_dbg(doc->dev, "%s...\n", __func__);
-
-	writew(DOC_ECCCONF0_ECC_ENABLE |
-	       DOC_ECCCONF0_UNKNOWN |
-	       DOCG4_BCH_SIZE,
-	       docptr + DOC_ECCCONF0);
-	write_nop(docptr);
-
-	/* write the page data */
-	docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
-
-	/* oob bytes 0 through 5 are written to I/O reg */
-	docg4_write_buf16(mtd, nand->oob_poi, 6);
-
-	/* oob byte 6 written to a separate reg */
-	writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
-
-	write_nop(docptr);
-	write_nop(docptr);
-
-	/* write hw-generated ecc bytes to oob */
-	if (likely(use_ecc == true)) {
-		/* oob byte 7 is hamming code */
-		uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
-		hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
-		writew(hamming, docptr + DOCG4_OOB_6_7);
-		write_nop(docptr);
-
-		/* read the 7 bch bytes from ecc regs */
-		read_hw_ecc(docptr, ecc_buf);
-		ecc_buf[7] = 0;         /* clear the "page written" flag */
-	}
-
-	/* write user-supplied bytes to oob */
-	else {
-		writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
-		write_nop(docptr);
-		memcpy(ecc_buf, &nand->oob_poi[8], 8);
-	}
-
-	docg4_write_buf16(mtd, ecc_buf, 8);
-	write_nop(docptr);
-	write_nop(docptr);
-	writew(0, docptr + DOC_DATAEND);
-	write_nop(docptr);
-
-	return 0;
-}
-
-static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
-				const uint8_t *buf, int oob_required, int page)
-{
-	return write_page(mtd, nand, buf, false);
-}
-
-static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
-			     const uint8_t *buf, int oob_required, int page)
-{
-	return write_page(mtd, nand, buf, true);
-}
-
-static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
-			   int page)
-{
-	/*
-	 * Writing oob-only is not really supported, because MLC nand must write
-	 * oob bytes at the same time as page data.  Nonetheless, we save the
-	 * oob buffer contents here, and then write it along with the page data
-	 * if the same page is subsequently written.  This allows user space
-	 * utilities that write the oob data prior to the page data to work
-	 * (e.g., nandwrite).  The disadvantage is that, if the intention was to
-	 * write oob only, the operation is quietly ignored.  Also, oob can get
-	 * corrupted if two concurrent processes are running nandwrite.
-	 */
-
-	/* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	doc->oob_page = page;
-	memcpy(doc->oob_buf, nand->oob_poi, 16);
-	return 0;
-}
-
-static int __init read_factory_bbt(struct mtd_info *mtd)
-{
-	/*
-	 * The device contains a read-only factory bad block table.  Read it and
-	 * update the memory-based bbt accordingly.
-	 */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
-	uint8_t *buf;
-	int i, block;
-	__u32 eccfailed_stats = mtd->ecc_stats.failed;
-
-	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
-
-	read_page_prologue(mtd, g4_addr);
-	docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
-
-	/*
-	 * If no memory-based bbt was created, exit.  This will happen if module
-	 * parameter ignore_badblocks is set.  Then why even call this function?
-	 * For an unknown reason, block erase always fails if it's the first
-	 * operation after device power-up.  The above read ensures it never is.
-	 * Ugly, I know.
-	 */
-	if (nand->bbt == NULL)  /* no memory-based bbt */
-		goto exit;
-
-	if (mtd->ecc_stats.failed > eccfailed_stats) {
-		/*
-		 * Whoops, an ecc failure occurred reading the factory bbt.
-		 * It is stored redundantly, so we get another chance.
-		 */
-		eccfailed_stats = mtd->ecc_stats.failed;
-		docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
-		if (mtd->ecc_stats.failed > eccfailed_stats) {
-			dev_warn(doc->dev,
-				 "The factory bbt could not be read!\n");
-			goto exit;
-		}
-	}
-
-	/*
-	 * Parse factory bbt and update memory-based bbt.  Factory bbt format is
-	 * simple: one bit per block, block numbers increase left to right (msb
-	 * to lsb).  Bit clear means bad block.
-	 */
-	for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
-		int bitnum;
-		unsigned long bits = ~buf[i];
-		for_each_set_bit(bitnum, &bits, 8) {
-			int badblock = block + 7 - bitnum;
-			nand->bbt[badblock / 4] |=
-				0x03 << ((badblock % 4) * 2);
-			mtd->ecc_stats.badblocks++;
-			dev_notice(doc->dev, "factory-marked bad block: %d\n",
-				   badblock);
-		}
-	}
- exit:
-	kfree(buf);
-	return 0;
-}
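
/*
 * Worked example of the factory bbt parsing above (illustration only): if
 * buf[i] reads 0xfd for the byte covering blocks N..N+7, then ~buf[i] has
 * only bit 1 set within the low byte, so for_each_set_bit() yields
 * bitnum = 1 and the bad block is N + 7 - 1 = N + 6 (the msb, bit 7, maps
 * to block N).  Both bbt bits for that block are then set (0x03).
 */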
-
-static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	/*
-	 * Mark a block as bad.  Bad blocks are marked in the oob area of the
-	 * first page of the block.  The default scan_bbt() in the nand
-	 * infrastructure code works fine for building the memory-based bbt
-	 * during initialization, as does the nand infrastructure function that
-	 * checks if a block is bad by reading the bbt.  This function replaces
-	 * the nand default because writes to oob-only are not supported.
-	 */
-
-	int ret, i;
-	uint8_t *buf;
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	struct nand_bbt_descr *bbtd = nand->badblock_pattern;
-	int page = (int)(ofs >> nand->page_shift);
-	uint32_t g4_addr = mtd_to_docg4_address(page, 0);
-
-	dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
-
-	if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
-		dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
-			 __func__, ofs);
-
-	/* allocate blank buffer for page data */
-	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
-	if (buf == NULL)
-		return -ENOMEM;
-
-	/* write bit-wise negation of pattern to oob buffer */
-	memset(nand->oob_poi, 0xff, mtd->oobsize);
-	for (i = 0; i < bbtd->len; i++)
-		nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
-
-	/* write first page of block */
-	write_page_prologue(mtd, g4_addr);
-	docg4_write_page(mtd, nand, buf, 1, page);
-	ret = pageprog(mtd);
-
-	kfree(buf);
-
-	return ret;
-}
-
-static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs)
-{
-	/* only called when module_param ignore_badblocks is set */
-	return 0;
-}
-
-static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
-{
-	/*
-	 * Put the device into "deep power-down" mode.  Note that CE# must be
-	 * deasserted for this to take effect.  The xscale, e.g., can be
-	 * configured to float this signal when the processor enters power-down,
-	 * and a suitable pull-up ensures its deassertion.
-	 */
-
-	int i;
-	uint8_t pwr_down;
-	struct docg4_priv *doc = platform_get_drvdata(pdev);
-	void __iomem *docptr = doc->virtadr;
-
-	dev_dbg(doc->dev, "%s...\n", __func__);
-
-	/* poll the register that tells us we're ready to go to sleep */
-	for (i = 0; i < 10; i++) {
-		pwr_down = readb(docptr + DOC_POWERMODE);
-		if (pwr_down & DOC_POWERDOWN_READY)
-			break;
-		usleep_range(1000, 4000);
-	}
-
-	if (!(pwr_down & DOC_POWERDOWN_READY)) {
-		dev_err(doc->dev, "suspend failed; "
-			"timeout polling DOC_POWERDOWN_READY\n");
-		return -EIO;
-	}
-
-	writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
-	       docptr + DOC_ASICMODE);
-	writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
-	       docptr + DOC_ASICMODECONFIRM);
-
-	write_nop(docptr);
-
-	return 0;
-}
-
-static int docg4_resume(struct platform_device *pdev)
-{
-
-	/*
-	 * Exit power-down.  Twelve consecutive reads of the address below
-	 * accomplish this, assuming CE# has been asserted.
-	 */
-
-	struct docg4_priv *doc = platform_get_drvdata(pdev);
-	void __iomem *docptr = doc->virtadr;
-	int i;
-
-	dev_dbg(doc->dev, "%s...\n", __func__);
-
-	for (i = 0; i < 12; i++)
-		readb(docptr + 0x1fff);
-
-	return 0;
-}
-
-static void __init init_mtd_structs(struct mtd_info *mtd)
-{
-	/* initialize mtd and nand data structures */
-
-	/*
-	 * Note that some of the following initializations are not usually
-	 * required within a nand driver because they are performed by the nand
-	 * infrastructure code as part of nand_scan().  In this case they need
-	 * to be initialized here because we skip the call to nand_scan_ident() (the
-	 * first half of nand_scan()).  The call to nand_scan_ident() is skipped
-	 * because for this device the chip id is not read in the manner of a
-	 * standard nand device.  Unfortunately, nand_scan_ident() does other
-	 * things as well, such as call nand_set_defaults().
-	 */
-
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-
-	mtd->size = DOCG4_CHIP_SIZE;
-	mtd->name = "Msys_Diskonchip_G4";
-	mtd->writesize = DOCG4_PAGE_SIZE;
-	mtd->erasesize = DOCG4_BLOCK_SIZE;
-	mtd->oobsize = DOCG4_OOB_SIZE;
-	mtd_set_ooblayout(mtd, &docg4_ooblayout_ops);
-	nand->chipsize = DOCG4_CHIP_SIZE;
-	nand->chip_shift = DOCG4_CHIP_SHIFT;
-	nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
-	nand->chip_delay = 20;
-	nand->page_shift = DOCG4_PAGE_SHIFT;
-	nand->pagemask = 0x3ffff;
-	nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
-	nand->badblockbits = 8;
-	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
-	nand->ecc.size = DOCG4_PAGE_SIZE;
-	nand->ecc.prepad = 8;
-	nand->ecc.bytes	= 8;
-	nand->ecc.strength = DOCG4_T;
-	nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
-	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
-	nand->controller = &nand->hwcontrol;
-	nand_hw_control_init(nand->controller);
-
-	/* methods */
-	nand->cmdfunc = docg4_command;
-	nand->waitfunc = docg4_wait;
-	nand->select_chip = docg4_select_chip;
-	nand->read_byte = docg4_read_byte;
-	nand->block_markbad = docg4_block_markbad;
-	nand->read_buf = docg4_read_buf;
-	nand->write_buf = docg4_write_buf16;
-	nand->erase = docg4_erase_block;
-	nand->ecc.read_page = docg4_read_page;
-	nand->ecc.write_page = docg4_write_page;
-	nand->ecc.read_page_raw = docg4_read_page_raw;
-	nand->ecc.write_page_raw = docg4_write_page_raw;
-	nand->ecc.read_oob = docg4_read_oob;
-	nand->ecc.write_oob = docg4_write_oob;
-
-	/*
-	 * The way the nand infrastructure code is written, a memory-based bbt
-	 * is not created if NAND_SKIP_BBTSCAN is set.  With no memory bbt,
-	 * nand->block_bad() is used.  So when ignoring bad blocks, we skip the
-	 * scan and define a dummy block_bad() which always returns 0.
-	 */
-	if (ignore_badblocks) {
-		nand->options |= NAND_SKIP_BBTSCAN;
-		nand->block_bad	= docg4_block_neverbad;
-	}
-
-}
-
-static int __init read_id_reg(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct docg4_priv *doc = nand_get_controller_data(nand);
-	void __iomem *docptr = doc->virtadr;
-	uint16_t id1, id2;
-
-	/* check for presence of g4 chip by reading id registers */
-	id1 = readw(docptr + DOC_CHIPID);
-	id1 = readw(docptr + DOCG4_MYSTERY_REG);
-	id2 = readw(docptr + DOC_CHIPID_INV);
-	id2 = readw(docptr + DOCG4_MYSTERY_REG);
-
-	if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
-		dev_info(doc->dev,
-			 "NAND device: 128MiB Diskonchip G4 detected\n");
-		return 0;
-	}
-
-	return -ENODEV;
-}
-
-static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
-
-static int __init probe_docg4(struct platform_device *pdev)
-{
-	struct mtd_info *mtd;
-	struct nand_chip *nand;
-	void __iomem *virtadr;
-	struct docg4_priv *doc;
-	int len, retval;
-	struct resource *r;
-	struct device *dev = &pdev->dev;
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (r == NULL) {
-		dev_err(dev, "no io memory resource defined!\n");
-		return -ENODEV;
-	}
-
-	virtadr = ioremap(r->start, resource_size(r));
-	if (!virtadr) {
-		dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
-		return -EIO;
-	}
-
-	len = sizeof(struct nand_chip) + sizeof(struct docg4_priv);
-	nand = kzalloc(len, GFP_KERNEL);
-	if (nand == NULL) {
-		retval = -ENOMEM;
-		goto fail_unmap;
-	}
-
-	mtd = nand_to_mtd(nand);
-	doc = (struct docg4_priv *) (nand + 1);
-	nand_set_controller_data(nand, doc);
-	mtd->dev.parent = &pdev->dev;
-	doc->virtadr = virtadr;
-	doc->dev = dev;
-
-	init_mtd_structs(mtd);
-
-	/* initialize kernel bch algorithm */
-	doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
-	if (doc->bch == NULL) {
-		retval = -EINVAL;
-		goto fail;
-	}
-
-	platform_set_drvdata(pdev, doc);
-
-	reset(mtd);
-	retval = read_id_reg(mtd);
-	if (retval == -ENODEV) {
-		dev_warn(dev, "No diskonchip G4 device found.\n");
-		goto fail;
-	}
-
-	retval = nand_scan_tail(mtd);
-	if (retval)
-		goto fail;
-
-	retval = read_factory_bbt(mtd);
-	if (retval)
-		goto fail;
-
-	retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
-	if (retval)
-		goto fail;
-
-	doc->mtd = mtd;
-	return 0;
-
-fail:
-	nand_release(mtd); /* deletes partitions and mtd devices */
-	free_bch(doc->bch);
-	kfree(nand);
-
-fail_unmap:
-	iounmap(virtadr);
-
-	return retval;
-}
-
-static int __exit cleanup_docg4(struct platform_device *pdev)
-{
-	struct docg4_priv *doc = platform_get_drvdata(pdev);
-	nand_release(doc->mtd);
-	free_bch(doc->bch);
-	kfree(mtd_to_nand(doc->mtd));
-	iounmap(doc->virtadr);
-	return 0;
-}
-
-static struct platform_driver docg4_driver = {
-	.driver		= {
-		.name	= "docg4",
-	},
-	.suspend	= docg4_suspend,
-	.resume		= docg4_resume,
-	.remove		= __exit_p(cleanup_docg4),
-};
-
-module_platform_driver_probe(docg4_driver, probe_docg4);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Mike Dunn");
-MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
deleted file mode 100644
index 7d8453eb4d0f..000000000000
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ /dev/null
@@ -1,977 +0,0 @@
-/* Freescale Enhanced Local Bus Controller NAND driver
- *
- * Copyright © 2006-2007, 2010 Freescale Semiconductor
- *
- * Authors: Nick Spence <nick.spence@freescale.com>,
- *          Scott Wood <scottwood@freescale.com>
- *          Jack Lan <jack.lan@freescale.com>
- *          Roy Zang <tie-fei.zang@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/ioport.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <asm/fsl_lbc.h>
-
-#define MAX_BANKS 8
-#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
-#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
-
-/* mtd information per set */
-
-struct fsl_elbc_mtd {
-	struct nand_chip chip;
-	struct fsl_lbc_ctrl *ctrl;
-
-	struct device *dev;
-	int bank;               /* Chip select bank number           */
-	u8 __iomem *vbase;      /* Chip select base virtual address  */
-	int page_size;          /* NAND page size (0=512, 1=2048)    */
-	unsigned int fmr;       /* FCM Flash Mode Register value     */
-};
-
-/* Freescale eLBC FCM controller information */
-
-struct fsl_elbc_fcm_ctrl {
-	struct nand_hw_control controller;
-	struct fsl_elbc_mtd *chips[MAX_BANKS];
-
-	u8 __iomem *addr;        /* Address of assigned FCM buffer        */
-	unsigned int page;       /* Last page written to / read from      */
-	unsigned int read_bytes; /* Number of bytes read during command   */
-	unsigned int column;     /* Saved column from SEQIN               */
-	unsigned int index;      /* Pointer to next byte to 'read'        */
-	unsigned int status;     /* status read from LTESR after last op  */
-	unsigned int mdr;        /* UPM/FCM Data Register value           */
-	unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
-	unsigned int oob;        /* Non zero if operating on OOB data     */
-	unsigned int counter;	 /* counter for the initializations	  */
-	unsigned int max_bitflips;  /* Saved during READ0 cmd		  */
-};
-
-/* These map to the positions used by the FCM hardware ECC generator */
-
-static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (16 * section) + 6;
-	if (priv->fmr & FMR_ECCM)
-		oobregion->offset += 2;
-
-	oobregion->length = chip->ecc.bytes;
-
-	return 0;
-}
-
-static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
-				   struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-
-	if (section > chip->ecc.steps)
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->offset = 0;
-		if (mtd->writesize > 512)
-			oobregion->offset++;
-		oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
-	} else {
-		oobregion->offset = (16 * section) -
-				    ((priv->fmr & FMR_ECCM) ? 5 : 7);
-		if (section < chip->ecc.steps)
-			oobregion->length = 13;
-		else
-			oobregion->length = mtd->oobsize - oobregion->offset;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
-	.ecc = fsl_elbc_ooblayout_ecc,
-	.free = fsl_elbc_ooblayout_free,
-};
-
-/*
- * The ELBC may use HW ECC, so the OOB offsets that the NAND core uses for
- * the bbt would interfere with the ECC positions; that's why we implement
- * our own descriptors.  OOB {11, 5} works for both SP and LP chips, with
- * ECCM = 1 and ECCM = 0.
- */
-static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
-static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
-		   NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	11,
-	.len = 4,
-	.veroffs = 15,
-	.maxblocks = 4,
-	.pattern = bbt_pattern,
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
-		   NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	11,
-	.len = 4,
-	.veroffs = 15,
-	.maxblocks = 4,
-	.pattern = mirror_pattern,
-};
-
-/*=================================*/
-
-/*
- * Set up the FCM hardware block and page address fields, and the fcm
- * structure addr field to point to the correct FCM buffer in memory
- */
-static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
-	int buf_num;
-
-	elbc_fcm_ctrl->page = page_addr;
-
-	if (priv->page_size) {
-		/*
-		 * large page size chip : FPAR[PI] saves the lowest 6 bits,
-		 *                        FBAR[BLK] saves the other bits.
-		 */
-		out_be32(&lbc->fbar, page_addr >> 6);
-		out_be32(&lbc->fpar,
-		         ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
-		         (oob ? FPAR_LP_MS : 0) | column);
-		buf_num = (page_addr & 1) << 2;
-	} else {
-		/*
-		 * small page size chip : FPAR[PI] saves the lowest 5 bits,
-		 *                        FBAR[BLK] saves the other bits.
-		 */
-		out_be32(&lbc->fbar, page_addr >> 5);
-		out_be32(&lbc->fpar,
-		         ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
-		         (oob ? FPAR_SP_MS : 0) | column);
-		buf_num = page_addr & 7;
-	}
-
-	elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
-	elbc_fcm_ctrl->index = column;
-
-	/* for OOB data point to the second half of the buffer */
-	if (oob)
-		elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
-
-	dev_vdbg(priv->dev, "set_addr: bank=%d, "
-			    "elbc_fcm_ctrl->addr=0x%p (0x%p), "
-	                    "index %x, pes %d ps %d\n",
-		 buf_num, elbc_fcm_ctrl->addr, priv->vbase,
-		 elbc_fcm_ctrl->index,
-	         chip->phys_erase_shift, chip->page_shift);
-}
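
/*
 * Worked example for the large-page branch above (illustration only): with
 * page_addr = 0x41 on a 2K-page chip,
 *
 *   FBAR[BLK] = 0x41 >> 6 = 0x1        (block number)
 *   FPAR[PI]  = 0x41 & 0x3f = 0x01     (page within block, low 6 bits,
 *                                       placed at FPAR_LP_PI_SHIFT)
 *   buf_num   = (0x41 & 1) << 2 = 4    (odd pages land in FCM buffer 4)
 *
 * so elbc_fcm_ctrl->addr points at priv->vbase + 4 * 1024, and an OOB
 * access additionally offsets the index by 2048 on large-page chips.
 */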
-
-/*
- * execute FCM command and wait for it to complete
- */
-static int fsl_elbc_run_command(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
-	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-
-	/* Setup the FMR[OP] to execute without write protection */
-	out_be32(&lbc->fmr, priv->fmr | 3);
-	if (elbc_fcm_ctrl->use_mdr)
-		out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
-
-	dev_vdbg(priv->dev,
-	         "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
-	         in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
-	dev_vdbg(priv->dev,
-	         "fsl_elbc_run_command: fbar=%08x fpar=%08x "
-	         "fbcr=%08x bank=%d\n",
-	         in_be32(&lbc->fbar), in_be32(&lbc->fpar),
-	         in_be32(&lbc->fbcr), priv->bank);
-
-	ctrl->irq_status = 0;
-	/* execute special operation */
-	out_be32(&lbc->lsor, priv->bank);
-
-	/* wait for FCM complete flag or timeout */
-	wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
-	                   FCM_TIMEOUT_MSECS * HZ/1000);
-	elbc_fcm_ctrl->status = ctrl->irq_status;
-	/* store mdr value in case it was needed */
-	if (elbc_fcm_ctrl->use_mdr)
-		elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
-
-	elbc_fcm_ctrl->use_mdr = 0;
-
-	if (elbc_fcm_ctrl->status != LTESR_CC) {
-		dev_info(priv->dev,
-		         "command failed: fir %x fcr %x status %x mdr %x\n",
-		         in_be32(&lbc->fir), in_be32(&lbc->fcr),
-			 elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
-		return -EIO;
-	}
-
-	if (chip->ecc.mode != NAND_ECC_HW)
-		return 0;
-
-	elbc_fcm_ctrl->max_bitflips = 0;
-
-	if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
-		uint32_t lteccr = in_be32(&lbc->lteccr);
-		/*
-		 * if command was a full page read and the ELBC
-		 * has the LTECCR register, then bits 12-15 (ppc order) of
-		 * LTECCR indicates which 512 byte sub-pages had fixed errors.
-		 * bits 28-31 are uncorrectable errors, marked elsewhere.
-		 * for small page nand only 1 bit is used.
-		 * if the ELBC doesn't have the lteccr register it reads 0
-		 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
-		 * count the number of sub-pages with bitflips and update
-		 * ecc_stats.corrected accordingly.
-		 */
-		if (lteccr & 0x000F000F)
-			out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
-		if (lteccr & 0x000F0000) {
-			mtd->ecc_stats.corrected++;
-			elbc_fcm_ctrl->max_bitflips = 1;
-		}
-	}
-
-	return 0;
-}
-
-static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
-{
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-
-	if (priv->page_size) {
-		out_be32(&lbc->fir,
-		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
-		         (FIR_OP_CA  << FIR_OP1_SHIFT) |
-		         (FIR_OP_PA  << FIR_OP2_SHIFT) |
-		         (FIR_OP_CM1 << FIR_OP3_SHIFT) |
-		         (FIR_OP_RBW << FIR_OP4_SHIFT));
-
-		out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
-		                    (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
-	} else {
-		out_be32(&lbc->fir,
-		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
-		         (FIR_OP_CA  << FIR_OP1_SHIFT) |
-		         (FIR_OP_PA  << FIR_OP2_SHIFT) |
-		         (FIR_OP_RBW << FIR_OP3_SHIFT));
-
-		if (oob)
-			out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
-		else
-			out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
-	}
-}
-
-/* cmdfunc send commands to the FCM */
-static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
-                             int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
-	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-
-	elbc_fcm_ctrl->use_mdr = 0;
-
-	/* clear the read buffer */
-	elbc_fcm_ctrl->read_bytes = 0;
-	if (command != NAND_CMD_PAGEPROG)
-		elbc_fcm_ctrl->index = 0;
-
-	switch (command) {
-	/* READ0 and READ1 read the entire buffer to use hardware ECC. */
-	case NAND_CMD_READ1:
-		column += 256;
-
-	/* fall-through */
-	case NAND_CMD_READ0:
-		dev_dbg(priv->dev,
-		        "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
-		        " 0x%x, column: 0x%x.\n", page_addr, column);
-
-
-		out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
-		set_addr(mtd, 0, page_addr, 0);
-
-		elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
-		elbc_fcm_ctrl->index += column;
-
-		fsl_elbc_do_read(chip, 0);
-		fsl_elbc_run_command(mtd);
-		return;
-
-	/* READOOB reads only the OOB because no ECC is performed. */
-	case NAND_CMD_READOOB:
-		dev_vdbg(priv->dev,
-		         "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
-			 " 0x%x, column: 0x%x.\n", page_addr, column);
-
-		out_be32(&lbc->fbcr, mtd->oobsize - column);
-		set_addr(mtd, column, page_addr, 1);
-
-		elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
-
-		fsl_elbc_do_read(chip, 1);
-		fsl_elbc_run_command(mtd);
-		return;
-
-	case NAND_CMD_READID:
-	case NAND_CMD_PARAM:
-		dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
-
-		out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
-		                    (FIR_OP_UA  << FIR_OP1_SHIFT) |
-		                    (FIR_OP_RBW << FIR_OP2_SHIFT));
-		out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
-		/*
-		 * although currently it's 8 bytes for READID, we always read
-		 * the maximum 256 bytes(for PARAM)
-		 */
-		out_be32(&lbc->fbcr, 256);
-		elbc_fcm_ctrl->read_bytes = 256;
-		elbc_fcm_ctrl->use_mdr = 1;
-		elbc_fcm_ctrl->mdr = column;
-		set_addr(mtd, 0, 0, 0);
-		fsl_elbc_run_command(mtd);
-		return;
-
-	/* ERASE1 stores the block and page address */
-	case NAND_CMD_ERASE1:
-		dev_vdbg(priv->dev,
-		         "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
-		         "page_addr: 0x%x.\n", page_addr);
-		set_addr(mtd, 0, page_addr, 0);
-		return;
-
-	/* ERASE2 uses the block and page address from ERASE1 */
-	case NAND_CMD_ERASE2:
-		dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
-
-		out_be32(&lbc->fir,
-		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
-		         (FIR_OP_PA  << FIR_OP1_SHIFT) |
-		         (FIR_OP_CM2 << FIR_OP2_SHIFT) |
-		         (FIR_OP_CW1 << FIR_OP3_SHIFT) |
-		         (FIR_OP_RS  << FIR_OP4_SHIFT));
-
-		out_be32(&lbc->fcr,
-		         (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
-		         (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
-		         (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
-
-		out_be32(&lbc->fbcr, 0);
-		elbc_fcm_ctrl->read_bytes = 0;
-		elbc_fcm_ctrl->use_mdr = 1;
-
-		fsl_elbc_run_command(mtd);
-		return;
-
-	/* SEQIN sets up the addr buffer and all registers except the length */
-	case NAND_CMD_SEQIN: {
-		__be32 fcr;
-		dev_vdbg(priv->dev,
-			 "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
-		         "page_addr: 0x%x, column: 0x%x.\n",
-		         page_addr, column);
-
-		elbc_fcm_ctrl->column = column;
-		elbc_fcm_ctrl->use_mdr = 1;
-
-		if (column >= mtd->writesize) {
-			/* OOB area */
-			column -= mtd->writesize;
-			elbc_fcm_ctrl->oob = 1;
-		} else {
-			WARN_ON(column != 0);
-			elbc_fcm_ctrl->oob = 0;
-		}
-
-		fcr = (NAND_CMD_STATUS   << FCR_CMD1_SHIFT) |
-		      (NAND_CMD_SEQIN    << FCR_CMD2_SHIFT) |
-		      (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
-
-		if (priv->page_size) {
-			out_be32(&lbc->fir,
-			         (FIR_OP_CM2 << FIR_OP0_SHIFT) |
-			         (FIR_OP_CA  << FIR_OP1_SHIFT) |
-			         (FIR_OP_PA  << FIR_OP2_SHIFT) |
-			         (FIR_OP_WB  << FIR_OP3_SHIFT) |
-			         (FIR_OP_CM3 << FIR_OP4_SHIFT) |
-			         (FIR_OP_CW1 << FIR_OP5_SHIFT) |
-			         (FIR_OP_RS  << FIR_OP6_SHIFT));
-		} else {
-			out_be32(&lbc->fir,
-			         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
-			         (FIR_OP_CM2 << FIR_OP1_SHIFT) |
-			         (FIR_OP_CA  << FIR_OP2_SHIFT) |
-			         (FIR_OP_PA  << FIR_OP3_SHIFT) |
-			         (FIR_OP_WB  << FIR_OP4_SHIFT) |
-			         (FIR_OP_CM3 << FIR_OP5_SHIFT) |
-			         (FIR_OP_CW1 << FIR_OP6_SHIFT) |
-			         (FIR_OP_RS  << FIR_OP7_SHIFT));
-
-			if (elbc_fcm_ctrl->oob)
-				/* OOB area --> READOOB */
-				fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
-			else
-				/* First 256 bytes --> READ0 */
-				fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
-		}
-
-		out_be32(&lbc->fcr, fcr);
-		set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
-		return;
-	}
-
-	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
-	case NAND_CMD_PAGEPROG: {
-		dev_vdbg(priv->dev,
-		         "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
-			 "writing %d bytes.\n", elbc_fcm_ctrl->index);
-
-		/* if the write did not start at 0 or is not a full page
-		 * then set the exact length, otherwise use a full page
-		 * write so the HW generates the ECC.
-		 */
-		if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
-		    elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
-			out_be32(&lbc->fbcr,
-				elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
-		else
-			out_be32(&lbc->fbcr, 0);
-
-		fsl_elbc_run_command(mtd);
-		return;
-	}
-
-	/* CMD_STATUS must read the status byte while CEB is active */
-	/* Note - it does not wait for the ready line */
-	case NAND_CMD_STATUS:
-		out_be32(&lbc->fir,
-		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
-		         (FIR_OP_RBW << FIR_OP1_SHIFT));
-		out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
-		out_be32(&lbc->fbcr, 1);
-		set_addr(mtd, 0, 0, 0);
-		elbc_fcm_ctrl->read_bytes = 1;
-
-		fsl_elbc_run_command(mtd);
-
-		/* The chip always seems to report that it is
-		 * write-protected, even when it is not.
-		 */
-		setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
-		return;
-
-	/* RESET without waiting for the ready line */
-	case NAND_CMD_RESET:
-		dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
-		out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
-		out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
-		fsl_elbc_run_command(mtd);
-		return;
-
-	default:
-		dev_err(priv->dev,
-		        "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
-		        command);
-	}
-}
-
-static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
-{
-	/* The hardware does not seem to support multiple
-	 * chips per bank.
-	 */
-}
-
-/*
- * Write buf to the FCM Controller Data Buffer
- */
-static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-	unsigned int bufsize = mtd->writesize + mtd->oobsize;
-
-	if (len <= 0) {
-		dev_err(priv->dev, "write_buf of %d bytes", len);
-		elbc_fcm_ctrl->status = 0;
-		return;
-	}
-
-	if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
-		dev_err(priv->dev,
-		        "write_buf beyond end of buffer "
-		        "(%d requested, %u available)\n",
-			len, bufsize - elbc_fcm_ctrl->index);
-		len = bufsize - elbc_fcm_ctrl->index;
-	}
-
-	memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
-	/*
-	 * This is workaround for the weird elbc hangs during nand write,
-	 * Scott Wood says: "...perhaps difference in how long it takes a
-	 * write to make it through the localbus compared to a write to IMMR
-	 * is causing problems, and sync isn't helping for some reason."
-	 * Reading back the last byte helps though.
-	 */
-	in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
-
-	elbc_fcm_ctrl->index += len;
-}
-
-/*
- * Read a byte from the FCM hardware buffer if it has any data left;
- * otherwise report an error and return ERR_BYTE.
- */
-static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-
-	/* If there are still bytes in the FCM, then use the next byte. */
-	if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
-		return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
-
-	dev_err(priv->dev, "read_byte beyond end of buffer\n");
-	return ERR_BYTE;
-}
-
-/*
- * Read from the FCM Controller Data Buffer
- */
-static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-	int avail;
-
-	if (len < 0)
-		return;
-
-	avail = min((unsigned int)len,
-			elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
-	memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
-	elbc_fcm_ctrl->index += avail;
-
-	if (len > avail)
-		dev_err(priv->dev,
-		        "read_buf beyond end of buffer "
-		        "(%d requested, %d available)\n",
-		        len, avail);
-}
-
-/* This function is called after Program and Erase Operations to
- * check for success or failure.
- */
-static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-
-	if (elbc_fcm_ctrl->status != LTESR_CC)
-		return NAND_STATUS_FAIL;
-
-	/* The chip always seems to report that it is
-	 * write-protected, even when it is not.
-	 */
-	return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
-}
-
-static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-	unsigned int al;
-
-	/* calculate FMR Address Length field */
-	al = 0;
-	if (chip->pagemask & 0xffff0000)
-		al++;
-	if (chip->pagemask & 0xff000000)
-		al++;
-
-	priv->fmr |= al << FMR_AL_SHIFT;
-
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
-	        chip->numchips);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
-	        chip->chipsize);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
-	        chip->pagemask);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
-	        chip->chip_delay);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
-	        chip->badblockpos);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
-	        chip->chip_shift);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
-	        chip->page_shift);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
-	        chip->phys_erase_shift);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
-	        chip->ecc.mode);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
-	        chip->ecc.steps);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
-	        chip->ecc.bytes);
-	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
-	        chip->ecc.total);
-	dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
-		mtd->ooblayout);
-	dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
-	dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
-	dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
-	        mtd->erasesize);
-	dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
-	        mtd->writesize);
-	dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
-	        mtd->oobsize);
-
-	/* adjust Option Register and ECC to match Flash page size */
-	if (mtd->writesize == 512) {
-		priv->page_size = 0;
-		clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
-	} else if (mtd->writesize == 2048) {
-		priv->page_size = 1;
-		setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
-	} else {
-		dev_err(priv->dev,
-		        "fsl_elbc_init: page size %d is not supported\n",
-		        mtd->writesize);
-		return -1;
-	}
-
-	return 0;
-}
-
-static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			      uint8_t *buf, int oob_required, int page)
-{
-	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
-
-	fsl_elbc_read_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
-		mtd->ecc_stats.failed++;
-
-	return elbc_fcm_ctrl->max_bitflips;
-}
-
-/* ECC will be calculated automatically, and errors will be detected in
- * waitfunc.
- */
-static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf, int oob_required, int page)
-{
-	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
-	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-/* ECC will be calculated automatically, and errors will be detected in
- * waitfunc.
- */
-static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
-				uint32_t offset, uint32_t data_len,
-				const uint8_t *buf, int oob_required, int page)
-{
-	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
-	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
-{
-	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
-	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
-	struct nand_chip *chip = &priv->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-
-	dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
-
-	/* Fill in fsl_elbc_mtd structure */
-	mtd->dev.parent = priv->dev;
-	nand_set_flash_node(chip, priv->dev->of_node);
-
-	/* set timeout to maximum */
-	priv->fmr = 15 << FMR_CWTO_SHIFT;
-	if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
-		priv->fmr |= FMR_ECCM;
-
-	/* fill in nand_chip structure */
-	/* set up function call table */
-	chip->read_byte = fsl_elbc_read_byte;
-	chip->write_buf = fsl_elbc_write_buf;
-	chip->read_buf = fsl_elbc_read_buf;
-	chip->select_chip = fsl_elbc_select_chip;
-	chip->cmdfunc = fsl_elbc_cmdfunc;
-	chip->waitfunc = fsl_elbc_wait;
-
-	chip->bbt_td = &bbt_main_descr;
-	chip->bbt_md = &bbt_mirror_descr;
-
-	/* set up nand options */
-	chip->bbt_options = NAND_BBT_USE_FLASH;
-
-	chip->controller = &elbc_fcm_ctrl->controller;
-	nand_set_controller_data(chip, priv);
-
-	chip->ecc.read_page = fsl_elbc_read_page;
-	chip->ecc.write_page = fsl_elbc_write_page;
-	chip->ecc.write_subpage = fsl_elbc_write_subpage;
-
-	/* If CS Base Register selects full hardware ECC then use it */
-	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
-	    BR_DECC_CHK_GEN) {
-		chip->ecc.mode = NAND_ECC_HW;
-		mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
-		chip->ecc.size = 512;
-		chip->ecc.bytes = 3;
-		chip->ecc.strength = 1;
-	} else {
-		/* otherwise fall back to default software ECC */
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_HAMMING;
-	}
-
-	return 0;
-}
-
-static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
-{
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
-	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
-
-	nand_release(mtd);
-
-	kfree(mtd->name);
-
-	if (priv->vbase)
-		iounmap(priv->vbase);
-
-	elbc_fcm_ctrl->chips[priv->bank] = NULL;
-	kfree(priv);
-	return 0;
-}
-
-static DEFINE_MUTEX(fsl_elbc_nand_mutex);
-
-static int fsl_elbc_nand_probe(struct platform_device *pdev)
-{
-	struct fsl_lbc_regs __iomem *lbc;
-	struct fsl_elbc_mtd *priv;
-	struct resource res;
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
-	static const char *part_probe_types[]
-		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
-	int ret;
-	int bank;
-	struct device *dev;
-	struct device_node *node = pdev->dev.of_node;
-	struct mtd_info *mtd;
-
-	if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
-		return -ENODEV;
-	lbc = fsl_lbc_ctrl_dev->regs;
-	dev = fsl_lbc_ctrl_dev->dev;
-
-	/* get, allocate and map the memory resource */
-	ret = of_address_to_resource(node, 0, &res);
-	if (ret) {
-		dev_err(dev, "failed to get resource\n");
-		return ret;
-	}
-
-	/* find which chip select it is connected to */
-	for (bank = 0; bank < MAX_BANKS; bank++)
-		if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
-		    (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
-		    (in_be32(&lbc->bank[bank].br) &
-		     in_be32(&lbc->bank[bank].or) & BR_BA)
-		     == fsl_lbc_addr(res.start))
-			break;
-
-	if (bank >= MAX_BANKS) {
-		dev_err(dev, "address did not match any chip selects\n");
-		return -ENODEV;
-	}
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	mutex_lock(&fsl_elbc_nand_mutex);
-	if (!fsl_lbc_ctrl_dev->nand) {
-		elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
-		if (!elbc_fcm_ctrl) {
-			mutex_unlock(&fsl_elbc_nand_mutex);
-			ret = -ENOMEM;
-			goto err;
-		}
-		elbc_fcm_ctrl->counter++;
-
-		nand_hw_control_init(&elbc_fcm_ctrl->controller);
-		fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
-	} else {
-		elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
-	}
-	mutex_unlock(&fsl_elbc_nand_mutex);
-
-	elbc_fcm_ctrl->chips[bank] = priv;
-	priv->bank = bank;
-	priv->ctrl = fsl_lbc_ctrl_dev;
-	priv->dev = &pdev->dev;
-	dev_set_drvdata(priv->dev, priv);
-
-	priv->vbase = ioremap(res.start, resource_size(&res));
-	if (!priv->vbase) {
-		dev_err(dev, "failed to map chip region\n");
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	mtd = nand_to_mtd(&priv->chip);
-	mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
-	if (!nand_to_mtd(&priv->chip)->name) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = fsl_elbc_chip_init(priv);
-	if (ret)
-		goto err;
-
-	ret = nand_scan_ident(mtd, 1, NULL);
-	if (ret)
-		goto err;
-
-	ret = fsl_elbc_chip_init_tail(mtd);
-	if (ret)
-		goto err;
-
-	ret = nand_scan_tail(mtd);
-	if (ret)
-		goto err;
-
-	/* First look for RedBoot table or partitions on the command
-	 * line, these take precedence over device tree information */
-	mtd_device_parse_register(mtd, part_probe_types, NULL,
-				  NULL, 0);
-
-	printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
-	       (unsigned long long)res.start, priv->bank);
-	return 0;
-
-err:
-	fsl_elbc_chip_remove(priv);
-	return ret;
-}
-
-static int fsl_elbc_nand_remove(struct platform_device *pdev)
-{
-	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
-	struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
-
-	fsl_elbc_chip_remove(priv);
-
-	mutex_lock(&fsl_elbc_nand_mutex);
-	elbc_fcm_ctrl->counter--;
-	if (!elbc_fcm_ctrl->counter) {
-		fsl_lbc_ctrl_dev->nand = NULL;
-		kfree(elbc_fcm_ctrl);
-	}
-	mutex_unlock(&fsl_elbc_nand_mutex);
-
-	return 0;
-
-}
-
-static const struct of_device_id fsl_elbc_nand_match[] = {
-	{ .compatible = "fsl,elbc-fcm-nand", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, fsl_elbc_nand_match);
-
-static struct platform_driver fsl_elbc_nand_driver = {
-	.driver = {
-		.name = "fsl,elbc-fcm-nand",
-		.of_match_table = fsl_elbc_nand_match,
-	},
-	.probe = fsl_elbc_nand_probe,
-	.remove = fsl_elbc_nand_remove,
-};
-
-module_platform_driver(fsl_elbc_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Freescale");
-MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
deleted file mode 100644
index bcf7f0b8abf9..000000000000
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ /dev/null
@@ -1,1095 +0,0 @@ 
-/*
- * Freescale Integrated Flash Controller NAND driver
- *
- * Copyright 2011-2012 Freescale Semiconductor, Inc
- *
- * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/of_address.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/fsl_ifc.h>
-
-#define ERR_BYTE		0xFF /* Value returned for read
-					bytes when read failed	*/
-#define IFC_TIMEOUT_MSECS	500  /* Maximum number of mSecs to wait
-					for IFC NAND Machine	*/
-
-struct fsl_ifc_ctrl;
-
-/* mtd information per set */
-struct fsl_ifc_mtd {
-	struct nand_chip chip;
-	struct fsl_ifc_ctrl *ctrl;
-
-	struct device *dev;
-	int bank;		/* Chip select bank number		*/
-	unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
-	u8 __iomem *vbase;      /* Chip select base virtual address	*/
-};
-
-/* overview of the fsl ifc controller */
-struct fsl_ifc_nand_ctrl {
-	struct nand_hw_control controller;
-	struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
-
-	void __iomem *addr;	/* Address of assigned IFC buffer	*/
-	unsigned int page;	/* Last page written to / read from	*/
-	unsigned int read_bytes;/* Number of bytes read during command	*/
-	unsigned int column;	/* Saved column from SEQIN		*/
-	unsigned int index;	/* Pointer to next byte to 'read'	*/
-	unsigned int oob;	/* Non zero if operating on OOB data	*/
-	unsigned int eccread;	/* Non zero for a full-page ECC read	*/
-	unsigned int counter;	/* counter for the initializations	*/
-	unsigned int max_bitflips;  /* Saved during READ0 cmd		*/
-};
-
-static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
-
-/*
- * Generic flash bbt descriptors
- */
-static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
-static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
-		   NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	2, /* 0 on 8-bit small page */
-	.len = 4,
-	.veroffs = 6,
-	.maxblocks = 4,
-	.pattern = bbt_pattern,
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
-		   NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	2, /* 0 on 8-bit small page */
-	.len = 4,
-	.veroffs = 6,
-	.maxblocks = 4,
-	.pattern = mirror_pattern,
-};
-
-static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 8;
-	oobregion->length = chip->ecc.total;
-
-	return 0;
-}
-
-static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section > 1)
-		return -ERANGE;
-
-	if (mtd->writesize == 512 &&
-	    !(chip->options & NAND_BUSWIDTH_16)) {
-		if (!section) {
-			oobregion->offset = 0;
-			oobregion->length = 5;
-		} else {
-			oobregion->offset = 6;
-			oobregion->length = 2;
-		}
-
-		return 0;
-	}
-
-	if (!section) {
-		oobregion->offset = 2;
-		oobregion->length = 6;
-	} else {
-		oobregion->offset = chip->ecc.total + 8;
-		oobregion->length = mtd->oobsize - oobregion->offset;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
-	.ecc = fsl_ifc_ooblayout_ecc,
-	.free = fsl_ifc_ooblayout_free,
-};
-
-/*
- * Set up the IFC hardware block and page address fields, and the ifc nand
- * structure addr field to point to the correct IFC buffer in memory
- */
-static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
-	int buf_num;
-
-	ifc_nand_ctrl->page = page_addr;
-	/* Program ROW0/COL0 */
-	ifc_out32(page_addr, &ifc->ifc_nand.row0);
-	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
-
-	buf_num = page_addr & priv->bufnum_mask;
-
-	ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
-	ifc_nand_ctrl->index = column;
-
-	/* for OOB data point to the second half of the buffer */
-	if (oob)
-		ifc_nand_ctrl->index += mtd->writesize;
-}
-
-static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
-	u32 __iomem *mainarea = (u32 __iomem *)addr;
-	u8 __iomem *oob = addr + mtd->writesize;
-	struct mtd_oob_region oobregion = { };
-	int i, section = 0;
-
-	for (i = 0; i < mtd->writesize / 4; i++) {
-		if (__raw_readl(&mainarea[i]) != 0xffffffff)
-			return 0;
-	}
-
-	mtd_ooblayout_ecc(mtd, section++, &oobregion);
-	while (oobregion.length) {
-		for (i = 0; i < oobregion.length; i++) {
-			if (__raw_readb(&oob[oobregion.offset + i]) != 0xff)
-				return 0;
-		}
-
-		mtd_ooblayout_ecc(mtd, section++, &oobregion);
-	}
-
-	return 1;
-}
-
-/* returns the ECCSTAT error count for the given buffer (15 = uncorrectable) */
-static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
-			  u32 *eccstat, unsigned int bufnum)
-{
-	u32 reg = eccstat[bufnum / 4];
-	int errors;
-
-	errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
-
-	return errors;
-}
-
-/*
- * execute IFC NAND command and wait for it to complete
- */
-static void fsl_ifc_run_command(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
-	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
-	u32 eccstat[4];
-	int i;
-
-	/* set the chip select for NAND Transaction */
-	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
-		  &ifc->ifc_nand.nand_csel);
-
-	dev_vdbg(priv->dev,
-			"%s: fir0=%08x fcr0=%08x\n",
-			__func__,
-			ifc_in32(&ifc->ifc_nand.nand_fir0),
-			ifc_in32(&ifc->ifc_nand.nand_fcr0));
-
-	ctrl->nand_stat = 0;
-
-	/* start read/write seq */
-	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
-
-	/* wait for command complete flag or timeout */
-	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
-			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));
-
-	/* ctrl->nand_stat will be updated from IRQ context */
-	if (!ctrl->nand_stat)
-		dev_err(priv->dev, "Controller is not responding\n");
-	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
-		dev_err(priv->dev, "NAND Flash Timeout Error\n");
-	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
-		dev_err(priv->dev, "NAND Flash Write Protect Error\n");
-
-	nctrl->max_bitflips = 0;
-
-	if (nctrl->eccread) {
-		int errors;
-		int bufnum = nctrl->page & priv->bufnum_mask;
-		int sector = bufnum * chip->ecc.steps;
-		int sector_end = sector + chip->ecc.steps - 1;
-
-		for (i = sector / 4; i <= sector_end / 4; i++)
-			eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
-
-		for (i = sector; i <= sector_end; i++) {
-			errors = check_read_ecc(mtd, ctrl, eccstat, i);
-
-			if (errors == 15) {
-				/*
-				 * Uncorrectable error.
-				 * OK only if the whole page is blank.
-				 *
-				 * We disable ECCER reporting due to...
-				 * erratum IFC-A002770 -- so report it now if we
-				 * see an uncorrectable error in ECCSTAT.
-				 */
-				if (!is_blank(mtd, bufnum))
-					ctrl->nand_stat |=
-						IFC_NAND_EVTER_STAT_ECCER;
-				break;
-			}
-
-			mtd->ecc_stats.corrected += errors;
-			nctrl->max_bitflips = max_t(unsigned int,
-						    nctrl->max_bitflips,
-						    errors);
-		}
-
-		nctrl->eccread = 0;
-	}
-}
-
-static void fsl_ifc_do_read(struct nand_chip *chip,
-			    int oob,
-			    struct mtd_info *mtd)
-{
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
-
-	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
-	if (mtd->writesize > 512) {
-		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
-			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
-			  &ifc->ifc_nand.nand_fir0);
-		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
-
-		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
-			  &ifc->ifc_nand.nand_fcr0);
-	} else {
-		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
-			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
-			  &ifc->ifc_nand.nand_fir0);
-		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
-
-		if (oob)
-			ifc_out32(NAND_CMD_READOOB <<
-				  IFC_NAND_FCR0_CMD0_SHIFT,
-				  &ifc->ifc_nand.nand_fcr0);
-		else
-			ifc_out32(NAND_CMD_READ0 <<
-				  IFC_NAND_FCR0_CMD0_SHIFT,
-				  &ifc->ifc_nand.nand_fcr0);
-	}
-}
-
-/* cmdfunc send commands to the IFC NAND Machine */
-static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
-			     int column, int page_addr) {
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
-
-	/* clear the read buffer */
-	ifc_nand_ctrl->read_bytes = 0;
-	if (command != NAND_CMD_PAGEPROG)
-		ifc_nand_ctrl->index = 0;
-
-	switch (command) {
-	/* READ0 read the entire buffer to use hardware ECC. */
-	case NAND_CMD_READ0:
-		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
-		set_addr(mtd, 0, page_addr, 0);
-
-		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
-		ifc_nand_ctrl->index += column;
-
-		if (chip->ecc.mode == NAND_ECC_HW)
-			ifc_nand_ctrl->eccread = 1;
-
-		fsl_ifc_do_read(chip, 0, mtd);
-		fsl_ifc_run_command(mtd);
-		return;
-
-	/* READOOB reads only the OOB because no ECC is performed. */
-	case NAND_CMD_READOOB:
-		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
-		set_addr(mtd, column, page_addr, 1);
-
-		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
-
-		fsl_ifc_do_read(chip, 1, mtd);
-		fsl_ifc_run_command(mtd);
-
-		return;
-
-	case NAND_CMD_READID:
-	case NAND_CMD_PARAM: {
-		int timing = IFC_FIR_OP_RB;
-		if (command == NAND_CMD_PARAM)
-			timing = IFC_FIR_OP_RBCD;
-
-		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
-			  &ifc->ifc_nand.nand_fir0);
-		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
-			  &ifc->ifc_nand.nand_fcr0);
-		ifc_out32(column, &ifc->ifc_nand.row3);
-
-		/*
-		 * although currently it's 8 bytes for READID, we always read
-		 * the maximum 256 bytes(for PARAM)
-		 */
-		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
-		ifc_nand_ctrl->read_bytes = 256;
-
-		set_addr(mtd, 0, 0, 0);
-		fsl_ifc_run_command(mtd);
-		return;
-	}
-
-	/* ERASE1 stores the block and page address */
-	case NAND_CMD_ERASE1:
-		set_addr(mtd, 0, page_addr, 0);
-		return;
-
-	/* ERASE2 uses the block and page address from ERASE1 */
-	case NAND_CMD_ERASE2:
-		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
-			  &ifc->ifc_nand.nand_fir0);
-
-		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
-			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
-			  &ifc->ifc_nand.nand_fcr0);
-
-		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
-		ifc_nand_ctrl->read_bytes = 0;
-		fsl_ifc_run_command(mtd);
-		return;
-
-	/* SEQIN sets up the addr buffer and all registers except the length */
-	case NAND_CMD_SEQIN: {
-		u32 nand_fcr0;
-		ifc_nand_ctrl->column = column;
-		ifc_nand_ctrl->oob = 0;
-
-		if (mtd->writesize > 512) {
-			nand_fcr0 =
-				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
-				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
-				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
-
-			ifc_out32(
-				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
-				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
-				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
-				&ifc->ifc_nand.nand_fir0);
-			ifc_out32(
-				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
-				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
-				&ifc->ifc_nand.nand_fir1);
-		} else {
-			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
-					IFC_NAND_FCR0_CMD1_SHIFT) |
-				    (NAND_CMD_SEQIN <<
-					IFC_NAND_FCR0_CMD2_SHIFT) |
-				    (NAND_CMD_STATUS <<
-					IFC_NAND_FCR0_CMD3_SHIFT));
-
-			ifc_out32(
-				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
-				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
-				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
-				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
-				&ifc->ifc_nand.nand_fir0);
-			ifc_out32(
-				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
-				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
-				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
-				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
-				&ifc->ifc_nand.nand_fir1);
-
-			if (column >= mtd->writesize)
-				nand_fcr0 |=
-				NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
-			else
-				nand_fcr0 |=
-				NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
-		}
-
-		if (column >= mtd->writesize) {
-			/* OOB area --> READOOB */
-			column -= mtd->writesize;
-			ifc_nand_ctrl->oob = 1;
-		}
-		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
-		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
-		return;
-	}
-
-	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
-	case NAND_CMD_PAGEPROG: {
-		if (ifc_nand_ctrl->oob) {
-			ifc_out32(ifc_nand_ctrl->index -
-				  ifc_nand_ctrl->column,
-				  &ifc->ifc_nand.nand_fbcr);
-		} else {
-			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
-		}
-
-		fsl_ifc_run_command(mtd);
-		return;
-	}
-
-	case NAND_CMD_STATUS: {
-		void __iomem *addr;
-
-		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
-			  &ifc->ifc_nand.nand_fir0);
-		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-			  &ifc->ifc_nand.nand_fcr0);
-		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
-		set_addr(mtd, 0, 0, 0);
-		ifc_nand_ctrl->read_bytes = 1;
-
-		fsl_ifc_run_command(mtd);
-
-		/*
-		 * The chip always seems to report that it is
-		 * write-protected, even when it is not.
-		 */
-		addr = ifc_nand_ctrl->addr;
-		if (chip->options & NAND_BUSWIDTH_16)
-			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
-		else
-			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
-		return;
-	}
-
-	case NAND_CMD_RESET:
-		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
-			  &ifc->ifc_nand.nand_fir0);
-		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
-			  &ifc->ifc_nand.nand_fcr0);
-		fsl_ifc_run_command(mtd);
-		return;
-
-	default:
-		dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
-					__func__, command);
-	}
-}
-
-static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
-{
-	/* The hardware does not seem to support multiple
-	 * chips per bank.
-	 */
-}
-
-/*
- * Write buf to the IFC NAND Controller Data Buffer
- */
-static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	unsigned int bufsize = mtd->writesize + mtd->oobsize;
-
-	if (len <= 0) {
-		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
-		return;
-	}
-
-	if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
-		dev_err(priv->dev,
-			"%s: beyond end of buffer (%d requested, %u available)\n",
-			__func__, len, bufsize - ifc_nand_ctrl->index);
-		len = bufsize - ifc_nand_ctrl->index;
-	}
-
-	memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
-	ifc_nand_ctrl->index += len;
-}
-
-/*
- * Read a byte from the IFC hardware buffer;
- * read function for 8-bit buswidth
- */
-static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	unsigned int offset;
-
-	/*
-	 * If there are still bytes in the IFC buffer, then use the
-	 * next byte.
-	 */
-	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
-		offset = ifc_nand_ctrl->index++;
-		return ifc_in8(ifc_nand_ctrl->addr + offset);
-	}
-
-	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
-	return ERR_BYTE;
-}
-
-/*
- * Read two bytes from the IFC hardware buffer
- * read function for 16-bit buswidth
- */
-static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	uint16_t data;
-
-	/*
-	 * If there are still bytes in the IFC buffer, then use the
-	 * next byte.
-	 */
-	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
-		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
-		ifc_nand_ctrl->index += 2;
-		return (uint8_t) data;
-	}
-
-	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
-	return ERR_BYTE;
-}
-
-/*
- * Read from the IFC Controller Data Buffer
- */
-static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	int avail;
-
-	if (len < 0) {
-		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
-		return;
-	}
-
-	avail = min((unsigned int)len,
-			ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
-	memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
-	ifc_nand_ctrl->index += avail;
-
-	if (len > avail)
-		dev_err(priv->dev,
-			"%s: beyond end of buffer (%d requested, %d available)\n",
-			__func__, len, avail);
-}
-
-/*
- * This function is called after Program and Erase Operations to
- * check for success or failure.
- */
-static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
-	u32 nand_fsr;
-
-	/* Use READ_STATUS command, but wait for the device to be ready */
-	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
-		  &ifc->ifc_nand.nand_fir0);
-	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
-		  &ifc->ifc_nand.nand_fcr0);
-	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
-	set_addr(mtd, 0, 0, 0);
-	ifc_nand_ctrl->read_bytes = 1;
-
-	fsl_ifc_run_command(mtd);
-
-	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
-
-	/*
-	 * The chip always seems to report that it is
-	 * write-protected, even when it is not.
-	 */
-	return nand_fsr | NAND_STATUS_WP;
-}
-
-static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			     uint8_t *buf, int oob_required, int page)
-{
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
-
-	fsl_ifc_read_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
-		dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
-
-	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
-		mtd->ecc_stats.failed++;
-
-	return nctrl->max_bitflips;
-}
-
-/* ECC will be calculated automatically, and errors will be detected in
- * waitfunc.
- */
-static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			       const uint8_t *buf, int oob_required, int page)
-{
-	fsl_ifc_write_buf(mtd, buf, mtd->writesize);
-	fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
-
-	dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
-							chip->numchips);
-	dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
-							chip->chipsize);
-	dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
-							chip->pagemask);
-	dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
-							chip->chip_delay);
-	dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
-							chip->badblockpos);
-	dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
-							chip->chip_shift);
-	dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
-							chip->page_shift);
-	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
-							chip->phys_erase_shift);
-	dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
-							chip->ecc.mode);
-	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
-							chip->ecc.steps);
-	dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
-							chip->ecc.bytes);
-	dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
-							chip->ecc.total);
-	dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
-							mtd->ooblayout);
-	dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
-	dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
-	dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
-							mtd->erasesize);
-	dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
-							mtd->writesize);
-	dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
-							mtd->oobsize);
-
-	return 0;
-}
-
-static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
-{
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
-	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
-	uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
-	uint32_t cs = priv->bank;
-
-	/* Save CSOR and CSOR_ext */
-	csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
-	csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
-
-	/* change PageSize to 8K and SpareSize to 1K */
-	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
-	ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
-	ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
-
-	/* READID */
-	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
-		    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
-		    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
-		    &ifc_runtime->ifc_nand.nand_fir0);
-	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
-		    &ifc_runtime->ifc_nand.nand_fcr0);
-	ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
-
-	ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
-
-	/* Program ROW0/COL0 */
-	ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
-	ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
-
-	/* set the chip select for NAND Transaction */
-	ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
-		&ifc_runtime->ifc_nand.nand_csel);
-
-	/* start read seq */
-	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
-		&ifc_runtime->ifc_nand.nandseq_strt);
-
-	/* wait for command complete flag or timeout */
-	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
-			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));
-
-	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
-		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
-
-	/* Restore CSOR and CSOR_ext */
-	ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
-	ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
-}
-
-static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
-{
-	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
-	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
-	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
-	struct nand_chip *chip = &priv->chip;
-	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
-	u32 csor;
-
-	/* Fill in fsl_ifc_mtd structure */
-	mtd->dev.parent = priv->dev;
-	nand_set_flash_node(chip, priv->dev->of_node);
-
-	/* fill in nand_chip structure */
-	/* set up function call table */
-	if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
-		& CSPR_PORT_SIZE_16)
-		chip->read_byte = fsl_ifc_read_byte16;
-	else
-		chip->read_byte = fsl_ifc_read_byte;
-
-	chip->write_buf = fsl_ifc_write_buf;
-	chip->read_buf = fsl_ifc_read_buf;
-	chip->select_chip = fsl_ifc_select_chip;
-	chip->cmdfunc = fsl_ifc_cmdfunc;
-	chip->waitfunc = fsl_ifc_wait;
-
-	chip->bbt_td = &bbt_main_descr;
-	chip->bbt_md = &bbt_mirror_descr;
-
-	ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
-
-	/* set up nand options */
-	chip->bbt_options = NAND_BBT_USE_FLASH;
-	chip->options = NAND_NO_SUBPAGE_WRITE;
-
-	if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
-		& CSPR_PORT_SIZE_16) {
-		chip->read_byte = fsl_ifc_read_byte16;
-		chip->options |= NAND_BUSWIDTH_16;
-	} else {
-		chip->read_byte = fsl_ifc_read_byte;
-	}
-
-	chip->controller = &ifc_nand_ctrl->controller;
-	nand_set_controller_data(chip, priv);
-
-	chip->ecc.read_page = fsl_ifc_read_page;
-	chip->ecc.write_page = fsl_ifc_write_page;
-
-	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
-
-	switch (csor & CSOR_NAND_PGS_MASK) {
-	case CSOR_NAND_PGS_512:
-		if (!(chip->options & NAND_BUSWIDTH_16)) {
-			/* Avoid conflict with bad block marker */
-			bbt_main_descr.offs = 0;
-			bbt_mirror_descr.offs = 0;
-		}
-
-		priv->bufnum_mask = 15;
-		break;
-
-	case CSOR_NAND_PGS_2K:
-		priv->bufnum_mask = 3;
-		break;
-
-	case CSOR_NAND_PGS_4K:
-		priv->bufnum_mask = 1;
-		break;
-
-	case CSOR_NAND_PGS_8K:
-		priv->bufnum_mask = 0;
-		break;
-
-	default:
-		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
-		return -ENODEV;
-	}
-
-	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
-	if (csor & CSOR_NAND_ECC_DEC_EN) {
-		chip->ecc.mode = NAND_ECC_HW;
-		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
-
-		/* Hardware generates ECC per 512 Bytes */
-		chip->ecc.size = 512;
-		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
-			chip->ecc.bytes = 8;
-			chip->ecc.strength = 4;
-		} else {
-			chip->ecc.bytes = 16;
-			chip->ecc.strength = 8;
-		}
-	} else {
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_HAMMING;
-	}
-
-	if (ctrl->version == FSL_IFC_VERSION_1_1_0)
-		fsl_ifc_sram_init(priv);
-
-	return 0;
-}
-
-static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
-{
-	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
-
-	nand_release(mtd);
-
-	kfree(mtd->name);
-
-	if (priv->vbase)
-		iounmap(priv->vbase);
-
-	ifc_nand_ctrl->chips[priv->bank] = NULL;
-
-	return 0;
-}
-
-static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
-		      phys_addr_t addr)
-{
-	u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
-
-	if (!(cspr & CSPR_V))
-		return 0;
-	if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
-		return 0;
-
-	return (cspr & CSPR_BA) == convert_ifc_address(addr);
-}
-
-static DEFINE_MUTEX(fsl_ifc_nand_mutex);
-
-static int fsl_ifc_nand_probe(struct platform_device *dev)
-{
-	struct fsl_ifc_runtime __iomem *ifc;
-	struct fsl_ifc_mtd *priv;
-	struct resource res;
-	static const char *part_probe_types[]
-		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
-	int ret;
-	int bank;
-	struct device_node *node = dev->dev.of_node;
-	struct mtd_info *mtd;
-
-	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
-		return -ENODEV;
-	ifc = fsl_ifc_ctrl_dev->rregs;
-
-	/* get, allocate and map the memory resource */
-	ret = of_address_to_resource(node, 0, &res);
-	if (ret) {
-		dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
-		return ret;
-	}
-
-	/* find which chip select it is connected to */
-	for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
-		if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
-			break;
-	}
-
-	if (bank >= fsl_ifc_ctrl_dev->banks) {
-		dev_err(&dev->dev, "%s: address did not match any chip selects\n",
-			__func__);
-		return -ENODEV;
-	}
-
-	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
-	if (!priv)
-		return -ENOMEM;
-
-	mutex_lock(&fsl_ifc_nand_mutex);
-	if (!fsl_ifc_ctrl_dev->nand) {
-		ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
-		if (!ifc_nand_ctrl) {
-			mutex_unlock(&fsl_ifc_nand_mutex);
-			return -ENOMEM;
-		}
-
-		ifc_nand_ctrl->read_bytes = 0;
-		ifc_nand_ctrl->index = 0;
-		ifc_nand_ctrl->addr = NULL;
-		fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
-
-		nand_hw_control_init(&ifc_nand_ctrl->controller);
-	} else {
-		ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
-	}
-	mutex_unlock(&fsl_ifc_nand_mutex);
-
-	ifc_nand_ctrl->chips[bank] = priv;
-	priv->bank = bank;
-	priv->ctrl = fsl_ifc_ctrl_dev;
-	priv->dev = &dev->dev;
-
-	priv->vbase = ioremap(res.start, resource_size(&res));
-	if (!priv->vbase) {
-		dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	dev_set_drvdata(priv->dev, priv);
-
-	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
-		  IFC_NAND_EVTER_EN_FTOER_EN |
-		  IFC_NAND_EVTER_EN_WPER_EN,
-		  &ifc->ifc_nand.nand_evter_en);
-
-	/* enable NAND Machine Interrupts */
-	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
-		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
-		  IFC_NAND_EVTER_INTR_WPERIR_EN,
-		  &ifc->ifc_nand.nand_evter_intr_en);
-
-	mtd = nand_to_mtd(&priv->chip);
-	mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
-	if (!mtd->name) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = fsl_ifc_chip_init(priv);
-	if (ret)
-		goto err;
-
-	ret = nand_scan_ident(mtd, 1, NULL);
-	if (ret)
-		goto err;
-
-	ret = fsl_ifc_chip_init_tail(mtd);
-	if (ret)
-		goto err;
-
-	ret = nand_scan_tail(mtd);
-	if (ret)
-		goto err;
-
-	/* First look for RedBoot table or partitions on the command
-	 * line, these take precedence over device tree information */
-	mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
-
-	dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
-		 (unsigned long long)res.start, priv->bank);
-	return 0;
-
-err:
-	fsl_ifc_chip_remove(priv);
-	return ret;
-}
-
-static int fsl_ifc_nand_remove(struct platform_device *dev)
-{
-	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
-
-	fsl_ifc_chip_remove(priv);
-
-	mutex_lock(&fsl_ifc_nand_mutex);
-	ifc_nand_ctrl->counter--;
-	if (!ifc_nand_ctrl->counter) {
-		fsl_ifc_ctrl_dev->nand = NULL;
-		kfree(ifc_nand_ctrl);
-	}
-	mutex_unlock(&fsl_ifc_nand_mutex);
-
-	return 0;
-}
-
-static const struct of_device_id fsl_ifc_nand_match[] = {
-	{
-		.compatible = "fsl,ifc-nand",
-	},
-	{}
-};
-MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
-
-static struct platform_driver fsl_ifc_nand_driver = {
-	.driver = {
-		.name	= "fsl,ifc-nand",
-		.of_match_table = fsl_ifc_nand_match,
-	},
-	.probe       = fsl_ifc_nand_probe,
-	.remove      = fsl_ifc_nand_remove,
-};
-
-module_platform_driver(fsl_ifc_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Freescale");
-MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
deleted file mode 100644
index a88e2cf66e0f..000000000000
--- a/drivers/mtd/nand/fsl_upm.c
+++ /dev/null
@@ -1,363 +0,0 @@ 
-/*
- * Freescale UPM NAND driver.
- *
- * Copyright © 2007-2008  MontaVista Software, Inc.
- *
- * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/mtd.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <asm/fsl_lbc.h>
-
-#define FSL_UPM_WAIT_RUN_PATTERN  0x1
-#define FSL_UPM_WAIT_WRITE_BYTE   0x2
-#define FSL_UPM_WAIT_WRITE_BUFFER 0x4
-
-struct fsl_upm_nand {
-	struct device *dev;
-	struct nand_chip chip;
-	int last_ctrl;
-	struct mtd_partition *parts;
-	struct fsl_upm upm;
-	uint8_t upm_addr_offset;
-	uint8_t upm_cmd_offset;
-	void __iomem *io_base;
-	int rnb_gpio[NAND_MAX_CHIPS];
-	uint32_t mchip_offsets[NAND_MAX_CHIPS];
-	uint32_t mchip_count;
-	uint32_t mchip_number;
-	int chip_delay;
-	uint32_t wait_flags;
-};
-
-static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
-{
-	return container_of(mtd_to_nand(mtdinfo), struct fsl_upm_nand,
-			    chip);
-}
-
-static int fun_chip_ready(struct mtd_info *mtd)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
-
-	if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
-		return 1;
-
-	dev_vdbg(fun->dev, "busy\n");
-	return 0;
-}
-
-static void fun_wait_rnb(struct fsl_upm_nand *fun)
-{
-	if (fun->rnb_gpio[fun->mchip_number] >= 0) {
-		struct mtd_info *mtd = nand_to_mtd(&fun->chip);
-		int cnt = 1000000;
-
-		while (--cnt && !fun_chip_ready(mtd))
-			cpu_relax();
-		if (!cnt)
-			dev_err(fun->dev, "tired waiting for RNB\n");
-	} else {
-		ndelay(100);
-	}
-}
-
-static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
-	u32 mar;
-
-	if (!(ctrl & fun->last_ctrl)) {
-		fsl_upm_end_pattern(&fun->upm);
-
-		if (cmd == NAND_CMD_NONE)
-			return;
-
-		fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
-	}
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		if (ctrl & NAND_ALE)
-			fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
-		else if (ctrl & NAND_CLE)
-			fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
-	}
-
-	mar = (cmd << (32 - fun->upm.width)) |
-		fun->mchip_offsets[fun->mchip_number];
-	fsl_upm_run_pattern(&fun->upm, chip->IO_ADDR_R, mar);
-
-	if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
-		fun_wait_rnb(fun);
-}
-
-static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
-
-	if (mchip_nr == -1) {
-		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-	} else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
-		fun->mchip_number = mchip_nr;
-		chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
-		chip->IO_ADDR_W = chip->IO_ADDR_R;
-	} else {
-		BUG();
-	}
-}
-
-static uint8_t fun_read_byte(struct mtd_info *mtd)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
-
-	return in_8(fun->chip.IO_ADDR_R);
-}
-
-static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
-	int i;
-
-	for (i = 0; i < len; i++)
-		buf[i] = in_8(fun->chip.IO_ADDR_R);
-}
-
-static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
-	int i;
-
-	for (i = 0; i < len; i++) {
-		out_8(fun->chip.IO_ADDR_W, buf[i]);
-		if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
-			fun_wait_rnb(fun);
-	}
-	if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
-		fun_wait_rnb(fun);
-}
-
-static int fun_chip_init(struct fsl_upm_nand *fun,
-			 const struct device_node *upm_np,
-			 const struct resource *io_res)
-{
-	struct mtd_info *mtd = nand_to_mtd(&fun->chip);
-	int ret;
-	struct device_node *flash_np;
-
-	fun->chip.IO_ADDR_R = fun->io_base;
-	fun->chip.IO_ADDR_W = fun->io_base;
-	fun->chip.cmd_ctrl = fun_cmd_ctrl;
-	fun->chip.chip_delay = fun->chip_delay;
-	fun->chip.read_byte = fun_read_byte;
-	fun->chip.read_buf = fun_read_buf;
-	fun->chip.write_buf = fun_write_buf;
-	fun->chip.ecc.mode = NAND_ECC_SOFT;
-	fun->chip.ecc.algo = NAND_ECC_HAMMING;
-	if (fun->mchip_count > 1)
-		fun->chip.select_chip = fun_select_chip;
-
-	if (fun->rnb_gpio[0] >= 0)
-		fun->chip.dev_ready = fun_chip_ready;
-
-	mtd->dev.parent = fun->dev;
-
-	flash_np = of_get_next_child(upm_np, NULL);
-	if (!flash_np)
-		return -ENODEV;
-
-	nand_set_flash_node(&fun->chip, flash_np);
-	mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
-			      flash_np->name);
-	if (!mtd->name) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = nand_scan(mtd, fun->mchip_count);
-	if (ret)
-		goto err;
-
-	ret = mtd_device_register(mtd, NULL, 0);
-err:
-	of_node_put(flash_np);
-	if (ret)
-		kfree(mtd->name);
-	return ret;
-}
-
-static int fun_probe(struct platform_device *ofdev)
-{
-	struct fsl_upm_nand *fun;
-	struct resource io_res;
-	const __be32 *prop;
-	int rnb_gpio;
-	int ret;
-	int size;
-	int i;
-
-	fun = kzalloc(sizeof(*fun), GFP_KERNEL);
-	if (!fun)
-		return -ENOMEM;
-
-	ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
-	if (ret) {
-		dev_err(&ofdev->dev, "can't get IO base\n");
-		goto err1;
-	}
-
-	ret = fsl_upm_find(io_res.start, &fun->upm);
-	if (ret) {
-		dev_err(&ofdev->dev, "can't find UPM\n");
-		goto err1;
-	}
-
-	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
-			       &size);
-	if (!prop || size != sizeof(uint32_t)) {
-		dev_err(&ofdev->dev, "can't get UPM address offset\n");
-		ret = -EINVAL;
-		goto err1;
-	}
-	fun->upm_addr_offset = *prop;
-
-	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
-	if (!prop || size != sizeof(uint32_t)) {
-		dev_err(&ofdev->dev, "can't get UPM command offset\n");
-		ret = -EINVAL;
-		goto err1;
-	}
-	fun->upm_cmd_offset = *prop;
-
-	prop = of_get_property(ofdev->dev.of_node,
-			       "fsl,upm-addr-line-cs-offsets", &size);
-	if (prop && (size / sizeof(uint32_t)) > 0) {
-		fun->mchip_count = size / sizeof(uint32_t);
-		if (fun->mchip_count >= NAND_MAX_CHIPS) {
-			dev_err(&ofdev->dev, "too many chips\n");
-			goto err1;
-		}
-		for (i = 0; i < fun->mchip_count; i++)
-			fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
-	} else {
-		fun->mchip_count = 1;
-	}
-
-	for (i = 0; i < fun->mchip_count; i++) {
-		fun->rnb_gpio[i] = -1;
-		rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
-		if (rnb_gpio >= 0) {
-			ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
-			if (ret) {
-				dev_err(&ofdev->dev,
-					"can't request RNB gpio #%d\n", i);
-				goto err2;
-			}
-			gpio_direction_input(rnb_gpio);
-			fun->rnb_gpio[i] = rnb_gpio;
-		} else if (rnb_gpio == -EINVAL) {
-			dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
-			goto err2;
-		}
-	}
-
-	prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
-	if (prop)
-		fun->chip_delay = be32_to_cpup(prop);
-	else
-		fun->chip_delay = 50;
-
-	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
-	if (prop && size == sizeof(uint32_t))
-		fun->wait_flags = be32_to_cpup(prop);
-	else
-		fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
-				  FSL_UPM_WAIT_WRITE_BYTE;
-
-	fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
-					    resource_size(&io_res));
-	if (!fun->io_base) {
-		ret = -ENOMEM;
-		goto err2;
-	}
-
-	fun->dev = &ofdev->dev;
-	fun->last_ctrl = NAND_CLE;
-
-	ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
-	if (ret)
-		goto err2;
-
-	dev_set_drvdata(&ofdev->dev, fun);
-
-	return 0;
-err2:
-	for (i = 0; i < fun->mchip_count; i++) {
-		if (fun->rnb_gpio[i] < 0)
-			break;
-		gpio_free(fun->rnb_gpio[i]);
-	}
-err1:
-	kfree(fun);
-
-	return ret;
-}
-
-static int fun_remove(struct platform_device *ofdev)
-{
-	struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
-	struct mtd_info *mtd = nand_to_mtd(&fun->chip);
-	int i;
-
-	nand_release(mtd);
-	kfree(mtd->name);
-
-	for (i = 0; i < fun->mchip_count; i++) {
-		if (fun->rnb_gpio[i] < 0)
-			break;
-		gpio_free(fun->rnb_gpio[i]);
-	}
-
-	kfree(fun);
-
-	return 0;
-}
-
-static const struct of_device_id of_fun_match[] = {
-	{ .compatible = "fsl,upm-nand" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, of_fun_match);
-
-static struct platform_driver of_fun_driver = {
-	.driver = {
-		.name = "fsl,upm-nand",
-		.of_match_table = of_fun_match,
-	},
-	.probe		= fun_probe,
-	.remove		= fun_remove,
-};
-
-module_platform_driver(of_fun_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
-MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
-		   "LocalBus User-Programmable Machine");
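
For illustration only (this snippet is not part of the moved driver):
fun_cmd_ctrl() above builds the UPM machine address register (MAR) value by
shifting the command/address byte into the most significant bits of the
localbus data width and OR'ing in the per-chip offset. A minimal user-space
sketch of that computation, assuming an 8-bit UPM width and the NAND READID
command byte:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t width = 8;		/* assumed UPM data width in bits */
		uint32_t cmd = 0x90;		/* NAND READID command byte */
		uint32_t cs_offset = 0x0;	/* entry from fsl,upm-addr-line-cs-offsets */
		uint32_t mar = (cmd << (32 - width)) | cs_offset;

		printf("MAR = 0x%08x\n", mar);	/* prints MAR = 0x90000000 */
		return 0;
	}
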
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
deleted file mode 100644
index 5c08694aa153..000000000000
--- a/drivers/mtd/nand/fsmc_nand.c
+++ /dev/null
@@ -1,1100 +0,0 @@ 
-/*
- * drivers/mtd/nand/fsmc_nand.c
- *
- * ST Microelectronics
- * Flexible Static Memory Controller (FSMC)
- * Driver for NAND portions
- *
- * Copyright © 2010 ST Microelectronics
- * Vipin Kumar <vipin.kumar@st.com>
- * Ashish Priyadarshi
- *
- * Based on drivers/mtd/nand/nomadik_nand.c
- *
- * This file is licensed under the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/clk.h>
-#include <linux/completion.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-direction.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/resource.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/mtd/fsmc.h>
-#include <linux/amba/bus.h>
-#include <mtd/mtd-abi.h>
-
-static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
-				   struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (section * 16) + 2;
-	oobregion->length = 3;
-
-	return 0;
-}
-
-static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
-				    struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (section * 16) + 8;
-
-	if (section < chip->ecc.steps - 1)
-		oobregion->length = 8;
-	else
-		oobregion->length = mtd->oobsize - oobregion->offset;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
-	.ecc = fsmc_ecc1_ooblayout_ecc,
-	.free = fsmc_ecc1_ooblayout_free,
-};
-
-/*
- * ECC placement definitions in oobfree type format.
- * There are 13 bytes of ECC for every 512-byte block, and they have to be read
- * consecutively, immediately after the 512-byte data block, for the hardware to
- * generate the error bit offsets in the 512 bytes of data.
- */
-static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
-				   struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->length = chip->ecc.bytes;
-
-	if (!section && mtd->writesize <= 512)
-		oobregion->offset = 0;
-	else
-		oobregion->offset = (section * 16) + 2;
-
-	return 0;
-}
-
-static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
-				    struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (section * 16) + 15;
-
-	if (section < chip->ecc.steps - 1)
-		oobregion->length = 3;
-	else
-		oobregion->length = mtd->oobsize - oobregion->offset;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
-	.ecc = fsmc_ecc4_ooblayout_ecc,
-	.free = fsmc_ecc4_ooblayout_free,
-};
-
-/**
- * struct fsmc_nand_data - structure for FSMC NAND device state
- *
- * @pid:		Part ID on the AMBA PrimeCell format
- * @mtd:		MTD info for a NAND flash.
- * @nand:		Chip related info for a NAND flash.
- * @partitions:		Partition info for a NAND Flash.
- * @nr_partitions:	Total number of partition of a NAND flash.
- *
- * @bank:		Bank number for probed device.
- * @clk:		Clock structure for FSMC.
- *
- * @read_dma_chan:	DMA channel for read access
- * @write_dma_chan:	DMA channel for write access to NAND
- * @dma_access_complete: Completion structure
- *
- * @data_pa:		NAND Physical port for Data.
- * @data_va:		NAND port for Data.
- * @cmd_va:		NAND port for Command.
- * @addr_va:		NAND port for Address.
- * @regs_va:		FSMC regs base address.
- */
-struct fsmc_nand_data {
-	u32			pid;
-	struct nand_chip	nand;
-	struct mtd_partition	*partitions;
-	unsigned int		nr_partitions;
-
-	unsigned int		bank;
-	struct device		*dev;
-	enum access_mode	mode;
-	struct clk		*clk;
-
-	/* DMA related objects */
-	struct dma_chan		*read_dma_chan;
-	struct dma_chan		*write_dma_chan;
-	struct completion	dma_access_complete;
-
-	struct fsmc_nand_timings *dev_timings;
-
-	dma_addr_t		data_pa;
-	void __iomem		*data_va;
-	void __iomem		*cmd_va;
-	void __iomem		*addr_va;
-	void __iomem		*regs_va;
-
-	void			(*select_chip)(uint32_t bank, uint32_t busw);
-};
-
-static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
-}
-
-/* Assert CS signal based on chipnr */
-static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsmc_nand_data *host;
-
-	host = mtd_to_fsmc(mtd);
-
-	switch (chipnr) {
-	case -1:
-		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-		break;
-	case 0:
-	case 1:
-	case 2:
-	case 3:
-		if (host->select_chip)
-			host->select_chip(chipnr,
-					chip->options & NAND_BUSWIDTH_16);
-		break;
-
-	default:
-		dev_err(host->dev, "unsupported chip-select %d\n", chipnr);
-	}
-}
-
-/*
- * fsmc_cmd_ctrl - For facilitating hardware access
- * This routine allows hardware-specific access to the control lines (ALE, CLE)
- */
-static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-	void __iomem *regs = host->regs_va;
-	unsigned int bank = host->bank;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		u32 pc;
-
-		if (ctrl & NAND_CLE) {
-			this->IO_ADDR_R = host->cmd_va;
-			this->IO_ADDR_W = host->cmd_va;
-		} else if (ctrl & NAND_ALE) {
-			this->IO_ADDR_R = host->addr_va;
-			this->IO_ADDR_W = host->addr_va;
-		} else {
-			this->IO_ADDR_R = host->data_va;
-			this->IO_ADDR_W = host->data_va;
-		}
-
-		pc = readl(FSMC_NAND_REG(regs, bank, PC));
-		if (ctrl & NAND_NCE)
-			pc |= FSMC_ENABLE;
-		else
-			pc &= ~FSMC_ENABLE;
-		writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
-	}
-
-	mb();
-
-	if (cmd != NAND_CMD_NONE)
-		writeb_relaxed(cmd, this->IO_ADDR_W);
-}
-
-/*
- * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
- *
- * This routine initializes timing parameters related to NAND memory access in
- * FSMC registers
- */
-static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
-			   uint32_t busw, struct fsmc_nand_timings *timings)
-{
-	uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
-	uint32_t tclr, tar, thiz, thold, twait, tset;
-	struct fsmc_nand_timings *tims;
-	struct fsmc_nand_timings default_timings = {
-		.tclr	= FSMC_TCLR_1,
-		.tar	= FSMC_TAR_1,
-		.thiz	= FSMC_THIZ_1,
-		.thold	= FSMC_THOLD_4,
-		.twait	= FSMC_TWAIT_6,
-		.tset	= FSMC_TSET_0,
-	};
-
-	if (timings)
-		tims = timings;
-	else
-		tims = &default_timings;
-
-	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
-	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
-	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
-	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
-	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
-	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
-
-	if (busw)
-		writel_relaxed(value | FSMC_DEVWID_16,
-				FSMC_NAND_REG(regs, bank, PC));
-	else
-		writel_relaxed(value | FSMC_DEVWID_8,
-				FSMC_NAND_REG(regs, bank, PC));
-
-	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
-			FSMC_NAND_REG(regs, bank, PC));
-	writel_relaxed(thiz | thold | twait | tset,
-			FSMC_NAND_REG(regs, bank, COMM));
-	writel_relaxed(thiz | thold | twait | tset,
-			FSMC_NAND_REG(regs, bank, ATTRIB));
-}
-
-/*
- * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
- */
-static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-	void __iomem *regs = host->regs_va;
-	uint32_t bank = host->bank;
-
-	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
-			FSMC_NAND_REG(regs, bank, PC));
-	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
-			FSMC_NAND_REG(regs, bank, PC));
-	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
-			FSMC_NAND_REG(regs, bank, PC));
-}
-
-/*
- * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for the ecc4 option supported
- * by the FSMC. ECC is 13 bytes for 512 bytes of data (supports error correction
- * up to a maximum of 8 bits)
- */
-static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
-				uint8_t *ecc)
-{
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-	void __iomem *regs = host->regs_va;
-	uint32_t bank = host->bank;
-	uint32_t ecc_tmp;
-	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
-
-	do {
-		if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
-			break;
-		else
-			cond_resched();
-	} while (!time_after_eq(jiffies, deadline));
-
-	if (time_after_eq(jiffies, deadline)) {
-		dev_err(host->dev, "calculate ecc timed out\n");
-		return -ETIMEDOUT;
-	}
-
-	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
-	ecc[0] = (uint8_t) (ecc_tmp >> 0);
-	ecc[1] = (uint8_t) (ecc_tmp >> 8);
-	ecc[2] = (uint8_t) (ecc_tmp >> 16);
-	ecc[3] = (uint8_t) (ecc_tmp >> 24);
-
-	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
-	ecc[4] = (uint8_t) (ecc_tmp >> 0);
-	ecc[5] = (uint8_t) (ecc_tmp >> 8);
-	ecc[6] = (uint8_t) (ecc_tmp >> 16);
-	ecc[7] = (uint8_t) (ecc_tmp >> 24);
-
-	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
-	ecc[8] = (uint8_t) (ecc_tmp >> 0);
-	ecc[9] = (uint8_t) (ecc_tmp >> 8);
-	ecc[10] = (uint8_t) (ecc_tmp >> 16);
-	ecc[11] = (uint8_t) (ecc_tmp >> 24);
-
-	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
-	ecc[12] = (uint8_t) (ecc_tmp >> 16);
-
-	return 0;
-}
-
-/*
- * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for the ecc1 option supported
- * by the FSMC. ECC is 3 bytes for 512 bytes of data (supports error correction
- * up to a maximum of 1 bit)
- */
-static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
-				uint8_t *ecc)
-{
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-	void __iomem *regs = host->regs_va;
-	uint32_t bank = host->bank;
-	uint32_t ecc_tmp;
-
-	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
-	ecc[0] = (uint8_t) (ecc_tmp >> 0);
-	ecc[1] = (uint8_t) (ecc_tmp >> 8);
-	ecc[2] = (uint8_t) (ecc_tmp >> 16);
-
-	return 0;
-}
-
-/* Count the number of 0 bits in buff, up to a max of max_bits */
-static int count_written_bits(uint8_t *buff, int size, int max_bits)
-{
-	int k, written_bits = 0;
-
-	for (k = 0; k < size; k++) {
-		written_bits += hweight8(~buff[k]);
-		if (written_bits > max_bits)
-			break;
-	}
-
-	return written_bits;
-}
-
-static void dma_complete(void *param)
-{
-	struct fsmc_nand_data *host = param;
-
-	complete(&host->dma_access_complete);
-}
-
-static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
-		enum dma_data_direction direction)
-{
-	struct dma_chan *chan;
-	struct dma_device *dma_dev;
-	struct dma_async_tx_descriptor *tx;
-	dma_addr_t dma_dst, dma_src, dma_addr;
-	dma_cookie_t cookie;
-	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-	int ret;
-	unsigned long time_left;
-
-	if (direction == DMA_TO_DEVICE)
-		chan = host->write_dma_chan;
-	else if (direction == DMA_FROM_DEVICE)
-		chan = host->read_dma_chan;
-	else
-		return -EINVAL;
-
-	dma_dev = chan->device;
-	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
-
-	if (direction == DMA_TO_DEVICE) {
-		dma_src = dma_addr;
-		dma_dst = host->data_pa;
-	} else {
-		dma_src = host->data_pa;
-		dma_dst = dma_addr;
-	}
-
-	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
-			len, flags);
-	if (!tx) {
-		dev_err(host->dev, "device_prep_dma_memcpy error\n");
-		ret = -EIO;
-		goto unmap_dma;
-	}
-
-	tx->callback = dma_complete;
-	tx->callback_param = host;
-	cookie = tx->tx_submit(tx);
-
-	ret = dma_submit_error(cookie);
-	if (ret) {
-		dev_err(host->dev, "dma_submit_error %d\n", cookie);
-		goto unmap_dma;
-	}
-
-	dma_async_issue_pending(chan);
-
-	time_left =
-	wait_for_completion_timeout(&host->dma_access_complete,
-				msecs_to_jiffies(3000));
-	if (time_left == 0) {
-		dmaengine_terminate_all(chan);
-		dev_err(host->dev, "wait_for_completion_timeout\n");
-		ret = -ETIMEDOUT;
-		goto unmap_dma;
-	}
-
-	ret = 0;
-
-unmap_dma:
-	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
-
-	return ret;
-}
-
-/*
- * fsmc_write_buf - write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
- */
-static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
-			IS_ALIGNED(len, sizeof(uint32_t))) {
-		uint32_t *p = (uint32_t *)buf;
-		len = len >> 2;
-		for (i = 0; i < len; i++)
-			writel_relaxed(p[i], chip->IO_ADDR_W);
-	} else {
-		for (i = 0; i < len; i++)
-			writeb_relaxed(buf[i], chip->IO_ADDR_W);
-	}
-}
-
-/*
- * fsmc_read_buf - read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store data
- * @len:	number of bytes to read
- */
-static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
-			IS_ALIGNED(len, sizeof(uint32_t))) {
-		uint32_t *p = (uint32_t *)buf;
-		len = len >> 2;
-		for (i = 0; i < len; i++)
-			p[i] = readl_relaxed(chip->IO_ADDR_R);
-	} else {
-		for (i = 0; i < len; i++)
-			buf[i] = readb_relaxed(chip->IO_ADDR_R);
-	}
-}
-
-/*
- * fsmc_read_buf_dma - read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store data
- * @len:	number of bytes to read
- */
-static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct fsmc_nand_data *host  = mtd_to_fsmc(mtd);
-
-	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
-}
-
-/*
- * fsmc_write_buf_dma - write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
- */
-static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
-		int len)
-{
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-
-	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
-}
-
-/*
- * fsmc_read_page_hwecc
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @buf:	buffer to store read data
- * @oob_required:	caller expects OOB data read to chip->oob_poi
- * @page:	page number to read
- *
- * This routine is needed for FSMC version 8, as reading from the NAND chip has
- * to be performed in a strict sequence as follows:
- * data (512 bytes) -> ecc (13 bytes)
- * After this read, the FSMC hardware generates and reports the erroneous data
- * bits (up to a max of 8 bits)
- */
-static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				 uint8_t *buf, int oob_required, int page)
-{
-	int i, j, s, stat, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	uint8_t *ecc_code = chip->buffers->ecccode;
-	int off, len, group = 0;
-	/*
-	 * ecc_oob is intentionally taken as uint16_t. In 16bit devices, we
-	 * end up reading 14 bytes (7 words) from oob. The local array is
-	 * to maintain word alignment
-	 */
-	uint16_t ecc_oob[7];
-	uint8_t *oob = (uint8_t *)&ecc_oob[0];
-	unsigned int max_bitflips = 0;
-
-	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
-		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
-		chip->ecc.hwctl(mtd, NAND_ECC_READ);
-		chip->read_buf(mtd, p, eccsize);
-
-		for (j = 0; j < eccbytes;) {
-			struct mtd_oob_region oobregion;
-			int ret;
-
-			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
-			if (ret)
-				return ret;
-
-			off = oobregion.offset;
-			len = oobregion.length;
-
-			/*
-			 * length is intentionally rounded up to a multiple of 2
-			 * so that at least 13 bytes are read even on 16-bit
-			 * NAND devices
-			 */
-			if (chip->options & NAND_BUSWIDTH_16)
-				len = roundup(len, 2);
-
-			chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
-			chip->read_buf(mtd, oob + j, len);
-			j += len;
-		}
-
-		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
-		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-
-		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-
-	return max_bitflips;
-}
-
-/*
- * fsmc_bch8_correct_data
- * @mtd:	mtd info structure
- * @dat:	buffer of read data
- * @read_ecc:	ecc read from device spare area
- * @calc_ecc:	ecc calculated from read data
- *
- * calc_ecc is 104 bits of information containing a maximum of 8 error
- * offsets of 13 bits each for 512 bytes of read data.
- */
-static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
-			     uint8_t *read_ecc, uint8_t *calc_ecc)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
-	void __iomem *regs = host->regs_va;
-	unsigned int bank = host->bank;
-	uint32_t err_idx[8];
-	uint32_t num_err, i;
-	uint32_t ecc1, ecc2, ecc3, ecc4;
-
-	num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
-
-	/* no bit flipping */
-	if (likely(num_err == 0))
-		return 0;
-
-	/* too many errors */
-	if (unlikely(num_err > 8)) {
-		/*
-		 * This is a temporary erase check. Reading a freshly erased
-		 * page would result in an ECC error because the OOB data is
-		 * also erased to FF, and the ECC calculated over all-FF data
-		 * is not FF..FF.
-		 * This is a workaround to skip performing correction when the
-		 * data is FF..FF.
-		 *
-		 * Logic:
-		 * For every page, each bit written as 0 is counted until the
-		 * number of such bits exceeds 8 (the maximum correction
-		 * capability of the FSMC for each 512 + 13 bytes)
-		 */
-
-		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
-		int bits_data = count_written_bits(dat, chip->ecc.size, 8);
-
-		if ((bits_ecc + bits_data) <= 8) {
-			if (bits_data)
-				memset(dat, 0xff, chip->ecc.size);
-			return bits_data;
-		}
-
-		return -EBADMSG;
-	}
-
-	/*
-	 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
-	 * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
-	 *
-	 * calc_ecc is 104 bits of information containing a maximum of 8 error
-	 * offsets of 13 bits each. The hardware ECC registers are read out and
-	 * the error offset indexes are populated in the err_idx array
-	 */
-	ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
-	ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
-	ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
-	ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
-
-	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
-	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
-	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
-	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
-	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
-	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
-	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
-	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
-
-	i = 0;
-	while (num_err--) {
-		change_bit(0, (unsigned long *)&err_idx[i]);
-		change_bit(1, (unsigned long *)&err_idx[i]);
-
-		if (err_idx[i] < chip->ecc.size * 8) {
-			change_bit(err_idx[i], (unsigned long *)dat);
-			i++;
-		}
-	}
-	return i;
-}
-
-static bool filter(struct dma_chan *chan, void *slave)
-{
-	chan->private = slave;
-	return true;
-}
-
-#ifdef CONFIG_OF
-static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
-				     struct device_node *np)
-{
-	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	u32 val;
-	int ret;
-
-	/* Set default NAND width to 8 bits */
-	pdata->width = 8;
-	if (!of_property_read_u32(np, "bank-width", &val)) {
-		if (val == 2) {
-			pdata->width = 16;
-		} else if (val != 1) {
-			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
-			return -EINVAL;
-		}
-	}
-	if (of_get_property(np, "nand-skip-bbtscan", NULL))
-		pdata->options = NAND_SKIP_BBTSCAN;
-
-	pdata->nand_timings = devm_kzalloc(&pdev->dev,
-				sizeof(*pdata->nand_timings), GFP_KERNEL);
-	if (!pdata->nand_timings)
-		return -ENOMEM;
-	ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
-						sizeof(*pdata->nand_timings));
-	if (ret) {
-		dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n");
-		pdata->nand_timings = NULL;
-	}
-
-	/* Set default NAND bank to 0 */
-	pdata->bank = 0;
-	if (!of_property_read_u32(np, "bank", &val)) {
-		if (val > 3) {
-			dev_err(&pdev->dev, "invalid bank %u\n", val);
-			return -EINVAL;
-		}
-		pdata->bank = val;
-	}
-	return 0;
-}
-#else
-static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
-				     struct device_node *np)
-{
-	return -ENOSYS;
-}
-#endif
-
-/*
- * fsmc_nand_probe - Probe function
- * @pdev:       platform device structure
- */
-static int __init fsmc_nand_probe(struct platform_device *pdev)
-{
-	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	struct device_node __maybe_unused *np = pdev->dev.of_node;
-	struct fsmc_nand_data *host;
-	struct mtd_info *mtd;
-	struct nand_chip *nand;
-	struct resource *res;
-	dma_cap_mask_t mask;
-	int ret = 0;
-	u32 pid;
-	int i;
-
-	if (np) {
-		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-		pdev->dev.platform_data = pdata;
-		ret = fsmc_nand_probe_config_dt(pdev, np);
-		if (ret) {
-			dev_err(&pdev->dev, "no platform data\n");
-			return -ENODEV;
-		}
-	}
-
-	if (!pdata) {
-		dev_err(&pdev->dev, "platform data is NULL\n");
-		return -EINVAL;
-	}
-
-	/* Allocate memory for the device structure (and zero it) */
-	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
-	host->data_va = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(host->data_va))
-		return PTR_ERR(host->data_va);
-
-	host->data_pa = (dma_addr_t)res->start;
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
-	host->addr_va = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(host->addr_va))
-		return PTR_ERR(host->addr_va);
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
-	host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(host->cmd_va))
-		return PTR_ERR(host->cmd_va);
-
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
-	host->regs_va = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(host->regs_va))
-		return PTR_ERR(host->regs_va);
-
-	host->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(host->clk)) {
-		dev_err(&pdev->dev, "failed to fetch block clock\n");
-		return PTR_ERR(host->clk);
-	}
-
-	ret = clk_prepare_enable(host->clk);
-	if (ret)
-		goto err_clk_prepare_enable;
-
-	/*
-	 * This device ID is actually a common AMBA ID as used on the
-	 * AMBA PrimeCell bus. However it is not a PrimeCell.
-	 */
-	for (pid = 0, i = 0; i < 4; i++)
-		pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
-	host->pid = pid;
-	dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
-		 "revision %02x, config %02x\n",
-		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
-		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
-
-	host->bank = pdata->bank;
-	host->select_chip = pdata->select_bank;
-	host->partitions = pdata->partitions;
-	host->nr_partitions = pdata->nr_partitions;
-	host->dev = &pdev->dev;
-	host->dev_timings = pdata->nand_timings;
-	host->mode = pdata->mode;
-
-	if (host->mode == USE_DMA_ACCESS)
-		init_completion(&host->dma_access_complete);
-
-	/* Link all private pointers */
-	mtd = nand_to_mtd(&host->nand);
-	nand = &host->nand;
-	nand_set_controller_data(nand, host);
-	nand_set_flash_node(nand, np);
-
-	mtd->dev.parent = &pdev->dev;
-	nand->IO_ADDR_R = host->data_va;
-	nand->IO_ADDR_W = host->data_va;
-	nand->cmd_ctrl = fsmc_cmd_ctrl;
-	nand->chip_delay = 30;
-
-	/*
-	 * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
-	 * can overwrite this value if the DT provides a different value.
-	 */
-	nand->ecc.mode = NAND_ECC_HW;
-	nand->ecc.hwctl = fsmc_enable_hwecc;
-	nand->ecc.size = 512;
-	nand->options = pdata->options;
-	nand->select_chip = fsmc_select_chip;
-	nand->badblockbits = 7;
-	nand_set_flash_node(nand, np);
-
-	if (pdata->width == FSMC_NAND_BW16)
-		nand->options |= NAND_BUSWIDTH_16;
-
-	switch (host->mode) {
-	case USE_DMA_ACCESS:
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_MEMCPY, mask);
-		host->read_dma_chan = dma_request_channel(mask, filter,
-				pdata->read_dma_priv);
-		if (!host->read_dma_chan) {
-			dev_err(&pdev->dev, "Unable to get read dma channel\n");
-			goto err_req_read_chnl;
-		}
-		host->write_dma_chan = dma_request_channel(mask, filter,
-				pdata->write_dma_priv);
-		if (!host->write_dma_chan) {
-			dev_err(&pdev->dev, "Unable to get write dma channel\n");
-			goto err_req_write_chnl;
-		}
-		nand->read_buf = fsmc_read_buf_dma;
-		nand->write_buf = fsmc_write_buf_dma;
-		break;
-
-	default:
-	case USE_WORD_ACCESS:
-		nand->read_buf = fsmc_read_buf;
-		nand->write_buf = fsmc_write_buf;
-		break;
-	}
-
-	fsmc_nand_setup(host->regs_va, host->bank,
-			nand->options & NAND_BUSWIDTH_16,
-			host->dev_timings);
-
-	if (AMBA_REV_BITS(host->pid) >= 8) {
-		nand->ecc.read_page = fsmc_read_page_hwecc;
-		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
-		nand->ecc.correct = fsmc_bch8_correct_data;
-		nand->ecc.bytes = 13;
-		nand->ecc.strength = 8;
-	}
-
-	/*
-	 * Scan to find existence of the device
-	 */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		ret = -ENXIO;
-		dev_err(&pdev->dev, "No NAND Device found!\n");
-		goto err_scan_ident;
-	}
-
-	if (AMBA_REV_BITS(host->pid) >= 8) {
-		switch (mtd->oobsize) {
-		case 16:
-		case 64:
-		case 128:
-		case 224:
-		case 256:
-			break;
-		default:
-			dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
-				 mtd->oobsize);
-			ret = -EINVAL;
-			goto err_probe;
-		}
-
-		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
-	} else {
-		switch (nand->ecc.mode) {
-		case NAND_ECC_HW:
-			dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
-			nand->ecc.calculate = fsmc_read_hwecc_ecc1;
-			nand->ecc.correct = nand_correct_data;
-			nand->ecc.bytes = 3;
-			nand->ecc.strength = 1;
-			break;
-
-		case NAND_ECC_SOFT:
-			if (nand->ecc.algo == NAND_ECC_BCH) {
-				dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
-				break;
-			}
-
-		default:
-			dev_err(&pdev->dev, "Unsupported ECC mode!\n");
-			goto err_probe;
-		}
-
-		/*
-		 * Don't set the layout for BCH4 SW ECC. This will be
-		 * generated in nand_bch_init() later.
-		 */
-		if (nand->ecc.mode == NAND_ECC_HW) {
-			switch (mtd->oobsize) {
-			case 16:
-			case 64:
-			case 128:
-				mtd_set_ooblayout(mtd,
-						  &fsmc_ecc1_ooblayout_ops);
-				break;
-			default:
-				dev_warn(&pdev->dev,
-					 "No oob scheme defined for oobsize %d\n",
-					 mtd->oobsize);
-				ret = -EINVAL;
-				goto err_probe;
-			}
-		}
-	}
-
-	/* Second stage of scan to fill MTD data-structures */
-	if (nand_scan_tail(mtd)) {
-		ret = -ENXIO;
-		goto err_probe;
-	}
-
-	/*
-	 * The partition information can be accessed in the following
-	 * order of precedence:
-	 *
-	 * command line through the bootloader,
-	 * platform data,
-	 * default partition information present in the driver.
-	 */
-	/*
-	 * Check for partition info passed
-	 */
-	mtd->name = "nand";
-	ret = mtd_device_register(mtd, host->partitions, host->nr_partitions);
-	if (ret)
-		goto err_probe;
-
-	platform_set_drvdata(pdev, host);
-	dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
-	return 0;
-
-err_probe:
-err_scan_ident:
-	if (host->mode == USE_DMA_ACCESS)
-		dma_release_channel(host->write_dma_chan);
-err_req_write_chnl:
-	if (host->mode == USE_DMA_ACCESS)
-		dma_release_channel(host->read_dma_chan);
-err_req_read_chnl:
-	clk_disable_unprepare(host->clk);
-err_clk_prepare_enable:
-	clk_put(host->clk);
-	return ret;
-}
-
-/*
- * Clean up routine
- */
-static int fsmc_nand_remove(struct platform_device *pdev)
-{
-	struct fsmc_nand_data *host = platform_get_drvdata(pdev);
-
-	if (host) {
-		nand_release(nand_to_mtd(&host->nand));
-
-		if (host->mode == USE_DMA_ACCESS) {
-			dma_release_channel(host->write_dma_chan);
-			dma_release_channel(host->read_dma_chan);
-		}
-		clk_disable_unprepare(host->clk);
-		clk_put(host->clk);
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int fsmc_nand_suspend(struct device *dev)
-{
-	struct fsmc_nand_data *host = dev_get_drvdata(dev);
-	if (host)
-		clk_disable_unprepare(host->clk);
-	return 0;
-}
-
-static int fsmc_nand_resume(struct device *dev)
-{
-	struct fsmc_nand_data *host = dev_get_drvdata(dev);
-	if (host) {
-		clk_prepare_enable(host->clk);
-		fsmc_nand_setup(host->regs_va, host->bank,
-				host->nand.options & NAND_BUSWIDTH_16,
-				host->dev_timings);
-	}
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
-
-#ifdef CONFIG_OF
-static const struct of_device_id fsmc_nand_id_table[] = {
-	{ .compatible = "st,spear600-fsmc-nand" },
-	{ .compatible = "stericsson,fsmc-nand" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
-#endif
-
-static struct platform_driver fsmc_nand_driver = {
-	.remove = fsmc_nand_remove,
-	.driver = {
-		.name = "fsmc-nand",
-		.of_match_table = of_match_ptr(fsmc_nand_id_table),
-		.pm = &fsmc_nand_pm_ops,
-	},
-};
-
-module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
-MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
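
For illustration only (not part of this patch): fsmc_bch8_correct_data() above
uses an erased-page heuristic when the controller reports more than 8 errors.
A freshly erased page is all 0xff in both data and OOB, so its stored ECC
never matches what the hardware computes; the driver therefore counts the 0
bits in the data and ECC buffers and, if there are at most 8 in total, treats
the page as erased and rolls the few bitflips back to 0xff. A standalone
user-space sketch of that check, approximating the kernel's hweight8() with
__builtin_popcount():

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Same counting rule as count_written_bits() in the driver above. */
	static int count_written_bits(const uint8_t *buff, int size, int max_bits)
	{
		int k, written_bits = 0;

		for (k = 0; k < size; k++) {
			written_bits += __builtin_popcount((uint8_t)~buff[k]);
			if (written_bits > max_bits)
				break;
		}
		return written_bits;
	}

	int main(void)
	{
		uint8_t data[512], ecc[13];

		memset(data, 0xff, sizeof(data));
		memset(ecc, 0xff, sizeof(ecc));
		data[100] &= ~0x04;	/* pretend one bit flipped in an erased page */

		int bits_data = count_written_bits(data, sizeof(data), 8);
		int bits_ecc = count_written_bits(ecc, sizeof(ecc), 8);

		if (bits_data + bits_ecc <= 8) {
			memset(data, 0xff, sizeof(data));	/* roll the flips back */
			printf("erased page, %d bitflip(s) fixed\n", bits_data);
		}
		return 0;
	}
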
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
deleted file mode 100644
index 21b19efe1ac7..000000000000
--- a/drivers/mtd/nand/gpio.c
+++ /dev/null
@@ -1,322 +0,0 @@ 
-/*
- * drivers/mtd/nand/gpio.c
- *
- * Updated, and converted to generic GPIO based driver by Russell King.
- *
- * Written by Ben Dooks <ben@simtec.co.uk>
- *   Based on 2.4 version by Mark Whittaker
- *
- * © 2004 Simtec Electronics
- *
- * Device driver for NAND flash that uses a memory mapped interface to
- * read/write the NAND commands and data, and GPIO pins for control signals
- * (the DT binding refers to this as "GPIO assisted NAND flash")
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/nand-gpio.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/of_gpio.h>
-
-struct gpiomtd {
-	void __iomem		*io_sync;
-	struct nand_chip	nand_chip;
-	struct gpio_nand_platdata plat;
-};
-
-static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
-}
-
-
-#ifdef CONFIG_ARM
-/* gpio_nand_dosync()
- *
- * Make sure the GPIO state changes occur in-order with writes to NAND
- * memory region.
- * Needed on PXA due to bus-reordering within the SoC itself (see the section
- * on I/O ordering in the PXA manual, section 2.3, p35).
- */
-static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
-{
-	unsigned long tmp;
-
-	if (gpiomtd->io_sync) {
-		/*
-		 * Linux memory barriers don't cater for what's required here.
-		 * What's required is what's here - a read from a separate
-		 * region with a dependency on that read.
-		 */
-		tmp = readl(gpiomtd->io_sync);
-		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
-	}
-}
-#else
-static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
-#endif
-
-static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
-
-	gpio_nand_dosync(gpiomtd);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
-		gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
-		gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
-		gpio_nand_dosync(gpiomtd);
-	}
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
-	gpio_nand_dosync(gpiomtd);
-}
-
-static int gpio_nand_devready(struct mtd_info *mtd)
-{
-	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
-
-	return gpio_get_value(gpiomtd->plat.gpio_rdy);
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id gpio_nand_id_table[] = {
-	{ .compatible = "gpio-control-nand" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
-
-static int gpio_nand_get_config_of(const struct device *dev,
-				   struct gpio_nand_platdata *plat)
-{
-	u32 val;
-
-	if (!dev->of_node)
-		return -ENODEV;
-
-	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
-		if (val == 2) {
-			plat->options |= NAND_BUSWIDTH_16;
-		} else if (val != 1) {
-			dev_err(dev, "invalid bank-width %u\n", val);
-			return -EINVAL;
-		}
-	}
-
-	plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
-	plat->gpio_nce = of_get_gpio(dev->of_node, 1);
-	plat->gpio_ale = of_get_gpio(dev->of_node, 2);
-	plat->gpio_cle = of_get_gpio(dev->of_node, 3);
-	plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
-
-	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
-		plat->chip_delay = val;
-
-	return 0;
-}
-
-static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
-{
-	struct resource *r;
-	u64 addr;
-
-	if (of_property_read_u64(pdev->dev.of_node,
-				       "gpio-control-nand,io-sync-reg", &addr))
-		return NULL;
-
-	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
-	if (!r)
-		return NULL;
-
-	r->start = addr;
-	r->end = r->start + 0x3;
-	r->flags = IORESOURCE_MEM;
-
-	return r;
-}
-#else /* CONFIG_OF */
-static inline int gpio_nand_get_config_of(const struct device *dev,
-					  struct gpio_nand_platdata *plat)
-{
-	return -ENOSYS;
-}
-
-static inline struct resource *
-gpio_nand_get_io_sync_of(struct platform_device *pdev)
-{
-	return NULL;
-}
-#endif /* CONFIG_OF */
-
-static inline int gpio_nand_get_config(const struct device *dev,
-				       struct gpio_nand_platdata *plat)
-{
-	int ret = gpio_nand_get_config_of(dev, plat);
-
-	if (!ret)
-		return ret;
-
-	if (dev_get_platdata(dev)) {
-		memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-static inline struct resource *
-gpio_nand_get_io_sync(struct platform_device *pdev)
-{
-	struct resource *r = gpio_nand_get_io_sync_of(pdev);
-
-	if (r)
-		return r;
-
-	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
-}
-
-static int gpio_nand_remove(struct platform_device *pdev)
-{
-	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
-
-	nand_release(nand_to_mtd(&gpiomtd->nand_chip));
-
-	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
-	gpio_set_value(gpiomtd->plat.gpio_nce, 1);
-
-	return 0;
-}
-
-static int gpio_nand_probe(struct platform_device *pdev)
-{
-	struct gpiomtd *gpiomtd;
-	struct nand_chip *chip;
-	struct mtd_info *mtd;
-	struct resource *res;
-	int ret = 0;
-
-	if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
-		return -EINVAL;
-
-	gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
-	if (!gpiomtd)
-		return -ENOMEM;
-
-	chip = &gpiomtd->nand_chip;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(chip->IO_ADDR_R))
-		return PTR_ERR(chip->IO_ADDR_R);
-
-	res = gpio_nand_get_io_sync(pdev);
-	if (res) {
-		gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(gpiomtd->io_sync))
-			return PTR_ERR(gpiomtd->io_sync);
-	}
-
-	ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
-	if (ret)
-		return ret;
-
-	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE");
-	if (ret)
-		return ret;
-	gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
-
-	if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
-		ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
-					"NAND NWP");
-		if (ret)
-			return ret;
-	}
-
-	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
-	if (ret)
-		return ret;
-	gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
-
-	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
-	if (ret)
-		return ret;
-	gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
-
-	if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
-		ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
-					"NAND RDY");
-		if (ret)
-			return ret;
-		gpio_direction_input(gpiomtd->plat.gpio_rdy);
-		chip->dev_ready = gpio_nand_devready;
-	}
-
-	nand_set_flash_node(chip, pdev->dev.of_node);
-	chip->IO_ADDR_W		= chip->IO_ADDR_R;
-	chip->ecc.mode		= NAND_ECC_SOFT;
-	chip->ecc.algo		= NAND_ECC_HAMMING;
-	chip->options		= gpiomtd->plat.options;
-	chip->chip_delay	= gpiomtd->plat.chip_delay;
-	chip->cmd_ctrl		= gpio_nand_cmd_ctrl;
-
-	mtd			= nand_to_mtd(chip);
-	mtd->dev.parent		= &pdev->dev;
-
-	platform_set_drvdata(pdev, gpiomtd);
-
-	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-		gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
-
-	if (nand_scan(mtd, 1)) {
-		ret = -ENXIO;
-		goto err_wp;
-	}
-
-	if (gpiomtd->plat.adjust_parts)
-		gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
-
-	ret = mtd_device_register(mtd, gpiomtd->plat.parts,
-				  gpiomtd->plat.num_parts);
-	if (!ret)
-		return 0;
-
-err_wp:
-	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
-		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
-
-	return ret;
-}
-
-static struct platform_driver gpio_nand_driver = {
-	.probe		= gpio_nand_probe,
-	.remove		= gpio_nand_remove,
-	.driver		= {
-		.name	= "gpio-nand",
-		.of_match_table = of_match_ptr(gpio_nand_id_table),
-	},
-};
-
-module_platform_driver(gpio_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/gpmi-nand/Makefile b/drivers/mtd/nand/gpmi-nand/Makefile
deleted file mode 100644
index 3a462487c35e..000000000000
--- a/drivers/mtd/nand/gpmi-nand/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@ 
-obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
-gpmi_nand-objs += gpmi-nand.o
-gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
deleted file mode 100644
index 05bb91f2f4c4..000000000000
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ /dev/null
@@ -1,128 +0,0 @@ 
-/*
- * Freescale GPMI NAND Flash Driver
- *
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef __GPMI_NAND_BCH_REGS_H
-#define __GPMI_NAND_BCH_REGS_H
-
-#define HW_BCH_CTRL				0x00000000
-#define HW_BCH_CTRL_SET				0x00000004
-#define HW_BCH_CTRL_CLR				0x00000008
-#define HW_BCH_CTRL_TOG				0x0000000c
-
-#define BM_BCH_CTRL_COMPLETE_IRQ_EN		(1 << 8)
-#define BM_BCH_CTRL_COMPLETE_IRQ		(1 << 0)
-
-#define HW_BCH_STATUS0				0x00000010
-#define HW_BCH_MODE				0x00000020
-#define HW_BCH_ENCODEPTR			0x00000030
-#define HW_BCH_DATAPTR				0x00000040
-#define HW_BCH_METAPTR				0x00000050
-#define HW_BCH_LAYOUTSELECT			0x00000070
-
-#define HW_BCH_FLASH0LAYOUT0			0x00000080
-
-#define BP_BCH_FLASH0LAYOUT0_NBLOCKS		24
-#define BM_BCH_FLASH0LAYOUT0_NBLOCKS	(0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
-#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v)		\
-	(((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
-
-#define BP_BCH_FLASH0LAYOUT0_META_SIZE		16
-#define BM_BCH_FLASH0LAYOUT0_META_SIZE	(0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v)	\
-	(((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
-					 & BM_BCH_FLASH0LAYOUT0_META_SIZE)
-
-#define BP_BCH_FLASH0LAYOUT0_ECC0		12
-#define BM_BCH_FLASH0LAYOUT0_ECC0	(0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
-#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0		11
-#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
-#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x)				\
-	(GPMI_IS_MX6(x)					\
-		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)	\
-			& MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0)	\
-		: (((v) << BP_BCH_FLASH0LAYOUT0_ECC0)		\
-			& BM_BCH_FLASH0LAYOUT0_ECC0)		\
-	)
-
-#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14	10
-#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14			\
-				(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
-#define BF_BCH_FLASH0LAYOUT0_GF(v, x)				\
-	((GPMI_IS_MX6(x) && ((v) == 14))			\
-		? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)	\
-			& MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14)	\
-		: 0						\
-	)
-
-#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE		0
-#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE		\
-			(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE	\
-			(0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
-#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x)				\
-	(GPMI_IS_MX6(x)						\
-		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)	\
-		: ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)		\
-	)
-
-#define HW_BCH_FLASH0LAYOUT1			0x00000090
-
-#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE		16
-#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE		\
-			(0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v)	\
-	(((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
-					 & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
-
-#define BP_BCH_FLASH0LAYOUT1_ECCN		12
-#define BM_BCH_FLASH0LAYOUT1_ECCN	(0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
-#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN		11
-#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
-#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x)				\
-	(GPMI_IS_MX6(x)					\
-		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)	\
-			& MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN)	\
-		: (((v) << BP_BCH_FLASH0LAYOUT1_ECCN)		\
-			& BM_BCH_FLASH0LAYOUT1_ECCN)		\
-	)
-
-#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14	10
-#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14			\
-				(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
-#define BF_BCH_FLASH0LAYOUT1_GF(v, x)				\
-	((GPMI_IS_MX6(x) && ((v) == 14))			\
-		? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)	\
-			& MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14)	\
-		: 0						\
-	)
-
-#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE		0
-#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE		\
-			(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE	\
-			(0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
-#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x)				\
-	(GPMI_IS_MX6(x)						\
-		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)	\
-		: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)		\
-	)
-
-#define HW_BCH_VERSION				0x00000160
-#endif
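
For illustration only (not part of this patch): bch_set_geometry() in
gpmi-lib.c, further down in this diff, composes HW_BCH_FLASH0LAYOUT0 from the
BF_BCH_FLASH0LAYOUT0_* macros above. The user-space sketch below packs the
pre-MX6 field layout by hand (NBLOCKS at bit 24, META_SIZE at bit 16, ECC0 at
bit 12, DATA0_SIZE at bit 0); the geometry numbers are assumed example values,
with the ECC field holding strength / 2 and NBLOCKS holding the chunk count
minus one, as the driver does:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Assumed geometry: 4 KiB page split into 8 chunks of 512 bytes,
		 * 10 metadata bytes, BCH8 (the register field stores strength / 2). */
		uint32_t nblocks = 8 - 1;
		uint32_t meta_size = 10;
		uint32_t ecc0 = 8 >> 1;
		uint32_t data0_size = 512;

		uint32_t layout0 = (nblocks << 24) | (meta_size << 16) |
				   (ecc0 << 12) | (data0_size << 0);

		printf("HW_BCH_FLASH0LAYOUT0 = 0x%08x\n", layout0);	/* 0x070a4200 */
		return 0;
	}
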
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
deleted file mode 100644
index 0f68a99fc4ad..000000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ /dev/null
@@ -1,1508 +0,0 @@ 
-/*
- * Freescale GPMI NAND Flash Driver
- *
- * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
- * Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-
-#include "gpmi-nand.h"
-#include "gpmi-regs.h"
-#include "bch-regs.h"
-
-static struct timing_threshod timing_default_threshold = {
-	.max_data_setup_cycles       = (BM_GPMI_TIMING0_DATA_SETUP >>
-						BP_GPMI_TIMING0_DATA_SETUP),
-	.internal_data_setup_in_ns   = 0,
-	.max_sample_delay_factor     = (BM_GPMI_CTRL1_RDN_DELAY >>
-						BP_GPMI_CTRL1_RDN_DELAY),
-	.max_dll_clock_period_in_ns  = 32,
-	.max_dll_delay_in_ns         = 16,
-};
-
-#define MXS_SET_ADDR		0x4
-#define MXS_CLR_ADDR		0x8
-/*
- * Clear the bit and poll until it reads back as cleared.  This is usually
- * called with a reset address and a mask of either SFTRST (bit 31) or
- * CLKGATE (bit 30).
- */
-static int clear_poll_bit(void __iomem *addr, u32 mask)
-{
-	int timeout = 0x400;
-
-	/* clear the bit */
-	writel(mask, addr + MXS_CLR_ADDR);
-
-	/*
-	 * SFTRST needs 3 GPMI clocks to settle; the reference manual
-	 * recommends waiting 1us.
-	 */
-	udelay(1);
-
-	/* poll the bit becoming clear */
-	while ((readl(addr) & mask) && --timeout)
-		/* nothing */;
-
-	return !timeout;
-}
-
-#define MODULE_CLKGATE		(1 << 30)
-#define MODULE_SFTRST		(1 << 31)
-/*
- * The current mxs_reset_block() will do two things:
- *  [1] enable the module.
- *  [2] reset the module.
- *
- * In most of the cases, it's ok.
- * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
- * If you try to soft reset the BCH block, it becomes unusable until
- * the next hard reset. This case occurs in the NAND boot mode. When the board
- * boots from NAND, the ROM of the chip will initialize the BCH blocks itself.
- * So if the driver tries to reset the BCH again, the BCH will not work anymore.
- * You will see a DMA timeout in this case. The bug has been fixed
- * in the following chips, such as MX28.
- *
- * To avoid this bug, just add a new parameter `just_enable` for
- * the mxs_reset_block(), and rewrite it here.
- */
-static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
-{
-	int ret;
-	int timeout = 0x400;
-
-	/* clear and poll SFTRST */
-	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
-	if (unlikely(ret))
-		goto error;
-
-	/* clear CLKGATE */
-	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
-
-	if (!just_enable) {
-		/* set SFTRST to reset the block */
-		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
-		udelay(1);
-
-		/* poll CLKGATE becoming set */
-		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
-			/* nothing */;
-		if (unlikely(!timeout))
-			goto error;
-	}
-
-	/* clear and poll SFTRST */
-	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
-	if (unlikely(ret))
-		goto error;
-
-	/* clear and poll CLKGATE */
-	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
-	if (unlikely(ret))
-		goto error;
-
-	return 0;
-
-error:
-	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
-	return -ETIMEDOUT;
-}
-
-static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
-{
-	struct clk *clk;
-	int ret;
-	int i;
-
-	for (i = 0; i < GPMI_CLK_MAX; i++) {
-		clk = this->resources.clock[i];
-		if (!clk)
-			break;
-
-		if (v) {
-			ret = clk_prepare_enable(clk);
-			if (ret)
-				goto err_clk;
-		} else {
-			clk_disable_unprepare(clk);
-		}
-	}
-	return 0;
-
-err_clk:
-	for (; i > 0; i--)
-		clk_disable_unprepare(this->resources.clock[i - 1]);
-	return ret;
-}
-
-#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
-#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
-
-int gpmi_init(struct gpmi_nand_data *this)
-{
-	struct resources *r = &this->resources;
-	int ret;
-
-	ret = gpmi_enable_clk(this);
-	if (ret)
-		goto err_out;
-	ret = gpmi_reset_block(r->gpmi_regs, false);
-	if (ret)
-		goto err_out;
-
-	/*
-	 * Reset BCH here, too. We got failures otherwise :(
-	 * See later BCH reset for explanation of MX23 handling
-	 */
-	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
-	if (ret)
-		goto err_out;
-
-
-	/* Choose NAND mode. */
-	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
-
-	/* Set the IRQ polarity. */
-	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
-				r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	/* Disable Write-Protection. */
-	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	/* Select BCH ECC. */
-	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	/*
-	 * Decouple the chip select from dma channel. We use dma0 for all
-	 * the chips.
-	 */
-	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	gpmi_disable_clk(this);
-	return 0;
-err_out:
-	return ret;
-}
-
-/* This function is very useful. It is called only when a bug occurs. */
-void gpmi_dump_info(struct gpmi_nand_data *this)
-{
-	struct resources *r = &this->resources;
-	struct bch_geometry *geo = &this->bch_geometry;
-	u32 reg;
-	int i;
-
-	dev_err(this->dev, "Show GPMI registers :\n");
-	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
-		reg = readl(r->gpmi_regs + i * 0x10);
-		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
-	}
-
-	/* start to print out the BCH info */
-	dev_err(this->dev, "Show BCH registers :\n");
-	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
-		reg = readl(r->bch_regs + i * 0x10);
-		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
-	}
-	dev_err(this->dev, "BCH Geometry :\n"
-		"GF length              : %u\n"
-		"ECC Strength           : %u\n"
-		"Page Size in Bytes     : %u\n"
-		"Metadata Size in Bytes : %u\n"
-		"ECC Chunk Size in Bytes: %u\n"
-		"ECC Chunk Count        : %u\n"
-		"Payload Size in Bytes  : %u\n"
-		"Auxiliary Size in Bytes: %u\n"
-		"Auxiliary Status Offset: %u\n"
-		"Block Mark Byte Offset : %u\n"
-		"Block Mark Bit Offset  : %u\n",
-		geo->gf_len,
-		geo->ecc_strength,
-		geo->page_size,
-		geo->metadata_size,
-		geo->ecc_chunk_size,
-		geo->ecc_chunk_count,
-		geo->payload_size,
-		geo->auxiliary_size,
-		geo->auxiliary_status_offset,
-		geo->block_mark_byte_offset,
-		geo->block_mark_bit_offset);
-}
-
-/* Configures the geometry for BCH.  */
-int bch_set_geometry(struct gpmi_nand_data *this)
-{
-	struct resources *r = &this->resources;
-	struct bch_geometry *bch_geo = &this->bch_geometry;
-	unsigned int block_count;
-	unsigned int block_size;
-	unsigned int metadata_size;
-	unsigned int ecc_strength;
-	unsigned int page_size;
-	unsigned int gf_len;
-	int ret;
-
-	if (common_nfc_set_geometry(this))
-		return !0;
-
-	block_count   = bch_geo->ecc_chunk_count - 1;
-	block_size    = bch_geo->ecc_chunk_size;
-	metadata_size = bch_geo->metadata_size;
-	ecc_strength  = bch_geo->ecc_strength >> 1;
-	page_size     = bch_geo->page_size;
-	gf_len        = bch_geo->gf_len;
-
-	ret = gpmi_enable_clk(this);
-	if (ret)
-		goto err_out;
-
-	/*
-	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
-	 * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
-	 * On the other hand, the MX28 needs the reset, because one case has been
-	 * seen where the BCH produced ECC errors constantly after 10000
-	 * consecutive reboots. The latter case has not been seen on the MX23
-	 * yet, but we do not know whether it could happen there as well.
-	 */
-	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
-	if (ret)
-		goto err_out;
-
-	/* Configure layout 0. */
-	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
-			| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
-			| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
-			| BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
-			| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
-			r->bch_regs + HW_BCH_FLASH0LAYOUT0);
-
-	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
-			| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
-			| BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
-			| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
-			r->bch_regs + HW_BCH_FLASH0LAYOUT1);
-
-	/* Set *all* chip selects to use layout 0. */
-	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
-
-	/* Enable interrupts. */
-	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
-				r->bch_regs + HW_BCH_CTRL_SET);
-
-	gpmi_disable_clk(this);
-	return 0;
-err_out:
-	return ret;
-}
-
-/* Converts time in nanoseconds to cycles. */
-static unsigned int ns_to_cycles(unsigned int time,
-			unsigned int period, unsigned int min)
-{
-	unsigned int k;
-
-	k = (time + period - 1) / period;
-	return max(k, min);
-}
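-
-/*
- * For example, with a 10ns clock period: ns_to_cycles(25, 10, 1) rounds 2.5
- * cycles up to 3, while ns_to_cycles(0, 10, 1) is clamped to the minimum of
- * one cycle.
- */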
-
-#define DEF_MIN_PROP_DELAY	5
-#define DEF_MAX_PROP_DELAY	9
-/* Apply timing to current hardware conditions. */
-static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
-					struct gpmi_nfc_hardware_timing *hw)
-{
-	struct timing_threshod *nfc = &timing_default_threshold;
-	struct resources *r = &this->resources;
-	struct nand_chip *nand = &this->nand;
-	struct nand_timing target = this->timing;
-	bool improved_timing_is_available;
-	unsigned long clock_frequency_in_hz;
-	unsigned int clock_period_in_ns;
-	bool dll_use_half_periods;
-	unsigned int dll_delay_shift;
-	unsigned int max_sample_delay_in_ns;
-	unsigned int address_setup_in_cycles;
-	unsigned int data_setup_in_ns;
-	unsigned int data_setup_in_cycles;
-	unsigned int data_hold_in_cycles;
-	int ideal_sample_delay_in_ns;
-	unsigned int sample_delay_factor;
-	int tEYE;
-	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
-	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
-
-	/*
-	 * If there are multiple chips, we need to relax the timings to allow
-	 * for signal distortion due to higher capacitance.
-	 */
-	if (nand->numchips > 2) {
-		target.data_setup_in_ns    += 10;
-		target.data_hold_in_ns     += 10;
-		target.address_setup_in_ns += 10;
-	} else if (nand->numchips > 1) {
-		target.data_setup_in_ns    += 5;
-		target.data_hold_in_ns     += 5;
-		target.address_setup_in_ns += 5;
-	}
-
-	/* Check if improved timing information is available. */
-	improved_timing_is_available =
-		(target.tREA_in_ns  >= 0) &&
-		(target.tRLOH_in_ns >= 0) &&
-		(target.tRHOH_in_ns >= 0);
-
-	/* Inspect the clock. */
-	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
-	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
-	clock_period_in_ns    = NSEC_PER_SEC / clock_frequency_in_hz;
-
-	/*
-	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
-	 * Here, we quantize the setup and hold timing parameters to the
-	 * next-highest clock period to make sure we apply at least the
-	 * specified times.
-	 *
-	 * For data setup and data hold, the hardware interprets a value of zero
-	 * as the largest possible delay. This is not what's intended by a zero
-	 * in the input parameter, so we impose a minimum of one cycle.
-	 */
-	data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
-							clock_period_in_ns, 1);
-	data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
-							clock_period_in_ns, 1);
-	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
-							clock_period_in_ns, 0);
-
-	/*
-	 * The clock's period affects the sample delay in a number of ways:
-	 *
-	 * (1) The NFC HAL tells us the maximum clock period the sample delay
-	 *     DLL can tolerate. If the clock period is greater than half that
-	 *     maximum, we must configure the DLL to be driven by half periods.
-	 *
-	 * (2) We need to convert from an ideal sample delay, in ns, to a
-	 *     "sample delay factor," which the NFC uses. This factor depends on
-	 *     whether we're driving the DLL with full or half periods.
-	 *     Paraphrasing the reference manual:
-	 *
-	 *         AD = SDF x 0.125 x RP
-	 *
-	 * where:
-	 *
-	 *     AD   is the applied delay, in ns.
-	 *     SDF  is the sample delay factor, which is dimensionless.
-	 *     RP   is the reference period, in ns, which is a full clock period
-	 *          if the DLL is being driven by full periods, or half that if
-	 *          the DLL is being driven by half periods.
-	 *
-	 * Let's re-arrange this in a way that's more useful to us:
-	 *
-	 *                        8
-	 *         SDF  =  AD x ----
-	 *                       RP
-	 *
-	 * The reference period is either the clock period or half that, so this
-	 * is:
-	 *
-	 *                        8       AD x DDF
-	 *         SDF  =  AD x -----  =  --------
-	 *                      f x P        P
-	 *
-	 * where:
-	 *
-	 *       f  is 1 or 1/2, depending on how we're driving the DLL.
-	 *       P  is the clock period.
-	 *     DDF  is the DLL Delay Factor, a dimensionless value that
-	 *          incorporates all the constants in the conversion.
-	 *
-	 * DDF will be either 8 or 16, both of which are powers of two. We can
-	 * reduce the cost of this conversion by using bit shifts instead of
-	 * multiplication or division. Thus:
-	 *
-	 *                 AD << DDS
-	 *         SDF  =  ---------
-	 *                     P
-	 *
-	 *     or
-	 *
-	 *         AD  =  (SDF >> DDS) x P
-	 *
-	 * where:
-	 *
-	 *     DDS  is the DLL Delay Shift, the logarithm to base 2 of the DDF.
-	 */
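-
-	/*
-	 * For example (illustrative values): with a 10ns clock period and the
-	 * DLL driven by full periods (DDS = 3), an ideal delay of 10ns gives
-	 * SDF = (10 << 3) / 10 = 8, and the quantized delay applied by the
-	 * hardware is (8 * 10) >> 3 = 10ns again.
-	 */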
-	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
-		dll_use_half_periods = true;
-		dll_delay_shift      = 3 + 1;
-	} else {
-		dll_use_half_periods = false;
-		dll_delay_shift      = 3;
-	}
-
-	/*
-	 * Compute the maximum sample delay the NFC allows, under current
-	 * conditions. If the clock is running too slowly, no sample delay is
-	 * possible.
-	 */
-	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
-		max_sample_delay_in_ns = 0;
-	else {
-		/*
-		 * Compute the delay implied by the largest sample delay factor
-		 * the NFC allows.
-		 */
-		max_sample_delay_in_ns =
-			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
-								dll_delay_shift;
-
-		/*
-		 * Check if the implied sample delay is larger than the NFC
-		 * actually allows.
-		 */
-		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
-			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
-	}
-
-	/*
-	 * Check if improved timing information is available. If not, we have to
-	 * use a less-sophisticated algorithm.
-	 */
-	if (!improved_timing_is_available) {
-		/*
-		 * Fold the read setup time required by the NFC into the ideal
-		 * sample delay.
-		 */
-		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
-						nfc->internal_data_setup_in_ns;
-
-		/*
-		 * The ideal sample delay may be greater than the maximum
-		 * allowed by the NFC. If so, we can trade off sample delay time
-		 * for more data setup time.
-		 *
-		 * In each iteration of the following loop, we add a cycle to
-		 * the data setup time and subtract a corresponding amount from
-		 * the sample delay until we've satisfied the constraints or
-		 * can't do any better.
-		 */
-		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
-			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
-
-			data_setup_in_cycles++;
-			ideal_sample_delay_in_ns -= clock_period_in_ns;
-
-			if (ideal_sample_delay_in_ns < 0)
-				ideal_sample_delay_in_ns = 0;
-
-		}
-
-		/*
-		 * Compute the sample delay factor that corresponds most closely
-		 * to the ideal sample delay. If the result is too large for the
-		 * NFC, use the maximum value.
-		 *
-		 * Notice that we use the ns_to_cycles function to compute the
-		 * sample delay factor. We do this because the form of the
-		 * computation is the same as that for calculating cycles.
-		 */
-		sample_delay_factor =
-			ns_to_cycles(
-				ideal_sample_delay_in_ns << dll_delay_shift,
-							clock_period_in_ns, 0);
-
-		if (sample_delay_factor > nfc->max_sample_delay_factor)
-			sample_delay_factor = nfc->max_sample_delay_factor;
-
-		/* Skip to the part where we return our results. */
-		goto return_results;
-	}
-
-	/*
-	 * If control arrives here, we have more detailed timing information,
-	 * so we can use a better algorithm.
-	 */
-
-	/*
-	 * Fold the read setup time required by the NFC into the maximum
-	 * propagation delay.
-	 */
-	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
-
-	/*
-	 * Earlier, we computed the number of clock cycles required to satisfy
-	 * the data setup time. Now, we need to know the actual nanoseconds.
-	 */
-	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
-
-	/*
-	 * Compute tEYE, the width of the data eye when reading from the NAND
-	 * Flash. The eye width is fundamentally determined by the data setup
-	 * time, perturbed by propagation delays and some characteristics of the
-	 * NAND Flash device.
-	 *
-	 * start of the eye = max_prop_delay + tREA
-	 * end of the eye   = min_prop_delay + tRHOH + data_setup
-	 */
-	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
-							(int)data_setup_in_ns;
-
-	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
-
-	/*
-	 * The eye must be open. If it's not, we can try to open it by
-	 * increasing its main forcer, the data setup time.
-	 *
-	 * In each iteration of the following loop, we increase the data setup
-	 * time by a single clock cycle. We do this until either the eye is
-	 * open or we run into NFC limits.
-	 */
-	while ((tEYE <= 0) &&
-			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
-		/* Give a cycle to data setup. */
-		data_setup_in_cycles++;
-		/* Synchronize the data setup time with the cycles. */
-		data_setup_in_ns += clock_period_in_ns;
-		/* Adjust tEYE accordingly. */
-		tEYE += clock_period_in_ns;
-	}
-
-	/*
-	 * When control arrives here, the eye is open. The ideal time to sample
-	 * the data is in the center of the eye:
-	 *
-	 *     end of the eye + start of the eye
-	 *     ---------------------------------  -  data_setup
-	 *                    2
-	 *
-	 * After some algebra, this simplifies to the code immediately below.
-	 */
-	ideal_sample_delay_in_ns =
-		((int)max_prop_delay_in_ns +
-			(int)target.tREA_in_ns +
-				(int)min_prop_delay_in_ns +
-					(int)target.tRHOH_in_ns -
-						(int)data_setup_in_ns) >> 1;
-
-	/*
-	 * The following figure illustrates some aspects of a NAND Flash read:
-	 *
-	 *
-	 *           __                   _____________________________________
-	 * RDN         \_________________/
-	 *
-	 *                                         <---- tEYE ----->
-	 *                                        /-----------------\
-	 * Read Data ----------------------------<                   >---------
-	 *                                        \-----------------/
-	 *             ^                 ^                 ^              ^
-	 *             |                 |                 |              |
-	 *             |<--Data Setup -->|<--Delay Time -->|              |
-	 *             |                 |                 |              |
-	 *             |                 |                                |
-	 *             |                 |<--   Quantized Delay Time   -->|
-	 *             |                 |                                |
-	 *
-	 *
-	 * We have some issues we must now address:
-	 *
-	 * (1) The *ideal* sample delay time must not be negative. If it is, we
-	 *     jam it to zero.
-	 *
-	 * (2) The *ideal* sample delay time must not be greater than that
-	 *     allowed by the NFC. If it is, we can increase the data setup
-	 *     time, which will reduce the delay between the end of the data
-	 *     setup and the center of the eye. It will also make the eye
-	 *     larger, which might help with the next issue...
-	 *
-	 * (3) The *quantized* sample delay time must not fall either before the
-	 *     eye opens or after it closes (the latter is the problem
-	 *     illustrated in the above figure).
-	 */
-
-	/* Jam a negative ideal sample delay to zero. */
-	if (ideal_sample_delay_in_ns < 0)
-		ideal_sample_delay_in_ns = 0;
-
-	/*
-	 * Extend the data setup as needed to reduce the ideal sample delay
-	 * below the maximum permitted by the NFC.
-	 */
-	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
-			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
-
-		/* Give a cycle to data setup. */
-		data_setup_in_cycles++;
-		/* Synchronize the data setup time with the cycles. */
-		data_setup_in_ns += clock_period_in_ns;
-		/* Adjust tEYE accordingly. */
-		tEYE += clock_period_in_ns;
-
-		/*
-		 * Decrease the ideal sample delay by one half cycle, to keep it
-		 * in the middle of the eye.
-		 */
-		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
-
-		/* Jam a negative ideal sample delay to zero. */
-		if (ideal_sample_delay_in_ns < 0)
-			ideal_sample_delay_in_ns = 0;
-	}
-
-	/*
-	 * Compute the sample delay factor that corresponds to the ideal sample
-	 * delay. If the result is too large, then use the maximum allowed
-	 * value.
-	 *
-	 * Notice that we use the ns_to_cycles function to compute the sample
-	 * delay factor. We do this because the form of the computation is the
-	 * same as that for calculating cycles.
-	 */
-	sample_delay_factor =
-		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
-							clock_period_in_ns, 0);
-
-	if (sample_delay_factor > nfc->max_sample_delay_factor)
-		sample_delay_factor = nfc->max_sample_delay_factor;
-
-	/*
-	 * These macros conveniently encapsulate a computation we'll use to
-	 * continuously evaluate whether or not the data sample delay is inside
-	 * the eye.
-	 */
-	#define IDEAL_DELAY  ((int) ideal_sample_delay_in_ns)
-
-	#define QUANTIZED_DELAY  \
-		((int) ((sample_delay_factor * clock_period_in_ns) >> \
-							dll_delay_shift))
-
-	#define DELAY_ERROR  (abs(QUANTIZED_DELAY - IDEAL_DELAY))
-
-	#define SAMPLE_IS_NOT_WITHIN_THE_EYE  (DELAY_ERROR > (tEYE >> 1))
-
-	/*
-	 * While the quantized sample time falls outside the eye, reduce the
-	 * sample delay or extend the data setup to move the sampling point back
-	 * toward the eye. Do not allow the number of data setup cycles to
-	 * exceed the maximum allowed by the NFC.
-	 */
-	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
-			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
-		/*
-		 * If control arrives here, the quantized sample delay falls
-		 * outside the eye. Check if it's before the eye opens, or after
-		 * the eye closes.
-		 */
-		if (QUANTIZED_DELAY > IDEAL_DELAY) {
-			/*
-			 * If control arrives here, the quantized sample delay
-			 * falls after the eye closes. Decrease the quantized
-			 * delay time and then go back to re-evaluate.
-			 */
-			if (sample_delay_factor != 0)
-				sample_delay_factor--;
-			continue;
-		}
-
-		/*
-		 * If control arrives here, the quantized sample delay falls
-		 * before the eye opens. Shift the sample point by increasing
-		 * data setup time. This will also make the eye larger.
-		 */
-
-		/* Give a cycle to data setup. */
-		data_setup_in_cycles++;
-		/* Synchronize the data setup time with the cycles. */
-		data_setup_in_ns += clock_period_in_ns;
-		/* Adjust tEYE accordingly. */
-		tEYE += clock_period_in_ns;
-
-		/*
-		 * Decrease the ideal sample delay by one half cycle, to keep it
-		 * in the middle of the eye.
-		 */
-		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
-
-		/* ...and one less period for the delay time. */
-		ideal_sample_delay_in_ns -= clock_period_in_ns;
-
-		/* Jam a negative ideal sample delay to zero. */
-		if (ideal_sample_delay_in_ns < 0)
-			ideal_sample_delay_in_ns = 0;
-
-		/*
-		 * We have a new ideal sample delay, so re-compute the quantized
-		 * delay.
-		 */
-		sample_delay_factor =
-			ns_to_cycles(
-				ideal_sample_delay_in_ns << dll_delay_shift,
-							clock_period_in_ns, 0);
-
-		if (sample_delay_factor > nfc->max_sample_delay_factor)
-			sample_delay_factor = nfc->max_sample_delay_factor;
-	}
-
-	/* Control arrives here when we're ready to return our results. */
-return_results:
-	hw->data_setup_in_cycles    = data_setup_in_cycles;
-	hw->data_hold_in_cycles     = data_hold_in_cycles;
-	hw->address_setup_in_cycles = address_setup_in_cycles;
-	hw->use_half_periods        = dll_use_half_periods;
-	hw->sample_delay_factor     = sample_delay_factor;
-	hw->device_busy_timeout     = GPMI_DEFAULT_BUSY_TIMEOUT;
-	hw->wrn_dly_sel             = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
-
-	/* Return success. */
-	return 0;
-}
-
-/*
- * <1> First, we should know what the GPMI clock means.
- *     The GPMI clock is the internal clock of the GPMI NAND controller.
- *     If you set the GPMI NAND controller to 100MHz, the GPMI clock's period
- *     is 10ns. Call the GPMI clock's period the GPMI-clock-period.
- *
- * <2> Second, we should know what the frequency on the NAND chip pins is.
- *     The frequency on the NAND chip pins is derived from the GPMI clock.
- *     We can get it from the following equation:
- *
- *         F = G / (DS + DH)
- *
- *         F  : the frequency on the nand chip pins.
- *         G  : the GPMI clock, such as 100MHz.
- *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
- *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
- *
- * <3> Third, when the frequency on the NAND chip pins is above 33MHz,
- *     the NAND EDO (Extended Data Out) timing can be applied.
- *     The GPMI implements a feedback read strobe to sample the read data.
- *     The feedback read strobe can be delayed to support the NAND EDO timing,
- *     where the read strobe may deassert before the read data is valid, and
- *     the read data remains valid for some time after the read strobe.
- *
- *     The following figure illustrates some aspects of a NAND Flash read:
- *
- *                   |<---tREA---->|
- *                   |             |
- *                   |         |   |
- *                   |<--tRP-->|   |
- *                   |         |   |
- *                  __          ___|__________________________________
- *     RDN            \________/   |
- *                                 |
- *                                 /---------\
- *     Read Data    --------------<           >---------
- *                                 \---------/
- *                                |     |
- *                                |<-D->|
- *     FeedbackRDN  ________             ____________
- *                          \___________/
- *
- *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
- *
- *
- * <4> Now, we begin to describe how to compute the right RDN_DELAY.
- *
- *  4.1) From the aspect of the nand chip pins:
- *        Delay = (tREA + C - tRP)               {1}
- *
- *        tREA : the maximum read access time. From the ONFI NAND standards,
- *               we know that tREA is 16ns in mode 5 and 20ns in mode 4.
- *               Please check it at: www.onfi.org
- *        C    : a constant for adjusting the delay. The default is 4.
- *        tRP  : the read pulse width.
- *               Specified by the HW_GPMI_TIMING0:DATA_SETUP:
- *                    tRP = (GPMI-clock-period) * DATA_SETUP
- *
- *  4.2) From the aspect of the GPMI nand controller:
- *         Delay = RDN_DELAY * 0.125 * RP        {2}
- *
- *         RP   : the DLL reference period.
- *            if (GPMI-clock-period > DLL_THRESHOLD)
- *                   RP = GPMI-clock-period / 2;
- *            else
- *                   RP = GPMI-clock-period;
- *
- *            Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period
- *            is greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
- *            is 16ns, but on mx6q we use 12ns.
- *
- *  4.3) since {1} equals {2}, we get:
- *
- *                    (tREA + 4 - tRP) * 8
- *         RDN_DELAY = ---------------------     {3}
- *                           RP
- *
- *  4.4) We only support the fastest asynchronous mode of ONFI NAND.
- *       For some ONFI NAND chips mode 4 is the fastest mode,
- *       while for others mode 5 is the fastest.
- *       So we only support mode 4 and mode 5; there is no need to
- *       support other modes.
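- *
- *  4.5) A worked example with assumed mode-5 values: the GPMI clock runs at
- *       100MHz, so GPMI-clock-period = 10ns and tRP = 10ns (DATA_SETUP = 1),
- *       tREA = 16ns and C = 4. On mx6q the DLL threshold is 12ns, so
- *       RP = GPMI-clock-period = 10ns and HALF_PERIOD stays clear:
- *
- *                     (16 + 4 - 10) * 8
- *         RDN_DELAY = ----------------- = 8
- *                            10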
- */
-static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
-			struct gpmi_nfc_hardware_timing *hw)
-{
-	struct resources *r = &this->resources;
-	unsigned long rate = clk_get_rate(r->clock[0]);
-	int mode = this->timing_mode;
-	int dll_threshold = this->devdata->max_chain_delay;
-	unsigned long delay;
-	unsigned long clk_period;
-	int t_rea;
-	int c = 4;
-	int t_rp;
-	int rp;
-
-	/*
-	 * [1] for GPMI_HW_GPMI_TIMING0:
-	 *     The async mode requires 40MHz for mode 4, 50MHz for mode 5.
-	 *     The GPMI can support 100MHz at most. So if we want to
-	 *     get the 40MHz or 50MHz, we have to set DS=1, DH=1.
-	 *     Set the ADDRESS_SETUP to 0 in mode 4.
-	 */
-	hw->data_setup_in_cycles = 1;
-	hw->data_hold_in_cycles = 1;
-	hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
-
-	/* [2] for GPMI_HW_GPMI_TIMING1 */
-	hw->device_busy_timeout = 0x9000;
-
-	/* [3] for GPMI_HW_GPMI_CTRL1 */
-	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
-
-	/*
-	 * Scale the numerator and denominator in {3} by 10.
-	 * This gives us a more accurate result.
-	 */
-	clk_period = NSEC_PER_SEC / (rate / 10);
-	dll_threshold *= 10;
-	t_rea = ((mode == 5) ? 16 : 20) * 10;
-	c *= 10;
-
-	t_rp = clk_period * 1; /* DATA_SETUP is 1 */
-
-	if (clk_period > dll_threshold) {
-		hw->use_half_periods = 1;
-		rp = clk_period / 2;
-	} else {
-		hw->use_half_periods = 0;
-		rp = clk_period;
-	}
-
-	/*
-	 * Multiply the numerator by another 10 so we can round off:
-	 *      7.8 rounds up to 8; 7.4 rounds down to 7.
-	 */
-	delay  = (((t_rea + c - t_rp) * 8) * 10) / rp;
-	delay = (delay + 5) / 10;
-
-	hw->sample_delay_factor = delay;
-}
-
-static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
-{
-	struct resources  *r = &this->resources;
-	struct nand_chip *nand = &this->nand;
-	struct mtd_info	 *mtd = nand_to_mtd(nand);
-	uint8_t *feature;
-	unsigned long rate;
-	int ret;
-
-	feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
-	if (!feature)
-		return -ENOMEM;
-
-	nand->select_chip(mtd, 0);
-
-	/* [1] send the SET FEATURES command to the NAND */
-	feature[0] = mode;
-	ret = nand->onfi_set_features(mtd, nand,
-				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
-	if (ret)
-		goto err_out;
-
-	/* [2] send the GET FEATURES command to double-check the timing mode */
-	memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
-	ret = nand->onfi_get_features(mtd, nand,
-				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
-	if (ret || feature[0] != mode)
-		goto err_out;
-
-	nand->select_chip(mtd, -1);
-
-	/* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
-	rate = (mode == 5) ? 100000000 : 80000000;
-	clk_set_rate(r->clock[0], rate);
-
-	/* Let gpmi_begin() re-compute the timing. */
-	this->flags &= ~GPMI_TIMING_INIT_OK;
-
-	this->flags |= GPMI_ASYNC_EDO_ENABLED;
-	this->timing_mode = mode;
-	kfree(feature);
-	dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
-	return 0;
-
-err_out:
-	nand->select_chip(mtd, -1);
-	kfree(feature);
-	dev_err(this->dev, "mode:%d ,failed in set feature.\n", mode);
-	return -EINVAL;
-}
-
-int gpmi_extra_init(struct gpmi_nand_data *this)
-{
-	struct nand_chip *chip = &this->nand;
-
-	/* Enable the asynchronous EDO feature. */
-	if (GPMI_IS_MX6(this) && chip->onfi_version) {
-		int mode = onfi_get_async_timing_mode(chip);
-
-		/* We only support timing modes 4 and 5. */
-		if (mode & ONFI_TIMING_MODE_5)
-			mode = 5;
-		else if (mode & ONFI_TIMING_MODE_4)
-			mode = 4;
-		else
-			return 0;
-
-		return enable_edo_mode(this, mode);
-	}
-	return 0;
-}
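-
-/*
- * For example (assuming the usual ONFI_TIMING_MODE_n = BIT(n) encoding), a
- * chip that advertises asynchronous timing modes 0-5 makes
- * onfi_get_async_timing_mode() return 0x3f, so ONFI_TIMING_MODE_5 is set and
- * we enable EDO with mode 5.
- */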
-
-/* Begin the I/O */
-void gpmi_begin(struct gpmi_nand_data *this)
-{
-	struct resources *r = &this->resources;
-	void __iomem *gpmi_regs = r->gpmi_regs;
-	unsigned int   clock_period_in_ns;
-	uint32_t       reg;
-	unsigned int   dll_wait_time_in_us;
-	struct gpmi_nfc_hardware_timing  hw;
-	int ret;
-
-	/* Enable the clock. */
-	ret = gpmi_enable_clk(this);
-	if (ret) {
-		dev_err(this->dev, "We failed in enable the clk\n");
-		goto err_out;
-	}
-
-	/* Only initialize the timing once */
-	if (this->flags & GPMI_TIMING_INIT_OK)
-		return;
-	this->flags |= GPMI_TIMING_INIT_OK;
-
-	if (this->flags & GPMI_ASYNC_EDO_ENABLED)
-		gpmi_compute_edo_timing(this, &hw);
-	else
-		gpmi_nfc_compute_hardware_timing(this, &hw);
-
-	/* [1] Set HW_GPMI_TIMING0 */
-	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
-		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
-		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
-
-	writel(reg, gpmi_regs + HW_GPMI_TIMING0);
-
-	/* [2] Set HW_GPMI_TIMING1 */
-	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
-		gpmi_regs + HW_GPMI_TIMING1);
-
-	/* [3] The following code is to set the HW_GPMI_CTRL1. */
-
-	/* Set the WRN_DLY_SEL */
-	writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
-	writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
-					gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	/* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
-	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
-
-	/* Clear out the DLL control fields. */
-	reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
-	writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);
-
-	/* If no sample delay is called for, return immediately. */
-	if (!hw.sample_delay_factor)
-		return;
-
-	/* Set RDN_DELAY or HALF_PERIOD. */
-	reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
-		| BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);
-
-	writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	/* At last, we enable the DLL. */
-	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
-
-	/*
-	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
-	 * we can use the GPMI. Calculate the amount of time we need to wait,
-	 * in microseconds.
-	 */
-	clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
-	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
-
-	if (!dll_wait_time_in_us)
-		dll_wait_time_in_us = 1;
-
-	/* Wait for the DLL to settle. */
-	udelay(dll_wait_time_in_us);
-
-err_out:
-	return;
-}
-
-void gpmi_end(struct gpmi_nand_data *this)
-{
-	gpmi_disable_clk(this);
-}
-
-/* Clears a BCH interrupt. */
-void gpmi_clear_bch(struct gpmi_nand_data *this)
-{
-	struct resources *r = &this->resources;
-	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
-}
-
-/* Returns the Ready/Busy status of the given chip. */
-int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
-{
-	struct resources *r = &this->resources;
-	uint32_t mask = 0;
-	uint32_t reg = 0;
-
-	if (GPMI_IS_MX23(this)) {
-		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
-		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
-	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
-		/*
-		 * In the imx6, all the ready/busy pins are bound
-		 * together. So we only need to check chip 0.
-		 */
-		if (GPMI_IS_MX6(this))
-			chip = 0;
-
-		/* MX28 shares the same R/B register as MX6Q. */
-		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
-		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
-	} else
-		dev_err(this->dev, "unknown arch.\n");
-	return reg & mask;
-}
-
-static inline void set_dma_type(struct gpmi_nand_data *this,
-					enum dma_ops_type type)
-{
-	this->last_dma_type = this->dma_type;
-	this->dma_type = type;
-}
-
-int gpmi_send_command(struct gpmi_nand_data *this)
-{
-	struct dma_chan *channel = get_dma_chan(this);
-	struct dma_async_tx_descriptor *desc;
-	struct scatterlist *sgl;
-	int chip = this->current_chip;
-	u32 pio[3];
-
-	/* [1] send out the PIO words */
-	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
-		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
-		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
-	pio[1] = pio[2] = 0;
-	desc = dmaengine_prep_slave_sg(channel,
-					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-	if (!desc)
-		return -EINVAL;
-
-	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
-	sgl = &this->cmd_sgl;
-
-	sg_init_one(sgl, this->cmd_buffer, this->command_length);
-	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
-	desc = dmaengine_prep_slave_sg(channel,
-				sgl, 1, DMA_MEM_TO_DEV,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		return -EINVAL;
-
-	/* [3] submit the DMA */
-	set_dma_type(this, DMA_FOR_COMMAND);
-	return start_dma_without_bch_irq(this, desc);
-}
-
-int gpmi_send_data(struct gpmi_nand_data *this)
-{
-	struct dma_async_tx_descriptor *desc;
-	struct dma_chan *channel = get_dma_chan(this);
-	int chip = this->current_chip;
-	uint32_t command_mode;
-	uint32_t address;
-	u32 pio[2];
-
-	/* [1] PIO */
-	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
-	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-
-	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(address)
-		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
-	pio[1] = 0;
-	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-	if (!desc)
-		return -EINVAL;
-
-	/* [2] send DMA request */
-	prepare_data_dma(this, DMA_TO_DEVICE);
-	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
-					1, DMA_MEM_TO_DEV,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		return -EINVAL;
-
-	/* [3] submit the DMA */
-	set_dma_type(this, DMA_FOR_WRITE_DATA);
-	return start_dma_without_bch_irq(this, desc);
-}
-
-int gpmi_read_data(struct gpmi_nand_data *this)
-{
-	struct dma_async_tx_descriptor *desc;
-	struct dma_chan *channel = get_dma_chan(this);
-	int chip = this->current_chip;
-	u32 pio[2];
-
-	/* [1] : send PIO */
-	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
-		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
-	pio[1] = 0;
-	desc = dmaengine_prep_slave_sg(channel,
-					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
-	if (!desc)
-		return -EINVAL;
-
-	/* [2] : send DMA request */
-	prepare_data_dma(this, DMA_FROM_DEVICE);
-	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
-					1, DMA_DEV_TO_MEM,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		return -EINVAL;
-
-	/* [3] : submit the DMA */
-	set_dma_type(this, DMA_FOR_READ_DATA);
-	return start_dma_without_bch_irq(this, desc);
-}
-
-int gpmi_send_page(struct gpmi_nand_data *this,
-			dma_addr_t payload, dma_addr_t auxiliary)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-	uint32_t command_mode;
-	uint32_t address;
-	uint32_t ecc_command;
-	uint32_t buffer_mask;
-	struct dma_async_tx_descriptor *desc;
-	struct dma_chan *channel = get_dma_chan(this);
-	int chip = this->current_chip;
-	u32 pio[6];
-
-	/* A DMA descriptor that does an ECC page write. */
-	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
-	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
-	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
-				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
-
-	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(address)
-		| BF_GPMI_CTRL0_XFER_COUNT(0);
-	pio[1] = 0;
-	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
-		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
-		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
-	pio[3] = geo->page_size;
-	pio[4] = payload;
-	pio[5] = auxiliary;
-
-	desc = dmaengine_prep_slave_sg(channel,
-					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE,
-					DMA_CTRL_ACK);
-	if (!desc)
-		return -EINVAL;
-
-	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
-	return start_dma_with_bch_irq(this, desc);
-}
-
-int gpmi_read_page(struct gpmi_nand_data *this,
-				dma_addr_t payload, dma_addr_t auxiliary)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-	uint32_t command_mode;
-	uint32_t address;
-	uint32_t ecc_command;
-	uint32_t buffer_mask;
-	struct dma_async_tx_descriptor *desc;
-	struct dma_chan *channel = get_dma_chan(this);
-	int chip = this->current_chip;
-	u32 pio[6];
-
-	/* [1] Wait for the chip to report ready. */
-	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
-	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-
-	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(address)
-		| BF_GPMI_CTRL0_XFER_COUNT(0);
-	pio[1] = 0;
-	desc = dmaengine_prep_slave_sg(channel,
-				(struct scatterlist *)pio, 2,
-				DMA_TRANS_NONE, 0);
-	if (!desc)
-		return -EINVAL;
-
-	/* [2] Enable the BCH block and read. */
-	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
-	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
-	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
-			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
-
-	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(address)
-		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
-
-	pio[1] = 0;
-	pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
-		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
-		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
-	pio[3] = geo->page_size;
-	pio[4] = payload;
-	pio[5] = auxiliary;
-	desc = dmaengine_prep_slave_sg(channel,
-					(struct scatterlist *)pio,
-					ARRAY_SIZE(pio), DMA_TRANS_NONE,
-					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		return -EINVAL;
-
-	/* [3] Disable the BCH block */
-	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
-	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
-
-	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
-		| BM_GPMI_CTRL0_WORD_LENGTH
-		| BF_GPMI_CTRL0_CS(chip, this)
-		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
-		| BF_GPMI_CTRL0_ADDRESS(address)
-		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
-	pio[1] = 0;
-	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
-	desc = dmaengine_prep_slave_sg(channel,
-				(struct scatterlist *)pio, 3,
-				DMA_TRANS_NONE,
-				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!desc)
-		return -EINVAL;
-
-	/* [4] submit the DMA */
-	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
-	return start_dma_with_bch_irq(this, desc);
-}
-
-/**
- * gpmi_copy_bits - copy bits from one memory region to another
- * @dst: destination buffer
- * @dst_bit_off: bit offset we're starting to write at
- * @src: source buffer
- * @src_bit_off: bit offset we're starting to read from
- * @nbits: number of bits to copy
- *
- * This function copies bits from one memory region to another, and is used by
- * the GPMI driver to copy ECC sections which are not guaranteed to be byte
- * aligned.
- *
- * src and dst should not overlap.
- *
- */
-void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
-		    const u8 *src, size_t src_bit_off,
-		    size_t nbits)
-{
-	size_t i;
-	size_t nbytes;
-	u32 src_buffer = 0;
-	size_t bits_in_src_buffer = 0;
-
-	if (!nbits)
-		return;
-
-	/*
-	 * Move src and dst pointers to the closest byte pointer and store bit
-	 * offsets within a byte.
-	 */
-	src += src_bit_off / 8;
-	src_bit_off %= 8;
-
-	dst += dst_bit_off / 8;
-	dst_bit_off %= 8;
-
-	/*
-	 * Initialize the src_buffer value with bits available in the first
-	 * byte of data so that we end up with a byte aligned src pointer.
-	 */
-	if (src_bit_off) {
-		src_buffer = src[0] >> src_bit_off;
-		if (nbits >= (8 - src_bit_off)) {
-			bits_in_src_buffer += 8 - src_bit_off;
-		} else {
-			src_buffer &= GENMASK(nbits - 1, 0);
-			bits_in_src_buffer += nbits;
-		}
-		nbits -= bits_in_src_buffer;
-		src++;
-	}
-
-	/* Calculate the number of bytes that can be copied from src to dst. */
-	nbytes = nbits / 8;
-
-	/* Try to align dst to a byte boundary. */
-	if (dst_bit_off) {
-		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
-			src_buffer |= src[0] << bits_in_src_buffer;
-			bits_in_src_buffer += 8;
-			src++;
-			nbytes--;
-		}
-
-		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
-			dst[0] &= GENMASK(dst_bit_off - 1, 0);
-			dst[0] |= src_buffer << dst_bit_off;
-			src_buffer >>= (8 - dst_bit_off);
-			bits_in_src_buffer -= (8 - dst_bit_off);
-			dst_bit_off = 0;
-			dst++;
-			if (bits_in_src_buffer > 7) {
-				bits_in_src_buffer -= 8;
-				dst[0] = src_buffer;
-				dst++;
-				src_buffer >>= 8;
-			}
-		}
-	}
-
-	if (!bits_in_src_buffer && !dst_bit_off) {
-		/*
-		 * Both src and dst pointers are byte aligned, thus we can
-		 * just use the optimized memcpy function.
-		 */
-		if (nbytes)
-			memcpy(dst, src, nbytes);
-	} else {
-		/*
-		 * src buffer is not byte aligned, hence we have to copy each
-		 * src byte to the src_buffer variable before extracting a byte
-		 * to store in dst.
-		 */
-		for (i = 0; i < nbytes; i++) {
-			src_buffer |= src[i] << bits_in_src_buffer;
-			dst[i] = src_buffer;
-			src_buffer >>= 8;
-		}
-	}
-	/* Update dst and src pointers */
-	dst += nbytes;
-	src += nbytes;
-
-	/*
-	 * nbits is the number of remaining bits. It should not exceed 8 as
-	 * we've already copied as many bytes as possible.
-	 */
-	nbits %= 8;
-
-	/*
-	 * If there are no more bits to copy to the destination and the src
-	 * buffer was already byte aligned, then we're done.
-	 */
-	if (!nbits && !bits_in_src_buffer)
-		return;
-
-	/* Copy the remaining bits to src_buffer */
-	if (nbits)
-		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
-			      bits_in_src_buffer;
-	bits_in_src_buffer += nbits;
-
-	/*
-	 * In case there were not enough bits to get a byte aligned dst buffer
-	 * prepare the src_buffer variable to match the dst organization (shift
-	 * src_buffer by dst_bit_off and retrieve the least significant bits
-	 * from dst).
-	 */
-	if (dst_bit_off)
-		src_buffer = (src_buffer << dst_bit_off) |
-			     (*dst & GENMASK(dst_bit_off - 1, 0));
-	bits_in_src_buffer += dst_bit_off;
-
-	/*
-	 * Keep most significant bits from dst if we end up with an unaligned
-	 * number of bits.
-	 */
-	nbytes = bits_in_src_buffer / 8;
-	if (bits_in_src_buffer % 8) {
-		src_buffer |= (dst[nbytes] &
-			       GENMASK(7, bits_in_src_buffer % 8)) <<
-			      (nbytes * 8);
-		nbytes++;
-	}
-
-	/* Copy the remaining bytes to dst */
-	for (i = 0; i < nbytes; i++) {
-		dst[i] = src_buffer;
-		src_buffer >>= 8;
-	}
-}
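-
-/*
- * Usage sketch (hypothetical offsets): to copy a 13-bit chunk that starts at
- * bit 3 of the source into the destination starting at bit 6, call
- *
- *     gpmi_copy_bits(dst, 6, src, 3, 13);
- *
- * The bits of the first and last destination bytes that fall outside the
- * copied range are preserved.
- */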
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
deleted file mode 100644
index 6c062b8251d2..000000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ /dev/null
@@ -1,2193 +0,0 @@ 
-/*
- * Freescale GPMI NAND Flash Driver
- *
- * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
- * Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include "gpmi-nand.h"
-#include "bch-regs.h"
-
-/* Resource names for the GPMI NAND driver. */
-#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
-#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
-#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
-
-/* add our own bbt descriptor */
-static uint8_t scan_ff_pattern[] = { 0xff };
-static struct nand_bbt_descr gpmi_bbt_descr = {
-	.options	= 0,
-	.offs		= 0,
-	.len		= 1,
-	.pattern	= scan_ff_pattern
-};
-
-/*
- * We may change the layout if we can get the ECC info from the datasheet,
- * otherwise we will use all of the (page + OOB).
- */
-static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
-			      struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	struct bch_geometry *geo = &this->bch_geometry;
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 0;
-	oobregion->length = geo->page_size - mtd->writesize;
-
-	return 0;
-}
-
-static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
-			       struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	struct bch_geometry *geo = &this->bch_geometry;
-
-	if (section)
-		return -ERANGE;
-
-	/* The available oob size we have. */
-	if (geo->page_size < mtd->writesize + mtd->oobsize) {
-		oobregion->offset = geo->page_size - mtd->writesize;
-		oobregion->length = mtd->oobsize - oobregion->offset;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
-	.ecc = gpmi_ooblayout_ecc,
-	.free = gpmi_ooblayout_free,
-};
-
-static const struct gpmi_devdata gpmi_devdata_imx23 = {
-	.type = IS_MX23,
-	.bch_max_ecc_strength = 20,
-	.max_chain_delay = 16,
-};
-
-static const struct gpmi_devdata gpmi_devdata_imx28 = {
-	.type = IS_MX28,
-	.bch_max_ecc_strength = 20,
-	.max_chain_delay = 16,
-};
-
-static const struct gpmi_devdata gpmi_devdata_imx6q = {
-	.type = IS_MX6Q,
-	.bch_max_ecc_strength = 40,
-	.max_chain_delay = 12,
-};
-
-static const struct gpmi_devdata gpmi_devdata_imx6sx = {
-	.type = IS_MX6SX,
-	.bch_max_ecc_strength = 62,
-	.max_chain_delay = 12,
-};
-
-static irqreturn_t bch_irq(int irq, void *cookie)
-{
-	struct gpmi_nand_data *this = cookie;
-
-	gpmi_clear_bch(this);
-	complete(&this->bch_done);
-	return IRQ_HANDLED;
-}
-
-/*
- *  Calculate the ECC strength by hand:
- *	E : the ECC strength.
- *	G : the length of the Galois Field.
- *	N : the number of ECC chunks per page.
- *	O : the oobsize of the NAND chip.
- *	M : the metadata size per page.
- *
- *	The formula is :
- *		E * G * N
- *	      ------------ <= (O - M)
- *                  8
- *
- *      So, we get E by:
- *                    (O - M) * 8
- *              E <= -------------
- *                       G * N
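- *
- *      For example, with an assumed 4K-page NAND (N = 8 chunks of 512 bytes),
- *      O = 224, M = 10 and G = 13:
- *
- *                    (224 - 10) * 8
- *              E <= ---------------- = 16.4..., rounded down to the even 16.
- *                        13 * 8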
- */
-static inline int get_ecc_strength(struct gpmi_nand_data *this)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-	struct mtd_info	*mtd = nand_to_mtd(&this->nand);
-	int ecc_strength;
-
-	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
-			/ (geo->gf_len * geo->ecc_chunk_count);
-
-	/* Round down to the nearest even number. */
-	return round_down(ecc_strength, 2);
-}
-
-static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-
-	/* Do the sanity check. */
-	if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
-		/* The mx23/mx28 only support the GF13. */
-		if (geo->gf_len == 14)
-			return false;
-	}
-	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
-}
-
-/*
- * If we can get the ECC information from the NAND chip, we do not
- * need to calculate it ourselves.
- *
- * We may have available oob space in this case.
- */
-static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-	struct nand_chip *chip = &this->nand;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	unsigned int block_mark_bit_offset;
-
-	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
-		return -EINVAL;
-
-	switch (chip->ecc_step_ds) {
-	case SZ_512:
-		geo->gf_len = 13;
-		break;
-	case SZ_1K:
-		geo->gf_len = 14;
-		break;
-	default:
-		dev_err(this->dev,
-			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
-			chip->ecc_strength_ds, chip->ecc_step_ds);
-		return -EINVAL;
-	}
-	geo->ecc_chunk_size = chip->ecc_step_ds;
-	geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
-	if (!gpmi_check_ecc(this))
-		return -EINVAL;
-
-	/* Keep the C >= O */
-	if (geo->ecc_chunk_size < mtd->oobsize) {
-		dev_err(this->dev,
-			"unsupported nand chip. ecc size: %d, oob size : %d\n",
-			chip->ecc_step_ds, mtd->oobsize);
-		return -EINVAL;
-	}
-
-	/* The default value, see comment in the legacy_set_geometry(). */
-	geo->metadata_size = 10;
-
-	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
-
-	/*
-	 * A NAND chip with a 2K page (512-byte data chunks) is shown below:
-	 *
-	 *    |                          P                            |
-	 *    |<----------------------------------------------------->|
-	 *    |                                                       |
-	 *    |                                        (Block Mark)   |
-	 *    |                      P'                      |      | |     |
-	 *    |<-------------------------------------------->|  D   | |  O' |
-	 *    |                                              |<---->| |<--->|
-	 *    V                                              V      V V     V
-	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
-	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
-	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
-	 *                                                   ^              ^
-	 *                                                   |      O       |
-	 *                                                   |<------------>|
-	 *                                                   |              |
-	 *
-	 *	P : the page size for the BCH module.
-	 *	E : the ECC strength.
-	 *	G : the length of the Galois Field.
-	 *	N : the number of ECC chunks per page.
-	 *	M : the metadata size per page.
-	 *	C : the ECC chunk size, aka the "data" above.
-	 *	P': the NAND chip's page size.
-	 *	O : the NAND chip's oob size.
-	 *	O': the free oob.
-	 *
-	 *	The formula for P is :
-	 *
-	 *	            E * G * N
-	 *	       P = ------------ + P' + M
-	 *                      8
-	 *
-	 * The position of block mark moves forward in the ECC-based view
-	 * of page, and the delta is:
-	 *
-	 *                   E * G * (N - 1)
-	 *             D = (---------------- + M)
-	 *                          8
-	 *
-	 * Please see the comment in legacy_set_geometry().
-	 * With the condition C >= O, we still get the same result.
-	 * So the bit position of the physical block mark within the ECC-based
-	 * view of the page is :
-	 *             (P' - D) * 8
-	 */
-	geo->page_size = mtd->writesize + geo->metadata_size +
-		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
-
-	geo->payload_size = mtd->writesize;
-
-	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
-	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
-				+ ALIGN(geo->ecc_chunk_count, 4);
-
-	if (!this->swap_block_mark)
-		return 0;
-
-	/* For bit swap. */
-	block_mark_bit_offset = mtd->writesize * 8 -
-		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
-				+ geo->metadata_size * 8);
-
-	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
-	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
-	return 0;
-}
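-
-/*
- * For example, with an assumed chip that reports 8 bits of correction per
- * 1024-byte step (so gf_len = 14) on a 4K page with 224 bytes of OOB:
- * ecc_chunk_count = 4 and
- *
- *     page_size = 4096 + 10 + (14 * 8 * 4) / 8 = 4162,
- *
- * which leaves 4096 + 224 - 4162 = 158 bytes of free OOB reported through
- * gpmi_ooblayout_free().
- */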
-
-static int legacy_set_geometry(struct gpmi_nand_data *this)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-	struct mtd_info *mtd = nand_to_mtd(&this->nand);
-	unsigned int metadata_size;
-	unsigned int status_size;
-	unsigned int block_mark_bit_offset;
-
-	/*
-	 * The size of the metadata can be changed, though we set it to 10
-	 * bytes now. But it can't be too large, because we have to save
-	 * enough space for BCH.
-	 */
-	geo->metadata_size = 10;
-
-	/* The default for the length of Galois Field. */
-	geo->gf_len = 13;
-
-	/* The default for chunk size. */
-	geo->ecc_chunk_size = 512;
-	while (geo->ecc_chunk_size < mtd->oobsize) {
-		geo->ecc_chunk_size *= 2; /* keep C >= O */
-		geo->gf_len = 14;
-	}
-
-	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
-
-	/* We use the same ECC strength for all chunks. */
-	geo->ecc_strength = get_ecc_strength(this);
-	if (!gpmi_check_ecc(this)) {
-		dev_err(this->dev,
-			"ecc strength: %d cannot be supported by the controller (%d)\n"
-			"try to use minimum ecc strength that NAND chip required\n",
-			geo->ecc_strength,
-			this->devdata->bch_max_ecc_strength);
-		return -EINVAL;
-	}
-
-	geo->page_size = mtd->writesize + geo->metadata_size +
-		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
-	geo->payload_size = mtd->writesize;
-
-	/*
-	 * The auxiliary buffer contains the metadata and the ECC status. The
-	 * metadata is padded to the nearest 32-bit boundary. The ECC status
-	 * contains one byte for every ECC chunk, and is also padded to the
-	 * nearest 32-bit boundary.
-	 */
-	metadata_size = ALIGN(geo->metadata_size, 4);
-	status_size   = ALIGN(geo->ecc_chunk_count, 4);
-
-	geo->auxiliary_size = metadata_size + status_size;
-	geo->auxiliary_status_offset = metadata_size;
-
-	if (!this->swap_block_mark)
-		return 0;
-
-	/*
-	 * We need to compute the byte and bit offsets of
-	 * the physical block mark within the ECC-based view of the page.
-	 *
-	 * A NAND chip with a 2K page is shown below:
-	 *                                             (Block Mark)
-	 *                                                   |      |
-	 *                                                   |  D   |
-	 *                                                   |<---->|
-	 *                                                   V      V
-	 *    +---+----------+-+----------+-+----------+-+----------+-+
-	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
-	 *    +---+----------+-+----------+-+----------+-+----------+-+
-	 *
-	 * The position of block mark moves forward in the ECC-based view
-	 * of page, and the delta is:
-	 *
-	 *                   E * G * (N - 1)
-	 *             D = (---------------- + M)
-	 *                          8
-	 *
-	 * With the formula to compute the ECC strength, and the condition
-	 *       : C >= O         (C is the ecc chunk size)
-	 *
-	 * it is easy to deduce the following result:
-	 *
-	 *         E * G       (O - M)      C - M         C - M
-	 *      ----------- <= ------- <=  --------  <  ---------
-	 *           8            N           N          (N - 1)
-	 *
-	 *  So, we get:
-	 *
-	 *                   E * G * (N - 1)
-	 *             D = (---------------- + M) < C
-	 *                          8
-	 *
-	 *  The above inequality means the position of block mark
-	 *  within the ECC-based view of the page is still in the data chunk,
-	 *  and it's NOT in the ECC bits of the chunk.
-	 *
-	 *  Use the following to compute the bit position of the
-	 *  physical block mark within the ECC-based view of the page:
-	 *          (page_size - D) * 8
-	 *
-	 *  --Huang Shijie
-	 */
-	block_mark_bit_offset = mtd->writesize * 8 -
-		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
-				+ geo->metadata_size * 8);
-
-	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
-	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
-	return 0;
-}
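-
-/*
- * For example, with an assumed 2K page / 64-byte OOB chip: C = 512, G = 13,
- * N = 4, M = 10 and E = (64 - 10) * 8 / (13 * 4) = 8, so
- *
- *                   8 * 13 * (4 - 1)
- *             D = (---------------- + 10) = 49 < 512,
- *                          8
- *
- * and the physical block mark sits at bit 2048 * 8 - (8 * 13 * 3 + 10 * 8) =
- * 15992 of the ECC-based view, i.e. byte offset 1999, bit offset 0.
- */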
-
-int common_nfc_set_geometry(struct gpmi_nand_data *this)
-{
-	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
-				|| legacy_set_geometry(this))
-		return set_geometry_by_ecc_info(this);
-
-	return 0;
-}
-
-struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
-{
-	/* We use the DMA channel 0 to access all the nand chips. */
-	return this->dma_chans[0];
-}
-
-/* Can we use the upper layer's buffer directly for DMA? */
-void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
-{
-	struct scatterlist *sgl = &this->data_sgl;
-	int ret;
-
-	/* first try to map the upper buffer directly */
-	if (virt_addr_valid(this->upper_buf) &&
-		!object_is_on_stack(this->upper_buf)) {
-		sg_init_one(sgl, this->upper_buf, this->upper_len);
-		ret = dma_map_sg(this->dev, sgl, 1, dr);
-		if (ret == 0)
-			goto map_fail;
-
-		this->direct_dma_map_ok = true;
-		return;
-	}
-
-map_fail:
-	/* We have to use our own DMA buffer. */
-	sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
-
-	if (dr == DMA_TO_DEVICE)
-		memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
-
-	dma_map_sg(this->dev, sgl, 1, dr);
-
-	this->direct_dma_map_ok = false;
-}
-
-/* This will be called after the DMA operation is finished. */
-static void dma_irq_callback(void *param)
-{
-	struct gpmi_nand_data *this = param;
-	struct completion *dma_c = &this->dma_done;
-
-	switch (this->dma_type) {
-	case DMA_FOR_COMMAND:
-		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
-		break;
-
-	case DMA_FOR_READ_DATA:
-		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
-		if (this->direct_dma_map_ok == false)
-			memcpy(this->upper_buf, this->data_buffer_dma,
-				this->upper_len);
-		break;
-
-	case DMA_FOR_WRITE_DATA:
-		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
-		break;
-
-	case DMA_FOR_READ_ECC_PAGE:
-	case DMA_FOR_WRITE_ECC_PAGE:
-		/* We have to wait for the BCH interrupt to finish. */
-		break;
-
-	default:
-		dev_err(this->dev, "in wrong DMA operation.\n");
-	}
-
-	complete(dma_c);
-}
-
-int start_dma_without_bch_irq(struct gpmi_nand_data *this,
-				struct dma_async_tx_descriptor *desc)
-{
-	struct completion *dma_c = &this->dma_done;
-	unsigned long timeout;
-
-	init_completion(dma_c);
-
-	desc->callback		= dma_irq_callback;
-	desc->callback_param	= this;
-	dmaengine_submit(desc);
-	dma_async_issue_pending(get_dma_chan(this));
-
-	/* Wait for the interrupt from the DMA block. */
-	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
-	if (!timeout) {
-		dev_err(this->dev, "DMA timeout, last DMA :%d\n",
-			this->last_dma_type);
-		gpmi_dump_info(this);
-		return -ETIMEDOUT;
-	}
-	return 0;
-}
-
-/*
- * This function is used when reading or writing pages with BCH.
- * It will wait for the BCH interrupt for up to ONE second.
- * In fact, we must wait for two interrupts:
- *	[1] first the DMA interrupt and
- *	[2] then the BCH interrupt.
- */
-int start_dma_with_bch_irq(struct gpmi_nand_data *this,
-			struct dma_async_tx_descriptor *desc)
-{
-	struct completion *bch_c = &this->bch_done;
-	unsigned long timeout;
-
-	/* Prepare to receive an interrupt from the BCH block. */
-	init_completion(bch_c);
-
-	/* start the DMA */
-	start_dma_without_bch_irq(this, desc);
-
-	/* Wait for the interrupt from the BCH block. */
-	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
-	if (!timeout) {
-		dev_err(this->dev, "BCH timeout, last DMA :%d\n",
-			this->last_dma_type);
-		gpmi_dump_info(this);
-		return -ETIMEDOUT;
-	}
-	return 0;
-}
-
-static int acquire_register_block(struct gpmi_nand_data *this,
-				  const char *res_name)
-{
-	struct platform_device *pdev = this->pdev;
-	struct resources *res = &this->resources;
-	struct resource *r;
-	void __iomem *p;
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
-	p = devm_ioremap_resource(&pdev->dev, r);
-	if (IS_ERR(p))
-		return PTR_ERR(p);
-
-	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
-		res->gpmi_regs = p;
-	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
-		res->bch_regs = p;
-	else
-		dev_err(this->dev, "unknown resource name : %s\n", res_name);
-
-	return 0;
-}
-
-static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
-{
-	struct platform_device *pdev = this->pdev;
-	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
-	struct resource *r;
-	int err;
-
-	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
-	if (!r) {
-		dev_err(this->dev, "Can't get resource for %s\n", res_name);
-		return -ENODEV;
-	}
-
-	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
-	if (err)
-		dev_err(this->dev, "error requesting BCH IRQ\n");
-
-	return err;
-}
-
-static void release_dma_channels(struct gpmi_nand_data *this)
-{
-	unsigned int i;
-	for (i = 0; i < DMA_CHANS; i++)
-		if (this->dma_chans[i]) {
-			dma_release_channel(this->dma_chans[i]);
-			this->dma_chans[i] = NULL;
-		}
-}
-
-static int acquire_dma_channels(struct gpmi_nand_data *this)
-{
-	struct platform_device *pdev = this->pdev;
-	struct dma_chan *dma_chan;
-
-	/* request dma channel */
-	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
-	if (!dma_chan) {
-		dev_err(this->dev, "Failed to request DMA channel.\n");
-		goto acquire_err;
-	}
-
-	this->dma_chans[0] = dma_chan;
-	return 0;
-
-acquire_err:
-	release_dma_channels(this);
-	return -EINVAL;
-}
-
-static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
-	"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
-};
-
-static int gpmi_get_clks(struct gpmi_nand_data *this)
-{
-	struct resources *r = &this->resources;
-	char **extra_clks = NULL;
-	struct clk *clk;
-	int err, i;
-
-	/* The main clock is stored in the first slot. */
-	r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
-	if (IS_ERR(r->clock[0])) {
-		err = PTR_ERR(r->clock[0]);
-		goto err_clock;
-	}
-
-	/* Get extra clocks */
-	if (GPMI_IS_MX6(this))
-		extra_clks = extra_clks_for_mx6q;
-	if (!extra_clks)
-		return 0;
-
-	for (i = 1; i < GPMI_CLK_MAX; i++) {
-		if (extra_clks[i - 1] == NULL)
-			break;
-
-		clk = devm_clk_get(this->dev, extra_clks[i - 1]);
-		if (IS_ERR(clk)) {
-			err = PTR_ERR(clk);
-			goto err_clock;
-		}
-
-		r->clock[i] = clk;
-	}
-
-	if (GPMI_IS_MX6(this))
-		/*
-		 * Set the default value for the gpmi clock.
-		 *
-		 * If you want to use an ONFI NAND running in synchronous
-		 * mode, you should change the clock rate as needed.
-		 */
-		clk_set_rate(r->clock[0], 22000000);
-
-	return 0;
-
-err_clock:
-	dev_dbg(this->dev, "failed in finding the clocks.\n");
-	return err;
-}
-
-static int acquire_resources(struct gpmi_nand_data *this)
-{
-	int ret;
-
-	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
-	if (ret)
-		goto exit_regs;
-
-	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
-	if (ret)
-		goto exit_regs;
-
-	ret = acquire_bch_irq(this, bch_irq);
-	if (ret)
-		goto exit_regs;
-
-	ret = acquire_dma_channels(this);
-	if (ret)
-		goto exit_regs;
-
-	ret = gpmi_get_clks(this);
-	if (ret)
-		goto exit_clock;
-	return 0;
-
-exit_clock:
-	release_dma_channels(this);
-exit_regs:
-	return ret;
-}
-
-static void release_resources(struct gpmi_nand_data *this)
-{
-	release_dma_channels(this);
-}
-
-static int init_hardware(struct gpmi_nand_data *this)
-{
-	int ret;
-
-	/*
-	 * This structure contains the "safe" GPMI timing that should succeed
-	 * with any NAND Flash device
-	 * (although with less-than-optimal performance).
-	 */
-	struct nand_timing  safe_timing = {
-		.data_setup_in_ns        = 80,
-		.data_hold_in_ns         = 60,
-		.address_setup_in_ns     = 25,
-		.gpmi_sample_delay_in_ns =  6,
-		.tREA_in_ns              = -1,
-		.tRLOH_in_ns             = -1,
-		.tRHOH_in_ns             = -1,
-	};
-
-	/* Initialize the hardware. */
-	ret = gpmi_init(this);
-	if (ret)
-		return ret;
-
-	this->timing = safe_timing;
-	return 0;
-}
-
-static int read_page_prepare(struct gpmi_nand_data *this,
-			void *destination, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			void **use_virt, dma_addr_t *use_phys)
-{
-	struct device *dev = this->dev;
-
-	if (virt_addr_valid(destination)) {
-		dma_addr_t dest_phys;
-
-		dest_phys = dma_map_single(dev, destination,
-						length, DMA_FROM_DEVICE);
-		if (dma_mapping_error(dev, dest_phys)) {
-			if (alt_size < length) {
-				dev_err(dev, "Alternate buffer is too small\n");
-				return -ENOMEM;
-			}
-			goto map_failed;
-		}
-		*use_virt = destination;
-		*use_phys = dest_phys;
-		this->direct_dma_map_ok = true;
-		return 0;
-	}
-
-map_failed:
-	*use_virt = alt_virt;
-	*use_phys = alt_phys;
-	this->direct_dma_map_ok = false;
-	return 0;
-}
-
-static inline void read_page_end(struct gpmi_nand_data *this,
-			void *destination, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			void *used_virt, dma_addr_t used_phys)
-{
-	if (this->direct_dma_map_ok)
-		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
-}
-
-static inline void read_page_swap_end(struct gpmi_nand_data *this,
-			void *destination, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			void *used_virt, dma_addr_t used_phys)
-{
-	if (!this->direct_dma_map_ok)
-		memcpy(destination, alt_virt, length);
-}
-
-static int send_page_prepare(struct gpmi_nand_data *this,
-			const void *source, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			const void **use_virt, dma_addr_t *use_phys)
-{
-	struct device *dev = this->dev;
-
-	if (virt_addr_valid(source)) {
-		dma_addr_t source_phys;
-
-		source_phys = dma_map_single(dev, (void *)source, length,
-						DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, source_phys)) {
-			if (alt_size < length) {
-				dev_err(dev, "Alternate buffer is too small\n");
-				return -ENOMEM;
-			}
-			goto map_failed;
-		}
-		*use_virt = source;
-		*use_phys = source_phys;
-		return 0;
-	}
-map_failed:
-	/*
-	 * Copy the content of the source buffer into the alternate
-	 * buffer and set up the return values accordingly.
-	 */
-	memcpy(alt_virt, source, length);
-
-	*use_virt = alt_virt;
-	*use_phys = alt_phys;
-	return 0;
-}
-
-static void send_page_end(struct gpmi_nand_data *this,
-			const void *source, unsigned length,
-			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
-			const void *used_virt, dma_addr_t used_phys)
-{
-	struct device *dev = this->dev;
-	if (used_virt == source)
-		dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
-}
-
-static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
-{
-	struct device *dev = this->dev;
-
-	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
-		dma_free_coherent(dev, this->page_buffer_size,
-					this->page_buffer_virt,
-					this->page_buffer_phys);
-	kfree(this->cmd_buffer);
-	kfree(this->data_buffer_dma);
-	kfree(this->raw_buffer);
-
-	this->cmd_buffer	= NULL;
-	this->data_buffer_dma	= NULL;
-	this->raw_buffer	= NULL;
-	this->page_buffer_virt	= NULL;
-	this->page_buffer_size	=  0;
-}
-
-/* Allocate the DMA buffers */
-static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
-{
-	struct bch_geometry *geo = &this->bch_geometry;
-	struct device *dev = this->dev;
-	struct mtd_info *mtd = nand_to_mtd(&this->nand);
-
-	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
-	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
-	if (this->cmd_buffer == NULL)
-		goto error_alloc;
-
-	/*
-	 * [2] Allocate a read/write data buffer.
-	 *     gpmi_alloc_dma_buffer() can be called twice: we allocate a
-	 *     PAGE_SIZE buffer if it is called before nand_scan_ident(),
-	 *     and a buffer of the real NAND page size if it is called
-	 *     after nand_scan_ident().
-	 */
-	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
-					GFP_DMA | GFP_KERNEL);
-	if (this->data_buffer_dma == NULL)
-		goto error_alloc;
-
-	/*
-	 * [3] Allocate the page buffer.
-	 *
-	 * Both the payload buffer and the auxiliary buffer must appear on
-	 * 32-bit boundaries. We presume the size of the payload buffer is a
-	 * power of two and is much larger than four, which guarantees the
-	 * auxiliary buffer will appear on a 32-bit boundary.
-	 */
-	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
-	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
-					&this->page_buffer_phys, GFP_DMA);
-	if (!this->page_buffer_virt)
-		goto error_alloc;
-
-	this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
-	if (!this->raw_buffer)
-		goto error_alloc;
-
-	/* Slice up the page buffer. */
-	this->payload_virt = this->page_buffer_virt;
-	this->payload_phys = this->page_buffer_phys;
-	this->auxiliary_virt = this->payload_virt + geo->payload_size;
-	this->auxiliary_phys = this->payload_phys + geo->payload_size;
-	return 0;
-
-error_alloc:
-	gpmi_free_dma_buffer(this);
-	return -ENOMEM;
-}
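-
-/*
- * For illustration only (assumed sizes): with payload_size = 2048 and
- * auxiliary_size = 128, page_buffer_size = 2176 and the single coherent
- * allocation is sliced as
- *
- *   payload_virt   = page_buffer_virt;
- *   auxiliary_virt = page_buffer_virt + 2048;
- *
- * Since payload_size is a multiple of 4, the auxiliary buffer starts on a
- * 32-bit boundary as required.
- */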
-
-static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	int ret;
-
-	/*
-	 * Every operation begins with a command byte and a series of zero or
-	 * more address bytes. These are distinguished by either the Address
-	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
-	 * asserted. When MTD is ready to execute the command, it will deassert
-	 * both latch enables.
-	 *
-	 * Rather than run a separate DMA operation for every single byte, we
-	 * queue them up and run a single DMA operation for the entire series
-	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
-	 */
-	if ((ctrl & (NAND_ALE | NAND_CLE))) {
-		if (data != NAND_CMD_NONE)
-			this->cmd_buffer[this->command_length++] = data;
-		return;
-	}
-
-	if (!this->command_length)
-		return;
-
-	ret = gpmi_send_command(this);
-	if (ret)
-		dev_err(this->dev, "Chip: %u, Error %d\n",
-			this->current_chip, ret);
-
-	this->command_length = 0;
-}
-
-static int gpmi_dev_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-	return gpmi_is_ready(this, this->current_chip);
-}
-
-static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-	if ((this->current_chip < 0) && (chipnr >= 0))
-		gpmi_begin(this);
-	else if ((this->current_chip >= 0) && (chipnr < 0))
-		gpmi_end(this);
-
-	this->current_chip = chipnr;
-}
-
-static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-	dev_dbg(this->dev, "len is %d\n", len);
-	this->upper_buf	= buf;
-	this->upper_len	= len;
-
-	gpmi_read_data(this);
-}
-
-static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-	dev_dbg(this->dev, "len is %d\n", len);
-	this->upper_buf	= (uint8_t *)buf;
-	this->upper_len	= len;
-
-	gpmi_send_data(this);
-}
-
-static uint8_t gpmi_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	uint8_t *buf = this->data_buffer_dma;
-
-	gpmi_read_buf(mtd, buf, 1);
-	return buf[0];
-}
-
-/*
- * Handles block mark swapping.
- * It can be called either to swap the block mark or to swap it back,
- * because the two operations are the same.
- */
-static void block_mark_swapping(struct gpmi_nand_data *this,
-				void *payload, void *auxiliary)
-{
-	struct bch_geometry *nfc_geo = &this->bch_geometry;
-	unsigned char *p;
-	unsigned char *a;
-	unsigned int  bit;
-	unsigned char mask;
-	unsigned char from_data;
-	unsigned char from_oob;
-
-	if (!this->swap_block_mark)
-		return;
-
-	/*
-	 * If control arrives here, we're swapping. Make some convenience
-	 * variables.
-	 */
-	bit = nfc_geo->block_mark_bit_offset;
-	p   = payload + nfc_geo->block_mark_byte_offset;
-	a   = auxiliary;
-
-	/*
-	 * Get the byte from the data area that overlays the block mark. Since
-	 * the ECC engine applies its own view to the bits in the page, the
-	 * physical block mark won't (in general) appear on a byte boundary in
-	 * the data.
-	 */
-	from_data = (p[0] >> bit) | (p[1] << (8 - bit));
-
-	/* Get the byte from the OOB. */
-	from_oob = a[0];
-
-	/* Swap them. */
-	a[0] = from_data;
-
-	mask = (0x1 << bit) - 1;
-	p[0] = (p[0] & mask) | (from_oob << bit);
-
-	mask = ~0 << bit;
-	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
-}
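-
-/*
- * Illustrative example (assumed offsets): with block_mark_bit_offset = 2,
- * the swap in block_mark_swapping() above works out to
- *
- *   from_data = (p[0] >> 2) | (p[1] << 6);
- *   a[0]      = from_data;
- *   p[0]      = (p[0] & 0x03) | (from_oob << 2);
- *   p[1]      = (p[1] & 0xfc) | (from_oob >> 6);
- *
- * i.e. the marker byte straddles p[0]/p[1] and the OOB byte is spliced back
- * in at the same bit position.
- */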
-
-static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	struct bch_geometry *nfc_geo = &this->bch_geometry;
-	void          *payload_virt;
-	dma_addr_t    payload_phys;
-	void          *auxiliary_virt;
-	dma_addr_t    auxiliary_phys;
-	unsigned int  i;
-	unsigned char *status;
-	unsigned int  max_bitflips = 0;
-	int           ret;
-
-	dev_dbg(this->dev, "page number is : %d\n", page);
-	ret = read_page_prepare(this, buf, nfc_geo->payload_size,
-					this->payload_virt, this->payload_phys,
-					nfc_geo->payload_size,
-					&payload_virt, &payload_phys);
-	if (ret) {
-		dev_err(this->dev, "Inadequate DMA buffer\n");
-		ret = -ENOMEM;
-		return ret;
-	}
-	auxiliary_virt = this->auxiliary_virt;
-	auxiliary_phys = this->auxiliary_phys;
-
-	/* go! */
-	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
-	read_page_end(this, buf, nfc_geo->payload_size,
-			this->payload_virt, this->payload_phys,
-			nfc_geo->payload_size,
-			payload_virt, payload_phys);
-	if (ret) {
-		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
-		return ret;
-	}
-
-	/* handle the block mark swapping */
-	block_mark_swapping(this, payload_virt, auxiliary_virt);
-
-	/* Loop over status bytes, accumulating ECC status. */
-	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
-
-	read_page_swap_end(this, buf, nfc_geo->payload_size,
-			   this->payload_virt, this->payload_phys,
-			   nfc_geo->payload_size,
-			   payload_virt, payload_phys);
-
-	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
-		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
-			continue;
-
-		if (*status == STATUS_UNCORRECTABLE) {
-			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
-			u8 *eccbuf = this->raw_buffer;
-			int offset, bitoffset;
-			int eccbytes;
-			int flips;
-
-			/* Read ECC bytes into our internal raw_buffer */
-			offset = nfc_geo->metadata_size * 8;
-			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
-			offset -= eccbits;
-			bitoffset = offset % 8;
-			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
-			offset /= 8;
-			eccbytes -= offset;
-			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
-			chip->read_buf(mtd, eccbuf, eccbytes);
-
-			/*
-			 * ECC data are not byte aligned and we may have
-			 * in-band data in the first and last byte of
-			 * eccbuf. Set non-eccbits to one so that
-			 * nand_check_erased_ecc_chunk() does not count them
-			 * as bitflips.
-			 */
-			if (bitoffset)
-				eccbuf[0] |= GENMASK(bitoffset - 1, 0);
-
-			bitoffset = (bitoffset + eccbits) % 8;
-			if (bitoffset)
-				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
-
-			/*
-			 * The ECC hardware has an uncorrectable ECC status
-			 * code in case we have bitflips in an erased page. As
-			 * nothing was written into this subpage the ECC is
-			 * obviously wrong and we can not trust it. We assume
-			 * at this point that we are reading an erased page and
-			 * try to correct the bitflips in buffer up to
-			 * ecc_strength bitflips. If this is a page with random
-			 * data, we exceed this number of bitflips and have a
-			 * ECC failure. Otherwise we use the corrected buffer.
-			 */
-			if (i == 0) {
-				/* The first block includes metadata */
-				flips = nand_check_erased_ecc_chunk(
-						buf + i * nfc_geo->ecc_chunk_size,
-						nfc_geo->ecc_chunk_size,
-						eccbuf, eccbytes,
-						auxiliary_virt,
-						nfc_geo->metadata_size,
-						nfc_geo->ecc_strength);
-			} else {
-				flips = nand_check_erased_ecc_chunk(
-						buf + i * nfc_geo->ecc_chunk_size,
-						nfc_geo->ecc_chunk_size,
-						eccbuf, eccbytes,
-						NULL, 0,
-						nfc_geo->ecc_strength);
-			}
-
-			if (flips > 0) {
-				max_bitflips = max_t(unsigned int, max_bitflips,
-						     flips);
-				mtd->ecc_stats.corrected += flips;
-				continue;
-			}
-
-			mtd->ecc_stats.failed++;
-			continue;
-		}
-
-		mtd->ecc_stats.corrected += *status;
-		max_bitflips = max_t(unsigned int, max_bitflips, *status);
-	}
-
-	if (oob_required) {
-		/*
-		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
-		 * for details about our policy for delivering the OOB.
-		 *
-		 * We fill the caller's buffer with set bits, and then copy the
-		 * block mark to the caller's buffer. Note that, if block mark
-		 * swapping was necessary, it has already been done, so we can
-		 * rely on the first byte of the auxiliary buffer to contain
-		 * the block mark.
-		 */
-		memset(chip->oob_poi, ~0, mtd->oobsize);
-		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
-	}
-
-	return max_bitflips;
-}
-
-/* Fake a virtual small page for the subpage read */
-static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
-			uint32_t offs, uint32_t len, uint8_t *buf, int page)
-{
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	void __iomem *bch_regs = this->resources.bch_regs;
-	struct bch_geometry old_geo = this->bch_geometry;
-	struct bch_geometry *geo = &this->bch_geometry;
-	int size = chip->ecc.size; /* ECC chunk size */
-	int meta, n, page_size;
-	u32 r1_old, r2_old, r1_new, r2_new;
-	unsigned int max_bitflips;
-	int first, last, marker_pos;
-	int ecc_parity_size;
-	int col = 0;
-	int old_swap_block_mark = this->swap_block_mark;
-
-	/* The size of ECC parity */
-	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
-
-	/* Align it with the chunk size */
-	first = offs / size;
-	last = (offs + len - 1) / size;
-
-	if (this->swap_block_mark) {
-		/*
-		 * Find the chunk which contains the Block Marker.
-		 * If this chunk is in the range of [first, last],
-		 * we have to read out the whole page.
-		 * Why? Because we have swapped the data at the Block Marker
-		 * position into the metadata, which is bound to chunk 0.
-		 */
-		marker_pos = geo->block_mark_byte_offset / size;
-		if (last >= marker_pos && first <= marker_pos) {
-			dev_dbg(this->dev,
-				"page:%d, first:%d, last:%d, marker at:%d\n",
-				page, first, last, marker_pos);
-			return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
-		}
-	}
-
-	meta = geo->metadata_size;
-	if (first) {
-		col = meta + (size + ecc_parity_size) * first;
-		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
-
-		meta = 0;
-		buf = buf + first * size;
-	}
-
-	/* Save the old environment */
-	r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
-	r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
-
-	/* change the BCH registers and bch_geometry{} */
-	n = last - first + 1;
-	page_size = meta + (size + ecc_parity_size) * n;
-
-	r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
-			BM_BCH_FLASH0LAYOUT0_META_SIZE);
-	r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
-			| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
-	writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
-
-	r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
-	r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
-	writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
-
-	geo->ecc_chunk_count = n;
-	geo->payload_size = n * size;
-	geo->page_size = page_size;
-	geo->auxiliary_status_offset = ALIGN(meta, 4);
-
-	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
-		page, offs, len, col, first, n, page_size);
-
-	/* Read the subpage now */
-	this->swap_block_mark = false;
-	max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
-
-	/* Restore */
-	writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
-	writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
-	this->bch_geometry = old_geo;
-	this->swap_block_mark = old_swap_block_mark;
-
-	return max_bitflips;
-}
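-
-/*
- * Worked example (hypothetical geometry, for illustration only): with
- * ecc.size = 512, ecc_strength = 8, gf_len = 13 and metadata_size = 10,
- * ecc_parity_size = 13 * 8 / 8 = 13 bytes. A subpage read of
- * offs = 1024, len = 512 gives first = last = 2, so n = 1,
- * col = 10 + (512 + 13) * 2 = 1060 and the temporary BCH page_size
- * becomes 0 + (512 + 13) * 1 = 525 bytes.
- */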
-
-static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf, int oob_required, int page)
-{
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	struct bch_geometry *nfc_geo = &this->bch_geometry;
-	const void *payload_virt;
-	dma_addr_t payload_phys;
-	const void *auxiliary_virt;
-	dma_addr_t auxiliary_phys;
-	int        ret;
-
-	dev_dbg(this->dev, "ecc write page.\n");
-	if (this->swap_block_mark) {
-		/*
-		 * If control arrives here, we're doing block mark swapping.
-		 * Since we can't modify the caller's buffers, we must copy them
-		 * into our own.
-		 */
-		memcpy(this->payload_virt, buf, mtd->writesize);
-		payload_virt = this->payload_virt;
-		payload_phys = this->payload_phys;
-
-		memcpy(this->auxiliary_virt, chip->oob_poi,
-				nfc_geo->auxiliary_size);
-		auxiliary_virt = this->auxiliary_virt;
-		auxiliary_phys = this->auxiliary_phys;
-
-		/* Handle block mark swapping. */
-		block_mark_swapping(this,
-				(void *)payload_virt, (void *)auxiliary_virt);
-	} else {
-		/*
-		 * If control arrives here, we're not doing block mark swapping,
-		 * so we can try to use the caller's buffers.
-		 */
-		ret = send_page_prepare(this,
-				buf, mtd->writesize,
-				this->payload_virt, this->payload_phys,
-				nfc_geo->payload_size,
-				&payload_virt, &payload_phys);
-		if (ret) {
-			dev_err(this->dev, "Inadequate payload DMA buffer\n");
-			return 0;
-		}
-
-		ret = send_page_prepare(this,
-				chip->oob_poi, mtd->oobsize,
-				this->auxiliary_virt, this->auxiliary_phys,
-				nfc_geo->auxiliary_size,
-				&auxiliary_virt, &auxiliary_phys);
-		if (ret) {
-			dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
-			goto exit_auxiliary;
-		}
-	}
-
-	/* Ask the NFC. */
-	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
-	if (ret)
-		dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
-
-	if (!this->swap_block_mark) {
-		send_page_end(this, chip->oob_poi, mtd->oobsize,
-				this->auxiliary_virt, this->auxiliary_phys,
-				nfc_geo->auxiliary_size,
-				auxiliary_virt, auxiliary_phys);
-exit_auxiliary:
-		send_page_end(this, buf, mtd->writesize,
-				this->payload_virt, this->payload_phys,
-				nfc_geo->payload_size,
-				payload_virt, payload_phys);
-	}
-
-	return 0;
-}
-
-/*
- * There are several places in this driver where we have to handle the OOB and
- * block marks. This is the function where things are the most complicated, so
- * this is where we try to explain it all. All the other places refer back to
- * here.
- *
- * These are the rules, in order of decreasing importance:
- *
- * 1) Nothing the caller does can be allowed to imperil the block mark.
- *
- * 2) In read operations, the first byte of the OOB we return must reflect the
- *    true state of the block mark, no matter where that block mark appears in
- *    the physical page.
- *
- * 3) ECC-based read operations return an OOB full of set bits (since we never
- *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
- *    return).
- *
- * 4) "Raw" read operations return a direct view of the physical bytes in the
- *    page, using the conventional definition of which bytes are data and which
- *    are OOB. This gives the caller a way to see the actual, physical bytes
- *    in the page, without the distortions applied by our ECC engine.
- *
- *
- * What we do for this specific read operation depends on two questions:
- *
- * 1) Are we doing a "raw" read, or an ECC-based read?
- *
- * 2) Are we using block mark swapping or transcription?
- *
- * There are four cases, illustrated by the following Karnaugh map:
- *
- *                    |           Raw           |         ECC-based       |
- *       -------------+-------------------------+-------------------------+
- *                    | Read the conventional   |                         |
- *                    | OOB at the end of the   |                         |
- *       Swapping     | page and return it. It  |                         |
- *                    | contains exactly what   |                         |
- *                    | we want.                | Read the block mark and |
- *       -------------+-------------------------+ return it in a buffer   |
- *                    | Read the conventional   | full of set bits.       |
- *                    | OOB at the end of the   |                         |
- *                    | page and also the block |                         |
- *       Transcribing | mark in the metadata.   |                         |
- *                    | Copy the block mark     |                         |
- *                    | into the first byte of  |                         |
- *                    | the OOB.                |                         |
- *       -------------+-------------------------+-------------------------+
- *
- * Note that we break rule #4 in the Transcribing/Raw case because we're not
- * giving an accurate view of the actual, physical bytes in the page (we're
- * overwriting the block mark). That's OK because it's more important to follow
- * rule #2.
- *
- * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
- * easy. When reading a page, for example, the NAND Flash MTD code calls our
- * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
- * ECC-based or raw view of the page is implicit in which function it calls
- * (there is a similar pair of ECC-based/raw functions for writing).
- */
-static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-				int page)
-{
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-
-	dev_dbg(this->dev, "page number is %d\n", page);
-	/* clear the OOB buffer */
-	memset(chip->oob_poi, ~0, mtd->oobsize);
-
-	/* Read out the conventional OOB. */
-	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	/*
-	 * Now, we want to make sure the block mark is correct. In the
-	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
-	 * Otherwise, we need to explicitly read it.
-	 */
-	if (GPMI_IS_MX23(this)) {
-		/* Read the block mark into the first byte of the OOB buffer. */
-		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-		chip->oob_poi[0] = chip->read_byte(mtd);
-	}
-
-	return 0;
-}
-
-static int
-gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
-{
-	struct mtd_oob_region of = { };
-	int status = 0;
-
-	/* Do we have available oob area? */
-	mtd_ooblayout_free(mtd, 0, &of);
-	if (!of.length)
-		return -EPERM;
-
-	if (!nand_is_slc(chip))
-		return -EPERM;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
-	chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-/*
- * This function reads a NAND page without involving the ECC engine (no HW
- * ECC correction).
- * The tricky part is that the GPMI/BCH controller stores the ECC bits
- * inline (interleaved with the payload data) and does not align data
- * chunks on byte boundaries.
- * We thus need to take care when moving the payload data and ECC bits
- * stored in the page into the provided buffers, which is why we're using
- * gpmi_copy_bits.
- *
- * See set_geometry_by_ecc_info inline comments to have a full description
- * of the layout used by the GPMI controller.
- */
-static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
-				  struct nand_chip *chip, uint8_t *buf,
-				  int oob_required, int page)
-{
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	struct bch_geometry *nfc_geo = &this->bch_geometry;
-	int eccsize = nfc_geo->ecc_chunk_size;
-	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
-	u8 *tmp_buf = this->raw_buffer;
-	size_t src_bit_off;
-	size_t oob_bit_off;
-	size_t oob_byte_off;
-	uint8_t *oob = chip->oob_poi;
-	int step;
-
-	chip->read_buf(mtd, tmp_buf,
-		       mtd->writesize + mtd->oobsize);
-
-	/*
-	 * If required, swap the bad block marker and the data stored in the
-	 * metadata section, so that we don't wrongly consider a block as bad.
-	 *
-	 * See the layout description for a detailed explanation on why this
-	 * is needed.
-	 */
-	if (this->swap_block_mark) {
-		u8 swap = tmp_buf[0];
-
-		tmp_buf[0] = tmp_buf[mtd->writesize];
-		tmp_buf[mtd->writesize] = swap;
-	}
-
-	/*
-	 * Copy the metadata section into the oob buffer (this section is
-	 * guaranteed to be aligned on a byte boundary).
-	 */
-	if (oob_required)
-		memcpy(oob, tmp_buf, nfc_geo->metadata_size);
-
-	oob_bit_off = nfc_geo->metadata_size * 8;
-	src_bit_off = oob_bit_off;
-
-	/* Extract interleaved payload data and ECC bits */
-	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
-		if (buf)
-			gpmi_copy_bits(buf, step * eccsize * 8,
-				       tmp_buf, src_bit_off,
-				       eccsize * 8);
-		src_bit_off += eccsize * 8;
-
-		/* Align the last ECC block to a byte boundary */
-		if (step == nfc_geo->ecc_chunk_count - 1 &&
-		    (oob_bit_off + eccbits) % 8)
-			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
-
-		if (oob_required)
-			gpmi_copy_bits(oob, oob_bit_off,
-				       tmp_buf, src_bit_off,
-				       eccbits);
-
-		src_bit_off += eccbits;
-		oob_bit_off += eccbits;
-	}
-
-	if (oob_required) {
-		oob_byte_off = oob_bit_off / 8;
-
-		if (oob_byte_off < mtd->oobsize)
-			memcpy(oob + oob_byte_off,
-			       tmp_buf + mtd->writesize + oob_byte_off,
-			       mtd->oobsize - oob_byte_off);
-	}
-
-	return 0;
-}
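-
-/*
- * Raw layout sketch (hypothetical 2 KiB geometry with 4 chunks of 512 bytes,
- * BCH8, gf_len = 13, metadata_size = 10), in bit offsets within the page:
- *
- *   [    0 ..   79]  metadata (byte aligned)
- *   [   80 .. 4175]  chunk 0 data (4096 bits)
- *   [ 4176 .. 4279]  chunk 0 ECC  (104 bits, not byte aligned)
- *   [ 4280 .. 8375]  chunk 1 data
- *   ...
- *
- * which is why gpmi_copy_bits() is used to move the payload and ECC bits
- * into byte-aligned buffers.
- */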
-
-/*
- * This function writes a NAND page without involving the ECC engine (no HW
- * ECC generation).
- * The tricky part is that the GPMI/BCH controller stores the ECC bits
- * inline (interleaved with the payload data) and does not align data
- * chunks on byte boundaries.
- * We thus need to take care when moving the OOB area to the right place
- * in the final page, which is why we're using gpmi_copy_bits.
- *
- * See set_geometry_by_ecc_info inline comments to have a full description
- * of the layout used by the GPMI controller.
- */
-static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
-				   struct nand_chip *chip,
-				   const uint8_t *buf,
-				   int oob_required, int page)
-{
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	struct bch_geometry *nfc_geo = &this->bch_geometry;
-	int eccsize = nfc_geo->ecc_chunk_size;
-	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
-	u8 *tmp_buf = this->raw_buffer;
-	uint8_t *oob = chip->oob_poi;
-	size_t dst_bit_off;
-	size_t oob_bit_off;
-	size_t oob_byte_off;
-	int step;
-
-	/*
-	 * Initialize all bits to 1 in case we don't have a buffer for the
-	 * payload or OOB data, so that unspecified bits are left in their
-	 * erased (all-ones) state.
-	 */
-	if (!buf || !oob_required)
-		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
-
-	/*
-	 * First copy the metadata section (stored in oob buffer) at the
-	 * beginning of the page, as imposed by the GPMI layout.
-	 */
-	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
-	oob_bit_off = nfc_geo->metadata_size * 8;
-	dst_bit_off = oob_bit_off;
-
-	/* Interleave payload data and ECC bits */
-	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
-		if (buf)
-			gpmi_copy_bits(tmp_buf, dst_bit_off,
-				       buf, step * eccsize * 8, eccsize * 8);
-		dst_bit_off += eccsize * 8;
-
-		/* Align the last ECC block to a byte boundary */
-		if (step == nfc_geo->ecc_chunk_count - 1 &&
-		    (oob_bit_off + eccbits) % 8)
-			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
-
-		if (oob_required)
-			gpmi_copy_bits(tmp_buf, dst_bit_off,
-				       oob, oob_bit_off, eccbits);
-
-		dst_bit_off += eccbits;
-		oob_bit_off += eccbits;
-	}
-
-	oob_byte_off = oob_bit_off / 8;
-
-	if (oob_required && oob_byte_off < mtd->oobsize)
-		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
-		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);
-
-	/*
-	 * If required, swap the bad block marker and the first byte of the
-	 * metadata section, so that we don't modify the bad block marker.
-	 *
-	 * See the layout description for a detailed explanation on why this
-	 * is needed.
-	 */
-	if (this->swap_block_mark) {
-		u8 swap = tmp_buf[0];
-
-		tmp_buf[0] = tmp_buf[mtd->writesize];
-		tmp_buf[mtd->writesize] = swap;
-	}
-
-	chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
-
-	return 0;
-}
-
-static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				 int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
-}
-
-static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				 int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
-	return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
-}
-
-static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct gpmi_nand_data *this = nand_get_controller_data(chip);
-	int ret = 0;
-	uint8_t *block_mark;
-	int column, page, status, chipnr;
-
-	chipnr = (int)(ofs >> chip->chip_shift);
-	chip->select_chip(mtd, chipnr);
-
-	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
-
-	/* Write the block mark. */
-	block_mark = this->data_buffer_dma;
-	block_mark[0] = 0; /* bad block marker */
-
-	/* Shift to get page */
-	page = (int)(ofs >> chip->page_shift);
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
-	chip->write_buf(mtd, block_mark, 1);
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-	if (status & NAND_STATUS_FAIL)
-		ret = -EIO;
-
-	chip->select_chip(mtd, -1);
-
-	return ret;
-}
-
-static int nand_boot_set_geometry(struct gpmi_nand_data *this)
-{
-	struct boot_rom_geometry *geometry = &this->rom_geometry;
-
-	/*
-	 * Set the boot block stride size.
-	 *
-	 * In principle, we should be reading this from the OTP bits, since
-	 * that's where the ROM is going to get it. In fact, we don't have any
-	 * way to read the OTP bits, so we go with the default and hope for the
-	 * best.
-	 */
-	geometry->stride_size_in_pages = 64;
-
-	/*
-	 * Set the search area stride exponent.
-	 *
-	 * In principle, we should be reading this from the OTP bits, since
-	 * that's where the ROM is going to get it. In fact, we don't have any
-	 * way to read the OTP bits, so we go with the default and hope for the
-	 * best.
-	 */
-	geometry->search_area_stride_exponent = 2;
-	return 0;
-}
-
-static const char  *fingerprint = "STMP";
-static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
-{
-	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
-	struct device *dev = this->dev;
-	struct nand_chip *chip = &this->nand;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	unsigned int search_area_size_in_strides;
-	unsigned int stride;
-	unsigned int page;
-	uint8_t *buffer = chip->buffers->databuf;
-	int saved_chip_number;
-	int found_an_ncb_fingerprint = false;
-
-	/* Compute the number of strides in a search area. */
-	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
-
-	saved_chip_number = this->current_chip;
-	chip->select_chip(mtd, 0);
-
-	/*
-	 * Loop through the first search area, looking for the NCB fingerprint.
-	 */
-	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
-
-	for (stride = 0; stride < search_area_size_in_strides; stride++) {
-		/* Compute the page addresses. */
-		page = stride * rom_geo->stride_size_in_pages;
-
-		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
-
-		/*
-		 * Read the NCB fingerprint. The fingerprint is four bytes long
-		 * and starts in the 12th byte of the page.
-		 */
-		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
-		chip->read_buf(mtd, buffer, strlen(fingerprint));
-
-		/* Look for the fingerprint. */
-		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
-			found_an_ncb_fingerprint = true;
-			break;
-		}
-
-	}
-
-	chip->select_chip(mtd, saved_chip_number);
-
-	if (found_an_ncb_fingerprint)
-		dev_dbg(dev, "\tFound a fingerprint\n");
-	else
-		dev_dbg(dev, "\tNo fingerprint found\n");
-	return found_an_ncb_fingerprint;
-}
-
-/* Writes a transcription stamp. */
-static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
-{
-	struct device *dev = this->dev;
-	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
-	struct nand_chip *chip = &this->nand;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	unsigned int block_size_in_pages;
-	unsigned int search_area_size_in_strides;
-	unsigned int search_area_size_in_pages;
-	unsigned int search_area_size_in_blocks;
-	unsigned int block;
-	unsigned int stride;
-	unsigned int page;
-	uint8_t      *buffer = chip->buffers->databuf;
-	int saved_chip_number;
-	int status;
-
-	/* Compute the search area geometry. */
-	block_size_in_pages = mtd->erasesize / mtd->writesize;
-	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
-	search_area_size_in_pages = search_area_size_in_strides *
-					rom_geo->stride_size_in_pages;
-	search_area_size_in_blocks =
-		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
-				    block_size_in_pages;
-
-	dev_dbg(dev, "Search Area Geometry :\n");
-	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
-	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
-	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
-
-	/* Select chip 0. */
-	saved_chip_number = this->current_chip;
-	chip->select_chip(mtd, 0);
-
-	/* Loop over blocks in the first search area, erasing them. */
-	dev_dbg(dev, "Erasing the search area...\n");
-
-	for (block = 0; block < search_area_size_in_blocks; block++) {
-		/* Compute the page address. */
-		page = block * block_size_in_pages;
-
-		/* Erase this block. */
-		dev_dbg(dev, "\tErasing block 0x%x\n", block);
-		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
-		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
-		/* Wait for the erase to finish. */
-		status = chip->waitfunc(mtd, chip);
-		if (status & NAND_STATUS_FAIL)
-			dev_err(dev, "[%s] Erase failed.\n", __func__);
-	}
-
-	/* Write the NCB fingerprint into the page buffer. */
-	memset(buffer, ~0, mtd->writesize);
-	memcpy(buffer + 12, fingerprint, strlen(fingerprint));
-
-	/* Loop through the first search area, writing NCB fingerprints. */
-	dev_dbg(dev, "Writing NCB fingerprints...\n");
-	for (stride = 0; stride < search_area_size_in_strides; stride++) {
-		/* Compute the page addresses. */
-		page = stride * rom_geo->stride_size_in_pages;
-
-		/* Write the first page of the current stride. */
-		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
-		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-		chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
-		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-		/* Wait for the write to finish. */
-		status = chip->waitfunc(mtd, chip);
-		if (status & NAND_STATUS_FAIL)
-			dev_err(dev, "[%s] Write failed.\n", __func__);
-	}
-
-	/* Deselect chip 0. */
-	chip->select_chip(mtd, saved_chip_number);
-	return 0;
-}
-
-static int mx23_boot_init(struct gpmi_nand_data  *this)
-{
-	struct device *dev = this->dev;
-	struct nand_chip *chip = &this->nand;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	unsigned int block_count;
-	unsigned int block;
-	int     chipnr;
-	int     page;
-	loff_t  byte;
-	uint8_t block_mark;
-	int     ret = 0;
-
-	/*
-	 * If control arrives here, we can't use block mark swapping, which
-	 * means we're forced to use transcription. First, scan for the
-	 * transcription stamp. If we find it, then we don't have to do
-	 * anything -- the block marks are already transcribed.
-	 */
-	if (mx23_check_transcription_stamp(this))
-		return 0;
-
-	/*
-	 * If control arrives here, we couldn't find a transcription stamp,
-	 * so we presume the block marks are in the conventional location.
-	 */
-	dev_dbg(dev, "Transcribing bad block marks...\n");
-
-	/* Compute the number of blocks in the entire medium. */
-	block_count = chip->chipsize >> chip->phys_erase_shift;
-
-	/*
-	 * Loop over all the blocks in the medium, transcribing block marks as
-	 * we go.
-	 */
-	for (block = 0; block < block_count; block++) {
-		/*
-		 * Compute the chip, page and byte addresses for this block's
-		 * conventional mark.
-		 */
-		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
-		page = block << (chip->phys_erase_shift - chip->page_shift);
-		byte = block <<  chip->phys_erase_shift;
-
-		/* Send the command to read the conventional block mark. */
-		chip->select_chip(mtd, chipnr);
-		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
-		block_mark = chip->read_byte(mtd);
-		chip->select_chip(mtd, -1);
-
-		/*
-		 * Check if the block is marked bad. If so, we need to mark it
-		 * again, but this time the result will be a mark in the
-		 * location where we transcribe block marks.
-		 */
-		if (block_mark != 0xff) {
-			dev_dbg(dev, "Transcribing mark in block %u\n", block);
-			ret = chip->block_markbad(mtd, byte);
-			if (ret)
-				dev_err(dev,
-					"Failed to mark block bad with ret %d\n",
-					ret);
-		}
-	}
-
-	/* Write the stamp that indicates we've transcribed the block marks. */
-	mx23_write_transcription_stamp(this);
-	return 0;
-}
-
-static int nand_boot_init(struct gpmi_nand_data  *this)
-{
-	nand_boot_set_geometry(this);
-
-	/* This is ROM arch-specific initialization before the BBT scanning. */
-	if (GPMI_IS_MX23(this))
-		return mx23_boot_init(this);
-	return 0;
-}
-
-static int gpmi_set_geometry(struct gpmi_nand_data *this)
-{
-	int ret;
-
-	/* Free the temporary DMA memory for reading ID. */
-	gpmi_free_dma_buffer(this);
-
-	/* Set up the NFC geometry which is used by BCH. */
-	ret = bch_set_geometry(this);
-	if (ret) {
-		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
-		return ret;
-	}
-
-	/* Allocate new DMA buffers according to the page size and OOB size. */
-	return gpmi_alloc_dma_buffer(this);
-}
-
-static void gpmi_nand_exit(struct gpmi_nand_data *this)
-{
-	nand_release(nand_to_mtd(&this->nand));
-	gpmi_free_dma_buffer(this);
-}
-
-static int gpmi_init_last(struct gpmi_nand_data *this)
-{
-	struct nand_chip *chip = &this->nand;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	struct bch_geometry *bch_geo = &this->bch_geometry;
-	int ret;
-
-	/* Set up the medium geometry */
-	ret = gpmi_set_geometry(this);
-	if (ret)
-		return ret;
-
-	/* Init the nand_ecc_ctrl{} */
-	ecc->read_page	= gpmi_ecc_read_page;
-	ecc->write_page	= gpmi_ecc_write_page;
-	ecc->read_oob	= gpmi_ecc_read_oob;
-	ecc->write_oob	= gpmi_ecc_write_oob;
-	ecc->read_page_raw = gpmi_ecc_read_page_raw;
-	ecc->write_page_raw = gpmi_ecc_write_page_raw;
-	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
-	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
-	ecc->mode	= NAND_ECC_HW;
-	ecc->size	= bch_geo->ecc_chunk_size;
-	ecc->strength	= bch_geo->ecc_strength;
-	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
-
-	/*
-	 * We only enable the subpage read when:
-	 *  (1) the chip is an i.MX6, and
-	 *  (2) the size of the ECC parity is byte-aligned.
-	 */
-	if (GPMI_IS_MX6(this) &&
-		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
-		ecc->read_subpage = gpmi_ecc_read_subpage;
-		chip->options |= NAND_SUBPAGE_READ;
-	}
-
-	/*
-	 * Try to enable extra features such as EDO or sync mode.
-	 *
-	 * We do not check the return value: if enabling the extra features
-	 * fails, we can still run in the normal way.
-	 */
-	gpmi_extra_init(this);
-
-	return 0;
-}
-
-static int gpmi_nand_init(struct gpmi_nand_data *this)
-{
-	struct nand_chip *chip = &this->nand;
-	struct mtd_info  *mtd = nand_to_mtd(chip);
-	int ret;
-
-	/* init current chip */
-	this->current_chip	= -1;
-
-	/* init the MTD data structures */
-	mtd->name		= "gpmi-nand";
-	mtd->dev.parent		= this->dev;
-
-	/* Init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
-	nand_set_controller_data(chip, this);
-	nand_set_flash_node(chip, this->pdev->dev.of_node);
-	chip->select_chip	= gpmi_select_chip;
-	chip->cmd_ctrl		= gpmi_cmd_ctrl;
-	chip->dev_ready		= gpmi_dev_ready;
-	chip->read_byte		= gpmi_read_byte;
-	chip->read_buf		= gpmi_read_buf;
-	chip->write_buf		= gpmi_write_buf;
-	chip->badblock_pattern	= &gpmi_bbt_descr;
-	chip->block_markbad	= gpmi_block_markbad;
-	chip->options		|= NAND_NO_SUBPAGE_WRITE;
-
-	/* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
-	this->swap_block_mark = !GPMI_IS_MX23(this);
-
-	/*
-	 * Allocate a temporary DMA buffer for reading ID in the
-	 * nand_scan_ident().
-	 */
-	this->bch_geometry.payload_size = 1024;
-	this->bch_geometry.auxiliary_size = 128;
-	ret = gpmi_alloc_dma_buffer(this);
-	if (ret)
-		goto err_out;
-
-	ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
-	if (ret)
-		goto err_out;
-
-	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
-		chip->bbt_options |= NAND_BBT_NO_OOB;
-
-		if (of_property_read_bool(this->dev->of_node,
-						"fsl,no-blockmark-swap"))
-			this->swap_block_mark = false;
-	}
-	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
-		this->swap_block_mark ? "en" : "dis");
-
-	ret = gpmi_init_last(this);
-	if (ret)
-		goto err_out;
-
-	chip->options |= NAND_SKIP_BBTSCAN;
-	ret = nand_scan_tail(mtd);
-	if (ret)
-		goto err_out;
-
-	ret = nand_boot_init(this);
-	if (ret)
-		goto err_out;
-	ret = chip->scan_bbt(mtd);
-	if (ret)
-		goto err_out;
-
-	ret = mtd_device_register(mtd, NULL, 0);
-	if (ret)
-		goto err_out;
-	return 0;
-
-err_out:
-	gpmi_nand_exit(this);
-	return ret;
-}
-
-static const struct of_device_id gpmi_nand_id_table[] = {
-	{
-		.compatible = "fsl,imx23-gpmi-nand",
-		.data = &gpmi_devdata_imx23,
-	}, {
-		.compatible = "fsl,imx28-gpmi-nand",
-		.data = &gpmi_devdata_imx28,
-	}, {
-		.compatible = "fsl,imx6q-gpmi-nand",
-		.data = &gpmi_devdata_imx6q,
-	}, {
-		.compatible = "fsl,imx6sx-gpmi-nand",
-		.data = &gpmi_devdata_imx6sx,
-	}, {}
-};
-MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
-
-static int gpmi_nand_probe(struct platform_device *pdev)
-{
-	struct gpmi_nand_data *this;
-	const struct of_device_id *of_id;
-	int ret;
-
-	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
-	if (!this)
-		return -ENOMEM;
-
-	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
-	if (of_id) {
-		this->devdata = of_id->data;
-	} else {
-		dev_err(&pdev->dev, "Failed to find the right device id.\n");
-		return -ENODEV;
-	}
-
-	platform_set_drvdata(pdev, this);
-	this->pdev  = pdev;
-	this->dev   = &pdev->dev;
-
-	ret = acquire_resources(this);
-	if (ret)
-		goto exit_acquire_resources;
-
-	ret = init_hardware(this);
-	if (ret)
-		goto exit_nfc_init;
-
-	ret = gpmi_nand_init(this);
-	if (ret)
-		goto exit_nfc_init;
-
-	dev_info(this->dev, "driver registered.\n");
-
-	return 0;
-
-exit_nfc_init:
-	release_resources(this);
-exit_acquire_resources:
-
-	return ret;
-}
-
-static int gpmi_nand_remove(struct platform_device *pdev)
-{
-	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
-
-	gpmi_nand_exit(this);
-	release_resources(this);
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int gpmi_pm_suspend(struct device *dev)
-{
-	struct gpmi_nand_data *this = dev_get_drvdata(dev);
-
-	release_dma_channels(this);
-	return 0;
-}
-
-static int gpmi_pm_resume(struct device *dev)
-{
-	struct gpmi_nand_data *this = dev_get_drvdata(dev);
-	int ret;
-
-	ret = acquire_dma_channels(this);
-	if (ret < 0)
-		return ret;
-
-	/* re-init the GPMI registers */
-	this->flags &= ~GPMI_TIMING_INIT_OK;
-	ret = gpmi_init(this);
-	if (ret) {
-		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
-		return ret;
-	}
-
-	/* re-init the BCH registers */
-	ret = bch_set_geometry(this);
-	if (ret) {
-		dev_err(this->dev, "Error setting BCH : %d\n", ret);
-		return ret;
-	}
-
-	/* re-init others */
-	gpmi_extra_init(this);
-
-	return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct dev_pm_ops gpmi_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
-};
-
-static struct platform_driver gpmi_nand_driver = {
-	.driver = {
-		.name = "gpmi-nand",
-		.pm = &gpmi_pm_ops,
-		.of_match_table = gpmi_nand_id_table,
-	},
-	.probe   = gpmi_nand_probe,
-	.remove  = gpmi_nand_remove,
-};
-module_platform_driver(gpmi_nand_driver);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc.");
-MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
deleted file mode 100644
index d7625cad6493..000000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ /dev/null
@@ -1,310 +0,0 @@ 
-/*
- * Freescale GPMI NAND Flash Driver
- *
- * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
- * Copyright (C) 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
-#define __DRIVERS_MTD_NAND_GPMI_NAND_H
-
-#include <linux/mtd/rawnand.h>
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-
-#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
-struct resources {
-	void __iomem  *gpmi_regs;
-	void __iomem  *bch_regs;
-	unsigned int  dma_low_channel;
-	unsigned int  dma_high_channel;
-	struct clk    *clock[GPMI_CLK_MAX];
-};
-
-/**
- * struct bch_geometry - BCH geometry description.
- * @gf_len:                   The length of the Galois field (e.g., 13 or 14).
- * @ecc_strength:             A number that describes the strength of the ECC
- *                            algorithm.
- * @page_size:                The size, in bytes, of a physical page, including
- *                            both data and OOB.
- * @metadata_size:            The size, in bytes, of the metadata.
- * @ecc_chunk_size:           The size, in bytes, of a single ECC chunk. Note
- *                            the first chunk in the page includes both data and
- *                            metadata, so it's a bit larger than this value.
- * @ecc_chunk_count:          The number of ECC chunks in the page.
- * @payload_size:             The size, in bytes, of the payload buffer.
- * @auxiliary_size:           The size, in bytes, of the auxiliary buffer.
- * @auxiliary_status_offset:  The offset into the auxiliary buffer at which
- *                            the ECC status appears.
- * @block_mark_byte_offset:   The byte offset in the ECC-based page view at
- *                            which the underlying physical block mark appears.
- * @block_mark_bit_offset:    The bit offset into the ECC-based page view at
- *                            which the underlying physical block mark appears.
- */
-struct bch_geometry {
-	unsigned int  gf_len;
-	unsigned int  ecc_strength;
-	unsigned int  page_size;
-	unsigned int  metadata_size;
-	unsigned int  ecc_chunk_size;
-	unsigned int  ecc_chunk_count;
-	unsigned int  payload_size;
-	unsigned int  auxiliary_size;
-	unsigned int  auxiliary_status_offset;
-	unsigned int  block_mark_byte_offset;
-	unsigned int  block_mark_bit_offset;
-};
-
-/**
- * struct boot_rom_geometry - Boot ROM geometry description.
- * @stride_size_in_pages:        The size of a boot block stride, in pages.
- * @search_area_stride_exponent: The logarithm to base 2 of the size of a
- *                               search area in boot block strides.
- */
-struct boot_rom_geometry {
-	unsigned int  stride_size_in_pages;
-	unsigned int  search_area_stride_exponent;
-};
-
-/* DMA operations types */
-enum dma_ops_type {
-	DMA_FOR_COMMAND = 1,
-	DMA_FOR_READ_DATA,
-	DMA_FOR_WRITE_DATA,
-	DMA_FOR_READ_ECC_PAGE,
-	DMA_FOR_WRITE_ECC_PAGE
-};
-
-/**
- * struct nand_timing - Fundamental timing attributes for NAND.
- * @data_setup_in_ns:         The data setup time, in nanoseconds. Usually the
- *                            maximum of tDS and tWP. A negative value
- *                            indicates this characteristic isn't known.
- * @data_hold_in_ns:          The data hold time, in nanoseconds. Usually the
- *                            maximum of tDH, tWH and tREH. A negative value
- *                            indicates this characteristic isn't known.
- * @address_setup_in_ns:      The address setup time, in nanoseconds. Usually
- *                            the maximum of tCLS, tCS and tALS. A negative
- *                            value indicates this characteristic isn't known.
- * @gpmi_sample_delay_in_ns:  A GPMI-specific timing parameter. A negative value
- *                            indicates this characteristic isn't known.
- * @tREA_in_ns:               tREA, in nanoseconds, from the data sheet. A
- *                            negative value indicates this characteristic isn't
- *                            known.
- * @tRLOH_in_ns:              tRLOH, in nanoseconds, from the data sheet. A
- *                            negative value indicates this characteristic isn't
- *                            known.
- * @tRHOH_in_ns:              tRHOH, in nanoseconds, from the data sheet. A
- *                            negative value indicates this characteristic isn't
- *                            known.
- */
-struct nand_timing {
-	int8_t  data_setup_in_ns;
-	int8_t  data_hold_in_ns;
-	int8_t  address_setup_in_ns;
-	int8_t  gpmi_sample_delay_in_ns;
-	int8_t  tREA_in_ns;
-	int8_t  tRLOH_in_ns;
-	int8_t  tRHOH_in_ns;
-};
-
-enum gpmi_type {
-	IS_MX23,
-	IS_MX28,
-	IS_MX6Q,
-	IS_MX6SX
-};
-
-struct gpmi_devdata {
-	enum gpmi_type type;
-	int bch_max_ecc_strength;
-	int max_chain_delay; /* See the async EDO mode */
-};
-
-struct gpmi_nand_data {
-	/* flags */
-#define GPMI_ASYNC_EDO_ENABLED	(1 << 0)
-#define GPMI_TIMING_INIT_OK	(1 << 1)
-	int			flags;
-	const struct gpmi_devdata *devdata;
-
-	/* System Interface */
-	struct device		*dev;
-	struct platform_device	*pdev;
-
-	/* Resources */
-	struct resources	resources;
-
-	/* Flash Hardware */
-	struct nand_timing	timing;
-	int			timing_mode;
-
-	/* BCH */
-	struct bch_geometry	bch_geometry;
-	struct completion	bch_done;
-
-	/* NAND Boot issue */
-	bool			swap_block_mark;
-	struct boot_rom_geometry rom_geometry;
-
-	/* MTD / NAND */
-	struct nand_chip	nand;
-
-	/* General-use Variables */
-	int			current_chip;
-	unsigned int		command_length;
-
-	/* passed from upper layer */
-	uint8_t			*upper_buf;
-	int			upper_len;
-
-	/* for DMA operations */
-	bool			direct_dma_map_ok;
-
-	struct scatterlist	cmd_sgl;
-	char			*cmd_buffer;
-
-	struct scatterlist	data_sgl;
-	char			*data_buffer_dma;
-
-	void			*page_buffer_virt;
-	dma_addr_t		page_buffer_phys;
-	unsigned int		page_buffer_size;
-
-	void			*payload_virt;
-	dma_addr_t		payload_phys;
-
-	void			*auxiliary_virt;
-	dma_addr_t		auxiliary_phys;
-
-	void			*raw_buffer;
-
-	/* DMA channels */
-#define DMA_CHANS		8
-	struct dma_chan		*dma_chans[DMA_CHANS];
-	enum dma_ops_type	last_dma_type;
-	enum dma_ops_type	dma_type;
-	struct completion	dma_done;
-
-	/* private */
-	void			*private;
-};
-
-/**
- * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
- * @data_setup_in_cycles:      The data setup time, in cycles.
- * @data_hold_in_cycles:       The data hold time, in cycles.
- * @address_setup_in_cycles:   The address setup time, in cycles.
- * @device_busy_timeout:       The timeout waiting for NAND Ready/Busy,
- *                             this value is the number of cycles multiplied
- *                             by 4096.
- * @use_half_periods:          Indicates the clock is running slowly, so the
- *                             NFC DLL should use half-periods.
- * @sample_delay_factor:       The sample delay factor.
- * @wrn_dly_sel:               The delay on the GPMI write strobe.
- */
-struct gpmi_nfc_hardware_timing {
-	/* for HW_GPMI_TIMING0 */
-	uint8_t  data_setup_in_cycles;
-	uint8_t  data_hold_in_cycles;
-	uint8_t  address_setup_in_cycles;
-
-	/* for HW_GPMI_TIMING1 */
-	uint16_t device_busy_timeout;
-#define GPMI_DEFAULT_BUSY_TIMEOUT	0x500 /* default busy timeout value. */
-
-	/* for HW_GPMI_CTRL1 */
-	bool     use_half_periods;
-	uint8_t  sample_delay_factor;
-	uint8_t  wrn_dly_sel;
-};
-
-/**
- * struct timing_threshod - Timing threshold
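- * @max_chip_count:              The maximum number of chips (chip selects)
- *                               that can be expressed in the hardware.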
- * @max_data_setup_cycles:       The maximum number of data setup cycles that
- *                               can be expressed in the hardware.
- * @internal_data_setup_in_ns:   The time, in ns, that the NFC hardware requires
- *                               for data read internal setup. In the Reference
- *                               Manual, see the chapter "High-Speed NAND
- *                               Timing" for more details.
- * @max_sample_delay_factor:     The maximum sample delay factor that can be
- *                               expressed in the hardware.
- * @max_dll_clock_period_in_ns:  The maximum period of the GPMI clock that the
- *                               sample delay DLL hardware can possibly work
- *                               with (the DLL is unusable with longer periods).
- *                               If the full-cycle period is greater than HALF
- *                               this value, the DLL must be configured to use
- *                               half-periods.
- * @max_dll_delay_in_ns:         The maximum amount of delay, in ns, that the
- *                               DLL can implement.
- * @clock_frequency_in_hz:       The clock frequency, in Hz, during the current
- *                               I/O transaction. If no I/O transaction is in
- *                               progress, this is the clock frequency during
- *                               the most recent I/O transaction.
- */
-struct timing_threshod {
-	const unsigned int      max_chip_count;
-	const unsigned int      max_data_setup_cycles;
-	const unsigned int      internal_data_setup_in_ns;
-	const unsigned int      max_sample_delay_factor;
-	const unsigned int      max_dll_clock_period_in_ns;
-	const unsigned int      max_dll_delay_in_ns;
-	unsigned long           clock_frequency_in_hz;
-
-};
-
-/* Common Services */
-extern int common_nfc_set_geometry(struct gpmi_nand_data *);
-extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
-extern void prepare_data_dma(struct gpmi_nand_data *,
-				enum dma_data_direction dr);
-extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
-				struct dma_async_tx_descriptor *);
-extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
-				struct dma_async_tx_descriptor *);
-
-/* GPMI-NAND helper function library */
-extern int gpmi_init(struct gpmi_nand_data *);
-extern int gpmi_extra_init(struct gpmi_nand_data *);
-extern void gpmi_clear_bch(struct gpmi_nand_data *);
-extern void gpmi_dump_info(struct gpmi_nand_data *);
-extern int bch_set_geometry(struct gpmi_nand_data *);
-extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
-extern int gpmi_send_command(struct gpmi_nand_data *);
-extern void gpmi_begin(struct gpmi_nand_data *);
-extern void gpmi_end(struct gpmi_nand_data *);
-extern int gpmi_read_data(struct gpmi_nand_data *);
-extern int gpmi_send_data(struct gpmi_nand_data *);
-extern int gpmi_send_page(struct gpmi_nand_data *,
-			dma_addr_t payload, dma_addr_t auxiliary);
-extern int gpmi_read_page(struct gpmi_nand_data *,
-			dma_addr_t payload, dma_addr_t auxiliary);
-
-void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
-		    const u8 *src, size_t src_bit_off,
-		    size_t nbits);
-
-/* BCH : Status Block Completion Codes */
-#define STATUS_GOOD		0x00
-#define STATUS_ERASED		0xff
-#define STATUS_UNCORRECTABLE	0xfe
-
-/* Use the devdata to distinguish different Archs. */
-#define GPMI_IS_MX23(x)		((x)->devdata->type == IS_MX23)
-#define GPMI_IS_MX28(x)		((x)->devdata->type == IS_MX28)
-#define GPMI_IS_MX6Q(x)		((x)->devdata->type == IS_MX6Q)
-#define GPMI_IS_MX6SX(x)	((x)->devdata->type == IS_MX6SX)
-
-#define GPMI_IS_MX6(x)		(GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
-#endif
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
deleted file mode 100644
index 82114cdc8330..000000000000
--- a/drivers/mtd/nand/gpmi-nand/gpmi-regs.h
+++ /dev/null
@@ -1,187 +0,0 @@ 
-/*
- * Freescale GPMI NAND Flash Driver
- *
- * Copyright 2008-2011 Freescale Semiconductor, Inc.
- * Copyright 2008 Embedded Alley Solutions, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef __GPMI_NAND_GPMI_REGS_H
-#define __GPMI_NAND_GPMI_REGS_H
-
-#define HW_GPMI_CTRL0					0x00000000
-#define HW_GPMI_CTRL0_SET				0x00000004
-#define HW_GPMI_CTRL0_CLR				0x00000008
-#define HW_GPMI_CTRL0_TOG				0x0000000c
-
-#define BP_GPMI_CTRL0_COMMAND_MODE			24
-#define BM_GPMI_CTRL0_COMMAND_MODE	(3 << BP_GPMI_CTRL0_COMMAND_MODE)
-#define BF_GPMI_CTRL0_COMMAND_MODE(v)	\
-	(((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
-#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE		0x0
-#define BV_GPMI_CTRL0_COMMAND_MODE__READ		0x1
-#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE	0x2
-#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY	0x3
-
-#define BM_GPMI_CTRL0_WORD_LENGTH			(1 << 23)
-#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT		0x0
-#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT		0x1
-
-/*
- *  Difference in LOCK_CS between imx23 and imx28:
- *  this bit may impact the _POWER_ consumption, so some chips
- *  do not set it.
- */
-#define MX23_BP_GPMI_CTRL0_LOCK_CS			22
-#define MX28_BP_GPMI_CTRL0_LOCK_CS			27
-#define LOCK_CS_ENABLE					0x1
-#define BF_GPMI_CTRL0_LOCK_CS(v, x)			0x0
-
-/* Difference in CS between imx23 and imx28 */
-#define BP_GPMI_CTRL0_CS				20
-#define MX23_BM_GPMI_CTRL0_CS		(3 << BP_GPMI_CTRL0_CS)
-#define MX28_BM_GPMI_CTRL0_CS		(7 << BP_GPMI_CTRL0_CS)
-#define BF_GPMI_CTRL0_CS(v, x)		(((v) << BP_GPMI_CTRL0_CS) & \
-						(GPMI_IS_MX23((x)) \
-						? MX23_BM_GPMI_CTRL0_CS	\
-						: MX28_BM_GPMI_CTRL0_CS))
-
-#define BP_GPMI_CTRL0_ADDRESS				17
-#define BM_GPMI_CTRL0_ADDRESS		(3 << BP_GPMI_CTRL0_ADDRESS)
-#define BF_GPMI_CTRL0_ADDRESS(v)	\
-		(((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
-#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA		0x0
-#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE			0x1
-#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE			0x2
-
-#define BM_GPMI_CTRL0_ADDRESS_INCREMENT			(1 << 16)
-#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED	0x0
-#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED	0x1
-
-#define BP_GPMI_CTRL0_XFER_COUNT			0
-#define BM_GPMI_CTRL0_XFER_COUNT	(0xffff << BP_GPMI_CTRL0_XFER_COUNT)
-#define BF_GPMI_CTRL0_XFER_COUNT(v)	\
-		(((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
-
-#define HW_GPMI_COMPARE					0x00000010
-
-#define HW_GPMI_ECCCTRL					0x00000020
-#define HW_GPMI_ECCCTRL_SET				0x00000024
-#define HW_GPMI_ECCCTRL_CLR				0x00000028
-#define HW_GPMI_ECCCTRL_TOG				0x0000002c
-
-#define BP_GPMI_ECCCTRL_ECC_CMD				13
-#define BM_GPMI_ECCCTRL_ECC_CMD		(3 << BP_GPMI_ECCCTRL_ECC_CMD)
-#define BF_GPMI_ECCCTRL_ECC_CMD(v)	\
-		(((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
-#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE		0x0
-#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE		0x1
-
-#define BM_GPMI_ECCCTRL_ENABLE_ECC			(1 << 12)
-#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE		0x1
-#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE		0x0
-
-#define BP_GPMI_ECCCTRL_BUFFER_MASK			0
-#define BM_GPMI_ECCCTRL_BUFFER_MASK	(0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
-#define BF_GPMI_ECCCTRL_BUFFER_MASK(v)	\
-	(((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
-#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY	0x100
-#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE		0x1FF
-
-#define HW_GPMI_ECCCOUNT				0x00000030
-#define HW_GPMI_PAYLOAD					0x00000040
-#define HW_GPMI_AUXILIARY				0x00000050
-#define HW_GPMI_CTRL1					0x00000060
-#define HW_GPMI_CTRL1_SET				0x00000064
-#define HW_GPMI_CTRL1_CLR				0x00000068
-#define HW_GPMI_CTRL1_TOG				0x0000006c
-
-#define BP_GPMI_CTRL1_DECOUPLE_CS			24
-#define BM_GPMI_CTRL1_DECOUPLE_CS	(1 << BP_GPMI_CTRL1_DECOUPLE_CS)
-
-#define BP_GPMI_CTRL1_WRN_DLY_SEL			22
-#define BM_GPMI_CTRL1_WRN_DLY_SEL	(0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
-#define BF_GPMI_CTRL1_WRN_DLY_SEL(v)  \
-	(((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
-#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS		0x0
-#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS		0x1
-#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS		0x2
-#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY		0x3
-
-#define BM_GPMI_CTRL1_BCH_MODE				(1 << 18)
-
-#define BP_GPMI_CTRL1_DLL_ENABLE			17
-#define BM_GPMI_CTRL1_DLL_ENABLE	(1 << BP_GPMI_CTRL1_DLL_ENABLE)
-
-#define BP_GPMI_CTRL1_HALF_PERIOD			16
-#define BM_GPMI_CTRL1_HALF_PERIOD	(1 << BP_GPMI_CTRL1_HALF_PERIOD)
-
-#define BP_GPMI_CTRL1_RDN_DELAY				12
-#define BM_GPMI_CTRL1_RDN_DELAY		(0xf << BP_GPMI_CTRL1_RDN_DELAY)
-#define BF_GPMI_CTRL1_RDN_DELAY(v)	\
-		(((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
-
-#define BM_GPMI_CTRL1_DEV_RESET				(1 << 3)
-#define BV_GPMI_CTRL1_DEV_RESET__ENABLED		0x0
-#define BV_GPMI_CTRL1_DEV_RESET__DISABLED		0x1
-
-#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY		(1 << 2)
-#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW	0x0
-#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH	0x1
-
-#define BM_GPMI_CTRL1_CAMERA_MODE			(1 << 1)
-#define BV_GPMI_CTRL1_GPMI_MODE__NAND			0x0
-#define BV_GPMI_CTRL1_GPMI_MODE__ATA			0x1
-
-#define BM_GPMI_CTRL1_GPMI_MODE				(1 << 0)
-
-#define HW_GPMI_TIMING0					0x00000070
-
-#define BP_GPMI_TIMING0_ADDRESS_SETUP			16
-#define BM_GPMI_TIMING0_ADDRESS_SETUP	(0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
-#define BF_GPMI_TIMING0_ADDRESS_SETUP(v)	\
-	(((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
-
-#define BP_GPMI_TIMING0_DATA_HOLD			8
-#define BM_GPMI_TIMING0_DATA_HOLD	(0xff << BP_GPMI_TIMING0_DATA_HOLD)
-#define BF_GPMI_TIMING0_DATA_HOLD(v)		\
-	(((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
-
-#define BP_GPMI_TIMING0_DATA_SETUP			0
-#define BM_GPMI_TIMING0_DATA_SETUP	(0xff << BP_GPMI_TIMING0_DATA_SETUP)
-#define BF_GPMI_TIMING0_DATA_SETUP(v)		\
-	(((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
-
-#define HW_GPMI_TIMING1					0x00000080
-#define BP_GPMI_TIMING1_BUSY_TIMEOUT			16
-#define BM_GPMI_TIMING1_BUSY_TIMEOUT	(0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
-#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v)		\
-	(((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
-
-#define HW_GPMI_TIMING2					0x00000090
-#define HW_GPMI_DATA					0x000000a0
-
-/* MX28 uses this to detect READY. */
-#define HW_GPMI_STAT					0x000000b0
-#define MX28_BP_GPMI_STAT_READY_BUSY			24
-#define MX28_BM_GPMI_STAT_READY_BUSY	(0xff << MX28_BP_GPMI_STAT_READY_BUSY)
-#define MX28_BF_GPMI_STAT_READY_BUSY(v)		\
-	(((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
-
-/* MX23 uses this to detect READY. */
-#define HW_GPMI_DEBUG					0x000000c0
-#define MX23_BP_GPMI_DEBUG_READY0			28
-#define MX23_BM_GPMI_DEBUG_READY0	(1 << MX23_BP_GPMI_DEBUG_READY0)
-#endif
diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c
deleted file mode 100644
index a287d73bb17e..000000000000
--- a/drivers/mtd/nand/hisi504_nand.c
+++ /dev/null
@@ -1,898 +0,0 @@ 
-/*
- * Hisilicon NAND Flash controller driver
- *
- * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
- *              http://www.hisilicon.com
- *
- * Author: Zhou Wang <wangzhou.bry@gmail.com>
- * The initial developer of the original code is Zhiyong Cai
- * <caizhiyong@huawei.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-#include <linux/of.h>
-#include <linux/mtd/mtd.h>
-#include <linux/sizes.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/dma-mapping.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/partitions.h>
-
-#define HINFC504_MAX_CHIP                               (4)
-#define HINFC504_W_LATCH                                (5)
-#define HINFC504_R_LATCH                                (7)
-#define HINFC504_RW_LATCH                               (3)
-
-#define HINFC504_NFC_TIMEOUT				(2 * HZ)
-#define HINFC504_NFC_PM_TIMEOUT				(1 * HZ)
-#define HINFC504_NFC_DMA_TIMEOUT			(5 * HZ)
-#define HINFC504_CHIP_DELAY				(25)
-
-#define HINFC504_REG_BASE_ADDRESS_LEN			(0x100)
-#define HINFC504_BUFFER_BASE_ADDRESS_LEN		(2048 + 128)
-
-#define HINFC504_ADDR_CYCLE_MASK			0x4
-
-#define HINFC504_CON					0x00
-#define HINFC504_CON_OP_MODE_NORMAL			BIT(0)
-#define HINFC504_CON_PAGEISZE_SHIFT			(1)
-#define HINFC504_CON_PAGESIZE_MASK			(0x07)
-#define HINFC504_CON_BUS_WIDTH				BIT(4)
-#define HINFC504_CON_READY_BUSY_SEL			BIT(8)
-#define HINFC504_CON_ECCTYPE_SHIFT			(9)
-#define HINFC504_CON_ECCTYPE_MASK			(0x07)
-
-#define HINFC504_PWIDTH					0x04
-#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
-	((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
-
-#define HINFC504_CMD					0x0C
-#define HINFC504_ADDRL					0x10
-#define HINFC504_ADDRH					0x14
-#define HINFC504_DATA_NUM				0x18
-
-#define HINFC504_OP					0x1C
-#define HINFC504_OP_READ_DATA_EN			BIT(1)
-#define HINFC504_OP_WAIT_READY_EN			BIT(2)
-#define HINFC504_OP_CMD2_EN				BIT(3)
-#define HINFC504_OP_WRITE_DATA_EN			BIT(4)
-#define HINFC504_OP_ADDR_EN				BIT(5)
-#define HINFC504_OP_CMD1_EN				BIT(6)
-#define HINFC504_OP_NF_CS_SHIFT                         (7)
-#define HINFC504_OP_NF_CS_MASK				(3)
-#define HINFC504_OP_ADDR_CYCLE_SHIFT			(9)
-#define HINFC504_OP_ADDR_CYCLE_MASK			(7)
-
-#define HINFC504_STATUS                                 0x20
-#define HINFC504_READY					BIT(0)
-
-#define HINFC504_INTEN					0x24
-#define HINFC504_INTEN_DMA				BIT(9)
-#define HINFC504_INTEN_UE				BIT(6)
-#define HINFC504_INTEN_CE				BIT(5)
-
-#define HINFC504_INTS					0x28
-#define HINFC504_INTS_DMA				BIT(9)
-#define HINFC504_INTS_UE				BIT(6)
-#define HINFC504_INTS_CE				BIT(5)
-
-#define HINFC504_INTCLR                                 0x2C
-#define HINFC504_INTCLR_DMA				BIT(9)
-#define HINFC504_INTCLR_UE				BIT(6)
-#define HINFC504_INTCLR_CE				BIT(5)
-
-#define HINFC504_ECC_STATUS                             0x5C
-#define HINFC504_ECC_16_BIT_SHIFT                       12
-
-#define HINFC504_DMA_CTRL				0x60
-#define HINFC504_DMA_CTRL_DMA_START			BIT(0)
-#define HINFC504_DMA_CTRL_WE				BIT(1)
-#define HINFC504_DMA_CTRL_DATA_AREA_EN			BIT(2)
-#define HINFC504_DMA_CTRL_OOB_AREA_EN			BIT(3)
-#define HINFC504_DMA_CTRL_BURST4_EN			BIT(4)
-#define HINFC504_DMA_CTRL_BURST8_EN			BIT(5)
-#define HINFC504_DMA_CTRL_BURST16_EN			BIT(6)
-#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT		(7)
-#define HINFC504_DMA_CTRL_ADDR_NUM_MASK                 (1)
-#define HINFC504_DMA_CTRL_CS_SHIFT			(8)
-#define HINFC504_DMA_CTRL_CS_MASK			(0x03)
-
-#define HINFC504_DMA_ADDR_DATA				0x64
-#define HINFC504_DMA_ADDR_OOB				0x68
-
-#define HINFC504_DMA_LEN				0x6C
-#define HINFC504_DMA_LEN_OOB_SHIFT			(16)
-#define HINFC504_DMA_LEN_OOB_MASK			(0xFFF)
-
-#define HINFC504_DMA_PARA				0x70
-#define HINFC504_DMA_PARA_DATA_RW_EN			BIT(0)
-#define HINFC504_DMA_PARA_OOB_RW_EN			BIT(1)
-#define HINFC504_DMA_PARA_DATA_EDC_EN			BIT(2)
-#define HINFC504_DMA_PARA_OOB_EDC_EN			BIT(3)
-#define HINFC504_DMA_PARA_DATA_ECC_EN			BIT(4)
-#define HINFC504_DMA_PARA_OOB_ECC_EN			BIT(5)
-
-#define HINFC_VERSION                                   0x74
-#define HINFC504_LOG_READ_ADDR				0x7C
-#define HINFC504_LOG_READ_LEN				0x80
-
-#define HINFC504_NANDINFO_LEN				0x10
-
-struct hinfc_host {
-	struct nand_chip	chip;
-	struct device		*dev;
-	void __iomem		*iobase;
-	void __iomem		*mmio;
-	struct completion       cmd_complete;
-	unsigned int		offset;
-	unsigned int		command;
-	int			chipselect;
-	unsigned int		addr_cycle;
-	u32                     addr_value[2];
-	u32                     cache_addr_value[2];
-	char			*buffer;
-	dma_addr_t		dma_buffer;
-	dma_addr_t		dma_oob;
-	int			version;
-	unsigned int            irq_status; /* interrupt status */
-};
-
-static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
-{
-	return readl(host->iobase + reg);
-}
-
-static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
-			       unsigned int reg)
-{
-	writel(value, host->iobase + reg);
-}
-
-static void wait_controller_finished(struct hinfc_host *host)
-{
-	unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
-	int val;
-
-	while (time_before(jiffies, timeout)) {
-		val = hinfc_read(host, HINFC504_STATUS);
-		if (host->command == NAND_CMD_ERASE2) {
-			/* nfc is ready */
-			while (!(val & HINFC504_READY)) {
-				usleep_range(500, 1000);
-				val = hinfc_read(host, HINFC504_STATUS);
-			}
-			return;
-		}
-
-		if (val & HINFC504_READY)
-			return;
-	}
-
-	/* the command did not complete within the timeout */
-	dev_err(host->dev, "Wait NAND controller exec cmd timeout.\n");
-}
-
-static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
-{
-	struct nand_chip *chip = &host->chip;
-	struct mtd_info	*mtd = nand_to_mtd(chip);
-	unsigned long val;
-	int ret;
-
-	hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
-	hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
-
-	if (chip->ecc.mode == NAND_ECC_NONE) {
-		hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
-			<< HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
-
-		hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
-			| HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
-	} else {
-		if (host->command == NAND_CMD_READOOB)
-			hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
-			| HINFC504_DMA_PARA_OOB_EDC_EN
-			| HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
-		else
-			hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
-			| HINFC504_DMA_PARA_OOB_RW_EN
-			| HINFC504_DMA_PARA_DATA_EDC_EN
-			| HINFC504_DMA_PARA_OOB_EDC_EN
-			| HINFC504_DMA_PARA_DATA_ECC_EN
-			| HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
-
-	}
-
-	val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
-		| HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
-		| HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
-		| ((host->addr_cycle == 4 ? 1 : 0)
-			<< HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
-		| ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
-			<< HINFC504_DMA_CTRL_CS_SHIFT));
-
-	if (todev)
-		val |= HINFC504_DMA_CTRL_WE;
-
-	init_completion(&host->cmd_complete);
-
-	hinfc_write(host, val, HINFC504_DMA_CTRL);
-	ret = wait_for_completion_timeout(&host->cmd_complete,
-			HINFC504_NFC_DMA_TIMEOUT);
-
-	if (!ret) {
-		dev_err(host->dev, "DMA operation(irq) timeout!\n");
-		/* sanity check */
-		val = hinfc_read(host, HINFC504_DMA_CTRL);
-		if (!(val & HINFC504_DMA_CTRL_DMA_START))
-			dev_err(host->dev, "DMA is already done but without irq ACK!\n");
-		else
-			dev_err(host->dev, "DMA is really timeout!\n");
-	}
-}
-
-static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
-{
-	host->addr_value[0] &= 0xffff0000;
-
-	hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
-	hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
-	hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
-		    HINFC504_CMD);
-
-	hisi_nfc_dma_transfer(host, 1);
-
-	return 0;
-}
-
-static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
-{
-	struct mtd_info	*mtd = nand_to_mtd(&host->chip);
-
-	if ((host->addr_value[0] == host->cache_addr_value[0]) &&
-	    (host->addr_value[1] == host->cache_addr_value[1]))
-		return 0;
-
-	host->addr_value[0] &= 0xffff0000;
-
-	hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
-	hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
-	hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
-		    HINFC504_CMD);
-
-	hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
-	hinfc_write(host, mtd->writesize + mtd->oobsize,
-		    HINFC504_LOG_READ_LEN);
-
-	hisi_nfc_dma_transfer(host, 0);
-
-	host->cache_addr_value[0] = host->addr_value[0];
-	host->cache_addr_value[1] = host->addr_value[1];
-
-	return 0;
-}
-
-static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
-{
-	hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
-	hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
-		    HINFC504_CMD);
-
-	hinfc_write(host, HINFC504_OP_WAIT_READY_EN
-		| HINFC504_OP_CMD2_EN
-		| HINFC504_OP_CMD1_EN
-		| HINFC504_OP_ADDR_EN
-		| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
-			<< HINFC504_OP_NF_CS_SHIFT)
-		| ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
-			<< HINFC504_OP_ADDR_CYCLE_SHIFT),
-		HINFC504_OP);
-
-	wait_controller_finished(host);
-
-	return 0;
-}
-
-static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
-{
-	hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
-	hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
-	hinfc_write(host, 0, HINFC504_ADDRL);
-
-	hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
-		| HINFC504_OP_READ_DATA_EN
-		| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
-			<< HINFC504_OP_NF_CS_SHIFT)
-		| 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
-
-	wait_controller_finished(host);
-
-	return 0;
-}
-
-static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
-{
-	hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
-	hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
-	hinfc_write(host, HINFC504_OP_CMD1_EN
-		| HINFC504_OP_READ_DATA_EN
-		| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
-			<< HINFC504_OP_NF_CS_SHIFT),
-		HINFC504_OP);
-
-	wait_controller_finished(host);
-
-	return 0;
-}
-
-static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
-{
-	hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
-
-	hinfc_write(host, HINFC504_OP_CMD1_EN
-		| ((chipselect & HINFC504_OP_NF_CS_MASK)
-			<< HINFC504_OP_NF_CS_SHIFT)
-		| HINFC504_OP_WAIT_READY_EN,
-		HINFC504_OP);
-
-	wait_controller_finished(host);
-
-	return 0;
-}
-
-static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-
-	if (chipselect < 0)
-		return;
-
-	host->chipselect = chipselect;
-}
-
-static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-
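-	/*
-	 * STATUS and READID responses are latched in the controller's MMIO
-	 * buffer; regular page data is read from the DMA buffer.
-	 */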
-	if (host->command == NAND_CMD_STATUS)
-		return *(uint8_t *)(host->mmio);
-
-	host->offset++;
-
-	if (host->command == NAND_CMD_READID)
-		return *(uint8_t *)(host->mmio + host->offset - 1);
-
-	return *(uint8_t *)(host->buffer + host->offset - 1);
-}
-
-static u16 hisi_nfc_read_word(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-
-	host->offset += 2;
-	return *(u16 *)(host->buffer + host->offset - 2);
-}
-
-static void
-hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-
-	memcpy(host->buffer + host->offset, buf, len);
-	host->offset += len;
-}
-
-static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-
-	memcpy(buf, host->buffer + host->offset, len);
-	host->offset += len;
-}
-
-static void set_addr(struct mtd_info *mtd, int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-	unsigned int command = host->command;
-
-	host->addr_cycle    = 0;
-	host->addr_value[0] = 0;
-	host->addr_value[1] = 0;
-
-	/* Serially input address */
-	if (column != -1) {
-		/* Adjust columns for 16 bit buswidth */
-		if (chip->options & NAND_BUSWIDTH_16 &&
-				!nand_opcode_8bits(command))
-			column >>= 1;
-
-		host->addr_value[0] = column & 0xffff;
-		host->addr_cycle    = 2;
-	}
-	if (page_addr != -1) {
-		host->addr_value[0] |= (page_addr & 0xffff)
-			<< (host->addr_cycle * 8);
-		host->addr_cycle    += 2;
-		/* One more address cycle for devices > 128MiB */
-		if (chip->chipsize > (128 << 20)) {
-			host->addr_cycle += 1;
-			if (host->command == NAND_CMD_ERASE1)
-				host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
-			else
-				host->addr_value[1] |= ((page_addr >> 16) & 0xff);
-		}
-	}
-}
-
-static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
-		int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct hinfc_host *host = nand_get_controller_data(chip);
-	int is_cache_invalid = 1;
-	unsigned int flag = 0;
-
-	host->command = command;
-
-	switch (command) {
-	case NAND_CMD_READ0:
-	case NAND_CMD_READOOB:
-		if (command == NAND_CMD_READ0)
-			host->offset = column;
-		else
-			host->offset = column + mtd->writesize;
-
-		is_cache_invalid = 0;
-		set_addr(mtd, column, page_addr);
-		hisi_nfc_send_cmd_readstart(host);
-		break;
-
-	case NAND_CMD_SEQIN:
-		host->offset = column;
-		set_addr(mtd, column, page_addr);
-		break;
-
-	case NAND_CMD_ERASE1:
-		set_addr(mtd, column, page_addr);
-		break;
-
-	case NAND_CMD_PAGEPROG:
-		hisi_nfc_send_cmd_pageprog(host);
-		break;
-
-	case NAND_CMD_ERASE2:
-		hisi_nfc_send_cmd_erase(host);
-		break;
-
-	case NAND_CMD_READID:
-		host->offset = column;
-		memset(host->mmio, 0, 0x10);
-		hisi_nfc_send_cmd_readid(host);
-		break;
-
-	case NAND_CMD_STATUS:
-		flag = hinfc_read(host, HINFC504_CON);
-		if (chip->ecc.mode == NAND_ECC_HW)
-			hinfc_write(host,
-				    flag & ~(HINFC504_CON_ECCTYPE_MASK <<
-				    HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
-
-		host->offset = 0;
-		memset(host->mmio, 0, 0x10);
-		hisi_nfc_send_cmd_status(host);
-		hinfc_write(host, flag, HINFC504_CON);
-		break;
-
-	case NAND_CMD_RESET:
-		hisi_nfc_send_cmd_reset(host, host->chipselect);
-		break;
-
-	default:
-		dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
-			command, column, page_addr);
-	}
-
-	if (is_cache_invalid) {
-		host->cache_addr_value[0] = ~0;
-		host->cache_addr_value[1] = ~0;
-	}
-}
-
-static irqreturn_t hinfc_irq_handle(int irq, void *devid)
-{
-	struct hinfc_host *host = devid;
-	unsigned int flag;
-
-	flag = hinfc_read(host, HINFC504_INTS);
-	/* store the interrupt status */
-	host->irq_status |= flag;
-
-	if (flag & HINFC504_INTS_DMA) {
-		hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
-		complete(&host->cmd_complete);
-	} else if (flag & HINFC504_INTS_CE) {
-		hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
-	} else if (flag & HINFC504_INTS_UE) {
-		hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
-	}
-
-	return IRQ_HANDLED;
-}
-
-static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
-{
-	struct hinfc_host *host = nand_get_controller_data(chip);
-	int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
-	int stat_1, stat_2;
-
-	chip->read_buf(mtd, buf, mtd->writesize);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	/* errors which cannot be corrected by ECC */
-	if (host->irq_status & HINFC504_INTS_UE) {
-		mtd->ecc_stats.failed++;
-	} else if (host->irq_status & HINFC504_INTS_CE) {
-		/* TODO: need to add support for other ECC modes! */
-		switch (chip->ecc.strength) {
-		case 16:
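-			/*
-			 * The ECC status register packs two 6-bit corrected-bit
-			 * counters starting at bit 12: report their sum as the
-			 * number of corrected bits and their maximum as the
-			 * worst-case bitflip count.
-			 */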
-			status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
-					HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
-			stat_2 = status_ecc & 0x3f;
-			stat_1 = status_ecc >> 6 & 0x3f;
-			stat = stat_1 + stat_2;
-			stat_max = max_t(int, stat_1, stat_2);
-		}
-		mtd->ecc_stats.corrected += stat;
-		max_bitflips = max_t(int, max_bitflips, stat_max);
-	}
-	host->irq_status = 0;
-
-	return max_bitflips;
-}
-
-static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-				int page)
-{
-	struct hinfc_host *host = nand_get_controller_data(chip);
-
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	if (host->irq_status & HINFC504_INTS_UE) {
-		host->irq_status = 0;
-		return -EBADMSG;
-	}
-
-	host->irq_status = 0;
-	return 0;
-}
-
-static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
-		struct nand_chip *chip, const uint8_t *buf, int oob_required,
-		int page)
-{
-	chip->write_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-static void hisi_nfc_host_init(struct hinfc_host *host)
-{
-	struct nand_chip *chip = &host->chip;
-	unsigned int flag = 0;
-
-	host->version = hinfc_read(host, HINFC_VERSION);
-	host->addr_cycle		= 0;
-	host->addr_value[0]		= 0;
-	host->addr_value[1]		= 0;
-	host->cache_addr_value[0]	= ~0;
-	host->cache_addr_value[1]	= ~0;
-	host->chipselect		= 0;
-
-	/* Default: 2K page size, no ECC; both are reconfigured later during probe. */
-	flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
-		| ((0x001 & HINFC504_CON_PAGESIZE_MASK)
-			<< HINFC504_CON_PAGEISZE_SHIFT)
-		| ((0x0 & HINFC504_CON_ECCTYPE_MASK)
-			<< HINFC504_CON_ECCTYPE_SHIFT)
-		| ((chip->options & NAND_BUSWIDTH_16) ?
-			HINFC504_CON_BUS_WIDTH : 0);
-	hinfc_write(host, flag, HINFC504_CON);
-
-	memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
-
-	hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
-		    HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
-
-	/* enable DMA irq */
-	hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
-}
-
-static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
-			      struct mtd_oob_region *oobregion)
-{
-	/* FIXME: add ECC bytes position */
-	return -ENOTSUPP;
-}
-
-static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
-			       struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 2;
-	oobregion->length = 6;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
-	.ecc = hisi_ooblayout_ecc,
-	.free = hisi_ooblayout_free,
-};
-
-static int hisi_nfc_ecc_probe(struct hinfc_host *host)
-{
-	unsigned int flag;
-	int size, strength, ecc_bits;
-	struct device *dev = host->dev;
-	struct nand_chip *chip = &host->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-
-	size = chip->ecc.size;
-	strength = chip->ecc.strength;
-	if (size != 1024) {
-		dev_err(dev, "error ecc size: %d\n", size);
-		return -EINVAL;
-	}
-
-	if ((size == 1024) && ((strength != 8) && (strength != 16) &&
-				(strength != 24) && (strength != 40))) {
-		dev_err(dev, "ecc size and strength do not match\n");
-		return -EINVAL;
-	}
-
-	chip->ecc.size = size;
-	chip->ecc.strength = strength;
-
-	chip->ecc.read_page = hisi_nand_read_page_hwecc;
-	chip->ecc.read_oob = hisi_nand_read_oob;
-	chip->ecc.write_page = hisi_nand_write_page_hwecc;
-
-	switch (chip->ecc.strength) {
-	case 16:
-		ecc_bits = 6;
-		if (mtd->writesize == 2048)
-			mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
-
-		/* TODO: add more page size support */
-		break;
-
-	/* TODO: add more ecc strength support */
-	default:
-		dev_err(dev, "not support strength: %d\n", chip->ecc.strength);
-		return -EINVAL;
-	}
-
-	flag = hinfc_read(host, HINFC504_CON);
-	/* add ecc type configure */
-	flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
-						<< HINFC504_CON_ECCTYPE_SHIFT);
-	hinfc_write(host, flag, HINFC504_CON);
-
-	/* enable ecc irq */
-	flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
-	hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
-		    HINFC504_INTEN);
-
-	return 0;
-}
-
-static int hisi_nfc_probe(struct platform_device *pdev)
-{
-	int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP;
-	struct device *dev = &pdev->dev;
-	struct hinfc_host *host;
-	struct nand_chip  *chip;
-	struct mtd_info   *mtd;
-	struct resource	  *res;
-	struct device_node *np = dev->of_node;
-
-	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-	host->dev = dev;
-
-	platform_set_drvdata(pdev, host);
-	chip = &host->chip;
-	mtd  = nand_to_mtd(chip);
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "no IRQ resource defined\n");
-		ret = -ENXIO;
-		goto err_res;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	host->iobase = devm_ioremap_resource(dev, res);
-	if (IS_ERR(host->iobase)) {
-		ret = PTR_ERR(host->iobase);
-		goto err_res;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	host->mmio = devm_ioremap_resource(dev, res);
-	if (IS_ERR(host->mmio)) {
-		ret = PTR_ERR(host->mmio);
-		dev_err(dev, "devm_ioremap_resource[1] fail\n");
-		goto err_res;
-	}
-
-	mtd->name		= "hisi_nand";
-	mtd->dev.parent         = &pdev->dev;
-
-	nand_set_controller_data(chip, host);
-	nand_set_flash_node(chip, np);
-	chip->cmdfunc		= hisi_nfc_cmdfunc;
-	chip->select_chip	= hisi_nfc_select_chip;
-	chip->read_byte		= hisi_nfc_read_byte;
-	chip->read_word		= hisi_nfc_read_word;
-	chip->write_buf		= hisi_nfc_write_buf;
-	chip->read_buf		= hisi_nfc_read_buf;
-	chip->chip_delay	= HINFC504_CHIP_DELAY;
-
-	hisi_nfc_host_init(host);
-
-	ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ\n");
-		goto err_res;
-	}
-
-	ret = nand_scan_ident(mtd, max_chips, NULL);
-	if (ret) {
-		ret = -ENODEV;
-		goto err_res;
-	}
-
-	host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
-		&host->dma_buffer, GFP_KERNEL);
-	if (!host->buffer) {
-		ret = -ENOMEM;
-		goto err_res;
-	}
-
-	host->dma_oob = host->dma_buffer + mtd->writesize;
-	memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
-
-	flag = hinfc_read(host, HINFC504_CON);
-	flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
-	switch (mtd->writesize) {
-	case 2048:
-		flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
-	/*
-	 * TODO: add more pagesize support,
-	 * default pagesize has been set in hisi_nfc_host_init
-	 */
-	default:
-		dev_err(dev, "NON-2KB page size nand flash\n");
-		ret = -EINVAL;
-		goto err_res;
-	}
-	hinfc_write(host, flag, HINFC504_CON);
-
-	if (chip->ecc.mode == NAND_ECC_HW)
-		hisi_nfc_ecc_probe(host);
-
-	ret = nand_scan_tail(mtd);
-	if (ret) {
-		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
-		goto err_res;
-	}
-
-	ret = mtd_device_register(mtd, NULL, 0);
-	if (ret) {
-		dev_err(dev, "Err MTD partition=%d\n", ret);
-		goto err_mtd;
-	}
-
-	return 0;
-
-err_mtd:
-	nand_release(mtd);
-err_res:
-	return ret;
-}
-
-static int hisi_nfc_remove(struct platform_device *pdev)
-{
-	struct hinfc_host *host = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = nand_to_mtd(&host->chip);
-
-	nand_release(mtd);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int hisi_nfc_suspend(struct device *dev)
-{
-	struct hinfc_host *host = dev_get_drvdata(dev);
-	unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
-
-	while (time_before(jiffies, timeout)) {
-		if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
-		    (hinfc_read(host, HINFC504_DMA_CTRL) &
-		     HINFC504_DMA_CTRL_DMA_START)) {
-			cond_resched();
-			return 0;
-		}
-	}
-
-	dev_err(host->dev, "nand controller suspend timeout.\n");
-
-	return -EAGAIN;
-}
-
-static int hisi_nfc_resume(struct device *dev)
-{
-	int cs;
-	struct hinfc_host *host = dev_get_drvdata(dev);
-	struct nand_chip *chip = &host->chip;
-
-	for (cs = 0; cs < chip->numchips; cs++)
-		hisi_nfc_send_cmd_reset(host, cs);
-	hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
-		    HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
-
-	return 0;
-}
-#endif
-static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
-
-static const struct of_device_id nfc_id_table[] = {
-	{ .compatible = "hisilicon,504-nfc" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, nfc_id_table);
-
-static struct platform_driver hisi_nfc_driver = {
-	.driver = {
-		.name  = "hisi_nand",
-		.of_match_table = nfc_id_table,
-		.pm = &hisi_nfc_pm_ops,
-	},
-	.probe		= hisi_nfc_probe,
-	.remove		= hisi_nfc_remove,
-};
-
-module_platform_driver(hisi_nfc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Zhou Wang");
-MODULE_AUTHOR("Zhiyong Cai");
-MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
deleted file mode 100644
index e813ec11ee84..000000000000
--- a/drivers/mtd/nand/jz4740_nand.c
+++ /dev/null
@@ -1,557 +0,0 @@ 
-/*
- *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
- *  JZ4740 SoC NAND controller driver
- *
- *  This program is free software; you can redistribute it and/or modify it
- *  under  the terms of the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the License, or (at your
- *  option) any later version.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with this program; if not, write to the Free Software Foundation, Inc.,
- *  675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/gpio.h>
-
-#include <asm/mach-jz4740/gpio.h>
-#include <asm/mach-jz4740/jz4740_nand.h>
-
-#define JZ_REG_NAND_CTRL	0x50
-#define JZ_REG_NAND_ECC_CTRL	0x100
-#define JZ_REG_NAND_DATA	0x104
-#define JZ_REG_NAND_PAR0	0x108
-#define JZ_REG_NAND_PAR1	0x10C
-#define JZ_REG_NAND_PAR2	0x110
-#define JZ_REG_NAND_IRQ_STAT	0x114
-#define JZ_REG_NAND_IRQ_CTRL	0x118
-#define JZ_REG_NAND_ERR(x)	(0x11C + ((x) << 2))
-
-#define JZ_NAND_ECC_CTRL_PAR_READY	BIT(4)
-#define JZ_NAND_ECC_CTRL_ENCODING	BIT(3)
-#define JZ_NAND_ECC_CTRL_RS		BIT(2)
-#define JZ_NAND_ECC_CTRL_RESET		BIT(1)
-#define JZ_NAND_ECC_CTRL_ENABLE		BIT(0)
-
-#define JZ_NAND_STATUS_ERR_COUNT	(BIT(31) | BIT(30) | BIT(29))
-#define JZ_NAND_STATUS_PAD_FINISH	BIT(4)
-#define JZ_NAND_STATUS_DEC_FINISH	BIT(3)
-#define JZ_NAND_STATUS_ENC_FINISH	BIT(2)
-#define JZ_NAND_STATUS_UNCOR_ERROR	BIT(1)
-#define JZ_NAND_STATUS_ERROR		BIT(0)
-
-#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
-#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
-#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa
-
-#define JZ_NAND_MEM_CMD_OFFSET 0x08000
-#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
-
-struct jz_nand {
-	struct nand_chip chip;
-	void __iomem *base;
-	struct resource *mem;
-
-	unsigned char banks[JZ_NAND_NUM_BANKS];
-	void __iomem *bank_base[JZ_NAND_NUM_BANKS];
-	struct resource *bank_mem[JZ_NAND_NUM_BANKS];
-
-	int selected_bank;
-
-	struct gpio_desc *busy_gpio;
-	bool is_reading;
-};
-
-static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct jz_nand, chip);
-}
-
-static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct jz_nand *nand = mtd_to_jz_nand(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	uint32_t ctrl;
-	int banknr;
-
-	ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
-	ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK;
-
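-	/* Map the logical chip number to the memory bank it was detected on. */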
-	if (chipnr == -1) {
-		banknr = -1;
-	} else {
-		banknr = nand->banks[chipnr] - 1;
-		chip->IO_ADDR_R = nand->bank_base[banknr];
-		chip->IO_ADDR_W = nand->bank_base[banknr];
-	}
-	writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
-
-	nand->selected_bank = banknr;
-}
-
-static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
-{
-	struct jz_nand *nand = mtd_to_jz_nand(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	uint32_t reg;
-	void __iomem *bank_base = nand->bank_base[nand->selected_bank];
-
-	BUG_ON(nand->selected_bank < 0);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
-		if (ctrl & NAND_ALE)
-			bank_base += JZ_NAND_MEM_ADDR_OFFSET;
-		else if (ctrl & NAND_CLE)
-			bank_base += JZ_NAND_MEM_CMD_OFFSET;
-		chip->IO_ADDR_W = bank_base;
-
-		reg = readl(nand->base + JZ_REG_NAND_CTRL);
-		if (ctrl & NAND_NCE)
-			reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
-		else
-			reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
-		writel(reg, nand->base + JZ_REG_NAND_CTRL);
-	}
-	if (dat != NAND_CMD_NONE)
-		writeb(dat, chip->IO_ADDR_W);
-}
-
-static int jz_nand_dev_ready(struct mtd_info *mtd)
-{
-	struct jz_nand *nand = mtd_to_jz_nand(mtd);
-	return gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
-static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
-{
-	struct jz_nand *nand = mtd_to_jz_nand(mtd);
-	uint32_t reg;
-
-	writel(0, nand->base + JZ_REG_NAND_IRQ_STAT);
-	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
-
-	reg |= JZ_NAND_ECC_CTRL_RESET;
-	reg |= JZ_NAND_ECC_CTRL_ENABLE;
-	reg |= JZ_NAND_ECC_CTRL_RS;
-
-	switch (mode) {
-	case NAND_ECC_READ:
-		reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
-		nand->is_reading = true;
-		break;
-	case NAND_ECC_WRITE:
-		reg |= JZ_NAND_ECC_CTRL_ENCODING;
-		nand->is_reading = false;
-		break;
-	default:
-		break;
-	}
-
-	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
-}
-
-static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat,
-	uint8_t *ecc_code)
-{
-	struct jz_nand *nand = mtd_to_jz_nand(mtd);
-	uint32_t reg, status;
-	int i;
-	unsigned int timeout = 1000;
-	static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4,
-						0x8b, 0xff, 0xb7, 0x6f};
-
-	if (nand->is_reading)
-		return 0;
-
-	do {
-		status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
-	} while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
-
-	if (timeout == 0)
-		return -1;
-
-	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
-	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
-	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
-
-	for (i = 0; i < 9; ++i)
-		ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i);
-
-	/* If the written data is completely 0xff, we also want to write 0xff as
-	 * ecc, otherwise we will get in trouble when doing subpage writes. */
-	if (memcmp(ecc_code, empty_block_ecc, 9) == 0)
-		memset(ecc_code, 0xff, 9);
-
-	return 0;
-}
-
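-/*
- * Correct a single 9-bit Reed-Solomon symbol: 'index' is the symbol number
- * reported by the controller and 'mask' the error pattern to XOR into it.
- * Translate the symbol number into a byte/bit offset, patch the 16-bit
- * window covering the symbol and write it back.
- */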
-static void jz_nand_correct_data(uint8_t *dat, int index, int mask)
-{
-	int offset = index & 0x7;
-	uint16_t data;
-
-	index += (index >> 3);
-
-	data = dat[index];
-	data |= dat[index+1] << 8;
-
-	mask ^= (data >> offset) & 0x1ff;
-	data &= ~(0x1ff << offset);
-	data |= (mask << offset);
-
-	dat[index] = data & 0xff;
-	dat[index+1] = (data >> 8) & 0xff;
-}
-
-static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
-	uint8_t *read_ecc, uint8_t *calc_ecc)
-{
-	struct jz_nand *nand = mtd_to_jz_nand(mtd);
-	int i, error_count, index;
-	uint32_t reg, status, error;
-	unsigned int timeout = 1000;
-
-	for (i = 0; i < 9; ++i)
-		writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i);
-
-	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
-	reg |= JZ_NAND_ECC_CTRL_PAR_READY;
-	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
-
-	do {
-		status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
-	} while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
-
-	if (timeout == 0)
-		return -ETIMEDOUT;
-
-	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
-	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
-	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
-
-	if (status & JZ_NAND_STATUS_ERROR) {
-		if (status & JZ_NAND_STATUS_UNCOR_ERROR)
-			return -EBADMSG;
-
-		error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
-
-		for (i = 0; i < error_count; ++i) {
-			error = readl(nand->base + JZ_REG_NAND_ERR(i));
-			index = ((error >> 16) & 0x1ff) - 1;
-			if (index >= 0 && index < 512)
-				jz_nand_correct_data(dat, index, error & 0x1ff);
-		}
-
-		return error_count;
-	}
-
-	return 0;
-}
-
-static int jz_nand_ioremap_resource(struct platform_device *pdev,
-	const char *name, struct resource **res, void *__iomem *base)
-{
-	int ret;
-
-	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
-	if (!*res) {
-		dev_err(&pdev->dev, "Failed to get platform %s memory\n", name);
-		ret = -ENXIO;
-		goto err;
-	}
-
-	*res = request_mem_region((*res)->start, resource_size(*res),
-				pdev->name);
-	if (!*res) {
-		dev_err(&pdev->dev, "Failed to request %s memory region\n", name);
-		ret = -EBUSY;
-		goto err;
-	}
-
-	*base = ioremap((*res)->start, resource_size(*res));
-	if (!*base) {
-		dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name);
-		ret = -EBUSY;
-		goto err_release_mem;
-	}
-
-	return 0;
-
-err_release_mem:
-	release_mem_region((*res)->start, resource_size(*res));
-err:
-	*res = NULL;
-	*base = NULL;
-	return ret;
-}
-
-static inline void jz_nand_iounmap_resource(struct resource *res,
-					    void __iomem *base)
-{
-	iounmap(base);
-	release_mem_region(res->start, resource_size(res));
-}
-
-static int jz_nand_detect_bank(struct platform_device *pdev,
-			       struct jz_nand *nand, unsigned char bank,
-			       size_t chipnr, uint8_t *nand_maf_id,
-			       uint8_t *nand_dev_id)
-{
-	int ret;
-	int gpio;
-	char gpio_name[9];
-	char res_name[6];
-	uint32_t ctrl;
-	struct nand_chip *chip = &nand->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-
-	/* Request GPIO port. */
-	gpio = JZ_GPIO_MEM_CS0 + bank - 1;
-	sprintf(gpio_name, "NAND CS%d", bank);
-	ret = gpio_request(gpio, gpio_name);
-	if (ret) {
-		dev_warn(&pdev->dev,
-			"Failed to request %s gpio %d: %d\n",
-			gpio_name, gpio, ret);
-		goto notfound_gpio;
-	}
-
-	/* Request I/O resource. */
-	sprintf(res_name, "bank%d", bank);
-	ret = jz_nand_ioremap_resource(pdev, res_name,
-					&nand->bank_mem[bank - 1],
-					&nand->bank_base[bank - 1]);
-	if (ret)
-		goto notfound_resource;
-
-	/* Enable chip in bank. */
-	jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
-	ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
-	ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
-	writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
-
-	if (chipnr == 0) {
-		/* Detect first chip. */
-		ret = nand_scan_ident(mtd, 1, NULL);
-		if (ret)
-			goto notfound_id;
-
-		/* Retrieve the IDs from the first chip. */
-		chip->select_chip(mtd, 0);
-		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-		*nand_maf_id = chip->read_byte(mtd);
-		*nand_dev_id = chip->read_byte(mtd);
-	} else {
-		/* Detect additional chip. */
-		chip->select_chip(mtd, chipnr);
-		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
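-	/* The requested page is already in the controller buffer; skip the transfer. */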
-		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-		if (*nand_maf_id != chip->read_byte(mtd)
-		 || *nand_dev_id != chip->read_byte(mtd)) {
-			ret = -ENODEV;
-			goto notfound_id;
-		}
-
-		/* Update size of the MTD. */
-		chip->numchips++;
-		mtd->size += chip->chipsize;
-	}
-
-	dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
-	return 0;
-
-notfound_id:
-	dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
-	ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
-	writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
-	jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
-	jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
-				 nand->bank_base[bank - 1]);
-notfound_resource:
-	gpio_free(gpio);
-notfound_gpio:
-	return ret;
-}
-
-static int jz_nand_probe(struct platform_device *pdev)
-{
-	int ret;
-	struct jz_nand *nand;
-	struct nand_chip *chip;
-	struct mtd_info *mtd;
-	struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	size_t chipnr, bank_idx;
-	uint8_t nand_maf_id = 0, nand_dev_id = 0;
-
-	nand = kzalloc(sizeof(*nand), GFP_KERNEL);
-	if (!nand)
-		return -ENOMEM;
-
-	ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
-	if (ret)
-		goto err_free;
-
-	nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
-	if (IS_ERR(nand->busy_gpio)) {
-		ret = PTR_ERR(nand->busy_gpio);
-		dev_err(&pdev->dev, "Failed to request busy gpio %d\n",
-		    ret);
-		goto err_iounmap_mmio;
-	}
-
-	chip		= &nand->chip;
-	mtd		= nand_to_mtd(chip);
-	mtd->dev.parent = &pdev->dev;
-	mtd->name	= "jz4740-nand";
-
-	chip->ecc.hwctl		= jz_nand_hwctl;
-	chip->ecc.calculate	= jz_nand_calculate_ecc_rs;
-	chip->ecc.correct	= jz_nand_correct_ecc_rs;
-	chip->ecc.mode		= NAND_ECC_HW_OOB_FIRST;
-	chip->ecc.size		= 512;
-	chip->ecc.bytes		= 9;
-	chip->ecc.strength	= 4;
-	chip->ecc.options	= NAND_ECC_GENERIC_ERASED_CHECK;
-
-	chip->chip_delay = 50;
-	chip->cmd_ctrl = jz_nand_cmd_ctrl;
-	chip->select_chip = jz_nand_select_chip;
-
-	if (nand->busy_gpio)
-		chip->dev_ready = jz_nand_dev_ready;
-
-	platform_set_drvdata(pdev, nand);
-
-	/* We are going to autodetect NAND chips in the banks specified in the
-	 * platform data. Although nand_scan_ident() can detect multiple chips,
-	 * it requires those chips to be numbered consecutively, which is not
-	 * always the case for external memory banks. And a fixed chip-to-bank
-	 * mapping is not practical either, since for example Dingoo units
-	 * produced at different times have NAND chips in different banks.
-	 */
-	chipnr = 0;
-	for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) {
-		unsigned char bank;
-
-		/* If there is no platform data, look for NAND in bank 1,
-		 * which is the most likely bank since it is the only one
-		 * that can be booted from.
-		 */
-		bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1;
-		if (bank == 0)
-			break;
-		if (bank > JZ_NAND_NUM_BANKS) {
-			dev_warn(&pdev->dev,
-				"Skipping non-existing bank: %d\n", bank);
-			continue;
-		}
-		/* The detection routine will directly or indirectly call
-		 * jz_nand_select_chip(), so nand->banks has to contain the
-		 * bank we're checking.
-		 */
-		nand->banks[chipnr] = bank;
-		if (jz_nand_detect_bank(pdev, nand, bank, chipnr,
-					&nand_maf_id, &nand_dev_id) == 0)
-			chipnr++;
-		else
-			nand->banks[chipnr] = 0;
-	}
-	if (chipnr == 0) {
-		dev_err(&pdev->dev, "No NAND chips found\n");
-		goto err_iounmap_mmio;
-	}
-
-	if (pdata && pdata->ident_callback) {
-		pdata->ident_callback(pdev, mtd, &pdata->partitions,
-					&pdata->num_partitions);
-	}
-
-	ret = nand_scan_tail(mtd);
-	if (ret) {
-		dev_err(&pdev->dev,  "Failed to scan NAND\n");
-		goto err_unclaim_banks;
-	}
-
-	ret = mtd_device_parse_register(mtd, NULL, NULL,
-					pdata ? pdata->partitions : NULL,
-					pdata ? pdata->num_partitions : 0);
-
-	if (ret) {
-		dev_err(&pdev->dev, "Failed to add mtd device\n");
-		goto err_nand_release;
-	}
-
-	dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
-
-	return 0;
-
-err_nand_release:
-	nand_release(mtd);
-err_unclaim_banks:
-	while (chipnr--) {
-		unsigned char bank = nand->banks[chipnr];
-		gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
-		jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
-					 nand->bank_base[bank - 1]);
-	}
-	writel(0, nand->base + JZ_REG_NAND_CTRL);
-err_iounmap_mmio:
-	jz_nand_iounmap_resource(nand->mem, nand->base);
-err_free:
-	kfree(nand);
-	return ret;
-}
-
-static int jz_nand_remove(struct platform_device *pdev)
-{
-	struct jz_nand *nand = platform_get_drvdata(pdev);
-	size_t i;
-
-	nand_release(nand_to_mtd(&nand->chip));
-
-	/* Deassert and disable all chips */
-	writel(0, nand->base + JZ_REG_NAND_CTRL);
-
-	for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) {
-		unsigned char bank = nand->banks[i];
-		if (bank != 0) {
-			jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
-						 nand->bank_base[bank - 1]);
-			gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
-		}
-	}
-
-	jz_nand_iounmap_resource(nand->mem, nand->base);
-
-	kfree(nand);
-
-	return 0;
-}
-
-static struct platform_driver jz_nand_driver = {
-	.probe = jz_nand_probe,
-	.remove = jz_nand_remove,
-	.driver = {
-		.name = "jz4740-nand",
-	},
-};
-
-module_platform_driver(jz_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
-MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC");
-MODULE_ALIAS("platform:jz4740-nand");
diff --git a/drivers/mtd/nand/jz4780_bch.c b/drivers/mtd/nand/jz4780_bch.c
deleted file mode 100644
index 731c6051d91e..000000000000
--- a/drivers/mtd/nand/jz4780_bch.c
+++ /dev/null
@@ -1,380 +0,0 @@ 
-/*
- * JZ4780 BCH controller
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/iopoll.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include "jz4780_bch.h"
-
-#define BCH_BHCR			0x0
-#define BCH_BHCCR			0x8
-#define BCH_BHCNT			0xc
-#define BCH_BHDR			0x10
-#define BCH_BHPAR0			0x14
-#define BCH_BHERR0			0x84
-#define BCH_BHINT			0x184
-#define BCH_BHINTES			0x188
-#define BCH_BHINTEC			0x18c
-#define BCH_BHINTE			0x190
-
-#define BCH_BHCR_BSEL_SHIFT		4
-#define BCH_BHCR_BSEL_MASK		(0x7f << BCH_BHCR_BSEL_SHIFT)
-#define BCH_BHCR_ENCE			BIT(2)
-#define BCH_BHCR_INIT			BIT(1)
-#define BCH_BHCR_BCHE			BIT(0)
-
-#define BCH_BHCNT_PARITYSIZE_SHIFT	16
-#define BCH_BHCNT_PARITYSIZE_MASK	(0x7f << BCH_BHCNT_PARITYSIZE_SHIFT)
-#define BCH_BHCNT_BLOCKSIZE_SHIFT	0
-#define BCH_BHCNT_BLOCKSIZE_MASK	(0x7ff << BCH_BHCNT_BLOCKSIZE_SHIFT)
-
-#define BCH_BHERR_MASK_SHIFT		16
-#define BCH_BHERR_MASK_MASK		(0xffff << BCH_BHERR_MASK_SHIFT)
-#define BCH_BHERR_INDEX_SHIFT		0
-#define BCH_BHERR_INDEX_MASK		(0x7ff << BCH_BHERR_INDEX_SHIFT)
-
-#define BCH_BHINT_ERRC_SHIFT		24
-#define BCH_BHINT_ERRC_MASK		(0x7f << BCH_BHINT_ERRC_SHIFT)
-#define BCH_BHINT_TERRC_SHIFT		16
-#define BCH_BHINT_TERRC_MASK		(0x7f << BCH_BHINT_TERRC_SHIFT)
-#define BCH_BHINT_DECF			BIT(3)
-#define BCH_BHINT_ENCF			BIT(2)
-#define BCH_BHINT_UNCOR			BIT(1)
-#define BCH_BHINT_ERR			BIT(0)
-
-#define BCH_CLK_RATE			(200 * 1000 * 1000)
-
-/* Timeout for BCH calculation/correction. */
-#define BCH_TIMEOUT_US			100000
-
-struct jz4780_bch {
-	struct device *dev;
-	void __iomem *base;
-	struct clk *clk;
-	struct mutex lock;
-};
-
-static void jz4780_bch_init(struct jz4780_bch *bch,
-			    struct jz4780_bch_params *params, bool encode)
-{
-	u32 reg;
-
-	/* Clear interrupt status. */
-	writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
-
-	/* Set up BCH count register. */
-	reg = params->size << BCH_BHCNT_BLOCKSIZE_SHIFT;
-	reg |= params->bytes << BCH_BHCNT_PARITYSIZE_SHIFT;
-	writel(reg, bch->base + BCH_BHCNT);
-
-	/* Initialise and enable BCH. */
-	reg = BCH_BHCR_BCHE | BCH_BHCR_INIT;
-	reg |= params->strength << BCH_BHCR_BSEL_SHIFT;
-	if (encode)
-		reg |= BCH_BHCR_ENCE;
-	writel(reg, bch->base + BCH_BHCR);
-}
-
-static void jz4780_bch_disable(struct jz4780_bch *bch)
-{
-	writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
-	writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
-}
-
-static void jz4780_bch_write_data(struct jz4780_bch *bch, const void *buf,
-				  size_t size)
-{
-	size_t size32 = size / sizeof(u32);
-	size_t size8 = size % sizeof(u32);
-	const u32 *src32;
-	const u8 *src8;
-
-	src32 = (const u32 *)buf;
-	while (size32--)
-		writel(*src32++, bch->base + BCH_BHDR);
-
-	src8 = (const u8 *)src32;
-	while (size8--)
-		writeb(*src8++, bch->base + BCH_BHDR);
-}
-
-static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
-				   size_t size)
-{
-	size_t size32 = size / sizeof(u32);
-	size_t size8 = size % sizeof(u32);
-	u32 *dest32;
-	u8 *dest8;
-	u32 val, offset = 0;
-
-	dest32 = (u32 *)buf;
-	while (size32--) {
-		*dest32++ = readl(bch->base + BCH_BHPAR0 + offset);
-		offset += sizeof(u32);
-	}
-
-	dest8 = (u8 *)dest32;
-	val = readl(bch->base + BCH_BHPAR0 + offset);
-	switch (size8) {
-	case 3:
-		dest8[2] = (val >> 16) & 0xff;
-	case 2:
-		dest8[1] = (val >> 8) & 0xff;
-	case 1:
-		dest8[0] = val & 0xff;
-		break;
-	}
-}
-
-static bool jz4780_bch_wait_complete(struct jz4780_bch *bch, unsigned int irq,
-				     u32 *status)
-{
-	u32 reg;
-	int ret;
-
-	/*
-	 * While we could use interrupts here and sleep until the operation
-	 * completes, the controller works fairly quickly (usually a few
-	 * microseconds) and so the overhead of sleeping until we get an
-	 * interrupt quite noticeably decreases performance.
-	 */
-	ret = readl_poll_timeout(bch->base + BCH_BHINT, reg,
-				 (reg & irq) == irq, 0, BCH_TIMEOUT_US);
-	if (ret)
-		return false;
-
-	if (status)
-		*status = reg;
-
-	writel(reg, bch->base + BCH_BHINT);
-	return true;
-}
-
-/**
- * jz4780_bch_calculate() - calculate ECC for a data buffer
- * @bch: BCH device.
- * @params: BCH parameters.
- * @buf: input buffer with raw data.
- * @ecc_code: output buffer with ECC.
- *
- * Return: 0 on success, -ETIMEDOUT if timed out while waiting for BCH
- * controller.
- */
-int jz4780_bch_calculate(struct jz4780_bch *bch, struct jz4780_bch_params *params,
-			 const u8 *buf, u8 *ecc_code)
-{
-	int ret = 0;
-
-	mutex_lock(&bch->lock);
-	jz4780_bch_init(bch, params, true);
-	jz4780_bch_write_data(bch, buf, params->size);
-
-	if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
-		jz4780_bch_read_parity(bch, ecc_code, params->bytes);
-	} else {
-		dev_err(bch->dev, "timed out while calculating ECC\n");
-		ret = -ETIMEDOUT;
-	}
-
-	jz4780_bch_disable(bch);
-	mutex_unlock(&bch->lock);
-	return ret;
-}
-EXPORT_SYMBOL(jz4780_bch_calculate);
-
-/**
- * jz4780_bch_correct() - detect and correct bit errors
- * @bch: BCH device.
- * @params: BCH parameters.
- * @buf: raw data read from the chip.
- * @ecc_code: ECC read from the chip.
- *
- * Given the raw data and the ECC read from the NAND device, detects and
- * corrects errors in the data.
- *
- * Return: the number of bit errors corrected, -EBADMSG if there are too many
- * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
- */
-int jz4780_bch_correct(struct jz4780_bch *bch, struct jz4780_bch_params *params,
-		       u8 *buf, u8 *ecc_code)
-{
-	u32 reg, mask, index;
-	int i, ret, count;
-
-	mutex_lock(&bch->lock);
-
-	jz4780_bch_init(bch, params, false);
-	jz4780_bch_write_data(bch, buf, params->size);
-	jz4780_bch_write_data(bch, ecc_code, params->bytes);
-
-	if (!jz4780_bch_wait_complete(bch, BCH_BHINT_DECF, &reg)) {
-		dev_err(bch->dev, "timed out while correcting data\n");
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	if (reg & BCH_BHINT_UNCOR) {
-		dev_warn(bch->dev, "uncorrectable ECC error\n");
-		ret = -EBADMSG;
-		goto out;
-	}
-
-	/* Correct any detected errors. */
-	if (reg & BCH_BHINT_ERR) {
-		count = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
-		ret = (reg & BCH_BHINT_TERRC_MASK) >> BCH_BHINT_TERRC_SHIFT;
-
-		for (i = 0; i < count; i++) {
-			reg = readl(bch->base + BCH_BHERR0 + (i * 4));
-			mask = (reg & BCH_BHERR_MASK_MASK) >>
-						BCH_BHERR_MASK_SHIFT;
-			index = (reg & BCH_BHERR_INDEX_MASK) >>
-						BCH_BHERR_INDEX_SHIFT;
-			buf[(index * 2) + 0] ^= mask;
-			buf[(index * 2) + 1] ^= mask >> 8;
-		}
-	} else {
-		ret = 0;
-	}
-
-out:
-	jz4780_bch_disable(bch);
-	mutex_unlock(&bch->lock);
-	return ret;
-}
-EXPORT_SYMBOL(jz4780_bch_correct);
-
-/**
- * jz4780_bch_get() - get the BCH controller device
- * @np: BCH device tree node.
- *
- * Gets the BCH controller device from the specified device tree node. The
- * device must be released with jz4780_bch_release() when it is no longer being
- * used.
- *
- * Return: a pointer to jz4780_bch, errors are encoded into the pointer.
- * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
- */
-static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
-{
-	struct platform_device *pdev;
-	struct jz4780_bch *bch;
-
-	pdev = of_find_device_by_node(np);
-	if (!pdev || !platform_get_drvdata(pdev))
-		return ERR_PTR(-EPROBE_DEFER);
-
-	get_device(&pdev->dev);
-
-	bch = platform_get_drvdata(pdev);
-	clk_prepare_enable(bch->clk);
-
-	return bch;
-}
-
-/**
- * of_jz4780_bch_get() - get the BCH controller from a DT node
- * @of_node: the node that contains a bch-controller property.
- *
- * Get the bch-controller property from the given device tree
- * node and pass it to jz4780_bch_get to do the work.
- *
- * Return: a pointer to jz4780_bch, errors are encoded into the pointer.
- * PTR_ERR(-EPROBE_DEFER) if the device hasn't been initialised yet.
- */
-struct jz4780_bch *of_jz4780_bch_get(struct device_node *of_node)
-{
-	struct jz4780_bch *bch = NULL;
-	struct device_node *np;
-
-	np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
-
-	if (np) {
-		bch = jz4780_bch_get(np);
-		of_node_put(np);
-	}
-	return bch;
-}
-EXPORT_SYMBOL(of_jz4780_bch_get);
-
-/**
- * jz4780_bch_release() - release the BCH controller device
- * @bch: BCH device.
- */
-void jz4780_bch_release(struct jz4780_bch *bch)
-{
-	clk_disable_unprepare(bch->clk);
-	put_device(bch->dev);
-}
-EXPORT_SYMBOL(jz4780_bch_release);
-
-static int jz4780_bch_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct jz4780_bch *bch;
-	struct resource *res;
-
-	bch = devm_kzalloc(dev, sizeof(*bch), GFP_KERNEL);
-	if (!bch)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	bch->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(bch->base))
-		return PTR_ERR(bch->base);
-
-	jz4780_bch_disable(bch);
-
-	bch->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(bch->clk)) {
-		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(bch->clk));
-		return PTR_ERR(bch->clk);
-	}
-
-	clk_set_rate(bch->clk, BCH_CLK_RATE);
-
-	mutex_init(&bch->lock);
-
-	bch->dev = dev;
-	platform_set_drvdata(pdev, bch);
-
-	return 0;
-}
-
-static const struct of_device_id jz4780_bch_dt_match[] = {
-	{ .compatible = "ingenic,jz4780-bch" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
-
-static struct platform_driver jz4780_bch_driver = {
-	.probe		= jz4780_bch_probe,
-	.driver	= {
-		.name	= "jz4780-bch",
-		.of_match_table = of_match_ptr(jz4780_bch_dt_match),
-	},
-};
-module_platform_driver(jz4780_bch_driver);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
-MODULE_LICENSE("GPL v2");
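The correction loop in jz4780_bch_correct() above is the only non-obvious step in this file: each BCH_BHERR word packs a 16-bit XOR mask and a half-word index, and the fix is applied by XOR-ing the mask into the two bytes at index * 2. A minimal, self-contained sketch of that decode step (register layout taken from the macros above; the sample value is invented purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define BCH_BHERR_MASK_SHIFT	16
#define BCH_BHERR_MASK_MASK	(0xffffu << BCH_BHERR_MASK_SHIFT)
#define BCH_BHERR_INDEX_SHIFT	0
#define BCH_BHERR_INDEX_MASK	(0x7ffu << BCH_BHERR_INDEX_SHIFT)

/* Apply one BHERR report to the data buffer, as jz4780_bch_correct() does. */
static void bch_apply_err(uint8_t *buf, uint32_t bherr)
{
	uint32_t mask = (bherr & BCH_BHERR_MASK_MASK) >> BCH_BHERR_MASK_SHIFT;
	uint32_t index = (bherr & BCH_BHERR_INDEX_MASK) >> BCH_BHERR_INDEX_SHIFT;

	/* Each index addresses a 16-bit half-word of the buffer. */
	buf[(index * 2) + 0] ^= mask & 0xff;
	buf[(index * 2) + 1] ^= (mask >> 8) & 0xff;
}

int main(void)
{
	uint8_t buf[64] = { 0 };

	/* Made-up report: flip bit 2 of half-word 10, i.e. bit 2 of byte 20. */
	bch_apply_err(buf, (0x0004u << BCH_BHERR_MASK_SHIFT) | 10);

	printf("buf[20] = 0x%02x\n", buf[20]);	/* prints 0x04 */
	return 0;
}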
diff --git a/drivers/mtd/nand/jz4780_bch.h b/drivers/mtd/nand/jz4780_bch.h
deleted file mode 100644
index bf4718088a3a..000000000000
--- a/drivers/mtd/nand/jz4780_bch.h
+++ /dev/null
@@ -1,43 +0,0 @@ 
-/*
- * JZ4780 BCH controller
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __DRIVERS_MTD_NAND_JZ4780_BCH_H__
-#define __DRIVERS_MTD_NAND_JZ4780_BCH_H__
-
-#include <linux/types.h>
-
-struct device;
-struct device_node;
-struct jz4780_bch;
-
-/**
- * struct jz4780_bch_params - BCH parameters
- * @size: data bytes per ECC step.
- * @bytes: ECC bytes per step.
- * @strength: number of correctable bits per ECC step.
- */
-struct jz4780_bch_params {
-	int size;
-	int bytes;
-	int strength;
-};
-
-int jz4780_bch_calculate(struct jz4780_bch *bch,
-				struct jz4780_bch_params *params,
-				const u8 *buf, u8 *ecc_code);
-int jz4780_bch_correct(struct jz4780_bch *bch,
-			      struct jz4780_bch_params *params, u8 *buf,
-			      u8 *ecc_code);
-
-void jz4780_bch_release(struct jz4780_bch *bch);
-struct jz4780_bch *of_jz4780_bch_get(struct device_node *np);
-
-#endif /* __DRIVERS_MTD_NAND_JZ4780_BCH_H__ */
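The header above is the whole contract between the BCH block and its users: obtain a handle with of_jz4780_bch_get() (which may return NULL when the "ingenic,bch-controller" phandle is absent, or an error pointer such as -EPROBE_DEFER), describe each ECC step with jz4780_bch_params, and drop the reference with jz4780_bch_release(). A condensed sketch of that call pattern follows; jz4780_nand.c, removed next, is the real in-tree user, and the numeric step geometry here is purely illustrative:

/* Hypothetical consumer of the jz4780 BCH API declared above. */
#include <linux/err.h>
#include <linux/of.h>

#include "jz4780_bch.h"

static int example_ecc_step(struct device_node *np,
			    const u8 *data, u8 *ecc, u8 *readback)
{
	struct jz4780_bch *bch;
	struct jz4780_bch_params params = {
		.size = 1024,	/* data bytes per step (illustrative) */
		.bytes = 42,	/* parity bytes per step (illustrative) */
		.strength = 24,	/* correctable bits per step (illustrative) */
	};
	int ret;

	bch = of_jz4780_bch_get(np);
	if (IS_ERR(bch))
		return PTR_ERR(bch);	/* may be -EPROBE_DEFER */
	if (!bch)
		return -ENODEV;		/* no ingenic,bch-controller phandle */

	/* Encode on write ... */
	ret = jz4780_bch_calculate(bch, &params, data, ecc);
	if (ret)
		goto out;

	/*
	 * ... and decode/correct on read, using the data and ECC bytes read
	 * back from the device. A positive return value is the number of
	 * corrected bitflips.
	 */
	ret = jz4780_bch_correct(bch, &params, readback, ecc);

out:
	jz4780_bch_release(bch);
	return ret;
}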
diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c
deleted file mode 100644
index 2f725bd83de8..000000000000
--- a/drivers/mtd/nand/jz4780_nand.c
+++ /dev/null
@@ -1,416 +0,0 @@ 
-/*
- * JZ4780 NAND driver
- *
- * Copyright (c) 2015 Imagination Technologies
- * Author: Alex Smith <alex.smith@imgtec.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-#include <linux/gpio/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/jz4780-nemc.h>
-
-#include "jz4780_bch.h"
-
-#define DRV_NAME	"jz4780-nand"
-
-#define OFFSET_DATA	0x00000000
-#define OFFSET_CMD	0x00400000
-#define OFFSET_ADDR	0x00800000
-
-/* Command delay when there is no R/B pin. */
-#define RB_DELAY_US	100
-
-struct jz4780_nand_cs {
-	unsigned int bank;
-	void __iomem *base;
-};
-
-struct jz4780_nand_controller {
-	struct device *dev;
-	struct jz4780_bch *bch;
-	struct nand_hw_control controller;
-	unsigned int num_banks;
-	struct list_head chips;
-	int selected;
-	struct jz4780_nand_cs cs[];
-};
-
-struct jz4780_nand_chip {
-	struct nand_chip chip;
-	struct list_head chip_list;
-
-	struct gpio_desc *busy_gpio;
-	struct gpio_desc *wp_gpio;
-	unsigned int reading: 1;
-};
-
-static inline struct jz4780_nand_chip *to_jz4780_nand_chip(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct jz4780_nand_chip, chip);
-}
-
-static inline struct jz4780_nand_controller *to_jz4780_nand_controller(struct nand_hw_control *ctrl)
-{
-	return container_of(ctrl, struct jz4780_nand_controller, controller);
-}
-
-static void jz4780_nand_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
-	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
-	struct jz4780_nand_cs *cs;
-
-	/* Ensure the currently selected chip is deasserted. */
-	if (chipnr == -1 && nfc->selected >= 0) {
-		cs = &nfc->cs[nfc->selected];
-		jz4780_nemc_assert(nfc->dev, cs->bank, false);
-	}
-
-	nfc->selected = chipnr;
-}
-
-static void jz4780_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
-				 unsigned int ctrl)
-{
-	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
-	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
-	struct jz4780_nand_cs *cs;
-
-	if (WARN_ON(nfc->selected < 0))
-		return;
-
-	cs = &nfc->cs[nfc->selected];
-
-	jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_ALE)
-		writeb(cmd, cs->base + OFFSET_ADDR);
-	else if (ctrl & NAND_CLE)
-		writeb(cmd, cs->base + OFFSET_CMD);
-}
-
-static int jz4780_nand_dev_ready(struct mtd_info *mtd)
-{
-	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
-
-	return !gpiod_get_value_cansleep(nand->busy_gpio);
-}
-
-static void jz4780_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
-{
-	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
-
-	nand->reading = (mode == NAND_ECC_READ);
-}
-
-static int jz4780_nand_ecc_calculate(struct mtd_info *mtd, const u8 *dat,
-				     u8 *ecc_code)
-{
-	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
-	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
-	struct jz4780_bch_params params;
-
-	/*
-	 * Don't need to generate the ECC when reading, BCH does it for us as
-	 * part of decoding/correction.
-	 */
-	if (nand->reading)
-		return 0;
-
-	params.size = nand->chip.ecc.size;
-	params.bytes = nand->chip.ecc.bytes;
-	params.strength = nand->chip.ecc.strength;
-
-	return jz4780_bch_calculate(nfc->bch, &params, dat, ecc_code);
-}
-
-static int jz4780_nand_ecc_correct(struct mtd_info *mtd, u8 *dat,
-				   u8 *read_ecc, u8 *calc_ecc)
-{
-	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
-	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
-	struct jz4780_bch_params params;
-
-	params.size = nand->chip.ecc.size;
-	params.bytes = nand->chip.ecc.bytes;
-	params.strength = nand->chip.ecc.strength;
-
-	return jz4780_bch_correct(nfc->bch, &params, dat, read_ecc);
-}
-
-static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *dev)
-{
-	struct nand_chip *chip = &nand->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
-	int eccbytes;
-
-	chip->ecc.bytes = fls((1 + 8) * chip->ecc.size)	*
-				(chip->ecc.strength / 8);
-
-	switch (chip->ecc.mode) {
-	case NAND_ECC_HW:
-		if (!nfc->bch) {
-			dev_err(dev, "HW BCH selected, but BCH controller not found\n");
-			return -ENODEV;
-		}
-
-		chip->ecc.hwctl = jz4780_nand_ecc_hwctl;
-		chip->ecc.calculate = jz4780_nand_ecc_calculate;
-		chip->ecc.correct = jz4780_nand_ecc_correct;
-		/* fall through */
-	case NAND_ECC_SOFT:
-		dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n",
-			(nfc->bch) ? "hardware BCH" : "software ECC",
-			chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
-		break;
-	case NAND_ECC_NONE:
-		dev_info(dev, "not using ECC\n");
-		break;
-	default:
-		dev_err(dev, "ECC mode %d not supported\n", chip->ecc.mode);
-		return -EINVAL;
-	}
-
-	/* The NAND core will generate the ECC layout for SW ECC */
-	if (chip->ecc.mode != NAND_ECC_HW)
-		return 0;
-
-	/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
-	eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
-
-	if (eccbytes > mtd->oobsize - 2) {
-		dev_err(dev,
-			"invalid ECC config: required %d ECC bytes, but only %d are available",
-			eccbytes, mtd->oobsize - 2);
-		return -EINVAL;
-	}
-
-	mtd->ooblayout = &nand_ooblayout_lp_ops;
-
-	return 0;
-}
-
-static int jz4780_nand_init_chip(struct platform_device *pdev,
-				struct jz4780_nand_controller *nfc,
-				struct device_node *np,
-				unsigned int chipnr)
-{
-	struct device *dev = &pdev->dev;
-	struct jz4780_nand_chip *nand;
-	struct jz4780_nand_cs *cs;
-	struct resource *res;
-	struct nand_chip *chip;
-	struct mtd_info *mtd;
-	const __be32 *reg;
-	int ret = 0;
-
-	cs = &nfc->cs[chipnr];
-
-	reg = of_get_property(np, "reg", NULL);
-	if (!reg)
-		return -EINVAL;
-
-	cs->bank = be32_to_cpu(*reg);
-
-	jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr);
-	cs->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(cs->base))
-		return PTR_ERR(cs->base);
-
-	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
-	if (!nand)
-		return -ENOMEM;
-
-	nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
-
-	if (IS_ERR(nand->busy_gpio)) {
-		ret = PTR_ERR(nand->busy_gpio);
-		dev_err(dev, "failed to request busy GPIO: %d\n", ret);
-		return ret;
-	} else if (nand->busy_gpio) {
-		nand->chip.dev_ready = jz4780_nand_dev_ready;
-	}
-
-	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
-
-	if (IS_ERR(nand->wp_gpio)) {
-		ret = PTR_ERR(nand->wp_gpio);
-		dev_err(dev, "failed to request WP GPIO: %d\n", ret);
-		return ret;
-	}
-
-	chip = &nand->chip;
-	mtd = nand_to_mtd(chip);
-	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
-				   cs->bank);
-	if (!mtd->name)
-		return -ENOMEM;
-	mtd->dev.parent = dev;
-
-	chip->IO_ADDR_R = cs->base + OFFSET_DATA;
-	chip->IO_ADDR_W = cs->base + OFFSET_DATA;
-	chip->chip_delay = RB_DELAY_US;
-	chip->options = NAND_NO_SUBPAGE_WRITE;
-	chip->select_chip = jz4780_nand_select_chip;
-	chip->cmd_ctrl = jz4780_nand_cmd_ctrl;
-	chip->ecc.mode = NAND_ECC_HW;
-	chip->controller = &nfc->controller;
-	nand_set_flash_node(chip, np);
-
-	ret = nand_scan_ident(mtd, 1, NULL);
-	if (ret)
-		return ret;
-
-	ret = jz4780_nand_init_ecc(nand, dev);
-	if (ret)
-		return ret;
-
-	ret = nand_scan_tail(mtd);
-	if (ret)
-		return ret;
-
-	ret = mtd_device_register(mtd, NULL, 0);
-	if (ret) {
-		nand_release(mtd);
-		return ret;
-	}
-
-	list_add_tail(&nand->chip_list, &nfc->chips);
-
-	return 0;
-}
-
-static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc)
-{
-	struct jz4780_nand_chip *chip;
-
-	while (!list_empty(&nfc->chips)) {
-		chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list);
-		nand_release(nand_to_mtd(&chip->chip));
-		list_del(&chip->chip_list);
-	}
-}
-
-static int jz4780_nand_init_chips(struct jz4780_nand_controller *nfc,
-				  struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *np;
-	int i = 0;
-	int ret;
-	int num_chips = of_get_child_count(dev->of_node);
-
-	if (num_chips > nfc->num_banks) {
-		dev_err(dev, "found %d chips but only %d banks\n", num_chips, nfc->num_banks);
-		return -EINVAL;
-	}
-
-	for_each_child_of_node(dev->of_node, np) {
-		ret = jz4780_nand_init_chip(pdev, nfc, np, i);
-		if (ret) {
-			jz4780_nand_cleanup_chips(nfc);
-			return ret;
-		}
-
-		i++;
-	}
-
-	return 0;
-}
-
-static int jz4780_nand_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	unsigned int num_banks;
-	struct jz4780_nand_controller *nfc;
-	int ret;
-
-	num_banks = jz4780_nemc_num_banks(dev);
-	if (num_banks == 0) {
-		dev_err(dev, "no banks found\n");
-		return -ENODEV;
-	}
-
-	nfc = devm_kzalloc(dev, sizeof(*nfc) + (sizeof(nfc->cs[0]) * num_banks), GFP_KERNEL);
-	if (!nfc)
-		return -ENOMEM;
-
-	/*
-	 * Check for BCH HW before we call nand_scan_ident, to prevent us from
-	 * having to call it again if the BCH driver returns -EPROBE_DEFER.
-	 */
-	nfc->bch = of_jz4780_bch_get(dev->of_node);
-	if (IS_ERR(nfc->bch))
-		return PTR_ERR(nfc->bch);
-
-	nfc->dev = dev;
-	nfc->num_banks = num_banks;
-
-	nand_hw_control_init(&nfc->controller);
-	INIT_LIST_HEAD(&nfc->chips);
-
-	ret = jz4780_nand_init_chips(nfc, pdev);
-	if (ret) {
-		if (nfc->bch)
-			jz4780_bch_release(nfc->bch);
-		return ret;
-	}
-
-	platform_set_drvdata(pdev, nfc);
-	return 0;
-}
-
-static int jz4780_nand_remove(struct platform_device *pdev)
-{
-	struct jz4780_nand_controller *nfc = platform_get_drvdata(pdev);
-
-	if (nfc->bch)
-		jz4780_bch_release(nfc->bch);
-
-	jz4780_nand_cleanup_chips(nfc);
-
-	return 0;
-}
-
-static const struct of_device_id jz4780_nand_dt_match[] = {
-	{ .compatible = "ingenic,jz4780-nand" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, jz4780_nand_dt_match);
-
-static struct platform_driver jz4780_nand_driver = {
-	.probe		= jz4780_nand_probe,
-	.remove		= jz4780_nand_remove,
-	.driver	= {
-		.name	= DRV_NAME,
-		.of_match_table = of_match_ptr(jz4780_nand_dt_match),
-	},
-};
-module_platform_driver(jz4780_nand_driver);
-
-MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
-MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
-MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
-MODULE_LICENSE("GPL v2");
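The per-step ECC size computed in jz4780_nand_init_ecc() follows the usual BCH sizing rule: fls((1 + 8) * ecc.size) approximates the parity symbol width in bits for a step of that size, and multiplying by strength / 8 converts strength symbols' worth of parity into bytes (so the driver effectively assumes the strength is a multiple of 8). The layout check then requires steps * ecc.bytes to fit in the OOB area minus two reserved bytes. A small userspace sketch of the same arithmetic, with illustrative geometry that is not taken from the patch:

#include <stdio.h>

/* Find-last-set, 1-based, mirroring the kernel's fls() for positive inputs. */
static int fls32(unsigned int x)
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	/* Illustrative geometry; real values come from nand_scan_ident()/DT. */
	int ecc_size = 1024;	/* data bytes per ECC step */
	int strength = 24;	/* correctable bits per step */
	int writesize = 8192;	/* page size */
	int oobsize = 448;	/* OOB bytes per page */

	/* Same formula as jz4780_nand_init_ecc(). */
	int ecc_bytes = fls32((1 + 8) * ecc_size) * (strength / 8);
	int total = writesize / ecc_size * ecc_bytes;

	printf("ECC bytes per step: %d\n", ecc_bytes);	/* 14 * 3 = 42 */
	printf("ECC bytes per page: %d (must fit in %d OOB bytes)\n",
	       total, oobsize - 2);			/* 336 <= 446 */
	return 0;
}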
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
deleted file mode 100644
index b212bb0fd902..000000000000
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ /dev/null
@@ -1,902 +0,0 @@ 
-/*
- * Driver for NAND MLC Controller in LPC32xx
- *
- * Author: Roland Stigge <stigge@antcom.de>
- *
- * Copyright © 2011 WORK Microwave GmbH
- * Copyright © 2011, 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- *
- * NAND Flash Controller Operation:
- * - Read: Auto Decode
- * - Write: Auto Encode
- * - Tested Page Sizes: 2048, 4096
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/completion.h>
-#include <linux/interrupt.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/mtd/lpc32xx_mlc.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/mtd/nand_ecc.h>
-
-#define DRV_NAME "lpc32xx_mlc"
-
-/**********************************************************************
-* MLC NAND controller register offsets
-**********************************************************************/
-
-#define MLC_BUFF(x)			(x + 0x00000)
-#define MLC_DATA(x)			(x + 0x08000)
-#define MLC_CMD(x)			(x + 0x10000)
-#define MLC_ADDR(x)			(x + 0x10004)
-#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
-#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
-#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
-#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
-#define MLC_RPR(x)			(x + 0x10018)
-#define MLC_WPR(x)			(x + 0x1001C)
-#define MLC_RUBP(x)			(x + 0x10020)
-#define MLC_ROBP(x)			(x + 0x10024)
-#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
-#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
-#define MLC_ICR(x)			(x + 0x10030)
-#define MLC_TIME_REG(x)			(x + 0x10034)
-#define MLC_IRQ_MR(x)			(x + 0x10038)
-#define MLC_IRQ_SR(x)			(x + 0x1003C)
-#define MLC_LOCK_PR(x)			(x + 0x10044)
-#define MLC_ISR(x)			(x + 0x10048)
-#define MLC_CEH(x)			(x + 0x1004C)
-
-/**********************************************************************
-* MLC_CMD bit definitions
-**********************************************************************/
-#define MLCCMD_RESET			0xFF
-
-/**********************************************************************
-* MLC_ICR bit definitions
-**********************************************************************/
-#define MLCICR_WPROT			(1 << 3)
-#define MLCICR_LARGEBLOCK		(1 << 2)
-#define MLCICR_LONGADDR			(1 << 1)
-#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */
-
-/**********************************************************************
-* MLC_TIME_REG bit definitions
-**********************************************************************/
-#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
-#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
-#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
-#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
-#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
-#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
-#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)
-
-/**********************************************************************
-* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
-**********************************************************************/
-#define MLCIRQ_NAND_READY		(1 << 5)
-#define MLCIRQ_CONTROLLER_READY		(1 << 4)
-#define MLCIRQ_DECODE_FAILURE		(1 << 3)
-#define MLCIRQ_DECODE_ERROR		(1 << 2)
-#define MLCIRQ_ECC_READY		(1 << 1)
-#define MLCIRQ_WRPROT_FAULT		(1 << 0)
-
-/**********************************************************************
-* MLC_LOCK_PR bit definitions
-**********************************************************************/
-#define MLCLOCKPR_MAGIC			0xA25E
-
-/**********************************************************************
-* MLC_ISR bit definitions
-**********************************************************************/
-#define MLCISR_DECODER_FAILURE		(1 << 6)
-#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
-#define MLCISR_ERRORS_DETECTED		(1 << 3)
-#define MLCISR_ECC_READY		(1 << 2)
-#define MLCISR_CONTROLLER_READY		(1 << 1)
-#define MLCISR_NAND_READY		(1 << 0)
-
-/**********************************************************************
-* MLC_CEH bit definitions
-**********************************************************************/
-#define MLCCEH_NORMAL			(1 << 0)
-
-struct lpc32xx_nand_cfg_mlc {
-	uint32_t tcea_delay;
-	uint32_t busy_delay;
-	uint32_t nand_ta;
-	uint32_t rd_high;
-	uint32_t rd_low;
-	uint32_t wr_high;
-	uint32_t wr_low;
-	int wp_gpio;
-	struct mtd_partition *parts;
-	unsigned num_parts;
-};
-
-static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-
-	if (section >= nand_chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
-	oobregion->length = nand_chip->ecc.bytes;
-
-	return 0;
-}
-
-static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-
-	if (section >= nand_chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = 16 * section;
-	oobregion->length = 16 - nand_chip->ecc.bytes;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
-	.ecc = lpc32xx_ooblayout_ecc,
-	.free = lpc32xx_ooblayout_free,
-};
-
-static struct nand_bbt_descr lpc32xx_nand_bbt = {
-	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
-		   NAND_BBT_WRITE,
-	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
-};
-
-static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
-	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
-		   NAND_BBT_WRITE,
-	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
-};
-
-struct lpc32xx_nand_host {
-	struct nand_chip	nand_chip;
-	struct lpc32xx_mlc_platform_data *pdata;
-	struct clk		*clk;
-	void __iomem		*io_base;
-	int			irq;
-	struct lpc32xx_nand_cfg_mlc	*ncfg;
-	struct completion       comp_nand;
-	struct completion       comp_controller;
-	uint32_t llptr;
-	/*
-	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
-	 */
-	dma_addr_t		oob_buf_phy;
-	/*
-	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
-	 */
-	uint8_t			*oob_buf;
-	/* Physical address of DMA base address */
-	dma_addr_t		io_base_phy;
-
-	struct completion	comp_dma;
-	struct dma_chan		*dma_chan;
-	struct dma_slave_config	dma_slave_config;
-	struct scatterlist	sgl;
-	uint8_t			*dma_buf;
-	uint8_t			*dummy_buf;
-	int			mlcsubpages; /* number of 512bytes-subpages */
-};
-
-/*
- * Activate/Deactivate DMA Operation:
- *
- * Using the PL080 DMA Controller for transferring the 512 byte subpages
- * instead of doing readl() / writel() in a loop slows it down significantly.
- * Measurements via getnstimeofday() upon 512 byte subpage reads reveal:
- *
- * - readl() of 128 x 32 bits in a loop: ~20us
- * - DMA read of 512 bytes (32 bit, 4...128 words bursts): ~60us
- * - DMA read of 512 bytes (32 bit, no bursts): ~100us
- *
- * This applies to the transfer itself. In the DMA case: only the
- * wait_for_completion() (DMA setup _not_ included).
- *
- * Note that the 512-byte subpage transfer is done directly from/to a
- * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
- * 2048-byte page) is spent waiting for the NAND IRQ, anyway. (The NAND
- * controller transferring data between its internal buffer to/from the NAND
- * chip.)
- *
- * Therefore, using the PL080 DMA is disabled by default, for now.
- *
- */
-static int use_dma;
-
-static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
-{
-	uint32_t clkrate, tmp;
-
-	/* Reset MLC controller */
-	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
-	udelay(1000);
-
-	/* Get base clock for MLC block */
-	clkrate = clk_get_rate(host->clk);
-	if (clkrate == 0)
-		clkrate = 104000000;
-
-	/* Unlock MLC_ICR
-	 * (among others, will be locked again automatically) */
-	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
-
-	/* Configure MLC Controller: Large Block, 5 Byte Address */
-	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
-	writel(tmp, MLC_ICR(host->io_base));
-
-	/* Unlock MLC_TIME_REG
-	 * (among others, will be locked again automatically) */
-	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
-
-	/* Compute clock setup values, see LPC and NAND manual */
-	tmp = 0;
-	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
-	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
-	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
-	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
-	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
-	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
-	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
-	writel(tmp, MLC_TIME_REG(host->io_base));
-
-	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
-	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
-			MLC_IRQ_MR(host->io_base));
-
-	/* Normal nCE operation: nCE controlled by controller */
-	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
-}
-
-/*
- * Hardware specific access to control lines
- */
-static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
-				  unsigned int ctrl)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (cmd != NAND_CMD_NONE) {
-		if (ctrl & NAND_CLE)
-			writel(cmd, MLC_CMD(host->io_base));
-		else
-			writel(cmd, MLC_ADDR(host->io_base));
-	}
-}
-
-/*
- * Read Device Ready (NAND device _and_ controller ready)
- */
-static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if ((readb(MLC_ISR(host->io_base)) &
-	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
-	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
-		return  1;
-
-	return 0;
-}
-
-static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
-{
-	uint8_t sr;
-
-	/* Clear interrupt flag by reading status */
-	sr = readb(MLC_IRQ_SR(host->io_base));
-	if (sr & MLCIRQ_NAND_READY)
-		complete(&host->comp_nand);
-	if (sr & MLCIRQ_CONTROLLER_READY)
-		complete(&host->comp_controller);
-
-	return IRQ_HANDLED;
-}
-
-static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
-		goto exit;
-
-	wait_for_completion(&host->comp_nand);
-
-	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
-		/* Seems to be delayed sometimes by controller */
-		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
-		cpu_relax();
-	}
-
-exit:
-	return NAND_STATUS_READY;
-}
-
-static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
-				       struct nand_chip *chip)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
-		goto exit;
-
-	wait_for_completion(&host->comp_controller);
-
-	while (!(readb(MLC_ISR(host->io_base)) &
-		 MLCISR_CONTROLLER_READY)) {
-		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
-		cpu_relax();
-	}
-
-exit:
-	return NAND_STATUS_READY;
-}
-
-static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	lpc32xx_waitfunc_nand(mtd, chip);
-	lpc32xx_waitfunc_controller(mtd, chip);
-
-	return NAND_STATUS_READY;
-}
-
-/*
- * Enable NAND write protect
- */
-static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
-{
-	if (gpio_is_valid(host->ncfg->wp_gpio))
-		gpio_set_value(host->ncfg->wp_gpio, 0);
-}
-
-/*
- * Disable NAND write protect
- */
-static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
-{
-	if (gpio_is_valid(host->ncfg->wp_gpio))
-		gpio_set_value(host->ncfg->wp_gpio, 1);
-}
-
-static void lpc32xx_dma_complete_func(void *completion)
-{
-	complete(completion);
-}
-
-static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
-			    enum dma_transfer_direction dir)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	struct dma_async_tx_descriptor *desc;
-	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-	int res;
-
-	sg_init_one(&host->sgl, mem, len);
-
-	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
-			 DMA_BIDIRECTIONAL);
-	if (res != 1) {
-		dev_err(mtd->dev.parent, "Failed to map sg list\n");
-		return -ENXIO;
-	}
-	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
-				       flags);
-	if (!desc) {
-		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
-		goto out1;
-	}
-
-	init_completion(&host->comp_dma);
-	desc->callback = lpc32xx_dma_complete_func;
-	desc->callback_param = &host->comp_dma;
-
-	dmaengine_submit(desc);
-	dma_async_issue_pending(host->dma_chan);
-
-	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
-
-	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
-		     DMA_BIDIRECTIONAL);
-	return 0;
-out1:
-	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
-		     DMA_BIDIRECTIONAL);
-	return -ENXIO;
-}
-
-static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-			     uint8_t *buf, int oob_required, int page)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	int i, j;
-	uint8_t *oobbuf = chip->oob_poi;
-	uint32_t mlc_isr;
-	int res;
-	uint8_t *dma_buf;
-	bool dma_mapped;
-
-	if ((void *)buf <= high_memory) {
-		dma_buf = buf;
-		dma_mapped = true;
-	} else {
-		dma_buf = host->dma_buf;
-		dma_mapped = false;
-	}
-
-	/* Writing Command and Address */
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	/* For all sub-pages */
-	for (i = 0; i < host->mlcsubpages; i++) {
-		/* Start Auto Decode Command */
-		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
-
-		/* Wait for Controller Ready */
-		lpc32xx_waitfunc_controller(mtd, chip);
-
-		/* Check ECC Error status */
-		mlc_isr = readl(MLC_ISR(host->io_base));
-		if (mlc_isr & MLCISR_DECODER_FAILURE) {
-			mtd->ecc_stats.failed++;
-			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
-		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
-			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
-		}
-
-		/* Read 512 + 16 Bytes */
-		if (use_dma) {
-			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
-					       DMA_DEV_TO_MEM);
-			if (res)
-				return res;
-		} else {
-			for (j = 0; j < (512 >> 2); j++) {
-				*((uint32_t *)(buf)) =
-					readl(MLC_BUFF(host->io_base));
-				buf += 4;
-			}
-		}
-		for (j = 0; j < (16 >> 2); j++) {
-			*((uint32_t *)(oobbuf)) =
-				readl(MLC_BUFF(host->io_base));
-			oobbuf += 4;
-		}
-	}
-
-	if (use_dma && !dma_mapped)
-		memcpy(buf, dma_buf, mtd->writesize);
-
-	return 0;
-}
-
-static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
-				       struct nand_chip *chip,
-				       const uint8_t *buf, int oob_required,
-				       int page)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	const uint8_t *oobbuf = chip->oob_poi;
-	uint8_t *dma_buf = (uint8_t *)buf;
-	int res;
-	int i, j;
-
-	if (use_dma && (void *)buf >= high_memory) {
-		dma_buf = host->dma_buf;
-		memcpy(dma_buf, buf, mtd->writesize);
-	}
-
-	for (i = 0; i < host->mlcsubpages; i++) {
-		/* Start Encode */
-		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
-
-		/* Write 512 + 6 Bytes to Buffer */
-		if (use_dma) {
-			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
-					       DMA_MEM_TO_DEV);
-			if (res)
-				return res;
-		} else {
-			for (j = 0; j < (512 >> 2); j++) {
-				writel(*((uint32_t *)(buf)),
-				       MLC_BUFF(host->io_base));
-				buf += 4;
-			}
-		}
-		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
-		oobbuf += 4;
-		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
-		oobbuf += 12;
-
-		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
-		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
-
-		/* Wait for Controller Ready */
-		lpc32xx_waitfunc_controller(mtd, chip);
-	}
-	return 0;
-}
-
-static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			    int page)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	/* Read whole page - necessary with MLC controller! */
-	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
-
-	return 0;
-}
-
-static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			      int page)
-{
-	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
-	return 0;
-}
-
-/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
-static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
-{
-	/* Always enabled! */
-}
-
-static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
-{
-	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-	dma_cap_mask_t mask;
-
-	if (!host->pdata || !host->pdata->dma_filter) {
-		dev_err(mtd->dev.parent, "no DMA platform data\n");
-		return -ENOENT;
-	}
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
-					     "nand-mlc");
-	if (!host->dma_chan) {
-		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
-		return -EBUSY;
-	}
-
-	/*
-	 * Set direction to a sensible value even if the dmaengine driver
-	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
-	 * driver criticizes it as "alien transfer direction".
-	 */
-	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
-	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	host->dma_slave_config.src_maxburst = 128;
-	host->dma_slave_config.dst_maxburst = 128;
-	/* DMA controller does flow control: */
-	host->dma_slave_config.device_fc = false;
-	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
-	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
-	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
-		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
-		goto out1;
-	}
-
-	return 0;
-out1:
-	dma_release_channel(host->dma_chan);
-	return -ENXIO;
-}
-
-static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
-{
-	struct lpc32xx_nand_cfg_mlc *ncfg;
-	struct device_node *np = dev->of_node;
-
-	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
-	if (!ncfg)
-		return NULL;
-
-	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
-	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
-	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
-	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
-	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
-	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
-	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
-
-	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
-	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
-	    !ncfg->wr_low) {
-		dev_err(dev, "chip parameters not specified correctly\n");
-		return NULL;
-	}
-
-	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
-
-	return ncfg;
-}
-
-/*
- * Probe for NAND controller
- */
-static int lpc32xx_nand_probe(struct platform_device *pdev)
-{
-	struct lpc32xx_nand_host *host;
-	struct mtd_info *mtd;
-	struct nand_chip *nand_chip;
-	struct resource *rc;
-	int res;
-
-	/* Allocate memory for the device structure (and zero it) */
-	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-
-	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
-	if (IS_ERR(host->io_base))
-		return PTR_ERR(host->io_base);
-	
-	host->io_base_phy = rc->start;
-
-	nand_chip = &host->nand_chip;
-	mtd = nand_to_mtd(nand_chip);
-	if (pdev->dev.of_node)
-		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
-	if (!host->ncfg) {
-		dev_err(&pdev->dev,
-			"Missing or bad NAND config from device tree\n");
-		return -ENOENT;
-	}
-	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
-	if (gpio_is_valid(host->ncfg->wp_gpio) &&
-			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
-		dev_err(&pdev->dev, "GPIO not available\n");
-		return -EBUSY;
-	}
-	lpc32xx_wp_disable(host);
-
-	host->pdata = dev_get_platdata(&pdev->dev);
-
-	/* link the private data structures */
-	nand_set_controller_data(nand_chip, host);
-	nand_set_flash_node(nand_chip, pdev->dev.of_node);
-	mtd->dev.parent = &pdev->dev;
-
-	/* Get NAND clock */
-	host->clk = clk_get(&pdev->dev, NULL);
-	if (IS_ERR(host->clk)) {
-		dev_err(&pdev->dev, "Clock initialization failure\n");
-		res = -ENOENT;
-		goto err_exit1;
-	}
-	clk_prepare_enable(host->clk);
-
-	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
-	nand_chip->dev_ready = lpc32xx_nand_device_ready;
-	nand_chip->chip_delay = 25; /* us */
-	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
-	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
-
-	/* Init NAND controller */
-	lpc32xx_nand_setup(host);
-
-	platform_set_drvdata(pdev, host);
-
-	/* Initialize function pointers */
-	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
-	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
-	nand_chip->ecc.read_page = lpc32xx_read_page;
-	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
-	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
-	nand_chip->ecc.write_oob = lpc32xx_write_oob;
-	nand_chip->ecc.read_oob = lpc32xx_read_oob;
-	nand_chip->ecc.strength = 4;
-	nand_chip->ecc.bytes = 10;
-	nand_chip->waitfunc = lpc32xx_waitfunc;
-
-	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
-	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
-	nand_chip->bbt_td = &lpc32xx_nand_bbt;
-	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
-
-	if (use_dma) {
-		res = lpc32xx_dma_setup(host);
-		if (res) {
-			res = -EIO;
-			goto err_exit2;
-		}
-	}
-
-	/*
-	 * Scan to find existence of the device and
-	 * get the type of NAND device: SMALL block or LARGE block.
-	 */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
-		goto err_exit3;
-	}
-
-	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
-	if (!host->dma_buf) {
-		res = -ENOMEM;
-		goto err_exit3;
-	}
-
-	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
-	if (!host->dummy_buf) {
-		res = -ENOMEM;
-		goto err_exit3;
-	}
-
-	nand_chip->ecc.mode = NAND_ECC_HW;
-	nand_chip->ecc.size = 512;
-	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
-	host->mlcsubpages = mtd->writesize / 512;
-
-	/* initially clear interrupt status */
-	readb(MLC_IRQ_SR(host->io_base));
-
-	init_completion(&host->comp_nand);
-	init_completion(&host->comp_controller);
-
-	host->irq = platform_get_irq(pdev, 0);
-	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
-		dev_err(&pdev->dev, "failed to get platform irq\n");
-		res = -EINVAL;
-		goto err_exit3;
-	}
-
-	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
-			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
-		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
-		res = -ENXIO;
-		goto err_exit3;
-	}
-
-	/*
-	 * Fills out all the uninitialized function pointers with the defaults
-	 * and scans for a bad block table if appropriate.
-	 */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
-		goto err_exit4;
-	}
-
-	mtd->name = DRV_NAME;
-
-	res = mtd_device_register(mtd, host->ncfg->parts,
-				  host->ncfg->num_parts);
-	if (!res)
-		return res;
-
-	nand_release(mtd);
-
-err_exit4:
-	free_irq(host->irq, host);
-err_exit3:
-	if (use_dma)
-		dma_release_channel(host->dma_chan);
-err_exit2:
-	clk_disable_unprepare(host->clk);
-	clk_put(host->clk);
-err_exit1:
-	lpc32xx_wp_enable(host);
-	gpio_free(host->ncfg->wp_gpio);
-
-	return res;
-}
-
-/*
- * Remove NAND device
- */
-static int lpc32xx_nand_remove(struct platform_device *pdev)
-{
-	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-
-	nand_release(mtd);
-	free_irq(host->irq, host);
-	if (use_dma)
-		dma_release_channel(host->dma_chan);
-
-	clk_disable_unprepare(host->clk);
-	clk_put(host->clk);
-
-	lpc32xx_wp_enable(host);
-	gpio_free(host->ncfg->wp_gpio);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int lpc32xx_nand_resume(struct platform_device *pdev)
-{
-	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
-
-	/* Re-enable NAND clock */
-	clk_prepare_enable(host->clk);
-
-	/* Fresh init of NAND controller */
-	lpc32xx_nand_setup(host);
-
-	/* Disable write protect */
-	lpc32xx_wp_disable(host);
-
-	return 0;
-}
-
-static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
-{
-	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
-
-	/* Enable write protect for safety */
-	lpc32xx_wp_enable(host);
-
-	/* Disable clock */
-	clk_disable_unprepare(host->clk);
-	return 0;
-}
-
-#else
-#define lpc32xx_nand_resume NULL
-#define lpc32xx_nand_suspend NULL
-#endif
-
-static const struct of_device_id lpc32xx_nand_match[] = {
-	{ .compatible = "nxp,lpc3220-mlc" },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
-
-static struct platform_driver lpc32xx_nand_driver = {
-	.probe		= lpc32xx_nand_probe,
-	.remove		= lpc32xx_nand_remove,
-	.resume		= lpc32xx_nand_resume,
-	.suspend	= lpc32xx_nand_suspend,
-	.driver		= {
-		.name	= DRV_NAME,
-		.of_match_table = lpc32xx_nand_match,
-	},
-};
-
-module_platform_driver(lpc32xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
-MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
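The only arithmetic of note in the MLC driver above is the MLC_TIME_REG setup: each timing field is derived as clkrate divided by a rate taken from the corresponding nxp,* device-tree property (plus one clock for most fields), then masked into its bit field. A small userspace sketch of that computation, reusing the driver's macros; the clock rate and property values are invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)

int main(void)
{
	/*
	 * Illustrative numbers only: clkrate as returned by clk_get_rate()
	 * and per-signal rates as they would come from the nxp,* properties
	 * parsed in lpc32xx_parse_dt().
	 */
	uint32_t clkrate = 104000000;
	uint32_t tcea_delay = 52000000;
	uint32_t busy_delay = 10000000;
	uint32_t rd_low = 26000000;

	uint32_t tmp = 0;

	/* Same cycle computation as lpc32xx_nand_setup(). */
	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / tcea_delay + 1);	/* 3 cycles */
	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / busy_delay + 1);	/* 11 cycles */
	tmp |= MLCTIMEREG_RD_LOW(clkrate / rd_low);		/* 4 cycles */

	printf("MLC_TIME_REG = 0x%08x\n", tmp);	/* prints 0x03580400 */
	return 0;
}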
diff --git a/drivers/mtd/nand/lpc32xx_slc.c b/drivers/mtd/nand/lpc32xx_slc.c
deleted file mode 100644
index 018d783d37cd..000000000000
--- a/drivers/mtd/nand/lpc32xx_slc.c
+++ /dev/null
@@ -1,1041 +0,0 @@ 
-/*
- * NXP LPC32XX NAND SLC driver
- *
- * Authors:
- *    Kevin Wells <kevin.wells@nxp.com>
- *    Roland Stigge <stigge@antcom.de>
- *
- * Copyright © 2011 NXP Semiconductors
- * Copyright © 2012 Roland Stigge
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/mm.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/gpio.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
-#include <linux/mtd/lpc32xx_slc.h>
-
-#define LPC32XX_MODNAME		"lpc32xx-nand"
-
-/**********************************************************************
-* SLC NAND controller register offsets
-**********************************************************************/
-
-#define SLC_DATA(x)		(x + 0x000)
-#define SLC_ADDR(x)		(x + 0x004)
-#define SLC_CMD(x)		(x + 0x008)
-#define SLC_STOP(x)		(x + 0x00C)
-#define SLC_CTRL(x)		(x + 0x010)
-#define SLC_CFG(x)		(x + 0x014)
-#define SLC_STAT(x)		(x + 0x018)
-#define SLC_INT_STAT(x)		(x + 0x01C)
-#define SLC_IEN(x)		(x + 0x020)
-#define SLC_ISR(x)		(x + 0x024)
-#define SLC_ICR(x)		(x + 0x028)
-#define SLC_TAC(x)		(x + 0x02C)
-#define SLC_TC(x)		(x + 0x030)
-#define SLC_ECC(x)		(x + 0x034)
-#define SLC_DMA_DATA(x)		(x + 0x038)
-
-/**********************************************************************
-* slc_ctrl register definitions
-**********************************************************************/
-#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
-#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
-#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */
-
-/**********************************************************************
-* slc_cfg register definitions
-**********************************************************************/
-#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
-#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
-#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
-#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
-#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
-#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */
-
-/**********************************************************************
-* slc_stat register definitions
-**********************************************************************/
-#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
-#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
-#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */
-
-/**********************************************************************
-* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
-**********************************************************************/
-#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
-#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
-
-/**********************************************************************
-* slc_tac register definitions
-**********************************************************************/
-/* Computation of clock cycles from controller and device clock rates */
-#define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
-
-/* Clock setting for RDY write sample wait time in 2*n clocks */
-#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
-/* Write pulse width in clock cycles, 1 to 16 clocks */
-#define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
-/* Write hold time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
-/* Write setup time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
-/* Clock setting for RDY read sample wait time in 2*n clocks */
-#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
-/* Read pulse width in clock cycles, 1 to 16 clocks */
-#define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
-/* Read hold time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
-/* Read setup time of control and data signals, 1 to 16 clocks */
-#define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))
-
-/**********************************************************************
-* slc_ecc register definitions
-**********************************************************************/
-/* ECC line parity fetch macro */
-#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
-#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
-
-/*
- * DMA requires storage space for the DMA local buffer and the hardware ECC
- * storage area. The DMA local buffer is only used if DMA mapping fails
- * during runtime.
- */
-#define LPC32XX_DMA_DATA_SIZE		4096
-#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)
-
-/* Number of bytes used for ECC stored in NAND per 256 bytes */
-#define LPC32XX_SLC_DEV_ECC_BYTES	3
-
-/*
- * If the NAND base clock frequency can't be fetched, this frequency will be
- * used instead as the base. This rate is used to setup the timing registers
- * used for NAND accesses.
- */
-#define LPC32XX_DEF_BUS_RATE		133250000
-
-/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
-#define LPC32XX_DMA_TIMEOUT		100
-
-/*
- * NAND ECC Layout for small page NAND devices
- * Note: For large and huge page devices, the default layouts are used
- */
-static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = 6;
-	oobregion->offset = 10;
-
-	return 0;
-}
-
-static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	if (section > 1)
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->offset = 0;
-		oobregion->length = 4;
-	} else {
-		oobregion->offset = 6;
-		oobregion->length = 4;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
-	.ecc = lpc32xx_ooblayout_ecc,
-	.free = lpc32xx_ooblayout_free,
-};
-
-static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
-static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-/*
- * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
- * Note: Large page devices use the default layout
- */
-static struct nand_bbt_descr bbt_smallpage_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs =	0,
-	.len = 4,
-	.veroffs = 6,
-	.maxblocks = 4,
-	.pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs =	0,
-	.len = 4,
-	.veroffs = 6,
-	.maxblocks = 4,
-	.pattern = mirror_pattern
-};
-
-/*
- * NAND platform configuration structure
- */
-struct lpc32xx_nand_cfg_slc {
-	uint32_t wdr_clks;
-	uint32_t wwidth;
-	uint32_t whold;
-	uint32_t wsetup;
-	uint32_t rdr_clks;
-	uint32_t rwidth;
-	uint32_t rhold;
-	uint32_t rsetup;
-	int wp_gpio;
-	struct mtd_partition *parts;
-	unsigned num_parts;
-};
-
-struct lpc32xx_nand_host {
-	struct nand_chip	nand_chip;
-	struct lpc32xx_slc_platform_data *pdata;
-	struct clk		*clk;
-	void __iomem		*io_base;
-	struct lpc32xx_nand_cfg_slc *ncfg;
-
-	struct completion	comp;
-	struct dma_chan		*dma_chan;
-	uint32_t		dma_buf_len;
-	struct dma_slave_config	dma_slave_config;
-	struct scatterlist	sgl;
-
-	/*
-	 * DMA and CPU addresses of ECC work area and data buffer
-	 */
-	uint32_t		*ecc_buf;
-	uint8_t			*data_buf;
-	dma_addr_t		io_base_dma;
-};
-
-static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
-{
-	uint32_t clkrate, tmp;
-
-	/* Reset SLC controller */
-	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
-	udelay(1000);
-
-	/* Basic setup */
-	writel(0, SLC_CFG(host->io_base));
-	writel(0, SLC_IEN(host->io_base));
-	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
-		SLC_ICR(host->io_base));
-
-	/* Get base clock for SLC block */
-	clkrate = clk_get_rate(host->clk);
-	if (clkrate == 0)
-		clkrate = LPC32XX_DEF_BUS_RATE;
-
-	/* Compute clock setup values */
-	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
-		SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
-		SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
-		SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
-		SLCTAC_RDR(host->ncfg->rdr_clks) |
-		SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
-		SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
-		SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
-	writel(tmp, SLC_TAC(host->io_base));
-}
-
-/*
- * Hardware specific access to control lines
- */
-static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
-	unsigned int ctrl)
-{
-	uint32_t tmp;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	/* Does CE state need to be changed? */
-	tmp = readl(SLC_CFG(host->io_base));
-	if (ctrl & NAND_NCE)
-		tmp |= SLCCFG_CE_LOW;
-	else
-		tmp &= ~SLCCFG_CE_LOW;
-	writel(tmp, SLC_CFG(host->io_base));
-
-	if (cmd != NAND_CMD_NONE) {
-		if (ctrl & NAND_CLE)
-			writel(cmd, SLC_CMD(host->io_base));
-		else
-			writel(cmd, SLC_ADDR(host->io_base));
-	}
-}
-
-/*
- * Read the Device Ready pin
- */
-static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	int rdy = 0;
-
-	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
-		rdy = 1;
-
-	return rdy;
-}
-
-/*
- * Enable NAND write protect
- */
-static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
-{
-	if (gpio_is_valid(host->ncfg->wp_gpio))
-		gpio_set_value(host->ncfg->wp_gpio, 0);
-}
-
-/*
- * Disable NAND write protect
- */
-static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
-{
-	if (gpio_is_valid(host->ncfg->wp_gpio))
-		gpio_set_value(host->ncfg->wp_gpio, 1);
-}
-
-/*
- * Prepares SLC for transfers with H/W ECC enabled
- */
-static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
-{
-	/* Hardware ECC is enabled automatically in hardware as needed */
-}
-
-/*
- * Calculates the ECC for the data
- */
-static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
-				      const unsigned char *buf,
-				      unsigned char *code)
-{
-	/*
-	 * ECC is calculated automatically in hardware during syndrome read
-	 * and write operations, so it doesn't need to be calculated here.
-	 */
-	return 0;
-}
-
-/*
- * Read a single byte from NAND device
- */
-static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	return (uint8_t)readl(SLC_DATA(host->io_base));
-}
-
-/*
- * Simple device read without ECC
- */
-static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	/* Direct device read with no ECC */
-	while (len-- > 0)
-		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
-}
-
-/*
- * Simple device write without ECC
- */
-static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-
-	/* Direct device write with no ECC */
-	while (len-- > 0)
-		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
-}
-
-/*
- * Read the OOB data from the device without ECC using FIFO method
- */
-static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
-					  struct nand_chip *chip, int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-/*
- * Write the OOB data to the device without ECC using FIFO method
- */
-static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
-	struct nand_chip *chip, int page)
-{
-	int status;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	/* Send command to program the OOB data */
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-/*
- * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
- */
-static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
-{
-	int i;
-
-	for (i = 0; i < (count * 3); i += 3) {
-		uint32_t ce = ecc[i / 3];
-		ce = ~(ce << 2) & 0xFFFFFF;
-		spare[i + 2] = (uint8_t)(ce & 0xFF);
-		ce >>= 8;
-		spare[i + 1] = (uint8_t)(ce & 0xFF);
-		ce >>= 8;
-		spare[i] = (uint8_t)(ce & 0xFF);
-	}
-}
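/*
 * Illustrative sketch, not part of the deleted driver above: a minimal
 * user-space restatement of the byte packing done by lpc32xx_slc_ecc_copy(),
 * using a made-up raw ECC word purely to show the shift/invert/split into
 * the three spare bytes of one ECC step.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw = 0x00123456;		/* hypothetical SLC_ECC readout */
	uint32_t ce = ~(raw << 2) & 0xFFFFFF;	/* same transform as the driver */
	uint8_t spare[3] = {
		(uint8_t)(ce >> 16),		/* driver's spare[i]     */
		(uint8_t)(ce >> 8),		/* driver's spare[i + 1] */
		(uint8_t)ce,			/* driver's spare[i + 2] */
	};

	/* prints "b7 2e a7" for the sample word above */
	printf("%02x %02x %02x\n", spare[0], spare[1], spare[2]);
	return 0;
}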
-
-static void lpc32xx_dma_complete_func(void *completion)
-{
-	complete(completion);
-}
-
-static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
-			    void *mem, int len, enum dma_transfer_direction dir)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	struct dma_async_tx_descriptor *desc;
-	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
-	int res;
-
-	host->dma_slave_config.direction = dir;
-	host->dma_slave_config.src_addr = dma;
-	host->dma_slave_config.dst_addr = dma;
-	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	host->dma_slave_config.src_maxburst = 4;
-	host->dma_slave_config.dst_maxburst = 4;
-	/* DMA controller does flow control: */
-	host->dma_slave_config.device_fc = false;
-	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
-		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
-		return -ENXIO;
-	}
-
-	sg_init_one(&host->sgl, mem, len);
-
-	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
-			 DMA_BIDIRECTIONAL);
-	if (res != 1) {
-		dev_err(mtd->dev.parent, "Failed to map sg list\n");
-		return -ENXIO;
-	}
-	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
-				       flags);
-	if (!desc) {
-		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
-		goto out1;
-	}
-
-	init_completion(&host->comp);
-	desc->callback = lpc32xx_dma_complete_func;
-	desc->callback_param = &host->comp;
-
-	dmaengine_submit(desc);
-	dma_async_issue_pending(host->dma_chan);
-
-	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
-
-	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
-		     DMA_BIDIRECTIONAL);
-
-	return 0;
-out1:
-	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
-		     DMA_BIDIRECTIONAL);
-	return -ENXIO;
-}
-
-/*
- * DMA read/write transfers with ECC support
- */
-static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
-			int read)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	int i, status = 0;
-	unsigned long timeout;
-	int res;
-	enum dma_transfer_direction dir =
-		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
-	uint8_t *dma_buf;
-	bool dma_mapped;
-
-	if ((void *)buf <= high_memory) {
-		dma_buf = buf;
-		dma_mapped = true;
-	} else {
-		dma_buf = host->data_buf;
-		dma_mapped = false;
-		if (!read)
-			memcpy(host->data_buf, buf, mtd->writesize);
-	}
-
-	if (read) {
-		writel(readl(SLC_CFG(host->io_base)) |
-		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
-		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
-	} else {
-		writel((readl(SLC_CFG(host->io_base)) |
-			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
-		       ~SLCCFG_DMA_DIR,
-			SLC_CFG(host->io_base));
-	}
-
-	/* Clear initial ECC */
-	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
-
-	/* Transfer size is data area only */
-	writel(mtd->writesize, SLC_TC(host->io_base));
-
-	/* Start transfer in the NAND controller */
-	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
-	       SLC_CTRL(host->io_base));
-
-	for (i = 0; i < chip->ecc.steps; i++) {
-		/* Data */
-		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
-				       dma_buf + i * chip->ecc.size,
-				       mtd->writesize / chip->ecc.steps, dir);
-		if (res)
-			return res;
-
-		/* Always _read_ ECC */
-		if (i == chip->ecc.steps - 1)
-			break;
-		if (!read) /* ECC availability delayed on write */
-			udelay(10);
-		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
-				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
-		if (res)
-			return res;
-	}
-
-	/*
-	 * According to NXP, the DMA can be finished here, but the NAND
-	 * controller may still have buffered data. After porting to using the
-	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
-	 * appears to be always true, according to tests. Keeping the check for
-	 * safety reasons for now.
-	 */
-	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
-		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
-		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
-		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
-		       time_before(jiffies, timeout))
-			cpu_relax();
-		if (!time_before(jiffies, timeout)) {
-			dev_err(mtd->dev.parent, "FIFO held data too long\n");
-			status = -EIO;
-		}
-	}
-
-	/* Read last calculated ECC value */
-	if (!read)
-		udelay(10);
-	host->ecc_buf[chip->ecc.steps - 1] =
-		readl(SLC_ECC(host->io_base));
-
-	/* Flush DMA */
-	dmaengine_terminate_all(host->dma_chan);
-
-	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
-	    readl(SLC_TC(host->io_base))) {
-		/* Something is left in the FIFO, something is wrong */
-		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
-		status = -EIO;
-	}
-
-	/* Stop DMA & HW ECC */
-	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
-	       SLC_CTRL(host->io_base));
-	writel(readl(SLC_CFG(host->io_base)) &
-	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
-		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
-
-	if (!dma_mapped && read)
-		memcpy(buf, host->data_buf, mtd->writesize);
-
-	return status;
-}
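/*
 * Illustrative sketch, not part of the deleted driver above: the per-page DMA
 * schedule built by the loop in lpc32xx_xfer(), printed for a hypothetical
 * 2048-byte page split into the driver's 256-byte ECC steps - eight data
 * transfers interleaved with seven 4-byte ECC word reads, the final ECC word
 * being read from SLC_ECC by the CPU once the FIFO has drained.
 */
#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048, ecc_size = 256;	/* hypothetical geometry */
	unsigned int steps = writesize / ecc_size;
	unsigned int i;

	for (i = 0; i < steps; i++) {
		printf("step %u: DMA %u data bytes\n", i, writesize / steps);
		if (i == steps - 1)
			printf("step %u: last ECC word read from SLC_ECC\n", i);
		else
			printf("step %u: DMA 4-byte ECC word\n", i);
	}
	return 0;
}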
-
-/*
- * Read the data and OOB data from the device, use ECC correction with the
- * data, disable ECC for the OOB data
- */
-static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
-					   struct nand_chip *chip, uint8_t *buf,
-					   int oob_required, int page)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	struct mtd_oob_region oobregion = { };
-	int stat, i, status, error;
-	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
-
-	/* Issue read command */
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	/* Read data and oob, calculate ECC */
-	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
-
-	/* Get OOB data */
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	/* Convert to stored ECC format */
-	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
-
-	/* Pointer to ECC data retrieved from NAND spare area */
-	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
-	if (error)
-		return error;
-
-	oobecc = chip->oob_poi + oobregion.offset;
-
-	for (i = 0; i < chip->ecc.steps; i++) {
-		stat = chip->ecc.correct(mtd, buf, oobecc,
-					 &tmpecc[i * chip->ecc.bytes]);
-		if (stat < 0)
-			mtd->ecc_stats.failed++;
-		else
-			mtd->ecc_stats.corrected += stat;
-
-		buf += chip->ecc.size;
-		oobecc += chip->ecc.bytes;
-	}
-
-	return status;
-}
-
-/*
- * Read the data and OOB data from the device, no ECC correction with the
- * data or OOB data
- */
-static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
-					       struct nand_chip *chip,
-					       uint8_t *buf, int oob_required,
-					       int page)
-{
-	/* Issue read command */
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	/* Raw reads can just use the FIFO interface */
-	chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-/*
- * Write the data and OOB data to the device, use ECC with the data,
- * disable ECC for the OOB data
- */
-static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
-					    struct nand_chip *chip,
-					    const uint8_t *buf,
-					    int oob_required, int page)
-{
-	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
-	struct mtd_oob_region oobregion = { };
-	uint8_t *pb;
-	int error;
-
-	/* Write data, calculate ECC on outbound data */
-	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
-	if (error)
-		return error;
-
-	/*
-	 * The calculated ECC needs some manual work done to it before
-	 * committing it to NAND. Process the calculated ECC and place
-	 * the resultant values directly into the OOB buffer. */
-	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
-	if (error)
-		return error;
-
-	pb = chip->oob_poi + oobregion.offset;
-	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
-
-	/* Write ECC data to device */
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-/*
- * Write the data and OOB data to the device, no ECC correction with the
- * data or OOB data
- */
-static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
-						struct nand_chip *chip,
-						const uint8_t *buf,
-						int oob_required, int page)
-{
-	/* Raw writes can just use the FIFO interface */
-	chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
-{
-	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-	dma_cap_mask_t mask;
-
-	if (!host->pdata || !host->pdata->dma_filter) {
-		dev_err(mtd->dev.parent, "no DMA platform data\n");
-		return -ENOENT;
-	}
-
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
-					     "nand-slc");
-	if (!host->dma_chan) {
-		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
-		return -EBUSY;
-	}
-
-	return 0;
-}
-
-static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
-{
-	struct lpc32xx_nand_cfg_slc *ncfg;
-	struct device_node *np = dev->of_node;
-
-	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
-	if (!ncfg)
-		return NULL;
-
-	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
-	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
-	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
-	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
-	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
-	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
-	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
-	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
-
-	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
-	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
-	    !ncfg->rhold || !ncfg->rsetup) {
-		dev_err(dev, "chip parameters not specified correctly\n");
-		return NULL;
-	}
-
-	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
-
-	return ncfg;
-}
-
-/*
- * Probe for NAND controller
- */
-static int lpc32xx_nand_probe(struct platform_device *pdev)
-{
-	struct lpc32xx_nand_host *host;
-	struct mtd_info *mtd;
-	struct nand_chip *chip;
-	struct resource *rc;
-	int res;
-
-	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (rc == NULL) {
-		dev_err(&pdev->dev, "No memory resource found for device\n");
-		return -EBUSY;
-	}
-
-	/* Allocate memory for the device structure (and zero it) */
-	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-	host->io_base_dma = rc->start;
-
-	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
-	if (IS_ERR(host->io_base))
-		return PTR_ERR(host->io_base);
-
-	if (pdev->dev.of_node)
-		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
-	if (!host->ncfg) {
-		dev_err(&pdev->dev,
-			"Missing or bad NAND config from device tree\n");
-		return -ENOENT;
-	}
-	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
-		return -EPROBE_DEFER;
-	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
-			host->ncfg->wp_gpio, "NAND WP")) {
-		dev_err(&pdev->dev, "GPIO not available\n");
-		return -EBUSY;
-	}
-	lpc32xx_wp_disable(host);
-
-	host->pdata = dev_get_platdata(&pdev->dev);
-
-	chip = &host->nand_chip;
-	mtd = nand_to_mtd(chip);
-	nand_set_controller_data(chip, host);
-	nand_set_flash_node(chip, pdev->dev.of_node);
-	mtd->owner = THIS_MODULE;
-	mtd->dev.parent = &pdev->dev;
-
-	/* Get NAND clock */
-	host->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(host->clk)) {
-		dev_err(&pdev->dev, "Clock failure\n");
-		res = -ENOENT;
-		goto err_exit1;
-	}
-	clk_prepare_enable(host->clk);
-
-	/* Set NAND IO addresses and command/ready functions */
-	chip->IO_ADDR_R = SLC_DATA(host->io_base);
-	chip->IO_ADDR_W = SLC_DATA(host->io_base);
-	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
-	chip->dev_ready = lpc32xx_nand_device_ready;
-	chip->chip_delay = 20; /* 20us command delay time */
-
-	/* Init NAND controller */
-	lpc32xx_nand_setup(host);
-
-	platform_set_drvdata(pdev, host);
-
-	/* NAND callbacks for LPC32xx SLC hardware */
-	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-	chip->read_byte = lpc32xx_nand_read_byte;
-	chip->read_buf = lpc32xx_nand_read_buf;
-	chip->write_buf = lpc32xx_nand_write_buf;
-	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
-	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
-	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
-	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
-	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
-	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
-	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
-	chip->ecc.correct = nand_correct_data;
-	chip->ecc.strength = 1;
-	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
-
-	/*
-	 * Allocate a large enough buffer for a single huge page plus
-	 * extra space for the spare area and ECC storage area
-	 */
-	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
-	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
-				      GFP_KERNEL);
-	if (host->data_buf == NULL) {
-		res = -ENOMEM;
-		goto err_exit2;
-	}
-
-	res = lpc32xx_nand_dma_setup(host);
-	if (res) {
-		res = -EIO;
-		goto err_exit2;
-	}
-
-	/* Find NAND device */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
-		goto err_exit3;
-	}
-
-	/* OOB and ECC CPU and DMA work areas */
-	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
-
-	/*
-	 * Small page FLASH has a unique OOB layout, but large and huge
-	 * page FLASH use the standard layout. Small page FLASH uses a
-	 * custom BBT marker layout.
-	 */
-	if (mtd->writesize <= 512)
-		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
-
-	/* These sizes remain the same regardless of page size */
-	chip->ecc.size = 256;
-	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
-	chip->ecc.prepad = chip->ecc.postpad = 0;
-
-	/*
-	 * Use a custom BBT marker setup for small page FLASH that
-	 * won't interfere with the ECC layout. Large and huge page
-	 * FLASH use the standard layout.
-	 */
-	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
-	    mtd->writesize <= 512) {
-		chip->bbt_td = &bbt_smallpage_main_descr;
-		chip->bbt_md = &bbt_smallpage_mirror_descr;
-	}
-
-	/*
-	 * Fills out all the uninitialized function pointers with the defaults
-	 */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
-		goto err_exit3;
-	}
-
-	mtd->name = "nxp_lpc3220_slc";
-	res = mtd_device_register(mtd, host->ncfg->parts,
-				  host->ncfg->num_parts);
-	if (!res)
-		return res;
-
-	nand_release(mtd);
-
-err_exit3:
-	dma_release_channel(host->dma_chan);
-err_exit2:
-	clk_disable_unprepare(host->clk);
-err_exit1:
-	lpc32xx_wp_enable(host);
-
-	return res;
-}
-
-/*
- * Remove NAND device.
- */
-static int lpc32xx_nand_remove(struct platform_device *pdev)
-{
-	uint32_t tmp;
-	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
-	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-
-	nand_release(mtd);
-	dma_release_channel(host->dma_chan);
-
-	/* Force CE high */
-	tmp = readl(SLC_CTRL(host->io_base));
-	tmp &= ~SLCCFG_CE_LOW;
-	writel(tmp, SLC_CTRL(host->io_base));
-
-	clk_disable_unprepare(host->clk);
-	lpc32xx_wp_enable(host);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int lpc32xx_nand_resume(struct platform_device *pdev)
-{
-	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
-
-	/* Re-enable NAND clock */
-	clk_prepare_enable(host->clk);
-
-	/* Fresh init of NAND controller */
-	lpc32xx_nand_setup(host);
-
-	/* Disable write protect */
-	lpc32xx_wp_disable(host);
-
-	return 0;
-}
-
-static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
-{
-	uint32_t tmp;
-	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
-
-	/* Force CE high */
-	tmp = readl(SLC_CTRL(host->io_base));
-	tmp &= ~SLCCFG_CE_LOW;
-	writel(tmp, SLC_CTRL(host->io_base));
-
-	/* Enable write protect for safety */
-	lpc32xx_wp_enable(host);
-
-	/* Disable clock */
-	clk_disable_unprepare(host->clk);
-
-	return 0;
-}
-
-#else
-#define lpc32xx_nand_resume NULL
-#define lpc32xx_nand_suspend NULL
-#endif
-
-static const struct of_device_id lpc32xx_nand_match[] = {
-	{ .compatible = "nxp,lpc3220-slc" },
-	{ /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
-
-static struct platform_driver lpc32xx_nand_driver = {
-	.probe		= lpc32xx_nand_probe,
-	.remove		= lpc32xx_nand_remove,
-	.resume		= lpc32xx_nand_resume,
-	.suspend	= lpc32xx_nand_suspend,
-	.driver		= {
-		.name	= LPC32XX_MODNAME,
-		.of_match_table = lpc32xx_nand_match,
-	},
-};
-
-module_platform_driver(lpc32xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
-MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
-MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
deleted file mode 100644
index 2a1fa86fd123..000000000000
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ /dev/null
@@ -1,855 +0,0 @@ 
-/*
- * Copyright 2004-2008 Freescale Semiconductor, Inc.
- * Copyright 2009 Semihalf.
- *
- * Approved as OSADL project by a majority of OSADL members and funded
- * by OSADL membership fees in 2009;  for details see www.osadl.org.
- *
- * Based on original driver from Freescale Semiconductor
- * written by John Rigby <jrigby@freescale.com> on basis
- * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
- * Piotr Ziecik <kosmo@semihalf.com>.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/gfp.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-
-#include <asm/mpc5121.h>
-
-/* Addresses for NFC MAIN RAM BUFFER areas */
-#define NFC_MAIN_AREA(n)	((n) *  0x200)
-
-/* Addresses for NFC SPARE BUFFER areas */
-#define NFC_SPARE_BUFFERS	8
-#define NFC_SPARE_LEN		0x40
-#define NFC_SPARE_AREA(n)	(0x1000 + ((n) * NFC_SPARE_LEN))
-
-/* MPC5121 NFC registers */
-#define NFC_BUF_ADDR		0x1E04
-#define NFC_FLASH_ADDR		0x1E06
-#define NFC_FLASH_CMD		0x1E08
-#define NFC_CONFIG		0x1E0A
-#define NFC_ECC_STATUS1		0x1E0C
-#define NFC_ECC_STATUS2		0x1E0E
-#define NFC_SPAS		0x1E10
-#define NFC_WRPROT		0x1E12
-#define NFC_NF_WRPRST		0x1E18
-#define NFC_CONFIG1		0x1E1A
-#define NFC_CONFIG2		0x1E1C
-#define NFC_UNLOCKSTART_BLK0	0x1E20
-#define NFC_UNLOCKEND_BLK0	0x1E22
-#define NFC_UNLOCKSTART_BLK1	0x1E24
-#define NFC_UNLOCKEND_BLK1	0x1E26
-#define NFC_UNLOCKSTART_BLK2	0x1E28
-#define NFC_UNLOCKEND_BLK2	0x1E2A
-#define NFC_UNLOCKSTART_BLK3	0x1E2C
-#define NFC_UNLOCKEND_BLK3	0x1E2E
-
-/* Bit Definitions: NFC_BUF_ADDR */
-#define NFC_RBA_MASK		(7 << 0)
-#define NFC_ACTIVE_CS_SHIFT	5
-#define NFC_ACTIVE_CS_MASK	(3 << NFC_ACTIVE_CS_SHIFT)
-
-/* Bit Definitions: NFC_CONFIG */
-#define NFC_BLS_UNLOCKED	(1 << 1)
-
-/* Bit Definitions: NFC_CONFIG1 */
-#define NFC_ECC_4BIT		(1 << 0)
-#define NFC_FULL_PAGE_DMA	(1 << 1)
-#define NFC_SPARE_ONLY		(1 << 2)
-#define NFC_ECC_ENABLE		(1 << 3)
-#define NFC_INT_MASK		(1 << 4)
-#define NFC_BIG_ENDIAN		(1 << 5)
-#define NFC_RESET		(1 << 6)
-#define NFC_CE			(1 << 7)
-#define NFC_ONE_CYCLE		(1 << 8)
-#define NFC_PPB_32		(0 << 9)
-#define NFC_PPB_64		(1 << 9)
-#define NFC_PPB_128		(2 << 9)
-#define NFC_PPB_256		(3 << 9)
-#define NFC_PPB_MASK		(3 << 9)
-#define NFC_FULL_PAGE_INT	(1 << 11)
-
-/* Bit Definitions: NFC_CONFIG2 */
-#define NFC_COMMAND		(1 << 0)
-#define NFC_ADDRESS		(1 << 1)
-#define NFC_INPUT		(1 << 2)
-#define NFC_OUTPUT		(1 << 3)
-#define NFC_ID			(1 << 4)
-#define NFC_STATUS		(1 << 5)
-#define NFC_CMD_FAIL		(1 << 15)
-#define NFC_INT			(1 << 15)
-
-/* Bit Definitions: NFC_WRPROT */
-#define NFC_WPC_LOCK_TIGHT	(1 << 0)
-#define NFC_WPC_LOCK		(1 << 1)
-#define NFC_WPC_UNLOCK		(1 << 2)
-
-#define	DRV_NAME		"mpc5121_nfc"
-
-/* Timeouts */
-#define NFC_RESET_TIMEOUT	1000		/* 1 ms */
-#define NFC_TIMEOUT		(HZ / 10)	/* 1/10 s */
-
-struct mpc5121_nfc_prv {
-	struct nand_chip	chip;
-	int			irq;
-	void __iomem		*regs;
-	struct clk		*clk;
-	wait_queue_head_t	irq_waitq;
-	uint			column;
-	int			spareonly;
-	void __iomem		*csreg;
-	struct device		*dev;
-};
-
-static void mpc5121_nfc_done(struct mtd_info *mtd);
-
-/* Read NFC register */
-static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-
-	return in_be16(prv->regs + reg);
-}
-
-/* Write NFC register */
-static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-
-	out_be16(prv->regs + reg, val);
-}
-
-/* Set bits in NFC register */
-static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
-{
-	nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
-}
-
-/* Clear bits in NFC register */
-static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
-{
-	nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
-}
-
-/* Invoke address cycle */
-static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
-{
-	nfc_write(mtd, NFC_FLASH_ADDR, addr);
-	nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
-	mpc5121_nfc_done(mtd);
-}
-
-/* Invoke command cycle */
-static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
-{
-	nfc_write(mtd, NFC_FLASH_CMD, cmd);
-	nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
-	mpc5121_nfc_done(mtd);
-}
-
-/* Send data from NFC buffers to NAND flash */
-static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
-{
-	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
-	nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
-	mpc5121_nfc_done(mtd);
-}
-
-/* Receive data from NAND flash */
-static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
-{
-	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
-	nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
-	mpc5121_nfc_done(mtd);
-}
-
-/* Receive ID from NAND flash */
-static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
-{
-	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
-	nfc_write(mtd, NFC_CONFIG2, NFC_ID);
-	mpc5121_nfc_done(mtd);
-}
-
-/* Receive status from NAND flash */
-static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
-{
-	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
-	nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
-	mpc5121_nfc_done(mtd);
-}
-
-/* NFC interrupt handler */
-static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
-{
-	struct mtd_info *mtd = data;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-
-	nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
-	wake_up(&prv->irq_waitq);
-
-	return IRQ_HANDLED;
-}
-
-/* Wait for operation complete */
-static void mpc5121_nfc_done(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-	int rv;
-
-	if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
-		nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
-		rv = wait_event_timeout(prv->irq_waitq,
-			(nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
-
-		if (!rv)
-			dev_warn(prv->dev,
-				"Timeout while waiting for interrupt.\n");
-	}
-
-	nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
-}
-
-/* Do address cycle(s) */
-static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	u32 pagemask = chip->pagemask;
-
-	if (column != -1) {
-		mpc5121_nfc_send_addr(mtd, column);
-		if (mtd->writesize > 512)
-			mpc5121_nfc_send_addr(mtd, column >> 8);
-	}
-
-	if (page != -1) {
-		do {
-			mpc5121_nfc_send_addr(mtd, page & 0xFF);
-			page >>= 8;
-			pagemask >>= 8;
-		} while (pagemask);
-	}
-}
-
-/* Control chip select signals */
-static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
-{
-	if (chip < 0) {
-		nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
-		return;
-	}
-
-	nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
-	nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
-							NFC_ACTIVE_CS_MASK);
-	nfc_set(mtd, NFC_CONFIG1, NFC_CE);
-}
-
-/* Init external chip select logic on ADS5121 board */
-static int ads5121_chipselect_init(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-	struct device_node *dn;
-
-	dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
-	if (dn) {
-		prv->csreg = of_iomap(dn, 0);
-		of_node_put(dn);
-		if (!prv->csreg)
-			return -ENOMEM;
-
-		/* CPLD Register 9 controls NAND /CE Lines */
-		prv->csreg += 9;
-		return 0;
-	}
-
-	return -EINVAL;
-}
-
-/* Control chips select signal on ADS5121 board */
-static void ads5121_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
-	u8 v;
-
-	v = in_8(prv->csreg);
-	v |= 0x0F;
-
-	if (chip >= 0) {
-		mpc5121_nfc_select_chip(mtd, 0);
-		v &= ~(1 << chip);
-	} else
-		mpc5121_nfc_select_chip(mtd, -1);
-
-	out_8(prv->csreg, v);
-}
-
-/* Read NAND Ready/Busy signal */
-static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
-{
-	/*
-	 * NFC handles ready/busy signal internally. Therefore, this function
-	 * always returns status as ready.
-	 */
-	return 1;
-}
-
-/* Write command to NAND flash */
-static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
-							int column, int page)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-
-	prv->column = (column >= 0) ? column : 0;
-	prv->spareonly = 0;
-
-	switch (command) {
-	case NAND_CMD_PAGEPROG:
-		mpc5121_nfc_send_prog_page(mtd);
-		break;
-	/*
-	 * NFC does not support sub-page reads and writes,
-	 * so emulate them using full page transfers.
-	 */
-	case NAND_CMD_READ0:
-		column = 0;
-		break;
-
-	case NAND_CMD_READ1:
-		prv->column += 256;
-		command = NAND_CMD_READ0;
-		column = 0;
-		break;
-
-	case NAND_CMD_READOOB:
-		prv->spareonly = 1;
-		command = NAND_CMD_READ0;
-		column = 0;
-		break;
-
-	case NAND_CMD_SEQIN:
-		mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
-		column = 0;
-		break;
-
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_READID:
-	case NAND_CMD_STATUS:
-		break;
-
-	default:
-		return;
-	}
-
-	mpc5121_nfc_send_cmd(mtd, command);
-	mpc5121_nfc_addr_cycle(mtd, column, page);
-
-	switch (command) {
-	case NAND_CMD_READ0:
-		if (mtd->writesize > 512)
-			mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
-		mpc5121_nfc_send_read_page(mtd);
-		break;
-
-	case NAND_CMD_READID:
-		mpc5121_nfc_send_read_id(mtd);
-		break;
-
-	case NAND_CMD_STATUS:
-		mpc5121_nfc_send_read_status(mtd);
-		if (chip->options & NAND_BUSWIDTH_16)
-			prv->column = 1;
-		else
-			prv->column = 0;
-		break;
-	}
-}
-
-/* Copy data from/to NFC spare buffers. */
-static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
-						u8 *buffer, uint size, int wr)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
-	uint o, s, sbsize, blksize;
-
-	/*
-	 * NAND spare area is available through NFC spare buffers.
-	 * The NFC divides spare area into (page_size / 512) chunks.
-	 * Each chunk is placed into separate spare memory area, using
-	 * first (spare_size / num_of_chunks) bytes of the buffer.
-	 *
-	 * For NAND device in which the spare area is not divided fully
-	 * by the number of chunks, number of used bytes in each spare
-	 * buffer is rounded down to the nearest even number of bytes,
-	 * and all remaining bytes are added to the last used spare area.
-	 *
-	 * For more information read section 26.6.10 of MPC5121e
-	 * Microcontroller Reference Manual, Rev. 3.
-	 */
-
-	/* Calculate number of valid bytes in each spare buffer */
-	sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
-
-	while (size) {
-		/* Calculate spare buffer number */
-		s = offset / sbsize;
-		if (s > NFC_SPARE_BUFFERS - 1)
-			s = NFC_SPARE_BUFFERS - 1;
-
-		/*
-		 * Calculate offset to requested data block in selected spare
-		 * buffer and its size.
-		 */
-		o = offset - (s * sbsize);
-		blksize = min(sbsize - o, size);
-
-		if (wr)
-			memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
-							buffer, blksize);
-		else
-			memcpy_fromio(buffer,
-				prv->regs + NFC_SPARE_AREA(s) + o, blksize);
-
-		buffer += blksize;
-		offset += blksize;
-		size -= blksize;
-	}
-}
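/*
 * Illustrative sketch, not part of the deleted driver above: the spare-buffer
 * arithmetic from mpc5121_nfc_copy_spare(), restated in user space. The two
 * geometries (2048+64 and 4096+218 bytes) come from the RCW decode table
 * later in this file; the OOB offsets are arbitrary sample values.
 */
#include <stdio.h>

#define SPARE_BUFFERS	8	/* mirrors NFC_SPARE_BUFFERS */

static void locate(unsigned int writesize, unsigned int oobsize,
		   unsigned int offset)
{
	unsigned int sbsize = (oobsize / (writesize / 512)) & ~1U;
	unsigned int s = offset / sbsize;

	if (s > SPARE_BUFFERS - 1)	/* excess bytes land in the last buffer */
		s = SPARE_BUFFERS - 1;

	printf("page %u+%u: sbsize=%u, OOB byte %u -> buffer %u, offset %u\n",
	       writesize, oobsize, sbsize, offset, s, offset - s * sbsize);
}

int main(void)
{
	locate(2048, 64, 40);	/* sbsize=16 -> buffer 2, offset 8  */
	locate(4096, 218, 200);	/* sbsize=26 -> buffer 7, offset 18 */
	return 0;
}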
-
-/* Copy data from/to NFC main and spare buffers */
-static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
-									int wr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-	uint c = prv->column;
-	uint l;
-
-	/* Handle spare area access */
-	if (prv->spareonly || c >= mtd->writesize) {
-		/* Calculate offset from beginning of spare area */
-		if (c >= mtd->writesize)
-			c -= mtd->writesize;
-
-		prv->column += len;
-		mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
-		return;
-	}
-
-	/*
-	 * Handle main area access - limit copy length to prevent
-	 * crossing main/spare boundary.
-	 */
-	l = min((uint)len, mtd->writesize - c);
-	prv->column += l;
-
-	if (wr)
-		memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
-	else
-		memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
-
-	/* Handle crossing main/spare boundary */
-	if (l != len) {
-		buf += l;
-		len -= l;
-		mpc5121_nfc_buf_copy(mtd, buf, len, wr);
-	}
-}
-
-/* Read data from NFC buffers */
-static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	mpc5121_nfc_buf_copy(mtd, buf, len, 0);
-}
-
-/* Write data to NFC buffers */
-static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
-						const u_char *buf, int len)
-{
-	mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
-}
-
-/* Read byte from NFC buffers */
-static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
-{
-	u8 tmp;
-
-	mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
-
-	return tmp;
-}
-
-/* Read word from NFC buffers */
-static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
-{
-	u16 tmp;
-
-	mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
-
-	return tmp;
-}
-
-/*
- * Read NFC configuration from Reset Config Word
- *
- * NFC is configured during reset in basis of information stored
- * in Reset Config Word. There is no other way to set NAND block
- * size, spare size and bus width.
- */
-static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-	struct mpc512x_reset_module *rm;
-	struct device_node *rmnode;
-	uint rcw_pagesize = 0;
-	uint rcw_sparesize = 0;
-	uint rcw_width;
-	uint rcwh;
-	uint romloc, ps;
-	int ret = 0;
-
-	rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
-	if (!rmnode) {
-		dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
-					"node in device tree!\n");
-		return -ENODEV;
-	}
-
-	rm = of_iomap(rmnode, 0);
-	if (!rm) {
-		dev_err(prv->dev, "Error mapping reset module node!\n");
-		ret = -EBUSY;
-		goto out;
-	}
-
-	rcwh = in_be32(&rm->rcwhr);
-
-	/* Bit 6: NFC bus width */
-	rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
-
-	/* Bit 7: NFC Page/Spare size */
-	ps = (rcwh >> 7) & 0x1;
-
-	/* Bits [22:21]: ROM Location */
-	romloc = (rcwh >> 21) & 0x3;
-
-	/* Decode RCW bits */
-	switch ((ps << 2) | romloc) {
-	case 0x00:
-	case 0x01:
-		rcw_pagesize = 512;
-		rcw_sparesize = 16;
-		break;
-	case 0x02:
-	case 0x03:
-		rcw_pagesize = 4096;
-		rcw_sparesize = 128;
-		break;
-	case 0x04:
-	case 0x05:
-		rcw_pagesize = 2048;
-		rcw_sparesize = 64;
-		break;
-	case 0x06:
-	case 0x07:
-		rcw_pagesize = 4096;
-		rcw_sparesize = 218;
-		break;
-	}
-
-	mtd->writesize = rcw_pagesize;
-	mtd->oobsize = rcw_sparesize;
-	if (rcw_width == 2)
-		chip->options |= NAND_BUSWIDTH_16;
-
-	dev_notice(prv->dev, "Configured for "
-				"%u-bit NAND, page size %u "
-				"with %u spare.\n",
-				rcw_width * 8, rcw_pagesize,
-				rcw_sparesize);
-	iounmap(rm);
-out:
-	of_node_put(rmnode);
-	return ret;
-}
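/*
 * Illustrative sketch, not part of the deleted driver above: one sample
 * decode of the fields extracted by mpc5121_nfc_read_hw_config(). The RCWH
 * value is invented for illustration; the bit positions and the resulting
 * geometry follow the switch statement in that function.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rcwh = 0x00000080;				/* hypothetical RCWH */
	unsigned int width = ((rcwh >> 6) & 0x1) ? 2 : 1;	/* bus width, bytes */
	unsigned int ps = (rcwh >> 7) & 0x1;			/* page/spare size select */
	unsigned int romloc = (rcwh >> 21) & 0x3;		/* ROM location */

	/* (ps << 2) | romloc == 0x04, i.e. a 2048-byte page with 64 spare */
	printf("%u-bit bus, ps=%u, romloc=%u -> 2048+64\n", width * 8, ps, romloc);
	return 0;
}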
-
-/* Free driver resources */
-static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
-
-	if (prv->clk)
-		clk_disable_unprepare(prv->clk);
-
-	if (prv->csreg)
-		iounmap(prv->csreg);
-}
-
-static int mpc5121_nfc_probe(struct platform_device *op)
-{
-	struct device_node *dn = op->dev.of_node;
-	struct clk *clk;
-	struct device *dev = &op->dev;
-	struct mpc5121_nfc_prv *prv;
-	struct resource res;
-	struct mtd_info *mtd;
-	struct nand_chip *chip;
-	unsigned long regs_paddr, regs_size;
-	const __be32 *chips_no;
-	int resettime = 0;
-	int retval = 0;
-	int rev, len;
-
-	/*
-	 * Check SoC revision. This driver supports only NFC
-	 * in MPC5121 revision 2 and MPC5123 revision 3.
-	 */
-	rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
-	if ((rev != 2) && (rev != 3)) {
-		dev_err(dev, "SoC revision %u is not supported!\n", rev);
-		return -ENXIO;
-	}
-
-	prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
-	if (!prv)
-		return -ENOMEM;
-
-	chip = &prv->chip;
-	mtd = nand_to_mtd(chip);
-
-	mtd->dev.parent = dev;
-	nand_set_controller_data(chip, prv);
-	nand_set_flash_node(chip, dn);
-	prv->dev = dev;
-
-	/* Read NFC configuration from Reset Config Word */
-	retval = mpc5121_nfc_read_hw_config(mtd);
-	if (retval) {
-		dev_err(dev, "Unable to read NFC config!\n");
-		return retval;
-	}
-
-	prv->irq = irq_of_parse_and_map(dn, 0);
-	if (prv->irq == NO_IRQ) {
-		dev_err(dev, "Error mapping IRQ!\n");
-		return -EINVAL;
-	}
-
-	retval = of_address_to_resource(dn, 0, &res);
-	if (retval) {
-		dev_err(dev, "Error parsing memory region!\n");
-		return retval;
-	}
-
-	chips_no = of_get_property(dn, "chips", &len);
-	if (!chips_no || len != sizeof(*chips_no)) {
-		dev_err(dev, "Invalid/missing 'chips' property!\n");
-		return -EINVAL;
-	}
-
-	regs_paddr = res.start;
-	regs_size = resource_size(&res);
-
-	if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
-		dev_err(dev, "Error requesting memory region!\n");
-		return -EBUSY;
-	}
-
-	prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
-	if (!prv->regs) {
-		dev_err(dev, "Error mapping memory region!\n");
-		return -ENOMEM;
-	}
-
-	mtd->name = "MPC5121 NAND";
-	chip->dev_ready = mpc5121_nfc_dev_ready;
-	chip->cmdfunc = mpc5121_nfc_command;
-	chip->read_byte = mpc5121_nfc_read_byte;
-	chip->read_word = mpc5121_nfc_read_word;
-	chip->read_buf = mpc5121_nfc_read_buf;
-	chip->write_buf = mpc5121_nfc_write_buf;
-	chip->select_chip = mpc5121_nfc_select_chip;
-	chip->bbt_options = NAND_BBT_USE_FLASH;
-	chip->ecc.mode = NAND_ECC_SOFT;
-	chip->ecc.algo = NAND_ECC_HAMMING;
-
-	/* Support external chip-select logic on ADS5121 board */
-	if (of_machine_is_compatible("fsl,mpc5121ads")) {
-		retval = ads5121_chipselect_init(mtd);
-		if (retval) {
-			dev_err(dev, "Chipselect init error!\n");
-			return retval;
-		}
-
-		chip->select_chip = ads5121_select_chip;
-	}
-
-	/* Enable NFC clock */
-	clk = devm_clk_get(dev, "ipg");
-	if (IS_ERR(clk)) {
-		dev_err(dev, "Unable to acquire NFC clock!\n");
-		retval = PTR_ERR(clk);
-		goto error;
-	}
-	retval = clk_prepare_enable(clk);
-	if (retval) {
-		dev_err(dev, "Unable to enable NFC clock!\n");
-		goto error;
-	}
-	prv->clk = clk;
-
-	/* Reset NAND Flash controller */
-	nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
-	while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
-		if (resettime++ >= NFC_RESET_TIMEOUT) {
-			dev_err(dev, "Timeout while resetting NFC!\n");
-			retval = -EINVAL;
-			goto error;
-		}
-
-		udelay(1);
-	}
-
-	/* Enable write to NFC memory */
-	nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
-
-	/* Enable write to all NAND pages */
-	nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
-	nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
-	nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
-
-	/*
-	 * Setup NFC:
-	 *	- Big Endian transfers,
-	 *	- Interrupt after full page read/write.
-	 */
-	nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
-							NFC_FULL_PAGE_INT);
-
-	/* Set spare area size */
-	nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
-
-	init_waitqueue_head(&prv->irq_waitq);
-	retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
-									mtd);
-	if (retval) {
-		dev_err(dev, "Error requesting IRQ!\n");
-		goto error;
-	}
-
-	/* Detect NAND chips */
-	if (nand_scan(mtd, be32_to_cpup(chips_no))) {
-		dev_err(dev, "NAND Flash not found !\n");
-		retval = -ENXIO;
-		goto error;
-	}
-
-	/* Set erase block size */
-	switch (mtd->erasesize / mtd->writesize) {
-	case 32:
-		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
-		break;
-
-	case 64:
-		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
-		break;
-
-	case 128:
-		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
-		break;
-
-	case 256:
-		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
-		break;
-
-	default:
-		dev_err(dev, "Unsupported NAND flash!\n");
-		retval = -ENXIO;
-		goto error;
-	}
-
-	dev_set_drvdata(dev, mtd);
-
-	/* Register device in MTD */
-	retval = mtd_device_register(mtd, NULL, 0);
-	if (retval) {
-		dev_err(dev, "Error adding MTD device!\n");
-		goto error;
-	}
-
-	return 0;
-error:
-	mpc5121_nfc_free(dev, mtd);
-	return retval;
-}
-
-static int mpc5121_nfc_remove(struct platform_device *op)
-{
-	struct device *dev = &op->dev;
-	struct mtd_info *mtd = dev_get_drvdata(dev);
-
-	nand_release(mtd);
-	mpc5121_nfc_free(dev, mtd);
-
-	return 0;
-}
-
-static const struct of_device_id mpc5121_nfc_match[] = {
-	{ .compatible = "fsl,mpc5121-nfc", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
-
-static struct platform_driver mpc5121_nfc_driver = {
-	.probe		= mpc5121_nfc_probe,
-	.remove		= mpc5121_nfc_remove,
-	.driver		= {
-		.name = DRV_NAME,
-		.of_match_table = mpc5121_nfc_match,
-	},
-};
-
-module_platform_driver(mpc5121_nfc_driver);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc.");
-MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
deleted file mode 100644
index 25a4fbd4d24a..000000000000
--- a/drivers/mtd/nand/mtk_ecc.c
+++ /dev/null
@@ -1,530 +0,0 @@ 
-/*
- * MTK ECC controller driver.
- * Copyright (C) 2016  MediaTek Inc.
- * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
- *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/iopoll.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/mutex.h>
-
-#include "mtk_ecc.h"
-
-#define ECC_IDLE_MASK		BIT(0)
-#define ECC_IRQ_EN		BIT(0)
-#define ECC_OP_ENABLE		(1)
-#define ECC_OP_DISABLE		(0)
-
-#define ECC_ENCCON		(0x00)
-#define ECC_ENCCNFG		(0x04)
-#define		ECC_CNFG_4BIT		(0)
-#define		ECC_CNFG_6BIT		(1)
-#define		ECC_CNFG_8BIT		(2)
-#define		ECC_CNFG_10BIT		(3)
-#define		ECC_CNFG_12BIT		(4)
-#define		ECC_CNFG_14BIT		(5)
-#define		ECC_CNFG_16BIT		(6)
-#define		ECC_CNFG_18BIT		(7)
-#define		ECC_CNFG_20BIT		(8)
-#define		ECC_CNFG_22BIT		(9)
-#define		ECC_CNFG_24BIT		(0xa)
-#define		ECC_CNFG_28BIT		(0xb)
-#define		ECC_CNFG_32BIT		(0xc)
-#define		ECC_CNFG_36BIT		(0xd)
-#define		ECC_CNFG_40BIT		(0xe)
-#define		ECC_CNFG_44BIT		(0xf)
-#define		ECC_CNFG_48BIT		(0x10)
-#define		ECC_CNFG_52BIT		(0x11)
-#define		ECC_CNFG_56BIT		(0x12)
-#define		ECC_CNFG_60BIT		(0x13)
-#define		ECC_MODE_SHIFT		(5)
-#define		ECC_MS_SHIFT		(16)
-#define ECC_ENCDIADDR		(0x08)
-#define ECC_ENCIDLE		(0x0C)
-#define ECC_ENCPAR(x)		(0x10 + (x) * sizeof(u32))
-#define ECC_ENCIRQ_EN		(0x80)
-#define ECC_ENCIRQ_STA		(0x84)
-#define ECC_DECCON		(0x100)
-#define ECC_DECCNFG		(0x104)
-#define		DEC_EMPTY_EN		BIT(31)
-#define		DEC_CNFG_CORRECT	(0x3 << 12)
-#define ECC_DECIDLE		(0x10C)
-#define ECC_DECENUM0		(0x114)
-#define		ERR_MASK		(0x3f)
-#define ECC_DECDONE		(0x124)
-#define ECC_DECIRQ_EN		(0x200)
-#define ECC_DECIRQ_STA		(0x204)
-
-#define ECC_TIMEOUT		(500000)
-
-#define ECC_IDLE_REG(op)	((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
-#define ECC_CTL_REG(op)		((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
-#define ECC_IRQ_REG(op)		((op) == ECC_ENCODE ? \
-					ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
-
-struct mtk_ecc {
-	struct device *dev;
-	void __iomem *regs;
-	struct clk *clk;
-
-	struct completion done;
-	struct mutex lock;
-	u32 sectors;
-};
-
-static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
-				     enum mtk_ecc_operation op)
-{
-	struct device *dev = ecc->dev;
-	u32 val;
-	int ret;
-
-	ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
-					val & ECC_IDLE_MASK,
-					10, ECC_TIMEOUT);
-	if (ret)
-		dev_warn(dev, "%s NOT idle\n",
-			 op == ECC_ENCODE ? "encoder" : "decoder");
-}
-
-static irqreturn_t mtk_ecc_irq(int irq, void *id)
-{
-	struct mtk_ecc *ecc = id;
-	enum mtk_ecc_operation op;
-	u32 dec, enc;
-
-	dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
-	if (dec) {
-		op = ECC_DECODE;
-		dec = readw(ecc->regs + ECC_DECDONE);
-		if (dec & ecc->sectors) {
-			ecc->sectors = 0;
-			complete(&ecc->done);
-		} else {
-			return IRQ_HANDLED;
-		}
-	} else {
-		enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
-		if (enc) {
-			op = ECC_ENCODE;
-			complete(&ecc->done);
-		} else {
-			return IRQ_NONE;
-		}
-	}
-
-	writel(0, ecc->regs + ECC_IRQ_REG(op));
-
-	return IRQ_HANDLED;
-}
-
-static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
-{
-	u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
-	u32 reg;
-
-	switch (config->strength) {
-	case 4:
-		ecc_bit = ECC_CNFG_4BIT;
-		break;
-	case 6:
-		ecc_bit = ECC_CNFG_6BIT;
-		break;
-	case 8:
-		ecc_bit = ECC_CNFG_8BIT;
-		break;
-	case 10:
-		ecc_bit = ECC_CNFG_10BIT;
-		break;
-	case 12:
-		ecc_bit = ECC_CNFG_12BIT;
-		break;
-	case 14:
-		ecc_bit = ECC_CNFG_14BIT;
-		break;
-	case 16:
-		ecc_bit = ECC_CNFG_16BIT;
-		break;
-	case 18:
-		ecc_bit = ECC_CNFG_18BIT;
-		break;
-	case 20:
-		ecc_bit = ECC_CNFG_20BIT;
-		break;
-	case 22:
-		ecc_bit = ECC_CNFG_22BIT;
-		break;
-	case 24:
-		ecc_bit = ECC_CNFG_24BIT;
-		break;
-	case 28:
-		ecc_bit = ECC_CNFG_28BIT;
-		break;
-	case 32:
-		ecc_bit = ECC_CNFG_32BIT;
-		break;
-	case 36:
-		ecc_bit = ECC_CNFG_36BIT;
-		break;
-	case 40:
-		ecc_bit = ECC_CNFG_40BIT;
-		break;
-	case 44:
-		ecc_bit = ECC_CNFG_44BIT;
-		break;
-	case 48:
-		ecc_bit = ECC_CNFG_48BIT;
-		break;
-	case 52:
-		ecc_bit = ECC_CNFG_52BIT;
-		break;
-	case 56:
-		ecc_bit = ECC_CNFG_56BIT;
-		break;
-	case 60:
-		ecc_bit = ECC_CNFG_60BIT;
-		break;
-	default:
-		dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
-			config->strength);
-	}
-
-	if (config->op == ECC_ENCODE) {
-		/* configure ECC encoder (in bits) */
-		enc_sz = config->len << 3;
-
-		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
-		reg |= (enc_sz << ECC_MS_SHIFT);
-		writel(reg, ecc->regs + ECC_ENCCNFG);
-
-		if (config->mode != ECC_NFI_MODE)
-			writel(lower_32_bits(config->addr),
-			       ecc->regs + ECC_ENCDIADDR);
-
-	} else {
-		/* configure ECC decoder (in bits) */
-		dec_sz = (config->len << 3) +
-					config->strength * ECC_PARITY_BITS;
-
-		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
-		reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
-		reg |= DEC_EMPTY_EN;
-		writel(reg, ecc->regs + ECC_DECCNFG);
-
-		if (config->sectors)
-			ecc->sectors = 1 << (config->sectors - 1);
-	}
-}
-
-void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
-		       int sectors)
-{
-	u32 offset, i, err;
-	u32 bitflips = 0;
-
-	stats->corrected = 0;
-	stats->failed = 0;
-
-	for (i = 0; i < sectors; i++) {
-		offset = (i >> 2) << 2;
-		err = readl(ecc->regs + ECC_DECENUM0 + offset);
-		err = err >> ((i % 4) * 8);
-		err &= ERR_MASK;
-		if (err == ERR_MASK) {
-			/* uncorrectable errors */
-			stats->failed++;
-			continue;
-		}
-
-		stats->corrected += err;
-		bitflips = max_t(u32, bitflips, err);
-	}
-
-	stats->bitflips = bitflips;
-}
-EXPORT_SYMBOL(mtk_ecc_get_stats);
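/*
 * Illustrative sketch, not part of the deleted driver above: how
 * mtk_ecc_get_stats() indexes the DECENUM registers. Each 32-bit register
 * carries the error counts of four sectors, one per byte lane, and only the
 * low six bits of each lane (ERR_MASK) are valid.
 */
#include <stdio.h>

int main(void)
{
	unsigned int sectors[] = { 0, 3, 5, 9 };
	unsigned int i;

	for (i = 0; i < sizeof(sectors) / sizeof(sectors[0]); i++) {
		unsigned int s = sectors[i];
		unsigned int reg_off = (s >> 2) << 2;	/* byte offset from ECC_DECENUM0 */
		unsigned int shift = (s % 4) * 8;	/* lane inside that register */

		printf("sector %u -> DECENUM0 + 0x%x, bits %u..%u\n",
		       s, reg_off, shift, shift + 5);
	}
	return 0;
}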
-
-void mtk_ecc_release(struct mtk_ecc *ecc)
-{
-	clk_disable_unprepare(ecc->clk);
-	put_device(ecc->dev);
-}
-EXPORT_SYMBOL(mtk_ecc_release);
-
-static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
-{
-	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
-	writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
-
-	mtk_ecc_wait_idle(ecc, ECC_DECODE);
-	writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
-}
-
-static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
-{
-	struct platform_device *pdev;
-	struct mtk_ecc *ecc;
-
-	pdev = of_find_device_by_node(np);
-	if (!pdev || !platform_get_drvdata(pdev))
-		return ERR_PTR(-EPROBE_DEFER);
-
-	get_device(&pdev->dev);
-	ecc = platform_get_drvdata(pdev);
-	clk_prepare_enable(ecc->clk);
-	mtk_ecc_hw_init(ecc);
-
-	return ecc;
-}
-
-struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
-{
-	struct mtk_ecc *ecc = NULL;
-	struct device_node *np;
-
-	np = of_parse_phandle(of_node, "ecc-engine", 0);
-	if (np) {
-		ecc = mtk_ecc_get(np);
-		of_node_put(np);
-	}
-
-	return ecc;
-}
-EXPORT_SYMBOL(of_mtk_ecc_get);
-
-int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
-{
-	enum mtk_ecc_operation op = config->op;
-	int ret;
-
-	ret = mutex_lock_interruptible(&ecc->lock);
-	if (ret) {
-		dev_err(ecc->dev, "interrupted when attempting to lock\n");
-		return ret;
-	}
-
-	mtk_ecc_wait_idle(ecc, op);
-	mtk_ecc_config(ecc, config);
-	writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
-
-	init_completion(&ecc->done);
-	writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
-
-	return 0;
-}
-EXPORT_SYMBOL(mtk_ecc_enable);
-
-void mtk_ecc_disable(struct mtk_ecc *ecc)
-{
-	enum mtk_ecc_operation op = ECC_ENCODE;
-
-	/* find out the running operation */
-	if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
-		op = ECC_DECODE;
-
-	/* disable it */
-	mtk_ecc_wait_idle(ecc, op);
-	writew(0, ecc->regs + ECC_IRQ_REG(op));
-	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
-
-	mutex_unlock(&ecc->lock);
-}
-EXPORT_SYMBOL(mtk_ecc_disable);
-
-int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
-{
-	int ret;
-
-	ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
-	if (!ret) {
-		dev_err(ecc->dev, "%s timeout - interrupt did not arrive\n",
-			(op == ECC_ENCODE) ? "encoder" : "decoder");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL(mtk_ecc_wait_done);
-
-int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
-		   u8 *data, u32 bytes)
-{
-	dma_addr_t addr;
-	u32 *p, len, i;
-	int ret = 0;
-
-	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
-	ret = dma_mapping_error(ecc->dev, addr);
-	if (ret) {
-		dev_err(ecc->dev, "dma mapping error\n");
-		return -EINVAL;
-	}
-
-	config->op = ECC_ENCODE;
-	config->addr = addr;
-	ret = mtk_ecc_enable(ecc, config);
-	if (ret) {
-		dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
-		return ret;
-	}
-
-	ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
-	if (ret)
-		goto timeout;
-
-	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
-
-	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
-	len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
-	p = (u32 *)(data + bytes);
-
-	/* write the parity bytes generated by the ECC back to the OOB region */
-	for (i = 0; i < len; i++)
-		p[i] = readl(ecc->regs + ECC_ENCPAR(i));
-timeout:
-
-	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
-	mtk_ecc_disable(ecc);
-
-	return ret;
-}
-EXPORT_SYMBOL(mtk_ecc_encode);
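/*
 * Illustrative sketch, not part of the deleted driver above: the parity size
 * computed in mtk_ecc_encode(), worked through for a few strengths. len is
 * the number of parity bytes per sector, (strength * 14 + 7) / 8; the copy
 * loop above then reads that many 32-bit ECC_ENCPAR() words.
 */
#include <stdio.h>

#define PARITY_BITS	14	/* mirrors ECC_PARITY_BITS */

int main(void)
{
	unsigned int strengths[] = { 4, 12, 24, 60 };
	unsigned int i;

	for (i = 0; i < sizeof(strengths) / sizeof(strengths[0]); i++) {
		unsigned int len = (strengths[i] * PARITY_BITS + 7) >> 3;

		/* 4 -> 7 bytes, 12 -> 21, 24 -> 42, 60 -> 105 */
		printf("strength %2u -> %3u parity bytes\n", strengths[i], len);
	}
	return 0;
}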
-
-void mtk_ecc_adjust_strength(u32 *p)
-{
-	u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
-			40, 44, 48, 52, 56, 60};
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(ecc); i++) {
-		if (*p <= ecc[i]) {
-			if (!i)
-				*p = ecc[i];
-			else if (*p != ecc[i])
-				*p = ecc[i - 1];
-			return;
-		}
-	}
-
-	*p = ecc[ARRAY_SIZE(ecc) - 1];
-}
-EXPORT_SYMBOL(mtk_ecc_adjust_strength);
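/*
 * Illustrative sketch, not part of the deleted driver above: a user-space
 * restatement of mtk_ecc_adjust_strength(), just to show the rounding
 * behaviour - requests are rounded down to the nearest supported strength,
 * below-minimum requests become 4 and above-maximum requests clamp to 60.
 */
#include <stdio.h>

static void adjust(unsigned int *p)
{
	static const unsigned int ecc[] = { 4, 6, 8, 10, 12, 14, 16, 18, 20, 22,
					    24, 28, 32, 36, 40, 44, 48, 52, 56, 60 };
	unsigned int i;

	for (i = 0; i < sizeof(ecc) / sizeof(ecc[0]); i++) {
		if (*p <= ecc[i]) {
			*p = i ? (*p == ecc[i] ? *p : ecc[i - 1]) : ecc[0];
			return;
		}
	}
	*p = ecc[sizeof(ecc) / sizeof(ecc[0]) - 1];
}

int main(void)
{
	unsigned int samples[] = { 3, 24, 30, 100 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int s = samples[i];

		adjust(&s);
		/* 3 -> 4, 24 -> 24, 30 -> 28, 100 -> 60 */
		printf("%u -> %u\n", samples[i], s);
	}
	return 0;
}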
-
-static int mtk_ecc_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct mtk_ecc *ecc;
-	struct resource *res;
-	int irq, ret;
-
-	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
-	if (!ecc)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	ecc->regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(ecc->regs)) {
-		dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
-		return PTR_ERR(ecc->regs);
-	}
-
-	ecc->clk = devm_clk_get(dev, NULL);
-	if (IS_ERR(ecc->clk)) {
-		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
-		return PTR_ERR(ecc->clk);
-	}
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "failed to get irq\n");
-		return -EINVAL;
-	}
-
-	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-	if (ret) {
-		dev_err(dev, "failed to set DMA mask\n");
-		return ret;
-	}
-
-	ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
-	if (ret) {
-		dev_err(dev, "failed to request irq\n");
-		return -EINVAL;
-	}
-
-	ecc->dev = dev;
-	mutex_init(&ecc->lock);
-	platform_set_drvdata(pdev, ecc);
-	dev_info(dev, "probed\n");
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mtk_ecc_suspend(struct device *dev)
-{
-	struct mtk_ecc *ecc = dev_get_drvdata(dev);
-
-	clk_disable_unprepare(ecc->clk);
-
-	return 0;
-}
-
-static int mtk_ecc_resume(struct device *dev)
-{
-	struct mtk_ecc *ecc = dev_get_drvdata(dev);
-	int ret;
-
-	ret = clk_prepare_enable(ecc->clk);
-	if (ret) {
-		dev_err(dev, "failed to enable clk\n");
-		return ret;
-	}
-
-	mtk_ecc_hw_init(ecc);
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
-#endif
-
-static const struct of_device_id mtk_ecc_dt_match[] = {
-	{ .compatible = "mediatek,mt2701-ecc" },
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
-
-static struct platform_driver mtk_ecc_driver = {
-	.probe  = mtk_ecc_probe,
-	.driver = {
-		.name  = "mtk-ecc",
-		.of_match_table = of_match_ptr(mtk_ecc_dt_match),
-#ifdef CONFIG_PM_SLEEP
-		.pm = &mtk_ecc_pm_ops,
-#endif
-	},
-};
-
-module_platform_driver(mtk_ecc_driver);
-
-MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
-MODULE_DESCRIPTION("MTK Nand ECC Driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h
deleted file mode 100644
index cbeba5cd1c13..000000000000
--- a/drivers/mtd/nand/mtk_ecc.h
+++ /dev/null
@@ -1,50 +0,0 @@ 
-/*
- * MTK SDG1 ECC controller
- *
- * Copyright (c) 2016 Mediatek
- * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
- *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published
- * by the Free Software Foundation.
- */
-
-#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
-#define __DRIVERS_MTD_NAND_MTK_ECC_H__
-
-#include <linux/types.h>
-
-#define ECC_PARITY_BITS		(14)
-
-enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
-enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
-
-struct device_node;
-struct mtk_ecc;
-
-struct mtk_ecc_stats {
-	u32 corrected;
-	u32 bitflips;
-	u32 failed;
-};
-
-struct mtk_ecc_config {
-	enum mtk_ecc_operation op;
-	enum mtk_ecc_mode mode;
-	dma_addr_t addr;
-	u32 strength;
-	u32 sectors;
-	u32 len;
-};
-
-int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
-void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
-int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
-int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
-void mtk_ecc_disable(struct mtk_ecc *);
-void mtk_ecc_adjust_strength(u32 *);
-
-struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
-void mtk_ecc_release(struct mtk_ecc *);
-
-#endif
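The header above is the complete interface the ECC block exports to its consumer (the mtk_nand.c NFI driver deleted below). As a rough orientation only - the device-tree node, the strength and sector numbers, and the error handling here are illustrative assumptions rather than code taken from this patch - a decode pass through that interface could be wired up along these lines:

#include <linux/err.h>
#include <linux/errno.h>
#include "mtk_ecc.h"

static int example_decode(struct device_node *np, u32 sectors, u32 sector_len)
{
	struct mtk_ecc_config cfg = {
		.op       = ECC_DECODE,
		.mode     = ECC_NFI_MODE,	/* data streamed by the NFI */
		.strength = 12,			/* illustrative strength */
		.sectors  = sectors,
		.len      = sector_len,		/* data bytes covered per sector */
	};
	struct mtk_ecc_stats stats;
	struct mtk_ecc *ecc;
	int ret;

	ecc = of_mtk_ecc_get(np);		/* looks up the "ecc-engine" phandle */
	if (IS_ERR_OR_NULL(ecc))
		return ecc ? PTR_ERR(ecc) : -ENODEV;

	ret = mtk_ecc_enable(ecc, &cfg);
	if (ret)
		goto out;

	/* ... the NFI page transfer would happen here ... */

	ret = mtk_ecc_wait_done(ecc, ECC_DECODE);
	if (!ret)
		mtk_ecc_get_stats(ecc, &stats, sectors);

	mtk_ecc_disable(ecc);
out:
	mtk_ecc_release(ecc);
	return ret;
}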
diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c
deleted file mode 100644
index 65156b8fe839..000000000000
--- a/drivers/mtd/nand/mtk_nand.c
+++ /dev/null
@@ -1,1526 +0,0 @@ 
-/*
- * MTK NAND Flash controller driver.
- * Copyright (C) 2016 MediaTek Inc.
- * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
- *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/mtd.h>
-#include <linux/module.h>
-#include <linux/iopoll.h>
-#include <linux/of.h>
-#include "mtk_ecc.h"
-
-/* NAND controller register definition */
-#define NFI_CNFG		(0x00)
-#define		CNFG_AHB		BIT(0)
-#define		CNFG_READ_EN		BIT(1)
-#define		CNFG_DMA_BURST_EN	BIT(2)
-#define		CNFG_BYTE_RW		BIT(6)
-#define		CNFG_HW_ECC_EN		BIT(8)
-#define		CNFG_AUTO_FMT_EN	BIT(9)
-#define		CNFG_OP_CUST		(6 << 12)
-#define NFI_PAGEFMT		(0x04)
-#define		PAGEFMT_FDM_ECC_SHIFT	(12)
-#define		PAGEFMT_FDM_SHIFT	(8)
-#define		PAGEFMT_SPARE_16	(0)
-#define		PAGEFMT_SPARE_26	(1)
-#define		PAGEFMT_SPARE_27	(2)
-#define		PAGEFMT_SPARE_28	(3)
-#define		PAGEFMT_SPARE_32	(4)
-#define		PAGEFMT_SPARE_36	(5)
-#define		PAGEFMT_SPARE_40	(6)
-#define		PAGEFMT_SPARE_44	(7)
-#define		PAGEFMT_SPARE_48	(8)
-#define		PAGEFMT_SPARE_49	(9)
-#define		PAGEFMT_SPARE_50	(0xa)
-#define		PAGEFMT_SPARE_51	(0xb)
-#define		PAGEFMT_SPARE_52	(0xc)
-#define		PAGEFMT_SPARE_62	(0xd)
-#define		PAGEFMT_SPARE_63	(0xe)
-#define		PAGEFMT_SPARE_64	(0xf)
-#define		PAGEFMT_SPARE_SHIFT	(4)
-#define		PAGEFMT_SEC_SEL_512	BIT(2)
-#define		PAGEFMT_512_2K		(0)
-#define		PAGEFMT_2K_4K		(1)
-#define		PAGEFMT_4K_8K		(2)
-#define		PAGEFMT_8K_16K		(3)
-/* NFI control */
-#define NFI_CON			(0x08)
-#define		CON_FIFO_FLUSH		BIT(0)
-#define		CON_NFI_RST		BIT(1)
-#define		CON_BRD			BIT(8)  /* burst  read */
-#define		CON_BWR			BIT(9)	/* burst  write */
-#define		CON_SEC_SHIFT		(12)
-/* Timing control register */
-#define NFI_ACCCON		(0x0C)
-#define NFI_INTR_EN		(0x10)
-#define		INTR_AHB_DONE_EN	BIT(6)
-#define NFI_INTR_STA		(0x14)
-#define NFI_CMD			(0x20)
-#define NFI_ADDRNOB		(0x30)
-#define NFI_COLADDR		(0x34)
-#define NFI_ROWADDR		(0x38)
-#define NFI_STRDATA		(0x40)
-#define		STAR_EN			(1)
-#define		STAR_DE			(0)
-#define NFI_CNRNB		(0x44)
-#define NFI_DATAW		(0x50)
-#define NFI_DATAR		(0x54)
-#define NFI_PIO_DIRDY		(0x58)
-#define		PIO_DI_RDY		(0x01)
-#define NFI_STA			(0x60)
-#define		STA_CMD			BIT(0)
-#define		STA_ADDR		BIT(1)
-#define		STA_BUSY		BIT(8)
-#define		STA_EMP_PAGE		BIT(12)
-#define		NFI_FSM_CUSTDATA	(0xe << 16)
-#define		NFI_FSM_MASK		(0xf << 16)
-#define NFI_ADDRCNTR		(0x70)
-#define		CNTR_MASK		GENMASK(16, 12)
-#define NFI_STRADDR		(0x80)
-#define NFI_BYTELEN		(0x84)
-#define NFI_CSEL		(0x90)
-#define NFI_FDML(x)		(0xA0 + (x) * sizeof(u32) * 2)
-#define NFI_FDMM(x)		(0xA4 + (x) * sizeof(u32) * 2)
-#define NFI_FDM_MAX_SIZE	(8)
-#define NFI_FDM_MIN_SIZE	(1)
-#define NFI_MASTER_STA		(0x224)
-#define		MASTER_STA_MASK		(0x0FFF)
-#define NFI_EMPTY_THRESH	(0x23C)
-
-#define MTK_NAME		"mtk-nand"
-#define KB(x)			((x) * 1024UL)
-#define MB(x)			(KB(x) * 1024UL)
-
-#define MTK_TIMEOUT		(500000)
-#define MTK_RESET_TIMEOUT	(1000000)
-#define MTK_MAX_SECTOR		(16)
-#define MTK_NAND_MAX_NSELS	(2)
-
-struct mtk_nfc_bad_mark_ctl {
-	void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
-	u32 sec;
-	u32 pos;
-};
-
-/*
- * FDM: region used to store free OOB data
- */
-struct mtk_nfc_fdm {
-	u32 reg_size;
-	u32 ecc_size;
-};
-
-struct mtk_nfc_nand_chip {
-	struct list_head node;
-	struct nand_chip nand;
-
-	struct mtk_nfc_bad_mark_ctl bad_mark;
-	struct mtk_nfc_fdm fdm;
-	u32 spare_per_sector;
-
-	int nsels;
-	u8 sels[0];
-	/* nothing after this field */
-};
-
-struct mtk_nfc_clk {
-	struct clk *nfi_clk;
-	struct clk *pad_clk;
-};
-
-struct mtk_nfc {
-	struct nand_hw_control controller;
-	struct mtk_ecc_config ecc_cfg;
-	struct mtk_nfc_clk clk;
-	struct mtk_ecc *ecc;
-
-	struct device *dev;
-	void __iomem *regs;
-
-	struct completion done;
-	struct list_head chips;
-
-	u8 *buffer;
-};
-
-static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
-{
-	return container_of(nand, struct mtk_nfc_nand_chip, nand);
-}
-
-static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
-{
-	return (u8 *)p + i * chip->ecc.size;
-}
-
-static inline u8 *oob_ptr(struct nand_chip *chip, int i)
-{
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	u8 *poi;
-
-	/* map the sector's FDM data to free oob:
-	 * the beginning of the oob area stores the FDM data of bad mark sectors
-	 */
-
-	if (i < mtk_nand->bad_mark.sec)
-		poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
-	else if (i == mtk_nand->bad_mark.sec)
-		poi = chip->oob_poi;
-	else
-		poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
-
-	return poi;
-}
-
-static inline int mtk_data_len(struct nand_chip *chip)
-{
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-
-	return chip->ecc.size + mtk_nand->spare_per_sector;
-}
-
-static inline u8 *mtk_data_ptr(struct nand_chip *chip,  int i)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-
-	return nfc->buffer + i * mtk_data_len(chip);
-}
-
-static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-
-	return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
-}
-
-static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
-{
-	writel(val, nfc->regs + reg);
-}
-
-static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
-{
-	writew(val, nfc->regs + reg);
-}
-
-static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
-{
-	writeb(val, nfc->regs + reg);
-}
-
-static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
-{
-	return readl_relaxed(nfc->regs + reg);
-}
-
-static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
-{
-	return readw_relaxed(nfc->regs + reg);
-}
-
-static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
-{
-	return readb_relaxed(nfc->regs + reg);
-}
-
-static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
-{
-	struct device *dev = nfc->dev;
-	u32 val;
-	int ret;
-
-	/* reset all registers and force the NFI master to terminate */
-	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
-
-	/* wait for the master to finish the last transaction */
-	ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
-				 !(val & MASTER_STA_MASK), 50,
-				 MTK_RESET_TIMEOUT);
-	if (ret)
-		dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
-			 NFI_MASTER_STA, val);
-
-	/* ensure any status register affected by the NFI master is reset */
-	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
-	nfi_writew(nfc, STAR_DE, NFI_STRDATA);
-}
-
-static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
-{
-	struct device *dev = nfc->dev;
-	u32 val;
-	int ret;
-
-	nfi_writel(nfc, command, NFI_CMD);
-
-	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
-					!(val & STA_CMD), 10,  MTK_TIMEOUT);
-	if (ret) {
-		dev_warn(dev, "nfi core timed out entering command mode\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
-{
-	struct device *dev = nfc->dev;
-	u32 val;
-	int ret;
-
-	nfi_writel(nfc, addr, NFI_COLADDR);
-	nfi_writel(nfc, 0, NFI_ROWADDR);
-	nfi_writew(nfc, 1, NFI_ADDRNOB);
-
-	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
-					!(val & STA_ADDR), 10, MTK_TIMEOUT);
-	if (ret) {
-		dev_warn(dev, "nfi core timed out entering address mode\n");
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	u32 fmt, spare;
-
-	if (!mtd->writesize)
-		return 0;
-
-	spare = mtk_nand->spare_per_sector;
-
-	switch (mtd->writesize) {
-	case 512:
-		fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
-		break;
-	case KB(2):
-		if (chip->ecc.size == 512)
-			fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
-		else
-			fmt = PAGEFMT_512_2K;
-		break;
-	case KB(4):
-		if (chip->ecc.size == 512)
-			fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
-		else
-			fmt = PAGEFMT_2K_4K;
-		break;
-	case KB(8):
-		if (chip->ecc.size == 512)
-			fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
-		else
-			fmt = PAGEFMT_4K_8K;
-		break;
-	case KB(16):
-		fmt = PAGEFMT_8K_16K;
-		break;
-	default:
-		dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
-		return -EINVAL;
-	}
-
-	/*
-	 * the hardware doubles the spare-per-sector value when the ECC sector
-	 * size is 1024, so halve it here
-	 */
-	if (chip->ecc.size == 1024)
-		spare >>= 1;
-
-	switch (spare) {
-	case 16:
-		fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 26:
-		fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 27:
-		fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 28:
-		fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 32:
-		fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 36:
-		fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 40:
-		fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 44:
-		fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 48:
-		fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 49:
-		fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 50:
-		fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 51:
-		fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 52:
-		fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 62:
-		fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 63:
-		fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
-		break;
-	case 64:
-		fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
-		break;
-	default:
-		dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
-		return -EINVAL;
-	}
-
-	fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
-	fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
-	nfi_writew(nfc, fmt, NFI_PAGEFMT);
-
-	nfc->ecc_cfg.strength = chip->ecc.strength;
-	nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
-
-	return 0;
-}
-
-static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct mtk_nfc *nfc = nand_get_controller_data(nand);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
-
-	if (chip < 0)
-		return;
-
-	mtk_nfc_hw_runtime_config(mtd);
-
-	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
-}
-
-static int mtk_nfc_dev_ready(struct mtd_info *mtd)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
-
-	if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
-		return 0;
-
-	return 1;
-}
-
-static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
-
-	if (ctrl & NAND_ALE) {
-		mtk_nfc_send_address(nfc, dat);
-	} else if (ctrl & NAND_CLE) {
-		mtk_nfc_hw_reset(nfc);
-
-		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
-		mtk_nfc_send_command(nfc, dat);
-	}
-}
-
-static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
-{
-	int rc;
-	u8 val;
-
-	rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
-				       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
-	if (rc < 0)
-		dev_err(nfc->dev, "data not ready\n");
-}
-
-static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	u32 reg;
-
-	/* after each byte read, the NFI_STA reg is reset by the hardware */
-	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
-	if (reg != NFI_FSM_CUSTDATA) {
-		reg = nfi_readw(nfc, NFI_CNFG);
-		reg |= CNFG_BYTE_RW | CNFG_READ_EN;
-		nfi_writew(nfc, reg, NFI_CNFG);
-
-		/*
-		 * set to max sector to allow the HW to continue reading over
-		 * unaligned accesses
-		 */
-		reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
-		nfi_writel(nfc, reg, NFI_CON);
-
-		/* trigger to fetch data */
-		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
-	}
-
-	mtk_nfc_wait_ioready(nfc);
-
-	return nfi_readb(nfc, NFI_DATAR);
-}
-
-static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		buf[i] = mtk_nfc_read_byte(mtd);
-}
-
-static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
-	u32 reg;
-
-	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
-
-	if (reg != NFI_FSM_CUSTDATA) {
-		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
-		nfi_writew(nfc, reg, NFI_CNFG);
-
-		reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
-		nfi_writel(nfc, reg, NFI_CON);
-
-		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
-	}
-
-	mtk_nfc_wait_ioready(nfc);
-	nfi_writeb(nfc, byte, NFI_DATAW);
-}
-
-static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		mtk_nfc_write_byte(mtd, buf[i]);
-}
-
-static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	int size = chip->ecc.size + mtk_nand->fdm.reg_size;
-
-	nfc->ecc_cfg.mode = ECC_DMA_MODE;
-	nfc->ecc_cfg.op = ECC_ENCODE;
-
-	return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
-}
-
-static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
-{
-	/* nop */
-}
-
-static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
-	u32 bad_pos = nand->bad_mark.pos;
-
-	if (raw)
-		bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
-	else
-		bad_pos += nand->bad_mark.sec * chip->ecc.size;
-
-	swap(chip->oob_poi[0], buf[bad_pos]);
-}
-
-static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
-				  u32 len, const u8 *buf)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
-	u32 start, end;
-	int i, ret;
-
-	start = offset / chip->ecc.size;
-	end = DIV_ROUND_UP(offset + len, chip->ecc.size);
-
-	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
-	for (i = 0; i < chip->ecc.steps; i++) {
-		memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
-		       chip->ecc.size);
-
-		if (start > i || i >= end)
-			continue;
-
-		if (i == mtk_nand->bad_mark.sec)
-			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
-
-		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
-
-		/* program the CRC back to the OOB */
-		ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
-		if (ret < 0)
-			return ret;
-	}
-
-	return 0;
-}
-
-static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
-	u32 i;
-
-	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
-	for (i = 0; i < chip->ecc.steps; i++) {
-		if (buf)
-			memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
-			       chip->ecc.size);
-
-		if (i == mtk_nand->bad_mark.sec)
-			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
-
-		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
-	}
-}
-
-static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
-				    u32 sectors)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
-	u32 vall, valm;
-	u8 *oobptr;
-	int i, j;
-
-	for (i = 0; i < sectors; i++) {
-		oobptr = oob_ptr(chip, start + i);
-		vall = nfi_readl(nfc, NFI_FDML(i));
-		valm = nfi_readl(nfc, NFI_FDMM(i));
-
-		for (j = 0; j < fdm->reg_size; j++)
-			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
-	}
-}
-
-static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
-	u32 vall, valm;
-	u8 *oobptr;
-	int i, j;
-
-	for (i = 0; i < chip->ecc.steps; i++) {
-		oobptr = oob_ptr(chip, i);
-		vall = 0;
-		valm = 0;
-		for (j = 0; j < 8; j++) {
-			if (j < 4)
-				vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
-						<< (j * 8);
-			else
-				valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
-						<< ((j - 4) * 8);
-		}
-		nfi_writel(nfc, vall, NFI_FDML(i));
-		nfi_writel(nfc, valm, NFI_FDMM(i));
-	}
-}
-
-static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				 const u8 *buf, int page, int len)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct device *dev = nfc->dev;
-	dma_addr_t addr;
-	u32 reg;
-	int ret;
-
-	addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
-	ret = dma_mapping_error(nfc->dev, addr);
-	if (ret) {
-		dev_err(nfc->dev, "dma mapping error\n");
-		return -EINVAL;
-	}
-
-	reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
-	nfi_writew(nfc, reg, NFI_CNFG);
-
-	nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
-	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
-	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
-
-	init_completion(&nfc->done);
-
-	reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
-	nfi_writel(nfc, reg, NFI_CON);
-	nfi_writew(nfc, STAR_EN, NFI_STRDATA);
-
-	ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
-	if (!ret) {
-		dev_err(dev, "program ahb done timeout\n");
-		nfi_writew(nfc, 0, NFI_INTR_EN);
-		ret = -ETIMEDOUT;
-		goto timeout;
-	}
-
-	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
-					(reg & CNTR_MASK) >= chip->ecc.steps,
-					10, MTK_TIMEOUT);
-	if (ret)
-		dev_err(dev, "hwecc write timeout\n");
-
-timeout:
-
-	dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
-	nfi_writel(nfc, 0, NFI_CON);
-
-	return ret;
-}
-
-static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-			      const u8 *buf, int page, int raw)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	size_t len;
-	const u8 *bufpoi;
-	u32 reg;
-	int ret;
-
-	if (!raw) {
-		/* OOB => FDM: from register,  ECC: from HW */
-		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
-		nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
-
-		nfc->ecc_cfg.op = ECC_ENCODE;
-		nfc->ecc_cfg.mode = ECC_NFI_MODE;
-		ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
-		if (ret) {
-			/* clear NFI config */
-			reg = nfi_readw(nfc, NFI_CNFG);
-			reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
-			nfi_writew(nfc, reg, NFI_CNFG);
-
-			return ret;
-		}
-
-		memcpy(nfc->buffer, buf, mtd->writesize);
-		mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
-		bufpoi = nfc->buffer;
-
-		/* write OOB into the FDM registers (OOB area in MTK NAND) */
-		mtk_nfc_write_fdm(chip);
-	} else {
-		bufpoi = buf;
-	}
-
-	len = mtd->writesize + (raw ? mtd->oobsize : 0);
-	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
-
-	if (!raw)
-		mtk_ecc_disable(nfc->ecc);
-
-	return ret;
-}
-
-static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
-				    struct nand_chip *chip, const u8 *buf,
-				    int oob_on, int page)
-{
-	return mtk_nfc_write_page(mtd, chip, buf, page, 0);
-}
-
-static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				  const u8 *buf, int oob_on, int pg)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-
-	mtk_nfc_format_page(mtd, buf);
-	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
-}
-
-static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
-				       struct nand_chip *chip, u32 offset,
-				       u32 data_len, const u8 *buf,
-				       int oob_on, int page)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	int ret;
-
-	ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
-	if (ret < 0)
-		return ret;
-
-	/* use the data in the private buffer (now with FDM and CRC) */
-	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
-}
-
-static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-				 int page)
-{
-	int ret;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
-	ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
-	if (ret < 0)
-		return -EIO;
-
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-	ret = chip->waitfunc(mtd, chip);
-
-	return ret & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_ecc_stats stats;
-	int rc, i;
-
-	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
-	if (rc) {
-		memset(buf, 0xff, sectors * chip->ecc.size);
-		for (i = 0; i < sectors; i++)
-			memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
-		return 0;
-	}
-
-	mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
-	mtd->ecc_stats.corrected += stats.corrected;
-	mtd->ecc_stats.failed += stats.failed;
-
-	return stats.bitflips;
-}
-
-static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
-				u32 data_offs, u32 readlen,
-				u8 *bufpoi, int page, int raw)
-{
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	u32 spare = mtk_nand->spare_per_sector;
-	u32 column, sectors, start, end, reg;
-	dma_addr_t addr;
-	int bitflips;
-	size_t len;
-	u8 *buf;
-	int rc;
-
-	start = data_offs / chip->ecc.size;
-	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
-
-	sectors = end - start;
-	column = start * (chip->ecc.size + spare);
-
-	len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
-	buf = bufpoi + start * chip->ecc.size;
-
-	if (column != 0)
-		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
-
-	addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
-	rc = dma_mapping_error(nfc->dev, addr);
-	if (rc) {
-		dev_err(nfc->dev, "dma mapping error\n");
-
-		return -EINVAL;
-	}
-
-	reg = nfi_readw(nfc, NFI_CNFG);
-	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
-	if (!raw) {
-		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
-		nfi_writew(nfc, reg, NFI_CNFG);
-
-		nfc->ecc_cfg.mode = ECC_NFI_MODE;
-		nfc->ecc_cfg.sectors = sectors;
-		nfc->ecc_cfg.op = ECC_DECODE;
-		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
-		if (rc) {
-			dev_err(nfc->dev, "ecc enable\n");
-			/* clear NFI_CNFG */
-			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
-				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
-			nfi_writew(nfc, reg, NFI_CNFG);
-			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
-
-			return rc;
-		}
-	} else {
-		nfi_writew(nfc, reg, NFI_CNFG);
-	}
-
-	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
-	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
-	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
-
-	init_completion(&nfc->done);
-	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
-	nfi_writel(nfc, reg, NFI_CON);
-	nfi_writew(nfc, STAR_EN, NFI_STRDATA);
-
-	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
-	if (!rc)
-		dev_warn(nfc->dev, "read ahb/dma done timeout\n");
-
-	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
-				       (reg & CNTR_MASK) >= sectors, 10,
-				       MTK_TIMEOUT);
-	if (rc < 0) {
-		dev_err(nfc->dev, "subpage done timeout\n");
-		bitflips = -EIO;
-	} else {
-		bitflips = 0;
-		if (!raw) {
-			rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
-			bitflips = rc < 0 ? -ETIMEDOUT :
-				mtk_nfc_update_ecc_stats(mtd, buf, sectors);
-			mtk_nfc_read_fdm(chip, start, sectors);
-		}
-	}
-
-	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
-
-	if (raw)
-		goto done;
-
-	mtk_ecc_disable(nfc->ecc);
-
-	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
-		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
-done:
-	nfi_writel(nfc, 0, NFI_CON);
-
-	return bitflips;
-}
-
-static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
-				      struct nand_chip *chip, u32 off,
-				      u32 len, u8 *p, int pg)
-{
-	return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
-}
-
-static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
-				   struct nand_chip *chip, u8 *p,
-				   int oob_on, int pg)
-{
-	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
-}
-
-static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-				 u8 *buf, int oob_on, int page)
-{
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc *nfc = nand_get_controller_data(chip);
-	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
-	int i, ret;
-
-	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
-	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
-				   page, 1);
-	if (ret < 0)
-		return ret;
-
-	for (i = 0; i < chip->ecc.steps; i++) {
-		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
-
-		if (i == mtk_nand->bad_mark.sec)
-			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
-
-		if (buf)
-			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
-			       chip->ecc.size);
-	}
-
-	return ret;
-}
-
-static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
-				int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
-}
-
-static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
-{
-	/*
-	 * ACCON: access timing control register
-	 * -------------------------------------
-	 * 31:28: minimum required time for CS post pulling down after accessing
-	 *	the device
-	 * 27:22: minimum required time for CS pre pulling down before accessing
-	 *	the device
-	 * 21:16: minimum required time from NCEB low to NREB low
-	 * 15:12: minimum required time from NWEB high to NREB low.
-	 * 11:08: write enable hold time
-	 * 07:04: write wait states
-	 * 03:00: read wait states
-	 */
-	nfi_writel(nfc, 0x10804211, NFI_ACCCON);
-
-	/*
-	 * CNRNB: nand ready/busy register
-	 * -------------------------------
-	 * 7:4: timeout register for polling the NAND busy/ready signal
-	 * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
-	 */
-	nfi_writew(nfc, 0xf1, NFI_CNRNB);
-	nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
-
-	mtk_nfc_hw_reset(nfc);
-
-	nfi_readl(nfc, NFI_INTR_STA);
-	nfi_writel(nfc, 0, NFI_INTR_EN);
-}
-
-static irqreturn_t mtk_nfc_irq(int irq, void *id)
-{
-	struct mtk_nfc *nfc = id;
-	u16 sta, ien;
-
-	sta = nfi_readw(nfc, NFI_INTR_STA);
-	ien = nfi_readw(nfc, NFI_INTR_EN);
-
-	if (!(sta & ien))
-		return IRQ_NONE;
-
-	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
-	complete(&nfc->done);
-
-	return IRQ_HANDLED;
-}
-
-static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
-{
-	int ret;
-
-	ret = clk_prepare_enable(clk->nfi_clk);
-	if (ret) {
-		dev_err(dev, "failed to enable nfi clk\n");
-		return ret;
-	}
-
-	ret = clk_prepare_enable(clk->pad_clk);
-	if (ret) {
-		dev_err(dev, "failed to enable pad clk\n");
-		clk_disable_unprepare(clk->nfi_clk);
-		return ret;
-	}
-
-	return 0;
-}
-
-static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
-{
-	clk_disable_unprepare(clk->nfi_clk);
-	clk_disable_unprepare(clk->pad_clk);
-}
-
-static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oob_region)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
-	u32 eccsteps;
-
-	eccsteps = mtd->writesize / chip->ecc.size;
-
-	if (section >= eccsteps)
-		return -ERANGE;
-
-	oob_region->length = fdm->reg_size - fdm->ecc_size;
-	oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
-
-	return 0;
-}
-
-static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oob_region)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
-	u32 eccsteps;
-
-	if (section)
-		return -ERANGE;
-
-	eccsteps = mtd->writesize / chip->ecc.size;
-	oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
-	oob_region->length = mtd->oobsize - oob_region->offset;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
-	.free = mtk_nfc_ooblayout_free,
-	.ecc = mtk_nfc_ooblayout_ecc,
-};
-
-static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
-	u32 ecc_bytes;
-
-	ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
-
-	fdm->reg_size = chip->spare_per_sector - ecc_bytes;
-	if (fdm->reg_size > NFI_FDM_MAX_SIZE)
-		fdm->reg_size = NFI_FDM_MAX_SIZE;
-
-	/* bad block mark storage */
-	fdm->ecc_size = 1;
-}
-
-static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
-				     struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-
-	if (mtd->writesize == 512) {
-		bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
-	} else {
-		bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
-		bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
-		bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
-	}
-}
-
-static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
-			48, 49, 50, 51, 52, 62, 63, 64};
-	u32 eccsteps, i;
-
-	eccsteps = mtd->writesize / nand->ecc.size;
-	*sps = mtd->oobsize / eccsteps;
-
-	if (nand->ecc.size == 1024)
-		*sps >>= 1;
-
-	for (i = 0; i < ARRAY_SIZE(spare); i++) {
-		if (*sps <= spare[i]) {
-			if (!i)
-				*sps = spare[i];
-			else if (*sps != spare[i])
-				*sps = spare[i - 1];
-			break;
-		}
-	}
-
-	if (i >= ARRAY_SIZE(spare))
-		*sps = spare[ARRAY_SIZE(spare) - 1];
-
-	if (nand->ecc.size == 1024)
-		*sps <<= 1;
-}
-
-static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	u32 spare;
-	int free;
-
-	/* support only ecc hw mode */
-	if (nand->ecc.mode != NAND_ECC_HW) {
-		dev_err(dev, "ecc.mode not supported\n");
-		return -EINVAL;
-	}
-
-	/* if optional dt settings not present */
-	if (!nand->ecc.size || !nand->ecc.strength) {
-		/* use datasheet requirements */
-		nand->ecc.strength = nand->ecc_strength_ds;
-		nand->ecc.size = nand->ecc_step_ds;
-
-		/*
-		 * align eccstrength and eccsize
-		 * this controller only supports 512 and 1024 sizes
-		 */
-		if (nand->ecc.size < 1024) {
-			if (mtd->writesize > 512) {
-				nand->ecc.size = 1024;
-				nand->ecc.strength <<= 1;
-			} else {
-				nand->ecc.size = 512;
-			}
-		} else {
-			nand->ecc.size = 1024;
-		}
-
-		mtk_nfc_set_spare_per_sector(&spare, mtd);
-
-		/* calculate oob bytes except ecc parity data */
-		free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
-		free = spare - free;
-
-		/*
-		 * enhance ecc strength if oob left is bigger than max FDM size
-		 * or reduce ecc strength if oob size is not enough for ecc
-		 * parity data.
-		 */
-		if (free > NFI_FDM_MAX_SIZE) {
-			spare -= NFI_FDM_MAX_SIZE;
-			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
-		} else if (free < 0) {
-			spare -= NFI_FDM_MIN_SIZE;
-			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
-		}
-	}
-
-	mtk_ecc_adjust_strength(&nand->ecc.strength);
-
-	dev_info(dev, "eccsize %d eccstrength %d\n",
-		 nand->ecc.size, nand->ecc.strength);
-
-	return 0;
-}
-
-static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
-				  struct device_node *np)
-{
-	struct mtk_nfc_nand_chip *chip;
-	struct nand_chip *nand;
-	struct mtd_info *mtd;
-	int nsels, len;
-	u32 tmp;
-	int ret;
-	int i;
-
-	if (!of_get_property(np, "reg", &nsels))
-		return -ENODEV;
-
-	nsels /= sizeof(u32);
-	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
-		dev_err(dev, "invalid reg property size %d\n", nsels);
-		return -EINVAL;
-	}
-
-	chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
-			    GFP_KERNEL);
-	if (!chip)
-		return -ENOMEM;
-
-	chip->nsels = nsels;
-	for (i = 0; i < nsels; i++) {
-		ret = of_property_read_u32_index(np, "reg", i, &tmp);
-		if (ret) {
-			dev_err(dev, "reg property failure : %d\n", ret);
-			return ret;
-		}
-		chip->sels[i] = tmp;
-	}
-
-	nand = &chip->nand;
-	nand->controller = &nfc->controller;
-
-	nand_set_flash_node(nand, np);
-	nand_set_controller_data(nand, nfc);
-
-	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
-	nand->dev_ready = mtk_nfc_dev_ready;
-	nand->select_chip = mtk_nfc_select_chip;
-	nand->write_byte = mtk_nfc_write_byte;
-	nand->write_buf = mtk_nfc_write_buf;
-	nand->read_byte = mtk_nfc_read_byte;
-	nand->read_buf = mtk_nfc_read_buf;
-	nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
-
-	/* set default mode in case dt entry is missing */
-	nand->ecc.mode = NAND_ECC_HW;
-
-	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
-	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
-	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
-	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
-	nand->ecc.write_oob = mtk_nfc_write_oob_std;
-
-	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
-	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
-	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
-	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
-	nand->ecc.read_oob = mtk_nfc_read_oob_std;
-
-	mtd = nand_to_mtd(nand);
-	mtd->owner = THIS_MODULE;
-	mtd->dev.parent = dev;
-	mtd->name = MTK_NAME;
-	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
-
-	mtk_nfc_hw_init(nfc);
-
-	ret = nand_scan_ident(mtd, nsels, NULL);
-	if (ret)
-		return -ENODEV;
-
-	/* store bbt magic in page, because OOB is not protected */
-	if (nand->bbt_options & NAND_BBT_USE_FLASH)
-		nand->bbt_options |= NAND_BBT_NO_OOB;
-
-	ret = mtk_nfc_ecc_init(dev, mtd);
-	if (ret)
-		return -EINVAL;
-
-	if (nand->options & NAND_BUSWIDTH_16) {
-		dev_err(dev, "16bits buswidth not supported");
-		return -EINVAL;
-	}
-
-	mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
-	mtk_nfc_set_fdm(&chip->fdm, mtd);
-	mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
-
-	len = mtd->writesize + mtd->oobsize;
-	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
-	if (!nfc->buffer)
-		return  -ENOMEM;
-
-	ret = nand_scan_tail(mtd);
-	if (ret)
-		return -ENODEV;
-
-	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
-	if (ret) {
-		dev_err(dev, "mtd parse partition error\n");
-		nand_release(mtd);
-		return ret;
-	}
-
-	list_add_tail(&chip->node, &nfc->chips);
-
-	return 0;
-}
-
-static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
-{
-	struct device_node *np = dev->of_node;
-	struct device_node *nand_np;
-	int ret;
-
-	for_each_child_of_node(np, nand_np) {
-		ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
-		if (ret) {
-			of_node_put(nand_np);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static int mtk_nfc_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct device_node *np = dev->of_node;
-	struct mtk_nfc *nfc;
-	struct resource *res;
-	int ret, irq;
-
-	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
-	if (!nfc)
-		return -ENOMEM;
-
-	spin_lock_init(&nfc->controller.lock);
-	init_waitqueue_head(&nfc->controller.wq);
-	INIT_LIST_HEAD(&nfc->chips);
-
-	/* probe defer if not ready */
-	nfc->ecc = of_mtk_ecc_get(np);
-	if (IS_ERR(nfc->ecc))
-		return PTR_ERR(nfc->ecc);
-	else if (!nfc->ecc)
-		return -ENODEV;
-
-	nfc->dev = dev;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nfc->regs = devm_ioremap_resource(dev, res);
-	if (IS_ERR(nfc->regs)) {
-		ret = PTR_ERR(nfc->regs);
-		dev_err(dev, "no nfi base\n");
-		goto release_ecc;
-	}
-
-	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
-	if (IS_ERR(nfc->clk.nfi_clk)) {
-		dev_err(dev, "no clk\n");
-		ret = PTR_ERR(nfc->clk.nfi_clk);
-		goto release_ecc;
-	}
-
-	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
-	if (IS_ERR(nfc->clk.pad_clk)) {
-		dev_err(dev, "no pad clk\n");
-		ret = PTR_ERR(nfc->clk.pad_clk);
-		goto release_ecc;
-	}
-
-	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
-	if (ret)
-		goto release_ecc;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "no nfi irq resource\n");
-		ret = -EINVAL;
-		goto clk_disable;
-	}
-
-	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
-	if (ret) {
-		dev_err(dev, "failed to request nfi irq\n");
-		goto clk_disable;
-	}
-
-	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-	if (ret) {
-		dev_err(dev, "failed to set dma mask\n");
-		goto clk_disable;
-	}
-
-	platform_set_drvdata(pdev, nfc);
-
-	ret = mtk_nfc_nand_chips_init(dev, nfc);
-	if (ret) {
-		dev_err(dev, "failed to init nand chips\n");
-		goto clk_disable;
-	}
-
-	return 0;
-
-clk_disable:
-	mtk_nfc_disable_clk(&nfc->clk);
-
-release_ecc:
-	mtk_ecc_release(nfc->ecc);
-
-	return ret;
-}
-
-static int mtk_nfc_remove(struct platform_device *pdev)
-{
-	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
-	struct mtk_nfc_nand_chip *chip;
-
-	while (!list_empty(&nfc->chips)) {
-		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
-					node);
-		nand_release(nand_to_mtd(&chip->nand));
-		list_del(&chip->node);
-	}
-
-	mtk_ecc_release(nfc->ecc);
-	mtk_nfc_disable_clk(&nfc->clk);
-
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int mtk_nfc_suspend(struct device *dev)
-{
-	struct mtk_nfc *nfc = dev_get_drvdata(dev);
-
-	mtk_nfc_disable_clk(&nfc->clk);
-
-	return 0;
-}
-
-static int mtk_nfc_resume(struct device *dev)
-{
-	struct mtk_nfc *nfc = dev_get_drvdata(dev);
-	struct mtk_nfc_nand_chip *chip;
-	struct nand_chip *nand;
-	struct mtd_info *mtd;
-	int ret;
-	u32 i;
-
-	udelay(200);
-
-	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
-	if (ret)
-		return ret;
-
-	mtk_nfc_hw_init(nfc);
-
-	/* reset NAND chip if VCC was powered off */
-	list_for_each_entry(chip, &nfc->chips, node) {
-		nand = &chip->nand;
-		mtd = nand_to_mtd(nand);
-		for (i = 0; i < chip->nsels; i++) {
-			nand->select_chip(mtd, i);
-			nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-		}
-	}
-
-	return 0;
-}
-
-static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
-#endif
-
-static const struct of_device_id mtk_nfc_id_table[] = {
-	{ .compatible = "mediatek,mt2701-nfc" },
-	{}
-};
-MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
-
-static struct platform_driver mtk_nfc_driver = {
-	.probe  = mtk_nfc_probe,
-	.remove = mtk_nfc_remove,
-	.driver = {
-		.name  = MTK_NAME,
-		.of_match_table = mtk_nfc_id_table,
-#ifdef CONFIG_PM_SLEEP
-		.pm = &mtk_nfc_pm_ops,
-#endif
-	},
-};
-
-module_platform_driver(mtk_nfc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
-MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
deleted file mode 100644
index 379e11be6e0b..000000000000
--- a/drivers/mtd/nand/mxc_nand.c
+++ /dev/null
@@ -1,1857 +0,0 @@ 
-/*
- * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
- * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
- * MA 02110-1301, USA.
- */
-
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/interrupt.h>
-#include <linux/device.h>
-#include <linux/platform_device.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/completion.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <asm/mach/flash.h>
-#include <linux/platform_data/mtd-mxc_nand.h>
-
-#define DRIVER_NAME "mxc_nand"
-
-/* Addresses for NFC registers */
-#define NFC_V1_V2_BUF_SIZE		(host->regs + 0x00)
-#define NFC_V1_V2_BUF_ADDR		(host->regs + 0x04)
-#define NFC_V1_V2_FLASH_ADDR		(host->regs + 0x06)
-#define NFC_V1_V2_FLASH_CMD		(host->regs + 0x08)
-#define NFC_V1_V2_CONFIG		(host->regs + 0x0a)
-#define NFC_V1_V2_ECC_STATUS_RESULT	(host->regs + 0x0c)
-#define NFC_V1_V2_RSLTMAIN_AREA		(host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA	(host->regs + 0x10)
-#define NFC_V1_V2_WRPROT		(host->regs + 0x12)
-#define NFC_V1_UNLOCKSTART_BLKADDR	(host->regs + 0x14)
-#define NFC_V1_UNLOCKEND_BLKADDR	(host->regs + 0x16)
-#define NFC_V21_UNLOCKSTART_BLKADDR0	(host->regs + 0x20)
-#define NFC_V21_UNLOCKSTART_BLKADDR1	(host->regs + 0x24)
-#define NFC_V21_UNLOCKSTART_BLKADDR2	(host->regs + 0x28)
-#define NFC_V21_UNLOCKSTART_BLKADDR3	(host->regs + 0x2c)
-#define NFC_V21_UNLOCKEND_BLKADDR0	(host->regs + 0x22)
-#define NFC_V21_UNLOCKEND_BLKADDR1	(host->regs + 0x26)
-#define NFC_V21_UNLOCKEND_BLKADDR2	(host->regs + 0x2a)
-#define NFC_V21_UNLOCKEND_BLKADDR3	(host->regs + 0x2e)
-#define NFC_V1_V2_NF_WRPRST		(host->regs + 0x18)
-#define NFC_V1_V2_CONFIG1		(host->regs + 0x1a)
-#define NFC_V1_V2_CONFIG2		(host->regs + 0x1c)
-
-#define NFC_V2_CONFIG1_ECC_MODE_4	(1 << 0)
-#define NFC_V1_V2_CONFIG1_SP_EN		(1 << 2)
-#define NFC_V1_V2_CONFIG1_ECC_EN	(1 << 3)
-#define NFC_V1_V2_CONFIG1_INT_MSK	(1 << 4)
-#define NFC_V1_V2_CONFIG1_BIG		(1 << 5)
-#define NFC_V1_V2_CONFIG1_RST		(1 << 6)
-#define NFC_V1_V2_CONFIG1_CE		(1 << 7)
-#define NFC_V2_CONFIG1_ONE_CYCLE	(1 << 8)
-#define NFC_V2_CONFIG1_PPB(x)		(((x) & 0x3) << 9)
-#define NFC_V2_CONFIG1_FP_INT		(1 << 11)
-
-#define NFC_V1_V2_CONFIG2_INT		(1 << 15)
-
-/*
- * Operation modes for the NFC. Valid for v1, v2 and v3
- * type controllers.
- */
-#define NFC_CMD				(1 << 0)
-#define NFC_ADDR			(1 << 1)
-#define NFC_INPUT			(1 << 2)
-#define NFC_OUTPUT			(1 << 3)
-#define NFC_ID				(1 << 4)
-#define NFC_STATUS			(1 << 5)
-
-#define NFC_V3_FLASH_CMD		(host->regs_axi + 0x00)
-#define NFC_V3_FLASH_ADDR0		(host->regs_axi + 0x04)
-
-#define NFC_V3_CONFIG1			(host->regs_axi + 0x34)
-#define NFC_V3_CONFIG1_SP_EN		(1 << 0)
-#define NFC_V3_CONFIG1_RBA(x)		(((x) & 0x7 ) << 4)
-
-#define NFC_V3_ECC_STATUS_RESULT	(host->regs_axi + 0x38)
-
-#define NFC_V3_LAUNCH			(host->regs_axi + 0x40)
-
-#define NFC_V3_WRPROT			(host->regs_ip + 0x0)
-#define NFC_V3_WRPROT_LOCK_TIGHT	(1 << 0)
-#define NFC_V3_WRPROT_LOCK		(1 << 1)
-#define NFC_V3_WRPROT_UNLOCK		(1 << 2)
-#define NFC_V3_WRPROT_BLS_UNLOCK	(2 << 6)
-
-#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0   (host->regs_ip + 0x04)
-
-#define NFC_V3_CONFIG2			(host->regs_ip + 0x24)
-#define NFC_V3_CONFIG2_PS_512			(0 << 0)
-#define NFC_V3_CONFIG2_PS_2048			(1 << 0)
-#define NFC_V3_CONFIG2_PS_4096			(2 << 0)
-#define NFC_V3_CONFIG2_ONE_CYCLE		(1 << 2)
-#define NFC_V3_CONFIG2_ECC_EN			(1 << 3)
-#define NFC_V3_CONFIG2_2CMD_PHASES		(1 << 4)
-#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0		(1 << 5)
-#define NFC_V3_CONFIG2_ECC_MODE_8		(1 << 6)
-#define NFC_V3_CONFIG2_PPB(x, shift)		(((x) & 0x3) << shift)
-#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x)	(((x) & 0x3) << 12)
-#define NFC_V3_CONFIG2_INT_MSK			(1 << 15)
-#define NFC_V3_CONFIG2_ST_CMD(x)		(((x) & 0xff) << 24)
-#define NFC_V3_CONFIG2_SPAS(x)			(((x) & 0xff) << 16)
-
-#define NFC_V3_CONFIG3				(host->regs_ip + 0x28)
-#define NFC_V3_CONFIG3_ADD_OP(x)		(((x) & 0x3) << 0)
-#define NFC_V3_CONFIG3_FW8			(1 << 3)
-#define NFC_V3_CONFIG3_SBB(x)			(((x) & 0x7) << 8)
-#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x)	(((x) & 0x7) << 12)
-#define NFC_V3_CONFIG3_RBB_MODE			(1 << 15)
-#define NFC_V3_CONFIG3_NO_SDMA			(1 << 20)
-
-#define NFC_V3_IPC			(host->regs_ip + 0x2C)
-#define NFC_V3_IPC_CREQ			(1 << 0)
-#define NFC_V3_IPC_INT			(1 << 31)
-
-#define NFC_V3_DELAY_LINE		(host->regs_ip + 0x34)
-
-struct mxc_nand_host;
-
-struct mxc_nand_devtype_data {
-	void (*preset)(struct mtd_info *);
-	void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
-	void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
-	void (*send_page)(struct mtd_info *, unsigned int);
-	void (*send_read_id)(struct mxc_nand_host *);
-	uint16_t (*get_dev_status)(struct mxc_nand_host *);
-	int (*check_int)(struct mxc_nand_host *);
-	void (*irq_control)(struct mxc_nand_host *, int);
-	u32 (*get_ecc_status)(struct mxc_nand_host *);
-	const struct mtd_ooblayout_ops *ooblayout;
-	void (*select_chip)(struct mtd_info *mtd, int chip);
-	int (*correct_data)(struct mtd_info *mtd, u_char *dat,
-			u_char *read_ecc, u_char *calc_ecc);
-	int (*setup_data_interface)(struct mtd_info *mtd,
-				    const struct nand_data_interface *conf,
-				    bool check_only);
-
-	/*
-	 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
-	 * (CONFIG1:INT_MSK is set). To handle this the driver uses
-	 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
-	 */
-	int irqpending_quirk;
-	int needs_ip;
-
-	size_t regs_offset;
-	size_t spare0_offset;
-	size_t axi_offset;
-
-	int spare_len;
-	int eccbytes;
-	int eccsize;
-	int ppb_shift;
-};
-
-struct mxc_nand_host {
-	struct nand_chip	nand;
-	struct device		*dev;
-
-	void __iomem		*spare0;
-	void __iomem		*main_area0;
-
-	void __iomem		*base;
-	void __iomem		*regs;
-	void __iomem		*regs_axi;
-	void __iomem		*regs_ip;
-	int			status_request;
-	struct clk		*clk;
-	int			clk_act;
-	int			irq;
-	int			eccsize;
-	int			used_oobsize;
-	int			active_cs;
-
-	struct completion	op_completion;
-
-	uint8_t			*data_buf;
-	unsigned int		buf_start;
-
-	const struct mxc_nand_devtype_data *devtype_data;
-	struct mxc_nand_platform_data pdata;
-};
-
-static const char * const part_probes[] = {
-	"cmdlinepart", "RedBoot", "ofpart", NULL };
-
-static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
-{
-	int i;
-	u32 *t = trg;
-	const __iomem u32 *s = src;
-
-	for (i = 0; i < (size >> 2); i++)
-		*t++ = __raw_readl(s++);
-}
-
-static void memcpy16_fromio(void *trg, const void __iomem  *src, size_t size)
-{
-	int i;
-	u16 *t = trg;
-	const __iomem u16 *s = src;
-
-	/* We assume that src (IO) is always 32bit aligned */
-	if (PTR_ALIGN(trg, 4) == trg && IS_ALIGNED(size, 4)) {
-		memcpy32_fromio(trg, src, size);
-		return;
-	}
-
-	for (i = 0; i < (size >> 1); i++)
-		*t++ = __raw_readw(s++);
-}
-
-static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
-{
-	/* __iowrite32_copy uses 32bit size values, so divide by 4 */
-	__iowrite32_copy(trg, src, size / 4);
-}
-
-static void memcpy16_toio(void __iomem *trg, const void *src, int size)
-{
-	int i;
-	__iomem u16 *t = trg;
-	const u16 *s = src;
-
-	/* We assume that trg (IO) is always 32bit aligned */
-	if (PTR_ALIGN(src, 4) == src && IS_ALIGNED(size, 4)) {
-		memcpy32_toio(trg, src, size);
-		return;
-	}
-
-	for (i = 0; i < (size >> 1); i++)
-		__raw_writew(*s++, t++);
-}
-
-static int check_int_v3(struct mxc_nand_host *host)
-{
-	uint32_t tmp;
-
-	tmp = readl(NFC_V3_IPC);
-	if (!(tmp & NFC_V3_IPC_INT))
-		return 0;
-
-	tmp &= ~NFC_V3_IPC_INT;
-	writel(tmp, NFC_V3_IPC);
-
-	return 1;
-}
-
-static int check_int_v1_v2(struct mxc_nand_host *host)
-{
-	uint32_t tmp;
-
-	tmp = readw(NFC_V1_V2_CONFIG2);
-	if (!(tmp & NFC_V1_V2_CONFIG2_INT))
-		return 0;
-
-	if (!host->devtype_data->irqpending_quirk)
-		writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
-
-	return 1;
-}
-
-static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
-{
-	uint16_t tmp;
-
-	tmp = readw(NFC_V1_V2_CONFIG1);
-
-	if (activate)
-		tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
-	else
-		tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
-
-	writew(tmp, NFC_V1_V2_CONFIG1);
-}
-
-static void irq_control_v3(struct mxc_nand_host *host, int activate)
-{
-	uint32_t tmp;
-
-	tmp = readl(NFC_V3_CONFIG2);
-
-	if (activate)
-		tmp &= ~NFC_V3_CONFIG2_INT_MSK;
-	else
-		tmp |= NFC_V3_CONFIG2_INT_MSK;
-
-	writel(tmp, NFC_V3_CONFIG2);
-}
-
-static void irq_control(struct mxc_nand_host *host, int activate)
-{
-	if (host->devtype_data->irqpending_quirk) {
-		if (activate)
-			enable_irq(host->irq);
-		else
-			disable_irq_nosync(host->irq);
-	} else {
-		host->devtype_data->irq_control(host, activate);
-	}
-}
-
-static u32 get_ecc_status_v1(struct mxc_nand_host *host)
-{
-	return readw(NFC_V1_V2_ECC_STATUS_RESULT);
-}
-
-static u32 get_ecc_status_v2(struct mxc_nand_host *host)
-{
-	return readl(NFC_V1_V2_ECC_STATUS_RESULT);
-}
-
-static u32 get_ecc_status_v3(struct mxc_nand_host *host)
-{
-	return readl(NFC_V3_ECC_STATUS_RESULT);
-}
-
-static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
-{
-	struct mxc_nand_host *host = dev_id;
-
-	if (!host->devtype_data->check_int(host))
-		return IRQ_NONE;
-
-	irq_control(host, 0);
-
-	complete(&host->op_completion);
-
-	return IRQ_HANDLED;
-}
-
-/* This function polls the NANDFC to wait for the basic operation to
- * complete by checking the INT bit of config2 register.
- */
-static int wait_op_done(struct mxc_nand_host *host, int useirq)
-{
-	int ret = 0;
-
-	/*
-	 * If operation is already complete, don't bother to setup an irq or a
-	 * loop.
-	 */
-	if (host->devtype_data->check_int(host))
-		return 0;
-
-	if (useirq) {
-		unsigned long timeout;
-
-		reinit_completion(&host->op_completion);
-
-		irq_control(host, 1);
-
-		timeout = wait_for_completion_timeout(&host->op_completion, HZ);
-		if (!timeout && !host->devtype_data->check_int(host)) {
-			dev_dbg(host->dev, "timeout waiting for irq\n");
-			ret = -ETIMEDOUT;
-		}
-	} else {
-		int max_retries = 8000;
-		int done;
-
-		do {
-			udelay(1);
-
-			done = host->devtype_data->check_int(host);
-			if (done)
-				break;
-
-		} while (--max_retries);
-
-		if (!done) {
-			dev_dbg(host->dev, "timeout polling for completion\n");
-			ret = -ETIMEDOUT;
-		}
-	}
-
-	WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
-
-	return ret;
-}
-
-static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
-{
-	/* fill command */
-	writel(cmd, NFC_V3_FLASH_CMD);
-
-	/* send out command */
-	writel(NFC_CMD, NFC_V3_LAUNCH);
-
-	/* Wait for operation to complete */
-	wait_op_done(host, useirq);
-}
-
-/* This function issues the specified command to the NAND device and
- * waits for completion. */
-static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
-{
-	pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
-
-	writew(cmd, NFC_V1_V2_FLASH_CMD);
-	writew(NFC_CMD, NFC_V1_V2_CONFIG2);
-
-	if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
-		int max_retries = 100;
-		/* Reset completion is indicated by NFC_CONFIG2
-		 * being set to 0 */
-		while (max_retries-- > 0) {
-			if (readw(NFC_V1_V2_CONFIG2) == 0) {
-				break;
-			}
-			udelay(1);
-		}
-		if (max_retries < 0)
-			pr_debug("%s: RESET failed\n", __func__);
-	} else {
-		/* Wait for operation to complete */
-		wait_op_done(host, useirq);
-	}
-}
-
-static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
-{
-	/* fill address */
-	writel(addr, NFC_V3_FLASH_ADDR0);
-
-	/* send out address */
-	writel(NFC_ADDR, NFC_V3_LAUNCH);
-
-	wait_op_done(host, 0);
-}
-
-/* This function sends an address (or partial address) to the
- * NAND device. The address is used to select the source/destination for
- * a NAND command. */
-static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
-{
-	pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
-
-	writew(addr, NFC_V1_V2_FLASH_ADDR);
-	writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
-
-	/* Wait for operation to complete */
-	wait_op_done(host, islast);
-}
-
-static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	uint32_t tmp;
-
-	tmp = readl(NFC_V3_CONFIG1);
-	tmp &= ~(7 << 4);
-	writel(tmp, NFC_V3_CONFIG1);
-
-	/* transfer data from NFC ram to nand */
-	writel(ops, NFC_V3_LAUNCH);
-
-	wait_op_done(host, false);
-}
-
-static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
-	/* NANDFC buffer 0 is used for page read/write */
-	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
-
-	writew(ops, NFC_V1_V2_CONFIG2);
-
-	/* Wait for operation to complete */
-	wait_op_done(host, true);
-}
-
-static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	int bufs, i;
-
-	if (mtd->writesize > 512)
-		bufs = 4;
-	else
-		bufs = 1;
-
-	for (i = 0; i < bufs; i++) {
-
-		/* NANDFC buffer 0 is used for page read/write */
-		writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
-
-		writew(ops, NFC_V1_V2_CONFIG2);
-
-		/* Wait for operation to complete */
-		wait_op_done(host, true);
-	}
-}
-
-static void send_read_id_v3(struct mxc_nand_host *host)
-{
-	/* Read ID into main buffer */
-	writel(NFC_ID, NFC_V3_LAUNCH);
-
-	wait_op_done(host, true);
-
-	memcpy32_fromio(host->data_buf, host->main_area0, 16);
-}
-
-/* Request the NANDFC to perform a read of the NAND device ID. */
-static void send_read_id_v1_v2(struct mxc_nand_host *host)
-{
-	/* NANDFC buffer 0 is used for device ID output */
-	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
-
-	writew(NFC_ID, NFC_V1_V2_CONFIG2);
-
-	/* Wait for operation to complete */
-	wait_op_done(host, true);
-
-	memcpy32_fromio(host->data_buf, host->main_area0, 16);
-}
-
-static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
-{
-	writew(NFC_STATUS, NFC_V3_LAUNCH);
-	wait_op_done(host, true);
-
-	return readl(NFC_V3_CONFIG1) >> 16;
-}
-
-/* This function requests the NANDFC to perform a read of the
- * NAND device status and returns the current status. */
-static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
-{
-	void __iomem *main_buf = host->main_area0;
-	uint32_t store;
-	uint16_t ret;
-
-	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
-
-	/*
-	 * The device status is stored in main_area0. To
-	 * prevent corruption of the buffer save the value
-	 * and restore it afterwards.
-	 */
-	store = readl(main_buf);
-
-	writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
-	wait_op_done(host, true);
-
-	ret = readw(main_buf);
-
-	writel(store, main_buf);
-
-	return ret;
-}
-
-/* This function is used by the upper layer to check if the device is ready */
-static int mxc_nand_dev_ready(struct mtd_info *mtd)
-{
-	/*
-	 * NFC handles R/B internally. Therefore, this function
-	 * always returns status as ready.
-	 */
-	return 1;
-}
-
-static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	/*
-	 * If HW ECC is enabled, we turn it on during init. There is
-	 * no need to enable again here.
-	 */
-}
-
-static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
-				 u_char *read_ecc, u_char *calc_ecc)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
-	/*
-	 * 1-Bit errors are automatically corrected in HW.  No need for
-	 * additional correction.  2-Bit errors cannot be corrected by
-	 * HW ECC, so we need to return failure
-	 */
-	uint16_t ecc_status = get_ecc_status_v1(host);
-
-	if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
-		pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
-		return -EBADMSG;
-	}
-
-	return 0;
-}
-
-static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
-				 u_char *read_ecc, u_char *calc_ecc)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	u32 ecc_stat, err;
-	int no_subpages = 1;
-	int ret = 0;
-	u8 ecc_bit_mask, err_limit;
-
-	ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
-	err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
-
-	no_subpages = mtd->writesize >> 9;
-
-	ecc_stat = host->devtype_data->get_ecc_status(host);
-
-	do {
-		err = ecc_stat & ecc_bit_mask;
-		if (err > err_limit) {
-			printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
-			return -EBADMSG;
-		} else {
-			ret += err;
-		}
-		ecc_stat >>= 4;
-	} while (--no_subpages);
-
-	pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
-
-	return ret;
-}
-
-static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-				  u_char *ecc_code)
-{
-	return 0;
-}
-
-static u_char mxc_nand_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	uint8_t ret;
-
-	/* Check for status request */
-	if (host->status_request)
-		return host->devtype_data->get_dev_status(host) & 0xFF;
-
-	if (nand_chip->options & NAND_BUSWIDTH_16) {
-		/* only take the lower byte of each word */
-		ret = *(uint16_t *)(host->data_buf + host->buf_start);
-
-		host->buf_start += 2;
-	} else {
-		ret = *(uint8_t *)(host->data_buf + host->buf_start);
-		host->buf_start++;
-	}
-
-	pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
-	return ret;
-}
-
-static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	uint16_t ret;
-
-	ret = *(uint16_t *)(host->data_buf + host->buf_start);
-	host->buf_start += 2;
-
-	return ret;
-}
-
-/* Write data of length len from buffer buf. The data to be
- * written to the NAND Flash is first copied to the RAM buffer. After the
- * Data Input Operation by the NFC, the data is written to the NAND Flash */
-static void mxc_nand_write_buf(struct mtd_info *mtd,
-				const u_char *buf, int len)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	u16 col = host->buf_start;
-	int n = mtd->oobsize + mtd->writesize - col;
-
-	n = min(n, len);
-
-	memcpy(host->data_buf + col, buf, n);
-
-	host->buf_start += n;
-}
-
-/* Read the data buffer from the NAND Flash. To read data from the NAND
- * Flash, the data output cycle is first initiated by the NFC, which copies
- * the data to the RAM buffer. This data of length len is then copied to
- * buffer buf.
- */
-static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	u16 col = host->buf_start;
-	int n = mtd->oobsize + mtd->writesize - col;
-
-	n = min(n, len);
-
-	memcpy(buf, host->data_buf + col, n);
-
-	host->buf_start += n;
-}
-
-/* This function is used by the upper layer to select and
- * deselect the NAND chip */
-static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (chip == -1) {
-		/* Disable the NFC clock */
-		if (host->clk_act) {
-			clk_disable_unprepare(host->clk);
-			host->clk_act = 0;
-		}
-		return;
-	}
-
-	if (!host->clk_act) {
-		/* Enable the NFC clock */
-		clk_prepare_enable(host->clk);
-		host->clk_act = 1;
-	}
-}
-
-static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (chip == -1) {
-		/* Disable the NFC clock */
-		if (host->clk_act) {
-			clk_disable_unprepare(host->clk);
-			host->clk_act = 0;
-		}
-		return;
-	}
-
-	if (!host->clk_act) {
-		/* Enable the NFC clock */
-		clk_prepare_enable(host->clk);
-		host->clk_act = 1;
-	}
-
-	host->active_cs = chip;
-	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
-}
-
-/*
- * The controller splits a page into data chunks of 512 bytes + partial oob.
- * There are writesize / 512 such chunks; the size of the partial oob parts
- * is oobsize / #chunks rounded down to a multiple of 2. The last oob chunk
- * then additionally contains the bytes lost by rounding (if any).
- * This function handles the needed shuffling between host->data_buf (which
- * holds a page in natural order, i.e. writesize bytes of data + oobsize
- * bytes of spare) and the NFC buffer.
- */
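-/*
- * Worked example, assuming a hypothetical 4096-byte page with 218 usable
- * oob bytes: num_chunks = 8, oob_chunk_size = (218 / 8) & ~1 = 26, so the
- * first seven chunks copy 26 oob bytes each and the last chunk copies the
- * remaining 218 - 7 * 26 = 36 bytes.
- */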
-static void copy_spare(struct mtd_info *mtd, bool bfrom)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(this);
-	u16 i, oob_chunk_size;
-	u16 num_chunks = mtd->writesize / 512;
-
-	u8 *d = host->data_buf + mtd->writesize;
-	u8 __iomem *s = host->spare0;
-	u16 sparebuf_size = host->devtype_data->spare_len;
-
-	/* size of oob chunk for all but possibly the last one */
-	oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;
-
-	if (bfrom) {
-		for (i = 0; i < num_chunks - 1; i++)
-			memcpy16_fromio(d + i * oob_chunk_size,
-					s + i * sparebuf_size,
-					oob_chunk_size);
-
-		/* the last chunk */
-		memcpy16_fromio(d + i * oob_chunk_size,
-				s + i * sparebuf_size,
-				host->used_oobsize - i * oob_chunk_size);
-	} else {
-		for (i = 0; i < num_chunks - 1; i++)
-			memcpy16_toio(&s[i * sparebuf_size],
-				      &d[i * oob_chunk_size],
-				      oob_chunk_size);
-
-		/* the last chunk */
-		memcpy16_toio(&s[i * sparebuf_size],
-			      &d[i * oob_chunk_size],
-			      host->used_oobsize - i * oob_chunk_size);
-	}
-}
-
-/*
- * The MXC NANDFC can only perform full page+spare or spare-only read/write.
- * When the upper layers perform a read/write buf operation, the saved column
- * address is used to index into the full page. So usually this function is
- * called with column == 0 (unless no column cycle is needed, which is
- * indicated by column == -1).
- */
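-/*
- * For instance, on a hypothetical 2048-byte page chip a full-page read with
- * column == 0 issues two column cycles (0x00, 0x00) followed by two row
- * cycles, or three row cycles for chips of 256 MiB and larger.
- */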
-static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
-	/* Write out column address, if necessary */
-	if (column != -1) {
-		host->devtype_data->send_addr(host, column & 0xff,
-					      page_addr == -1);
-		if (mtd->writesize > 512)
-			/* another col addr cycle for 2k page */
-			host->devtype_data->send_addr(host,
-						      (column >> 8) & 0xff,
-						      false);
-	}
-
-	/* Write out page address, if necessary */
-	if (page_addr != -1) {
-		/* paddr_0 - p_addr_7 */
-		host->devtype_data->send_addr(host, (page_addr & 0xff), false);
-
-		if (mtd->writesize > 512) {
-			if (mtd->size >= 0x10000000) {
-				/* paddr_8 - paddr_15 */
-				host->devtype_data->send_addr(host,
-						(page_addr >> 8) & 0xff,
-						false);
-				host->devtype_data->send_addr(host,
-						(page_addr >> 16) & 0xff,
-						true);
-			} else
-				/* paddr_8 - paddr_15 */
-				host->devtype_data->send_addr(host,
-						(page_addr >> 8) & 0xff, true);
-		} else {
-			/* One more address cycle for higher density devices */
-			if (mtd->size >= 0x4000000) {
-				/* paddr_8 - paddr_15 */
-				host->devtype_data->send_addr(host,
-						(page_addr >> 8) & 0xff,
-						false);
-				host->devtype_data->send_addr(host,
-						(page_addr >> 16) & 0xff,
-						true);
-			} else
-				/* paddr_8 - paddr_15 */
-				host->devtype_data->send_addr(host,
-						(page_addr >> 8) & 0xff, true);
-		}
-	}
-}
-
-static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
-				struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-
-	if (section >= nand_chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (section * 16) + 6;
-	oobregion->length = nand_chip->ecc.bytes;
-
-	return 0;
-}
-
-static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-
-	if (section > nand_chip->ecc.steps)
-		return -ERANGE;
-
-	if (!section) {
-		if (mtd->writesize <= 512) {
-			oobregion->offset = 0;
-			oobregion->length = 5;
-		} else {
-			oobregion->offset = 2;
-			oobregion->length = 4;
-		}
-	} else {
-		oobregion->offset = ((section - 1) * 16) +
-				    nand_chip->ecc.bytes + 6;
-		if (section < nand_chip->ecc.steps)
-			oobregion->length = (section * 16) + 6 -
-					    oobregion->offset;
-		else
-			oobregion->length = mtd->oobsize - oobregion->offset;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
-	.ecc = mxc_v1_ooblayout_ecc,
-	.free = mxc_v1_ooblayout_free,
-};
-
-static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
-				struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
-
-	if (section >= nand_chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (section * stepsize) + 7;
-	oobregion->length = nand_chip->ecc.bytes;
-
-	return 0;
-}
-
-static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
-
-	if (section > nand_chip->ecc.steps)
-		return -ERANGE;
-
-	if (!section) {
-		if (mtd->writesize <= 512) {
-			oobregion->offset = 0;
-			oobregion->length = 5;
-		} else {
-			oobregion->offset = 2;
-			oobregion->length = 4;
-		}
-	} else {
-		oobregion->offset = section * stepsize;
-		oobregion->length = 7;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
-	.ecc = mxc_v2_ooblayout_ecc,
-	.free = mxc_v2_ooblayout_free,
-};
-
-/*
- * v2 and v3 type controllers can do 4bit or 8bit ecc depending
- * on how much oob the nand chip has. For 8bit ecc we need at least
- * 26 bytes of oob data per 512 byte block.
- */
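-/*
- * For example, a hypothetical 2048+64 chip has 64 * 512 / 2048 = 16 oob
- * bytes per 512-byte step and thus gets 4-bit ecc, while a 4096+224 chip
- * has 224 * 512 / 4096 = 28 >= 26 and thus gets 8-bit ecc.
- */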
-static int get_eccsize(struct mtd_info *mtd)
-{
-	int oobbytes_per_512 = 0;
-
-	oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
-
-	if (oobbytes_per_512 < 26)
-		return 4;
-	else
-		return 8;
-}
-
-static void preset_v1(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	uint16_t config1 = 0;
-
-	if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
-		config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
-
-	if (!host->devtype_data->irqpending_quirk)
-		config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
-
-	host->eccsize = 1;
-
-	writew(config1, NFC_V1_V2_CONFIG1);
-	/* preset operation */
-
-	/* Unlock the internal RAM Buffer */
-	writew(0x2, NFC_V1_V2_CONFIG);
-
-	/* Blocks to be unlocked */
-	writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
-	writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
-
-	/* Unlock Block Command for given address range */
-	writew(0x4, NFC_V1_V2_WRPROT);
-}
-
-static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd,
-					const struct nand_data_interface *conf,
-					bool check_only)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	int tRC_min_ns, tRC_ps, ret;
-	unsigned long rate, rate_round;
-	const struct nand_sdr_timings *timings;
-	u16 config1;
-
-	timings = nand_get_sdr_timings(conf);
-	if (IS_ERR(timings))
-		return -ENOTSUPP;
-
-	config1 = readw(NFC_V1_V2_CONFIG1);
-
-	tRC_min_ns = timings->tRC_min / 1000;
-	rate = 1000000000 / tRC_min_ns;
-
-	/*
-	 * For tRC < 30ns we have to use EDO mode. In this case the controller
-	 * does one access per clock cycle. Otherwise the controller does one
-	 * access in two clock cycles, thus we have to double the rate to the
-	 * controller.
-	 */
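-	/*
-	 * For example, assuming ONFI SDR timing mode 0 (tRC_min = 100 ns):
-	 * rate = 10 MHz, which is doubled to 20 MHz before asking the clock
-	 * framework; for an EDO-capable mode with tRC_min = 25 ns the clock
-	 * is requested at 40 MHz directly and the ONE_CYCLE bit is set.
-	 */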
-	if (tRC_min_ns < 30) {
-		rate_round = clk_round_rate(host->clk, rate);
-		config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
-		tRC_ps = 1000000000 / (rate_round / 1000);
-	} else {
-		rate *= 2;
-		rate_round = clk_round_rate(host->clk, rate);
-		config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
-		tRC_ps = 1000000000 / (rate_round / 1000 / 2);
-	}
-
-	/*
-	 * The timing values compared against are from the i.MX25 Automotive
-	 * datasheet, Table 50. NFC Timing Parameters
-	 */
-	if (timings->tCLS_min > tRC_ps - 1000 ||
-	    timings->tCLH_min > tRC_ps - 2000 ||
-	    timings->tCS_min > tRC_ps - 1000 ||
-	    timings->tCH_min > tRC_ps - 2000 ||
-	    timings->tWP_min > tRC_ps - 1500 ||
-	    timings->tALS_min > tRC_ps ||
-	    timings->tALH_min > tRC_ps - 3000 ||
-	    timings->tDS_min > tRC_ps ||
-	    timings->tDH_min > tRC_ps - 5000 ||
-	    timings->tWC_min > 2 * tRC_ps ||
-	    timings->tWH_min > tRC_ps - 2500 ||
-	    timings->tRR_min > 6 * tRC_ps ||
-	    timings->tRP_min > 3 * tRC_ps / 2 ||
-	    timings->tRC_min > 2 * tRC_ps ||
-	    timings->tREH_min > (tRC_ps / 2) - 2500) {
-		dev_dbg(host->dev, "Timing out of bounds\n");
-		return -EINVAL;
-	}
-
-	if (check_only)
-		return 0;
-
-	ret = clk_set_rate(host->clk, rate);
-	if (ret)
-		return ret;
-
-	writew(config1, NFC_V1_V2_CONFIG1);
-
-	dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
-		config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
-		"normal");
-
-	return 0;
-}
-
-static void preset_v2(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	uint16_t config1 = 0;
-
-	config1 |= NFC_V2_CONFIG1_FP_INT;
-
-	if (!host->devtype_data->irqpending_quirk)
-		config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
-
-	if (mtd->writesize) {
-		uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
-
-		if (nand_chip->ecc.mode == NAND_ECC_HW)
-			config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
-
-		host->eccsize = get_eccsize(mtd);
-		if (host->eccsize == 4)
-			config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
-
-		config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
-	} else {
-		host->eccsize = 1;
-	}
-
-	writew(config1, NFC_V1_V2_CONFIG1);
-	/* preset operation */
-
-	/* Unlock the internal RAM Buffer */
-	writew(0x2, NFC_V1_V2_CONFIG);
-
-	/* Blocks to be unlocked */
-	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
-	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
-	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
-	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
-	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
-	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
-	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
-	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
-
-	/* Unlock Block Command for given address range */
-	writew(0x4, NFC_V1_V2_WRPROT);
-}
-
-static void preset_v3(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(chip);
-	uint32_t config2, config3;
-	int i, addr_phases;
-
-	writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
-	writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
-
-	/* Unlock the internal RAM Buffer */
-	writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
-			NFC_V3_WRPROT);
-
-	/* Blocks to be unlocked */
-	for (i = 0; i < NAND_MAX_CHIPS; i++)
-		writel(0xffff << 16, NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
-
-	writel(0, NFC_V3_IPC);
-
-	config2 = NFC_V3_CONFIG2_ONE_CYCLE |
-		NFC_V3_CONFIG2_2CMD_PHASES |
-		NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
-		NFC_V3_CONFIG2_ST_CMD(0x70) |
-		NFC_V3_CONFIG2_INT_MSK |
-		NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
-
-	addr_phases = fls(chip->pagemask) >> 3;
-
-	if (mtd->writesize == 2048) {
-		config2 |= NFC_V3_CONFIG2_PS_2048;
-		config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
-	} else if (mtd->writesize == 4096) {
-		config2 |= NFC_V3_CONFIG2_PS_4096;
-		config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
-	} else {
-		config2 |= NFC_V3_CONFIG2_PS_512;
-		config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
-	}
-
-	if (mtd->writesize) {
-		if (chip->ecc.mode == NAND_ECC_HW)
-			config2 |= NFC_V3_CONFIG2_ECC_EN;
-
-		config2 |= NFC_V3_CONFIG2_PPB(
-				ffs(mtd->erasesize / mtd->writesize) - 6,
-				host->devtype_data->ppb_shift);
-		host->eccsize = get_eccsize(mtd);
-		if (host->eccsize == 8)
-			config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
-	}
-
-	writel(config2, NFC_V3_CONFIG2);
-
-	config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
-			NFC_V3_CONFIG3_NO_SDMA |
-			NFC_V3_CONFIG3_RBB_MODE |
-			NFC_V3_CONFIG3_SBB(6) | /* Reset default */
-			NFC_V3_CONFIG3_ADD_OP(0);
-
-	if (!(chip->options & NAND_BUSWIDTH_16))
-		config3 |= NFC_V3_CONFIG3_FW8;
-
-	writel(config3, NFC_V3_CONFIG3);
-
-	writel(0, NFC_V3_DELAY_LINE);
-}
-
-/* Used by the upper layer to write commands to the NAND Flash for
- * the different operations to be carried out on it */
-static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
-				int column, int page_addr)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-
-	pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
-	      command, column, page_addr);
-
-	/* Reset command state information */
-	host->status_request = false;
-
-	/* Command pre-processing step */
-	switch (command) {
-	case NAND_CMD_RESET:
-		host->devtype_data->preset(mtd);
-		host->devtype_data->send_cmd(host, command, false);
-		break;
-
-	case NAND_CMD_STATUS:
-		host->buf_start = 0;
-		host->status_request = true;
-
-		host->devtype_data->send_cmd(host, command, true);
-		WARN_ONCE(column != -1 || page_addr != -1,
-			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
-			  command, column, page_addr);
-		mxc_do_addr_cycle(mtd, column, page_addr);
-		break;
-
-	case NAND_CMD_READ0:
-	case NAND_CMD_READOOB:
-		if (command == NAND_CMD_READ0)
-			host->buf_start = column;
-		else
-			host->buf_start = column + mtd->writesize;
-
-		command = NAND_CMD_READ0; /* only READ0 is valid */
-
-		host->devtype_data->send_cmd(host, command, false);
-		WARN_ONCE(column < 0,
-			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
-			  command, column, page_addr);
-		mxc_do_addr_cycle(mtd, 0, page_addr);
-
-		if (mtd->writesize > 512)
-			host->devtype_data->send_cmd(host,
-					NAND_CMD_READSTART, true);
-
-		host->devtype_data->send_page(mtd, NFC_OUTPUT);
-
-		memcpy32_fromio(host->data_buf, host->main_area0,
-				mtd->writesize);
-		copy_spare(mtd, true);
-		break;
-
-	case NAND_CMD_SEQIN:
-		if (column >= mtd->writesize)
-			/* call ourselves to read a page */
-			mxc_nand_command(mtd, NAND_CMD_READ0, 0, page_addr);
-
-		host->buf_start = column;
-
-		host->devtype_data->send_cmd(host, command, false);
-		WARN_ONCE(column < -1,
-			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
-			  command, column, page_addr);
-		mxc_do_addr_cycle(mtd, 0, page_addr);
-		break;
-
-	case NAND_CMD_PAGEPROG:
-		memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
-		copy_spare(mtd, false);
-		host->devtype_data->send_page(mtd, NFC_INPUT);
-		host->devtype_data->send_cmd(host, command, true);
-		WARN_ONCE(column != -1 || page_addr != -1,
-			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
-			  command, column, page_addr);
-		mxc_do_addr_cycle(mtd, column, page_addr);
-		break;
-
-	case NAND_CMD_READID:
-		host->devtype_data->send_cmd(host, command, true);
-		mxc_do_addr_cycle(mtd, column, page_addr);
-		host->devtype_data->send_read_id(host);
-		host->buf_start = 0;
-		break;
-
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-		host->devtype_data->send_cmd(host, command, false);
-		WARN_ONCE(column != -1,
-			  "Unexpected column value (cmd=%u, col=%d)\n",
-			  command, column);
-		mxc_do_addr_cycle(mtd, column, page_addr);
-
-		break;
-	case NAND_CMD_PARAM:
-		host->devtype_data->send_cmd(host, command, false);
-		mxc_do_addr_cycle(mtd, column, page_addr);
-		host->devtype_data->send_page(mtd, NFC_OUTPUT);
-		memcpy32_fromio(host->data_buf, host->main_area0, 512);
-		host->buf_start = 0;
-		break;
-	default:
-		WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
-			  command);
-		break;
-	}
-}
-
-static int mxc_nand_onfi_set_features(struct mtd_info *mtd,
-				      struct nand_chip *chip, int addr,
-				      u8 *subfeature_param)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	int i;
-
-	if (!chip->onfi_version ||
-	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
-	      & ONFI_OPT_CMD_SET_GET_FEATURES))
-		return -EINVAL;
-
-	host->buf_start = 0;
-
-	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
-		chip->write_byte(mtd, subfeature_param[i]);
-
-	memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
-	host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
-	mxc_do_addr_cycle(mtd, addr, -1);
-	host->devtype_data->send_page(mtd, NFC_INPUT);
-
-	return 0;
-}
-
-static int mxc_nand_onfi_get_features(struct mtd_info *mtd,
-				      struct nand_chip *chip, int addr,
-				      u8 *subfeature_param)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
-	int i;
-
-	if (!chip->onfi_version ||
-	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
-	      & ONFI_OPT_CMD_SET_GET_FEATURES))
-		return -EINVAL;
-
-	host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
-	mxc_do_addr_cycle(mtd, addr, -1);
-	host->devtype_data->send_page(mtd, NFC_OUTPUT);
-	memcpy32_fromio(host->data_buf, host->main_area0, 512);
-	host->buf_start = 0;
-
-	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
-		*subfeature_param++ = chip->read_byte(mtd);
-
-	return 0;
-}
-
-/*
- * The generic flash bbt descriptors overlap with our ecc
- * hardware, so define some i.MX specific ones.
- */
-static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-	    | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs = 0,
-	.len = 4,
-	.veroffs = 4,
-	.maxblocks = 4,
-	.pattern = bbt_pattern,
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-	    | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs = 0,
-	.len = 4,
-	.veroffs = 4,
-	.maxblocks = 4,
-	.pattern = mirror_pattern,
-};
-
-/* v1 + irqpending_quirk: i.MX21 */
-static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
-	.preset = preset_v1,
-	.send_cmd = send_cmd_v1_v2,
-	.send_addr = send_addr_v1_v2,
-	.send_page = send_page_v1,
-	.send_read_id = send_read_id_v1_v2,
-	.get_dev_status = get_dev_status_v1_v2,
-	.check_int = check_int_v1_v2,
-	.irq_control = irq_control_v1_v2,
-	.get_ecc_status = get_ecc_status_v1,
-	.ooblayout = &mxc_v1_ooblayout_ops,
-	.select_chip = mxc_nand_select_chip_v1_v3,
-	.correct_data = mxc_nand_correct_data_v1,
-	.irqpending_quirk = 1,
-	.needs_ip = 0,
-	.regs_offset = 0xe00,
-	.spare0_offset = 0x800,
-	.spare_len = 16,
-	.eccbytes = 3,
-	.eccsize = 1,
-};
-
-/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
-static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
-	.preset = preset_v1,
-	.send_cmd = send_cmd_v1_v2,
-	.send_addr = send_addr_v1_v2,
-	.send_page = send_page_v1,
-	.send_read_id = send_read_id_v1_v2,
-	.get_dev_status = get_dev_status_v1_v2,
-	.check_int = check_int_v1_v2,
-	.irq_control = irq_control_v1_v2,
-	.get_ecc_status = get_ecc_status_v1,
-	.ooblayout = &mxc_v1_ooblayout_ops,
-	.select_chip = mxc_nand_select_chip_v1_v3,
-	.correct_data = mxc_nand_correct_data_v1,
-	.irqpending_quirk = 0,
-	.needs_ip = 0,
-	.regs_offset = 0xe00,
-	.spare0_offset = 0x800,
-	.axi_offset = 0,
-	.spare_len = 16,
-	.eccbytes = 3,
-	.eccsize = 1,
-};
-
-/* v21: i.MX25, i.MX35 */
-static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
-	.preset = preset_v2,
-	.send_cmd = send_cmd_v1_v2,
-	.send_addr = send_addr_v1_v2,
-	.send_page = send_page_v2,
-	.send_read_id = send_read_id_v1_v2,
-	.get_dev_status = get_dev_status_v1_v2,
-	.check_int = check_int_v1_v2,
-	.irq_control = irq_control_v1_v2,
-	.get_ecc_status = get_ecc_status_v2,
-	.ooblayout = &mxc_v2_ooblayout_ops,
-	.select_chip = mxc_nand_select_chip_v2,
-	.correct_data = mxc_nand_correct_data_v2_v3,
-	.setup_data_interface = mxc_nand_v2_setup_data_interface,
-	.irqpending_quirk = 0,
-	.needs_ip = 0,
-	.regs_offset = 0x1e00,
-	.spare0_offset = 0x1000,
-	.axi_offset = 0,
-	.spare_len = 64,
-	.eccbytes = 9,
-	.eccsize = 0,
-};
-
-/* v3.2a: i.MX51 */
-static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
-	.preset = preset_v3,
-	.send_cmd = send_cmd_v3,
-	.send_addr = send_addr_v3,
-	.send_page = send_page_v3,
-	.send_read_id = send_read_id_v3,
-	.get_dev_status = get_dev_status_v3,
-	.check_int = check_int_v3,
-	.irq_control = irq_control_v3,
-	.get_ecc_status = get_ecc_status_v3,
-	.ooblayout = &mxc_v2_ooblayout_ops,
-	.select_chip = mxc_nand_select_chip_v1_v3,
-	.correct_data = mxc_nand_correct_data_v2_v3,
-	.irqpending_quirk = 0,
-	.needs_ip = 1,
-	.regs_offset = 0,
-	.spare0_offset = 0x1000,
-	.axi_offset = 0x1e00,
-	.spare_len = 64,
-	.eccbytes = 0,
-	.eccsize = 0,
-	.ppb_shift = 7,
-};
-
-/* v3.2b: i.MX53 */
-static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
-	.preset = preset_v3,
-	.send_cmd = send_cmd_v3,
-	.send_addr = send_addr_v3,
-	.send_page = send_page_v3,
-	.send_read_id = send_read_id_v3,
-	.get_dev_status = get_dev_status_v3,
-	.check_int = check_int_v3,
-	.irq_control = irq_control_v3,
-	.get_ecc_status = get_ecc_status_v3,
-	.ooblayout = &mxc_v2_ooblayout_ops,
-	.select_chip = mxc_nand_select_chip_v1_v3,
-	.correct_data = mxc_nand_correct_data_v2_v3,
-	.irqpending_quirk = 0,
-	.needs_ip = 1,
-	.regs_offset = 0,
-	.spare0_offset = 0x1000,
-	.axi_offset = 0x1e00,
-	.spare_len = 64,
-	.eccbytes = 0,
-	.eccsize = 0,
-	.ppb_shift = 8,
-};
-
-static inline int is_imx21_nfc(struct mxc_nand_host *host)
-{
-	return host->devtype_data == &imx21_nand_devtype_data;
-}
-
-static inline int is_imx27_nfc(struct mxc_nand_host *host)
-{
-	return host->devtype_data == &imx27_nand_devtype_data;
-}
-
-static inline int is_imx25_nfc(struct mxc_nand_host *host)
-{
-	return host->devtype_data == &imx25_nand_devtype_data;
-}
-
-static inline int is_imx51_nfc(struct mxc_nand_host *host)
-{
-	return host->devtype_data == &imx51_nand_devtype_data;
-}
-
-static inline int is_imx53_nfc(struct mxc_nand_host *host)
-{
-	return host->devtype_data == &imx53_nand_devtype_data;
-}
-
-static const struct platform_device_id mxcnd_devtype[] = {
-	{
-		.name = "imx21-nand",
-		.driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
-	}, {
-		.name = "imx27-nand",
-		.driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
-	}, {
-		.name = "imx25-nand",
-		.driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
-	}, {
-		.name = "imx51-nand",
-		.driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
-	}, {
-		.name = "imx53-nand",
-		.driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
-	}, {
-		/* sentinel */
-	}
-};
-MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
-
-#ifdef CONFIG_OF
-static const struct of_device_id mxcnd_dt_ids[] = {
-	{
-		.compatible = "fsl,imx21-nand",
-		.data = &imx21_nand_devtype_data,
-	}, {
-		.compatible = "fsl,imx27-nand",
-		.data = &imx27_nand_devtype_data,
-	}, {
-		.compatible = "fsl,imx25-nand",
-		.data = &imx25_nand_devtype_data,
-	}, {
-		.compatible = "fsl,imx51-nand",
-		.data = &imx51_nand_devtype_data,
-	}, {
-		.compatible = "fsl,imx53-nand",
-		.data = &imx53_nand_devtype_data,
-	},
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
-
-static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
-{
-	struct device_node *np = host->dev->of_node;
-	const struct of_device_id *of_id =
-		of_match_device(mxcnd_dt_ids, host->dev);
-
-	if (!np)
-		return 1;
-
-	host->devtype_data = of_id->data;
-
-	return 0;
-}
-#else
-static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
-{
-	return 1;
-}
-#endif
-
-static int mxcnd_probe(struct platform_device *pdev)
-{
-	struct nand_chip *this;
-	struct mtd_info *mtd;
-	struct mxc_nand_host *host;
-	struct resource *res;
-	int err = 0;
-
-	/* Allocate memory for MTD device structure and private data */
-	host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
-			GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-
-	/* allocate a temporary buffer for the nand_scan_ident() */
-	host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
-	if (!host->data_buf)
-		return -ENOMEM;
-
-	host->dev = &pdev->dev;
-	/* structures must be linked */
-	this = &host->nand;
-	mtd = nand_to_mtd(this);
-	mtd->dev.parent = &pdev->dev;
-	mtd->name = DRIVER_NAME;
-
-	/* 50 us command delay time */
-	this->chip_delay = 5;
-
-	nand_set_controller_data(this, host);
-	nand_set_flash_node(this, pdev->dev.of_node);
-	this->dev_ready = mxc_nand_dev_ready;
-	this->cmdfunc = mxc_nand_command;
-	this->read_byte = mxc_nand_read_byte;
-	this->read_word = mxc_nand_read_word;
-	this->write_buf = mxc_nand_write_buf;
-	this->read_buf = mxc_nand_read_buf;
-	this->onfi_set_features = mxc_nand_onfi_set_features;
-	this->onfi_get_features = mxc_nand_onfi_get_features;
-
-	host->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(host->clk))
-		return PTR_ERR(host->clk);
-
-	err = mxcnd_probe_dt(host);
-	if (err > 0) {
-		struct mxc_nand_platform_data *pdata =
-					dev_get_platdata(&pdev->dev);
-		if (pdata) {
-			host->pdata = *pdata;
-			host->devtype_data = (struct mxc_nand_devtype_data *)
-						pdev->id_entry->driver_data;
-		} else {
-			err = -ENODEV;
-		}
-	}
-	if (err < 0)
-		return err;
-
-	this->setup_data_interface = host->devtype_data->setup_data_interface;
-
-	if (host->devtype_data->needs_ip) {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-		host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
-		if (IS_ERR(host->regs_ip))
-			return PTR_ERR(host->regs_ip);
-
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-	} else {
-		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	}
-
-	host->base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(host->base))
-		return PTR_ERR(host->base);
-
-	host->main_area0 = host->base;
-
-	if (host->devtype_data->regs_offset)
-		host->regs = host->base + host->devtype_data->regs_offset;
-	host->spare0 = host->base + host->devtype_data->spare0_offset;
-	if (host->devtype_data->axi_offset)
-		host->regs_axi = host->base + host->devtype_data->axi_offset;
-
-	this->ecc.bytes = host->devtype_data->eccbytes;
-	host->eccsize = host->devtype_data->eccsize;
-
-	this->select_chip = host->devtype_data->select_chip;
-	this->ecc.size = 512;
-	mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
-
-	if (host->pdata.hw_ecc) {
-		this->ecc.mode = NAND_ECC_HW;
-	} else {
-		this->ecc.mode = NAND_ECC_SOFT;
-		this->ecc.algo = NAND_ECC_HAMMING;
-	}
-
-	/* NAND bus width determines access functions used by upper layer */
-	if (host->pdata.width == 2)
-		this->options |= NAND_BUSWIDTH_16;
-
-	/* update flash based bbt */
-	if (host->pdata.flash_bbt)
-		this->bbt_options |= NAND_BBT_USE_FLASH;
-
-	init_completion(&host->op_completion);
-
-	host->irq = platform_get_irq(pdev, 0);
-	if (host->irq < 0)
-		return host->irq;
-
-	/*
-	 * Use host->devtype_data->irq_control() here instead of irq_control()
-	 * because we must not disable_irq_nosync without having requested the
-	 * irq.
-	 */
-	host->devtype_data->irq_control(host, 0);
-
-	err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
-			0, DRIVER_NAME, host);
-	if (err)
-		return err;
-
-	err = clk_prepare_enable(host->clk);
-	if (err)
-		return err;
-	host->clk_act = 1;
-
-	/*
-	 * Now that we "own" the interrupt make sure the interrupt mask bit is
-	 * cleared on i.MX21. Otherwise we can't read the interrupt status bit
-	 * on this machine.
-	 */
-	if (host->devtype_data->irqpending_quirk) {
-		disable_irq_nosync(host->irq);
-		host->devtype_data->irq_control(host, 1);
-	}
-
-	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
-		err = -ENXIO;
-		goto escan;
-	}
-
-	switch (this->ecc.mode) {
-	case NAND_ECC_HW:
-		this->ecc.calculate = mxc_nand_calculate_ecc;
-		this->ecc.hwctl = mxc_nand_enable_hwecc;
-		this->ecc.correct = host->devtype_data->correct_data;
-		break;
-
-	case NAND_ECC_SOFT:
-		break;
-
-	default:
-		err = -EINVAL;
-		goto escan;
-	}
-
-	if (this->bbt_options & NAND_BBT_USE_FLASH) {
-		this->bbt_td = &bbt_main_descr;
-		this->bbt_md = &bbt_mirror_descr;
-	}
-
-	/* allocate the right size buffer now */
-	devm_kfree(&pdev->dev, (void *)host->data_buf);
-	host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
-					GFP_KERNEL);
-	if (!host->data_buf) {
-		err = -ENOMEM;
-		goto escan;
-	}
-
-	/* Call preset again, with correct writesize this time */
-	host->devtype_data->preset(mtd);
-
-	if (!this->ecc.bytes) {
-		if (host->eccsize == 8)
-			this->ecc.bytes = 18;
-		else if (host->eccsize == 4)
-			this->ecc.bytes = 9;
-	}
-
-	/*
-	 * Experimentation shows that i.MX NFC can only handle up to 218 oob
-	 * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
-	 * into copying invalid data to/from the spare IO buffer, as this
-	 * might cause ECC data corruption when doing sub-page write to a
-	 * partially written page.
-	 */
-	host->used_oobsize = min(mtd->oobsize, 218U);
-
-	if (this->ecc.mode == NAND_ECC_HW) {
-		if (is_imx21_nfc(host) || is_imx27_nfc(host))
-			this->ecc.strength = 1;
-		else
-			this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
-	}
-
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
-		goto escan;
-	}
-
-	/* Register the partitions */
-	mtd_device_parse_register(mtd, part_probes,
-			NULL,
-			host->pdata.parts,
-			host->pdata.nr_parts);
-
-	platform_set_drvdata(pdev, host);
-
-	return 0;
-
-escan:
-	if (host->clk_act)
-		clk_disable_unprepare(host->clk);
-
-	return err;
-}
-
-static int mxcnd_remove(struct platform_device *pdev)
-{
-	struct mxc_nand_host *host = platform_get_drvdata(pdev);
-
-	nand_release(nand_to_mtd(&host->nand));
-	if (host->clk_act)
-		clk_disable_unprepare(host->clk);
-
-	return 0;
-}
-
-static struct platform_driver mxcnd_driver = {
-	.driver = {
-		   .name = DRIVER_NAME,
-		   .of_match_table = of_match_ptr(mxcnd_dt_ids),
-	},
-	.id_table = mxcnd_devtype,
-	.probe = mxcnd_probe,
-	.remove = mxcnd_remove,
-};
-module_platform_driver(mxcnd_driver);
-
-MODULE_AUTHOR("Freescale Semiconductor, Inc.");
-MODULE_DESCRIPTION("MXC NAND MTD driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
deleted file mode 100644
index 56b08a897115..000000000000
--- a/drivers/mtd/nand/nand_base.c
+++ /dev/null
@@ -1,4840 +0,0 @@ 
-/*
- *  Overview:
- *   This is the generic MTD driver for NAND flash devices. It should be
- *   capable of working with almost all NAND chips currently available.
- *
- *	Additional technical information is available on
- *	http://www.linux-mtd.infradead.org/doc/nand.html
- *
- *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
- *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
- *
- *  Credits:
- *	David Woodhouse for adding multichip support
- *
- *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
- *	rework for 2K page size chips
- *
- *  TODO:
- *	Enable cached programming for 2k page size chips
- *	Check, if mtd->ecctype should be set to MTD_ECC_HW
- *	if we have HW ECC support.
- *	BBT table is not serialized, has to be fixed
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/nand_bch.h>
-#include <linux/interrupt.h>
-#include <linux/bitops.h>
-#include <linux/io.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-
-static int nand_get_device(struct mtd_info *mtd, int new_state);
-
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
-			     struct mtd_oob_ops *ops);
-
-/* Define default oob placement schemes for large and small page devices */
-static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section > 1)
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->offset = 0;
-		oobregion->length = 4;
-	} else {
-		oobregion->offset = 6;
-		oobregion->length = ecc->total - 4;
-	}
-
-	return 0;
-}
-
-static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	if (section > 1)
-		return -ERANGE;
-
-	if (mtd->oobsize == 16) {
-		if (section)
-			return -ERANGE;
-
-		oobregion->length = 8;
-		oobregion->offset = 8;
-	} else {
-		oobregion->length = 2;
-		if (!section)
-			oobregion->offset = 3;
-		else
-			oobregion->offset = 6;
-	}
-
-	return 0;
-}
-
-const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
-	.ecc = nand_ooblayout_ecc_sp,
-	.free = nand_ooblayout_free_sp,
-};
-EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
-
-static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = ecc->total;
-	oobregion->offset = mtd->oobsize - oobregion->length;
-
-	return 0;
-}
-
-static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = mtd->oobsize - ecc->total - 2;
-	oobregion->offset = 2;
-
-	return 0;
-}
-
-const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
-	.ecc = nand_ooblayout_ecc_lp,
-	.free = nand_ooblayout_free_lp,
-};
-EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
-
-static int check_offs_len(struct mtd_info *mtd,
-					loff_t ofs, uint64_t len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret = 0;
-
-	/* Start address must align on block boundary */
-	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
-		pr_debug("%s: unaligned address\n", __func__);
-		ret = -EINVAL;
-	}
-
-	/* Length must align on block boundary */
-	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
-		pr_debug("%s: length not block aligned\n", __func__);
-		ret = -EINVAL;
-	}
-
-	return ret;
-}
-
-/**
- * nand_release_device - [GENERIC] release chip
- * @mtd: MTD device structure
- *
- * Release chip lock and wake up anyone waiting on the device.
- */
-static void nand_release_device(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	/* Release the controller and the chip */
-	spin_lock(&chip->controller->lock);
-	chip->controller->active = NULL;
-	chip->state = FL_READY;
-	wake_up(&chip->controller->wq);
-	spin_unlock(&chip->controller->lock);
-}
-
-/**
- * nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 8bit buswidth
- */
-static uint8_t nand_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	return readb(chip->IO_ADDR_R);
-}
-
-/**
- * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswidth with endianness conversion.
- *
- */
-static uint8_t nand_read_byte16(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
-}
-
-/**
- * nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswidth without endianness conversion.
- */
-static u16 nand_read_word(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	return readw(chip->IO_ADDR_R);
-}
-
-/**
- * nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chipnr: chipnumber to select, -1 for deselect
- *
- * Default select function for 1 chip devices.
- */
-static void nand_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	switch (chipnr) {
-	case -1:
-		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
-		break;
-	case 0:
-		break;
-
-	default:
-		BUG();
-	}
-}
-
-/**
- * nand_write_byte - [DEFAULT] write single byte to chip
- * @mtd: MTD device structure
- * @byte: value to write
- *
- * Default function to write a byte to I/O[7:0]
- */
-static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	chip->write_buf(mtd, &byte, 1);
-}
-
-/**
- * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
- * @mtd: MTD device structure
- * @byte: value to write
- *
- * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
- */
-static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	uint16_t word = byte;
-
-	/*
-	 * It's not entirely clear what should happen to I/O[15:8] when writing
-	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
-	 *
-	 *    When the host supports a 16-bit bus width, only data is
-	 *    transferred at the 16-bit width. All address and command line
-	 *    transfers shall use only the lower 8-bits of the data bus. During
-	 *    command transfers, the host may place any value on the upper
-	 *    8-bits of the data bus. During address transfers, the host shall
-	 *    set the upper 8-bits of the data bus to 00h.
-	 *
-	 * One user of the write_byte callback is nand_onfi_set_features. The
-	 * four parameters are specified to be written to I/O[7:0], but this is
-	 * neither an address nor a command transfer. Let's assume a 0 on the
-	 * upper I/O lines is OK.
-	 */
-	chip->write_buf(mtd, (uint8_t *)&word, 2);
-}
-
-/**
- * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 8bit buswidth.
- */
-static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	iowrite8_rep(chip->IO_ADDR_W, buf, len);
-}
-
-/**
- * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- *
- * Default read function for 8bit buswidth.
- */
-static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	ioread8_rep(chip->IO_ADDR_R, buf, len);
-}
-
-/**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- *
- * Default write function for 16bit buswidth.
- */
-static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	u16 *p = (u16 *) buf;
-
-	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
-}
-
-/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- *
- * Default read function for 16bit buswidth.
- */
-static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	u16 *p = (u16 *) buf;
-
-	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
-}
-
-/**
- * nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
- * @ofs: offset from device start
- *
- * Check if the block is bad.
- */
-static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
-{
-	int page, res = 0, i = 0;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	u16 bad;
-
-	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
-		ofs += mtd->erasesize - mtd->writesize;
-
-	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
-
-	do {
-		if (chip->options & NAND_BUSWIDTH_16) {
-			chip->cmdfunc(mtd, NAND_CMD_READOOB,
-					chip->badblockpos & 0xFE, page);
-			bad = cpu_to_le16(chip->read_word(mtd));
-			if (chip->badblockpos & 0x1)
-				bad >>= 8;
-			else
-				bad &= 0xFF;
-		} else {
-			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
-					page);
-			bad = chip->read_byte(mtd);
-		}
-
-		if (likely(chip->badblockbits == 8))
-			res = bad != 0xFF;
-		else
-			res = hweight8(bad) < chip->badblockbits;
-		ofs += mtd->writesize;
-		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
-		i++;
-	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
-
-	return res;
-}
-
-/**
- * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
- * @mtd: MTD device structure
- * @ofs: offset from device start
- *
- * This is the default implementation, which can be overridden by a hardware
- * specific driver. It provides the details for writing a bad block marker to a
- * block.
- */
-static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtd_oob_ops ops;
-	uint8_t buf[2] = { 0, 0 };
-	int ret = 0, res, i = 0;
-
-	memset(&ops, 0, sizeof(ops));
-	ops.oobbuf = buf;
-	ops.ooboffs = chip->badblockpos;
-	if (chip->options & NAND_BUSWIDTH_16) {
-		ops.ooboffs &= ~0x01;
-		ops.len = ops.ooblen = 2;
-	} else {
-		ops.len = ops.ooblen = 1;
-	}
-	ops.mode = MTD_OPS_PLACE_OOB;
-
-	/* Write to first/last page(s) if necessary */
-	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
-		ofs += mtd->erasesize - mtd->writesize;
-	do {
-		res = nand_do_write_oob(mtd, ofs, &ops);
-		if (!ret)
-			ret = res;
-
-		i++;
-		ofs += mtd->writesize;
-	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
-
-	return ret;
-}
-
-/**
- * nand_block_markbad_lowlevel - mark a block bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- *
- * This function performs the generic NAND bad block marking steps (i.e., bad
- * block table(s) and/or marker(s)). We only allow the hardware driver to
- * specify how to write bad block markers to OOB (chip->block_markbad).
- *
- * We try operations in the following order:
- *  (1) erase the affected block, to allow OOB marker to be written cleanly
- *  (2) write bad block marker to OOB area of affected block (unless flag
- *      NAND_BBT_NO_OOB_BBM is present)
- *  (3) update the BBT
- * Note that we retain the first error encountered in (2) or (3), finish the
- * procedures, and dump the error in the end.
-*/
-static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int res, ret = 0;
-
-	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
-		struct erase_info einfo;
-
-		/* Attempt erase before marking OOB */
-		memset(&einfo, 0, sizeof(einfo));
-		einfo.mtd = mtd;
-		einfo.addr = ofs;
-		einfo.len = 1ULL << chip->phys_erase_shift;
-		nand_erase_nand(mtd, &einfo, 0);
-
-		/* Write bad block marker to OOB */
-		nand_get_device(mtd, FL_WRITING);
-		ret = chip->block_markbad(mtd, ofs);
-		nand_release_device(mtd);
-	}
-
-	/* Mark block bad in BBT */
-	if (chip->bbt) {
-		res = nand_markbad_bbt(mtd, ofs);
-		if (!ret)
-			ret = res;
-	}
-
-	if (!ret)
-		mtd->ecc_stats.badblocks++;
-
-	return ret;
-}
-
-/**
- * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- *
- * Check if the device is write protected. The function expects that the
- * device is already selected.
- */
-static int nand_check_wp(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	/* Broken xD cards report WP despite being writable */
-	if (chip->options & NAND_BROKEN_XD)
-		return 0;
-
-	/* Check the WP bit */
-	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
-	return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
-}
-
-/**
- * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
- * @mtd: MTD device structure
- * @ofs: offset from device start
- *
- * Check if the block is marked as reserved.
- */
-static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (!chip->bbt)
-		return 0;
-	/* Return info from the table */
-	return nand_isreserved_bbt(mtd, ofs);
-}
-
-/**
- * nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @allowbbt: 1, if its allowed to access the bbt area
- *
- * Check if the block is bad, either by reading the bad block table or by
- * calling the scan function.
- */
-static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (!chip->bbt)
-		return chip->block_bad(mtd, ofs);
-
-	/* Return info from the table */
-	return nand_isbad_bbt(mtd, ofs, allowbbt);
-}
-
-/**
- * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
- * @timeo: Timeout
- *
- * Helper function for nand_wait_ready used when needing to wait in interrupt
- * context.
- */
-static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int i;
-
-	/* Wait for the device to get ready */
-	for (i = 0; i < timeo; i++) {
-		if (chip->dev_ready(mtd))
-			break;
-		touch_softlockup_watchdog();
-		mdelay(1);
-	}
-}
-
-/**
- * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
- * @mtd: MTD device structure
- *
- * Wait for the ready pin after a command, and warn if a timeout occurs.
- */
-void nand_wait_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	unsigned long timeo = 400;
-
-	if (in_interrupt() || oops_in_progress)
-		return panic_nand_wait_ready(mtd, timeo);
-
-	/* Wait until command is processed or timeout occurs */
-	timeo = jiffies + msecs_to_jiffies(timeo);
-	do {
-		if (chip->dev_ready(mtd))
-			return;
-		cond_resched();
-	} while (time_before(jiffies, timeo));
-
-	if (!chip->dev_ready(mtd))
-		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
-}
-EXPORT_SYMBOL_GPL(nand_wait_ready);
-
-/**
- * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
- * @mtd: MTD device structure
- * @timeo: Timeout in ms
- *
- * Wait for status ready (i.e. command done) or timeout.
- */
-static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
-{
-	register struct nand_chip *chip = mtd_to_nand(mtd);
-
-	timeo = jiffies + msecs_to_jiffies(timeo);
-	do {
-		if ((chip->read_byte(mtd) & NAND_STATUS_READY))
-			break;
-		touch_softlockup_watchdog();
-	} while (time_before(jiffies, timeo));
-}
-
-/**
- * nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- *
- * Send command to NAND device. This function is used for small page devices
- * (512 Bytes per page).
- */
-static void nand_command(struct mtd_info *mtd, unsigned int command,
-			 int column, int page_addr)
-{
-	register struct nand_chip *chip = mtd_to_nand(mtd);
-	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
-
-	/* Write out the command to the device */
-	if (command == NAND_CMD_SEQIN) {
-		int readcmd;
-
-		if (column >= mtd->writesize) {
-			/* OOB area */
-			column -= mtd->writesize;
-			readcmd = NAND_CMD_READOOB;
-		} else if (column < 256) {
-			/* First 256 bytes --> READ0 */
-			readcmd = NAND_CMD_READ0;
-		} else {
-			column -= 256;
-			readcmd = NAND_CMD_READ1;
-		}
-		chip->cmd_ctrl(mtd, readcmd, ctrl);
-		ctrl &= ~NAND_CTRL_CHANGE;
-	}
-	chip->cmd_ctrl(mtd, command, ctrl);
-
-	/* Address cycle, when necessary */
-	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
-	/* Serially input address */
-	if (column != -1) {
-		/* Adjust columns for 16 bit buswidth */
-		if (chip->options & NAND_BUSWIDTH_16 &&
-				!nand_opcode_8bits(command))
-			column >>= 1;
-		chip->cmd_ctrl(mtd, column, ctrl);
-		ctrl &= ~NAND_CTRL_CHANGE;
-	}
-	if (page_addr != -1) {
-		chip->cmd_ctrl(mtd, page_addr, ctrl);
-		ctrl &= ~NAND_CTRL_CHANGE;
-		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
-		/* One more address cycle for devices > 32MiB */
-		if (chip->chipsize > (32 << 20))
-			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
-	}
-	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
-	/*
-	 * Program and erase have their own busy handlers; status and
-	 * sequential in need no delay.
-	 */
-	switch (command) {
-
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		if (chip->dev_ready)
-			break;
-		udelay(chip->chip_delay);
-		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
-			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
-		chip->cmd_ctrl(mtd,
-			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
-		nand_wait_status_ready(mtd, 250);
-		return;
-
-		/* This applies to read commands */
-	default:
-		/*
-		 * If we don't have access to the busy pin, we apply the given
-		 * command delay
-		 */
-		if (!chip->dev_ready) {
-			udelay(chip->chip_delay);
-			return;
-		}
-	}
-	/*
-	 * Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine.
-	 */
-	ndelay(100);
-
-	nand_wait_ready(mtd);
-}
-
-/**
- * nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
- *
- * Send command to NAND device. This is the version for the new large page
- * devices. We don't have the separate regions as we have in the small page
- * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
- */
-static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
-			    int column, int page_addr)
-{
-	register struct nand_chip *chip = mtd_to_nand(mtd);
-
-	/* Emulate NAND_CMD_READOOB */
-	if (command == NAND_CMD_READOOB) {
-		column += mtd->writesize;
-		command = NAND_CMD_READ0;
-	}
-
-	/* Command latch cycle */
-	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
-
-	if (column != -1 || page_addr != -1) {
-		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
-
-		/* Serially input address */
-		if (column != -1) {
-			/* Adjust columns for 16 bit buswidth */
-			if (chip->options & NAND_BUSWIDTH_16 &&
-					!nand_opcode_8bits(command))
-				column >>= 1;
-			chip->cmd_ctrl(mtd, column, ctrl);
-			ctrl &= ~NAND_CTRL_CHANGE;
-
-			/* Only output a single addr cycle for 8-bit opcodes. */
-			if (!nand_opcode_8bits(command))
-				chip->cmd_ctrl(mtd, column >> 8, ctrl);
-		}
-		if (page_addr != -1) {
-			chip->cmd_ctrl(mtd, page_addr, ctrl);
-			chip->cmd_ctrl(mtd, page_addr >> 8,
-				       NAND_NCE | NAND_ALE);
-			/* One more address cycle for devices > 128MiB */
-			if (chip->chipsize > (128 << 20))
-				chip->cmd_ctrl(mtd, page_addr >> 16,
-					       NAND_NCE | NAND_ALE);
-		}
-	}
-	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
-
-	/*
-	 * Program and erase have their own busy handlers; status, sequential
-	 * in and random in need no delay.
-	 */
-	switch (command) {
-
-	case NAND_CMD_CACHEDPROG:
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_RNDIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		if (chip->dev_ready)
-			break;
-		udelay(chip->chip_delay);
-		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
-			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
-		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
-			       NAND_NCE | NAND_CTRL_CHANGE);
-		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
-		nand_wait_status_ready(mtd, 250);
-		return;
-
-	case NAND_CMD_RNDOUT:
-		/* No ready / busy check necessary */
-		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
-			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
-		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
-			       NAND_NCE | NAND_CTRL_CHANGE);
-		return;
-
-	case NAND_CMD_READ0:
-		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
-			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
-		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
-			       NAND_NCE | NAND_CTRL_CHANGE);
-
-		/* This applies to read commands */
-	default:
-		/*
-		 * If we don't have access to the busy pin, we apply the given
-		 * command delay.
-		 */
-		if (!chip->dev_ready) {
-			udelay(chip->chip_delay);
-			return;
-		}
-	}
-
-	/*
-	 * Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine.
-	 */
-	ndelay(100);
-
-	nand_wait_ready(mtd);
-}
-
-/**
- * panic_nand_get_device - [GENERIC] Get chip for selected access
- * @chip: the nand chip descriptor
- * @mtd: MTD device structure
- * @new_state: the state which is requested
- *
- * Used when in panic, no locks are taken.
- */
-static void panic_nand_get_device(struct nand_chip *chip,
-		      struct mtd_info *mtd, int new_state)
-{
-	/* Hardware controller shared among independent devices */
-	chip->controller->active = chip;
-	chip->state = new_state;
-}
-
-/**
- * nand_get_device - [GENERIC] Get chip for selected access
- * @mtd: MTD device structure
- * @new_state: the state which is requested
- *
- * Get the device and lock it for exclusive access
- */
-static int
-nand_get_device(struct mtd_info *mtd, int new_state)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	spinlock_t *lock = &chip->controller->lock;
-	wait_queue_head_t *wq = &chip->controller->wq;
-	DECLARE_WAITQUEUE(wait, current);
-retry:
-	spin_lock(lock);
-
-	/* Hardware controller shared among independent devices */
-	if (!chip->controller->active)
-		chip->controller->active = chip;
-
-	if (chip->controller->active == chip && chip->state == FL_READY) {
-		chip->state = new_state;
-		spin_unlock(lock);
-		return 0;
-	}
-	if (new_state == FL_PM_SUSPENDED) {
-		if (chip->controller->active->state == FL_PM_SUSPENDED) {
-			chip->state = FL_PM_SUSPENDED;
-			spin_unlock(lock);
-			return 0;
-		}
-	}
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	add_wait_queue(wq, &wait);
-	spin_unlock(lock);
-	schedule();
-	remove_wait_queue(wq, &wait);
-	goto retry;
-}
-
-/**
- * panic_nand_wait - [GENERIC] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
- * @timeo: timeout
- *
- * Wait for command done. This is a helper function for nand_wait used when
- * we are in interrupt context. May happen when in panic and trying to write
- * an oops through mtdoops.
- */
-static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
-			    unsigned long timeo)
-{
-	int i;
-	for (i = 0; i < timeo; i++) {
-		if (chip->dev_ready) {
-			if (chip->dev_ready(mtd))
-				break;
-		} else {
-			if (chip->read_byte(mtd) & NAND_STATUS_READY)
-				break;
-		}
-		mdelay(1);
-	}
-}
-
-/**
- * nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
- *
- * Wait for command done. This applies to erase and program only.
- */
-static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-
-	int status;
-	unsigned long timeo = 400;
-
-	/*
-	 * Apply this short delay always to ensure that we do wait tWB in any
-	 * case on any machine.
-	 */
-	ndelay(100);
-
-	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
-
-	if (in_interrupt() || oops_in_progress)
-		panic_nand_wait(mtd, chip, timeo);
-	else {
-		timeo = jiffies + msecs_to_jiffies(timeo);
-		do {
-			if (chip->dev_ready) {
-				if (chip->dev_ready(mtd))
-					break;
-			} else {
-				if (chip->read_byte(mtd) & NAND_STATUS_READY)
-					break;
-			}
-			cond_resched();
-		} while (time_before(jiffies, timeo));
-	}
-
-	status = (int)chip->read_byte(mtd);
-	/* This can happen in case of a timeout or buggy dev_ready */
-	WARN_ON(!(status & NAND_STATUS_READY));
-	return status;
-}
-
-/**
- * nand_reset_data_interface - Reset data interface and timings
- * @chip: The NAND chip
- *
 - * Reset the data interface and timings to ONFI mode 0.
- *
- * Returns 0 for success or negative error code otherwise.
- */
-static int nand_reset_data_interface(struct nand_chip *chip)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	const struct nand_data_interface *conf;
-	int ret;
-
-	if (!chip->setup_data_interface)
-		return 0;
-
-	/*
-	 * The ONFI specification says:
-	 * "
-	 * To transition from NV-DDR or NV-DDR2 to the SDR data
-	 * interface, the host shall use the Reset (FFh) command
-	 * using SDR timing mode 0. A device in any timing mode is
-	 * required to recognize Reset (FFh) command issued in SDR
-	 * timing mode 0.
-	 * "
-	 *
-	 * Configure the data interface in SDR mode and set the
-	 * timings to timing mode 0.
-	 */
-
-	conf = nand_get_default_data_interface();
-	ret = chip->setup_data_interface(mtd, conf, false);
-	if (ret)
-		pr_err("Failed to configure data interface to SDR timing mode 0\n");
-
-	return ret;
-}
-
-/**
- * nand_setup_data_interface - Setup the best data interface and timings
- * @chip: The NAND chip
- *
- * Find and configure the best data interface and NAND timings supported by
- * the chip and the driver.
- * First tries to retrieve supported timing modes from ONFI information,
- * and if the NAND chip does not support ONFI, relies on the
- * ->onfi_timing_mode_default specified in the nand_ids table.
- *
- * Returns 0 for success or negative error code otherwise.
- */
-static int nand_setup_data_interface(struct nand_chip *chip)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	int ret;
-
-	if (!chip->setup_data_interface || !chip->data_interface)
-		return 0;
-
-	/*
-	 * Ensure the timing mode has been changed on the chip side
-	 * before changing timings on the controller side.
-	 */
-	if (chip->onfi_version) {
-		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
-			chip->onfi_timing_mode_default,
-		};
-
-		ret = chip->onfi_set_features(mtd, chip,
-				ONFI_FEATURE_ADDR_TIMING_MODE,
-				tmode_param);
-		if (ret)
-			goto err;
-	}
-
-	ret = chip->setup_data_interface(mtd, chip->data_interface, false);
-err:
-	return ret;
-}
-
-/**
- * nand_init_data_interface - find the best data interface and timings
- * @chip: The NAND chip
- *
- * Find the best data interface and NAND timings supported by the chip
- * and the driver.
- * First tries to retrieve supported timing modes from ONFI information,
- * and if the NAND chip does not support ONFI, relies on the
- * ->onfi_timing_mode_default specified in the nand_ids table. After this
- * function nand_chip->data_interface is initialized with the best timing mode
- * available.
- *
- * Returns 0 for success or negative error code otherwise.
- */
-static int nand_init_data_interface(struct nand_chip *chip)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	int modes, mode, ret;
-
-	if (!chip->setup_data_interface)
-		return 0;
-
-	/*
-	 * First try to identify the best timings from the ONFI parameters
-	 * and, if the NAND does not support ONFI, fall back to the default
-	 * ONFI timing mode.
-	 */
-	modes = onfi_get_async_timing_mode(chip);
-	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
-		if (!chip->onfi_timing_mode_default)
-			return 0;
-
-		modes = GENMASK(chip->onfi_timing_mode_default, 0);
-	}
-
-	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
-				       GFP_KERNEL);
-	if (!chip->data_interface)
-		return -ENOMEM;
-
-	for (mode = fls(modes) - 1; mode >= 0; mode--) {
-		ret = onfi_init_data_interface(chip, chip->data_interface,
-					       NAND_SDR_IFACE, mode);
-		if (ret)
-			continue;
-
-		ret = chip->setup_data_interface(mtd, chip->data_interface,
-						 true);
-		if (!ret) {
-			chip->onfi_timing_mode_default = mode;
-			break;
-		}
-	}
-
-	return 0;
-}
-
-static void nand_release_data_interface(struct nand_chip *chip)
-{
-	kfree(chip->data_interface);
-}
-
-/**
- * nand_reset - Reset and initialize a NAND device
- * @chip: The NAND chip
- *
- * Returns 0 for success or negative error code otherwise
- */
-int nand_reset(struct nand_chip *chip)
-{
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	int ret;
-
-	ret = nand_reset_data_interface(chip);
-	if (ret)
-		return ret;
-
-	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-
-	ret = nand_setup_data_interface(chip);
-	if (ret)
-		return ret;
-
-	return 0;
-}
-
-/**
- * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- * @mtd: mtd info
- * @ofs: offset to start unlock from
- * @len: length to unlock
- * @invert: when = 0, unlock the range of blocks within the lower and
- *                    upper boundary address
- *          when = 1, unlock the range of blocks outside the boundaries
- *                    of the lower and upper boundary address
- *
 - * Returns unlock status.
- */
-static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
-					uint64_t len, int invert)
-{
-	int ret = 0;
-	int status, page;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	/* Submit address of first page to unlock */
-	page = ofs >> chip->page_shift;
-	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
-
-	/* Submit address of last page to unlock */
-	page = (ofs + len) >> chip->page_shift;
-	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
-				(page | invert) & chip->pagemask);
-
-	/* Call wait ready function */
-	status = chip->waitfunc(mtd, chip);
-	/* See if device thinks it succeeded */
-	if (status & NAND_STATUS_FAIL) {
-		pr_debug("%s: error status = 0x%08x\n",
-					__func__, status);
-		ret = -EIO;
-	}
-
-	return ret;
-}
-
-/**
- * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- * @mtd: mtd info
- * @ofs: offset to start unlock from
- * @len: length to unlock
- *
- * Returns unlock status.
- */
-int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	int ret = 0;
-	int chipnr;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	pr_debug("%s: start = 0x%012llx, len = %llu\n",
-			__func__, (unsigned long long)ofs, len);
-
-	if (check_offs_len(mtd, ofs, len))
-		return -EINVAL;
-
-	/* Align to last block address if size addresses end of the device */
-	if (ofs + len == mtd->size)
-		len -= mtd->erasesize;
-
-	nand_get_device(mtd, FL_UNLOCKING);
-
-	/* Shift to get chip number */
-	chipnr = ofs >> chip->chip_shift;
-
-	chip->select_chip(mtd, chipnr);
-
-	/*
-	 * Reset the chip.
-	 * If we want to check the WP bit (bit 7) through READ STATUS, we must
-	 * reset the chip first, since some operations (e.g. erasing or
-	 * programming a locked block) can also clear bit 7 of the status
-	 * register.
-	 */
-	nand_reset(chip);
-
-	/* Check, if it is write protected */
-	if (nand_check_wp(mtd)) {
-		pr_debug("%s: device is write protected!\n",
-					__func__);
-		ret = -EIO;
-		goto out;
-	}
-
-	ret = __nand_unlock(mtd, ofs, len, 0);
-
-out:
-	chip->select_chip(mtd, -1);
-	nand_release_device(mtd);
-
-	return ret;
-}
-EXPORT_SYMBOL(nand_unlock);
-
-/**
- * nand_lock - [REPLACEABLE] locks all blocks present in the device
- * @mtd: mtd info
- * @ofs: offset to start unlock from
- * @len: length to unlock
- *
 - * This feature is not supported in many NAND parts. 'Micron' NAND parts do
 - * have this feature, but it only allows locking all blocks, not a specified
 - * range of blocks. For now, the 'lock' feature is implemented by making use
 - * of 'unlock'.
- *
- * Returns lock status.
- */
-int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
-{
-	int ret = 0;
-	int chipnr, status, page;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	pr_debug("%s: start = 0x%012llx, len = %llu\n",
-			__func__, (unsigned long long)ofs, len);
-
-	if (check_offs_len(mtd, ofs, len))
-		return -EINVAL;
-
-	nand_get_device(mtd, FL_LOCKING);
-
-	/* Shift to get chip number */
-	chipnr = ofs >> chip->chip_shift;
-
-	chip->select_chip(mtd, chipnr);
-
-	/*
-	 * Reset the chip.
-	 * If we want to check the WP bit (bit 7) through READ STATUS, we must
-	 * reset the chip first, since some operations (e.g. erasing or
-	 * programming a locked block) can also clear bit 7 of the status
-	 * register.
-	 */
-	nand_reset(chip);
-
-	/* Check, if it is write protected */
-	if (nand_check_wp(mtd)) {
-		pr_debug("%s: device is write protected!\n",
-					__func__);
-		status = MTD_ERASE_FAILED;
-		ret = -EIO;
-		goto out;
-	}
-
-	/* Submit address of first page to lock */
-	page = ofs >> chip->page_shift;
-	chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
-
-	/* Call wait ready function */
-	status = chip->waitfunc(mtd, chip);
-	/* See if device thinks it succeeded */
-	if (status & NAND_STATUS_FAIL) {
-		pr_debug("%s: error status = 0x%08x\n",
-					__func__, status);
-		ret = -EIO;
-		goto out;
-	}
-
-	ret = __nand_unlock(mtd, ofs, len, 0x1);
-
-out:
-	chip->select_chip(mtd, -1);
-	nand_release_device(mtd);
-
-	return ret;
-}
-EXPORT_SYMBOL(nand_lock);
-
-/**
- * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
- * @buf: buffer to test
- * @len: buffer length
- * @bitflips_threshold: maximum number of bitflips
- *
- * Check if a buffer contains only 0xff, which means the underlying region
- * has been erased and is ready to be programmed.
 - * The bitflips_threshold specifies the maximum number of bitflips before
 - * considering the region as not erased.
 - * Note: The logic of this function has been extracted from the memweight
 - * implementation, except that nand_check_erased_buf exits before testing
 - * the whole buffer if the number of bitflips exceeds the bitflips_threshold
 - * value.
- *
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold.
- */
-static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
-{
-	const unsigned char *bitmap = buf;
-	int bitflips = 0;
-	int weight;
-
-	for (; len && ((uintptr_t)bitmap) % sizeof(long);
-	     len--, bitmap++) {
-		weight = hweight8(*bitmap);
-		bitflips += BITS_PER_BYTE - weight;
-		if (unlikely(bitflips > bitflips_threshold))
-			return -EBADMSG;
-	}
-
-	for (; len >= sizeof(long);
-	     len -= sizeof(long), bitmap += sizeof(long)) {
-		weight = hweight_long(*((unsigned long *)bitmap));
-		bitflips += BITS_PER_LONG - weight;
-		if (unlikely(bitflips > bitflips_threshold))
-			return -EBADMSG;
-	}
-
-	for (; len > 0; len--, bitmap++) {
-		weight = hweight8(*bitmap);
-		bitflips += BITS_PER_BYTE - weight;
-		if (unlikely(bitflips > bitflips_threshold))
-			return -EBADMSG;
-	}
-
-	return bitflips;
-}
-
-/**
- * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
- *				 0xff data
- * @data: data buffer to test
- * @datalen: data length
- * @ecc: ECC buffer
- * @ecclen: ECC length
- * @extraoob: extra OOB buffer
- * @extraooblen: extra OOB length
- * @bitflips_threshold: maximum number of bitflips
- *
 - * Check if a data buffer and its associated ECC and OOB data contain only
 - * the 0xff pattern, which means the underlying region has been erased and is
 - * ready to be programmed.
 - * The bitflips_threshold specifies the maximum number of bitflips before
 - * considering the region as not erased.
- *
- * Note:
- * 1/ ECC algorithms are working on pre-defined block sizes which are usually
- *    different from the NAND page size. When fixing bitflips, ECC engines will
 - *    report the number of errors per chunk, and the NAND core infrastructure
 - *    expects you to return the maximum number of bitflips for the whole page.
- *    This is why you should always use this function on a single chunk and
- *    not on the whole page. After checking each chunk you should update your
- *    max_bitflips value accordingly.
- * 2/ When checking for bitflips in erased pages you should not only check
 - *    the payload data but also its associated ECC data, because a user might
 - *    have programmed almost all bits to 1, leaving only a few at 0. In this
 - *    case, we shouldn't consider the chunk as erased, and checking the ECC
 - *    bytes prevents this.
- * 3/ The extraoob argument is optional, and should be used if some of your OOB
- *    data are protected by the ECC engine.
- *    It could also be used if you support subpages and want to attach some
- *    extra OOB data to an ECC chunk.
- *
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold. In case of success, the passed buffers are filled with 0xff.
- */
-int nand_check_erased_ecc_chunk(void *data, int datalen,
-				void *ecc, int ecclen,
-				void *extraoob, int extraooblen,
-				int bitflips_threshold)
-{
-	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
-
-	data_bitflips = nand_check_erased_buf(data, datalen,
-					      bitflips_threshold);
-	if (data_bitflips < 0)
-		return data_bitflips;
-
-	bitflips_threshold -= data_bitflips;
-
-	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
-	if (ecc_bitflips < 0)
-		return ecc_bitflips;
-
-	bitflips_threshold -= ecc_bitflips;
-
-	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
-						  bitflips_threshold);
-	if (extraoob_bitflips < 0)
-		return extraoob_bitflips;
-
-	if (data_bitflips)
-		memset(data, 0xff, datalen);
-
-	if (ecc_bitflips)
-		memset(ecc, 0xff, ecclen);
-
-	if (extraoob_bitflips)
-		memset(extraoob, 0xff, extraooblen);
-
-	return data_bitflips + ecc_bitflips + extraoob_bitflips;
-}
-EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
-
-/**
- * nand_read_page_raw - [INTERN] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Not for syndrome calculating ECC controllers, which use a special oob layout.
- */
-static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-			      uint8_t *buf, int oob_required, int page)
-{
-	chip->read_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-/**
- * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * We need a special oob layout and handling even when OOB isn't used.
- */
-static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
-				       struct nand_chip *chip, uint8_t *buf,
-				       int oob_required, int page)
-{
-	int eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	uint8_t *oob = chip->oob_poi;
-	int steps, size;
-
-	for (steps = chip->ecc.steps; steps > 0; steps--) {
-		chip->read_buf(mtd, buf, eccsize);
-		buf += eccsize;
-
-		if (chip->ecc.prepad) {
-			chip->read_buf(mtd, oob, chip->ecc.prepad);
-			oob += chip->ecc.prepad;
-		}
-
-		chip->read_buf(mtd, oob, eccbytes);
-		oob += eccbytes;
-
-		if (chip->ecc.postpad) {
-			chip->read_buf(mtd, oob, chip->ecc.postpad);
-			oob += chip->ecc.postpad;
-		}
-	}
-
-	size = mtd->oobsize - (oob - chip->oob_poi);
-	if (size)
-		chip->read_buf(mtd, oob, size);
-
-	return 0;
-}
-
-/**
- * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- */
-static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	int i, eccsize = chip->ecc.size, ret;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	uint8_t *ecc_code = chip->buffers->ecccode;
-	unsigned int max_bitflips = 0;
-
-	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
-		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-
-	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	eccsteps = chip->ecc.steps;
-	p = buf;
-
-	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-	return max_bitflips;
-}
-
-/**
- * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @data_offs: offset of requested data within the page
- * @readlen: data length
- * @bufpoi: buffer to store read data
- * @page: page number to read
- */
-static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
-			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
-			int page)
-{
-	int start_step, end_step, num_steps, ret;
-	uint8_t *p;
-	int data_col_addr, i, gaps = 0;
-	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
-	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
-	int index, section = 0;
-	unsigned int max_bitflips = 0;
-	struct mtd_oob_region oobregion = { };
-
-	/* Column address within the page aligned to ECC size (256 bytes) */
-	start_step = data_offs / chip->ecc.size;
-	end_step = (data_offs + readlen - 1) / chip->ecc.size;
-	num_steps = end_step - start_step + 1;
-	index = start_step * chip->ecc.bytes;
-
-	/* Data size aligned to ECC ecc.size */
-	datafrag_len = num_steps * chip->ecc.size;
-	eccfrag_len = num_steps * chip->ecc.bytes;
-
-	data_col_addr = start_step * chip->ecc.size;
-	/* If we are not reading page aligned data */
-	if (data_col_addr != 0)
-		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
-
-	p = bufpoi + data_col_addr;
-	chip->read_buf(mtd, p, datafrag_len);
-
-	/* Calculate ECC */
-	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
-		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
-
-	/*
-	 * Performance is better if we position offsets according to ecc.pos.
-	 * Let's make sure that there are no gaps in the ECC positions.
-	 */
-	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
-	if (ret)
-		return ret;
-
-	if (oobregion.length < eccfrag_len)
-		gaps = 1;
-
-	if (gaps) {
-		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
-		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	} else {
-		/*
-		 * Send the command to read the particular ECC bytes, taking
-		 * care of buswidth alignment in read_buf.
-		 */
-		aligned_pos = oobregion.offset & ~(busw - 1);
-		aligned_len = eccfrag_len;
-		if (oobregion.offset & (busw - 1))
-			aligned_len++;
-		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
-		    (busw - 1))
-			aligned_len++;
-
-		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
-			      mtd->writesize + aligned_pos, -1);
-		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
-	}
-
-	ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
-					 chip->oob_poi, index, eccfrag_len);
-	if (ret)
-		return ret;
-
-	p = bufpoi + data_col_addr;
-	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
-		int stat;
-
-		stat = chip->ecc.correct(mtd, p,
-			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
-		if (stat == -EBADMSG &&
-		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
-			/* check for empty pages with bitflips */
-			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
-						&chip->buffers->ecccode[i],
-						chip->ecc.bytes,
-						NULL, 0,
-						chip->ecc.strength);
-		}
-
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-	return max_bitflips;
-}
-
-/**
- * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * Not for syndrome calculating ECC controllers which need a special oob layout.
- */
-static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	int i, eccsize = chip->ecc.size, ret;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	uint8_t *ecc_code = chip->buffers->ecccode;
-	unsigned int max_bitflips = 0;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		chip->ecc.hwctl(mtd, NAND_ECC_READ);
-		chip->read_buf(mtd, p, eccsize);
-		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-	}
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	eccsteps = chip->ecc.steps;
-	p = buf;
-
-	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
-		if (stat == -EBADMSG &&
-		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
-			/* check for empty pages with bitflips */
-			stat = nand_check_erased_ecc_chunk(p, eccsize,
-						&ecc_code[i], eccbytes,
-						NULL, 0,
-						chip->ecc.strength);
-		}
-
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-	return max_bitflips;
-}
-
-/**
- * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
 - * Hardware ECC for large page chips, which requires the OOB to be read first.
 - * For this ECC mode, the write_page method is re-used from ECC_HW. These
 - * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support
 - * with multiple ECC steps, which follows the "infix ECC" scheme and
 - * reads/writes ECC from the data area, overwriting the NAND manufacturer's
 - * bad block markings.
- */
-static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
-	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
-{
-	int i, eccsize = chip->ecc.size, ret;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *p = buf;
-	uint8_t *ecc_code = chip->buffers->ecccode;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	unsigned int max_bitflips = 0;
-
-	/* Read the OOB area first */
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		chip->ecc.hwctl(mtd, NAND_ECC_READ);
-		chip->read_buf(mtd, p, eccsize);
-		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-
-		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
-		if (stat == -EBADMSG &&
-		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
-			/* check for empty pages with bitflips */
-			stat = nand_check_erased_ecc_chunk(p, eccsize,
-						&ecc_code[i], eccbytes,
-						NULL, 0,
-						chip->ecc.strength);
-		}
-
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-	return max_bitflips;
-}
-
-/**
- * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
- *
- * The hw generator calculates the error syndrome automatically. Therefore we
- * need a special oob layout and handling.
- */
-static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-				   uint8_t *buf, int oob_required, int page)
-{
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
-	uint8_t *p = buf;
-	uint8_t *oob = chip->oob_poi;
-	unsigned int max_bitflips = 0;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		int stat;
-
-		chip->ecc.hwctl(mtd, NAND_ECC_READ);
-		chip->read_buf(mtd, p, eccsize);
-
-		if (chip->ecc.prepad) {
-			chip->read_buf(mtd, oob, chip->ecc.prepad);
-			oob += chip->ecc.prepad;
-		}
-
-		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
-		chip->read_buf(mtd, oob, eccbytes);
-		stat = chip->ecc.correct(mtd, p, oob, NULL);
-
-		oob += eccbytes;
-
-		if (chip->ecc.postpad) {
-			chip->read_buf(mtd, oob, chip->ecc.postpad);
-			oob += chip->ecc.postpad;
-		}
-
-		if (stat == -EBADMSG &&
-		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
-			/* check for empty pages with bitflips */
-			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
-							   oob - eccpadbytes,
-							   eccpadbytes,
-							   NULL, 0,
-							   chip->ecc.strength);
-		}
-
-		if (stat < 0) {
-			mtd->ecc_stats.failed++;
-		} else {
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max_t(unsigned int, max_bitflips, stat);
-		}
-	}
-
-	/* Calculate remaining oob bytes */
-	i = mtd->oobsize - (oob - chip->oob_poi);
-	if (i)
-		chip->read_buf(mtd, oob, i);
-
-	return max_bitflips;
-}
-
-/**
- * nand_transfer_oob - [INTERN] Transfer oob to client buffer
- * @mtd: mtd info structure
- * @oob: oob destination address
- * @ops: oob ops structure
- * @len: size of oob to transfer
- */
-static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
-				  struct mtd_oob_ops *ops, size_t len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret;
-
-	switch (ops->mode) {
-
-	case MTD_OPS_PLACE_OOB:
-	case MTD_OPS_RAW:
-		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
-		return oob + len;
-
-	case MTD_OPS_AUTO_OOB:
-		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
-						  ops->ooboffs, len);
-		BUG_ON(ret);
-		return oob + len;
-
-	default:
-		BUG();
-	}
-	return NULL;
-}
-
-/**
- * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
- * @mtd: MTD device structure
- * @retry_mode: the retry mode to use
- *
- * Some vendors supply a special command to shift the Vt threshold, to be used
- * when there are too many bitflips in a page (i.e., ECC error). After setting
- * a new threshold, the host should retry reading the page.
- */
-static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	pr_debug("setting READ RETRY mode %d\n", retry_mode);
-
-	if (retry_mode >= chip->read_retries)
-		return -EINVAL;
-
-	if (!chip->setup_read_retry)
-		return -EOPNOTSUPP;
-
-	return chip->setup_read_retry(mtd, retry_mode);
-}
-
-/**
- * nand_do_read_ops - [INTERN] Read data with ECC
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob ops structure
- *
- * Internal function. Called with chip held.
- */
-static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
-			    struct mtd_oob_ops *ops)
-{
-	int chipnr, page, realpage, col, bytes, aligned, oob_required;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret = 0;
-	uint32_t readlen = ops->len;
-	uint32_t oobreadlen = ops->ooblen;
-	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
-
-	uint8_t *bufpoi, *oob, *buf;
-	int use_bufpoi;
-	unsigned int max_bitflips = 0;
-	int retry_mode = 0;
-	bool ecc_fail = false;
-
-	chipnr = (int)(from >> chip->chip_shift);
-	chip->select_chip(mtd, chipnr);
-
-	realpage = (int)(from >> chip->page_shift);
-	page = realpage & chip->pagemask;
-
-	col = (int)(from & (mtd->writesize - 1));
-
-	buf = ops->datbuf;
-	oob = ops->oobbuf;
-	oob_required = oob ? 1 : 0;
-
-	while (1) {
-		unsigned int ecc_failures = mtd->ecc_stats.failed;
-
-		bytes = min(mtd->writesize - col, readlen);
-		aligned = (bytes == mtd->writesize);
-
-		if (!aligned)
-			use_bufpoi = 1;
-		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
-			use_bufpoi = !virt_addr_valid(buf);
-		else
-			use_bufpoi = 0;
-
-		/* Is the current page in the buffer? */
-		if (realpage != chip->pagebuf || oob) {
-			bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
-
-			if (use_bufpoi && aligned)
-				pr_debug("%s: using read bounce buffer for buf@%p\n",
-						 __func__, buf);
-
-read_retry:
-			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
-
-			/*
-			 * Now read the page into the buffer.  Absent an error,
-			 * the read methods return max bitflips per ecc step.
-			 */
-			if (unlikely(ops->mode == MTD_OPS_RAW))
-				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
-							      oob_required,
-							      page);
-			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
-				 !oob)
-				ret = chip->ecc.read_subpage(mtd, chip,
-							col, bytes, bufpoi,
-							page);
-			else
-				ret = chip->ecc.read_page(mtd, chip, bufpoi,
-							  oob_required, page);
-			if (ret < 0) {
-				if (use_bufpoi)
-					/* Invalidate page cache */
-					chip->pagebuf = -1;
-				break;
-			}
-
-			max_bitflips = max_t(unsigned int, max_bitflips, ret);
-
-			/* Transfer not aligned data */
-			if (use_bufpoi) {
-				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
-				    !(mtd->ecc_stats.failed - ecc_failures) &&
-				    (ops->mode != MTD_OPS_RAW)) {
-					chip->pagebuf = realpage;
-					chip->pagebuf_bitflips = ret;
-				} else {
-					/* Invalidate page cache */
-					chip->pagebuf = -1;
-				}
-				memcpy(buf, chip->buffers->databuf + col, bytes);
-			}
-
-			if (unlikely(oob)) {
-				int toread = min(oobreadlen, max_oobsize);
-
-				if (toread) {
-					oob = nand_transfer_oob(mtd,
-						oob, ops, toread);
-					oobreadlen -= toread;
-				}
-			}
-
-			if (chip->options & NAND_NEED_READRDY) {
-				/* Apply delay or wait for ready/busy pin */
-				if (!chip->dev_ready)
-					udelay(chip->chip_delay);
-				else
-					nand_wait_ready(mtd);
-			}
-
-			if (mtd->ecc_stats.failed - ecc_failures) {
-				if (retry_mode + 1 < chip->read_retries) {
-					retry_mode++;
-					ret = nand_setup_read_retry(mtd,
-							retry_mode);
-					if (ret < 0)
-						break;
-
-					/* Reset failures; retry */
-					mtd->ecc_stats.failed = ecc_failures;
-					goto read_retry;
-				} else {
-					/* No more retry modes; real failure */
-					ecc_fail = true;
-				}
-			}
-
-			buf += bytes;
-		} else {
-			memcpy(buf, chip->buffers->databuf + col, bytes);
-			buf += bytes;
-			max_bitflips = max_t(unsigned int, max_bitflips,
-					     chip->pagebuf_bitflips);
-		}
-
-		readlen -= bytes;
-
-		/* Reset to retry mode 0 */
-		if (retry_mode) {
-			ret = nand_setup_read_retry(mtd, 0);
-			if (ret < 0)
-				break;
-			retry_mode = 0;
-		}
-
-		if (!readlen)
-			break;
-
-		/* For subsequent reads align to page boundary */
-		col = 0;
-		/* Increment page address */
-		realpage++;
-
-		page = realpage & chip->pagemask;
-		/* Check, if we cross a chip boundary */
-		if (!page) {
-			chipnr++;
-			chip->select_chip(mtd, -1);
-			chip->select_chip(mtd, chipnr);
-		}
-	}
-	chip->select_chip(mtd, -1);
-
-	ops->retlen = ops->len - (size_t) readlen;
-	if (oob)
-		ops->oobretlen = ops->ooblen - oobreadlen;
-
-	if (ret < 0)
-		return ret;
-
-	if (ecc_fail)
-		return -EBADMSG;
-
-	return max_bitflips;
-}
-
-/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
- *
 - * Get hold of the chip and call nand_do_read_ops.
- */
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
-		     size_t *retlen, uint8_t *buf)
-{
-	struct mtd_oob_ops ops;
-	int ret;
-
-	nand_get_device(mtd, FL_READING);
-	memset(&ops, 0, sizeof(ops));
-	ops.len = len;
-	ops.datbuf = buf;
-	ops.mode = MTD_OPS_PLACE_OOB;
-	ret = nand_do_read_ops(mtd, from, &ops);
-	*retlen = ops.retlen;
-	nand_release_device(mtd);
-	return ret;
-}
-
-/**
- * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- */
-int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-EXPORT_SYMBOL(nand_read_oob_std);
-
-/**
- * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
- *			    with syndromes
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- */
-int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-			   int page)
-{
-	int length = mtd->oobsize;
-	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
-	int eccsize = chip->ecc.size;
-	uint8_t *bufpoi = chip->oob_poi;
-	int i, toread, sndrnd = 0, pos;
-
-	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
-	for (i = 0; i < chip->ecc.steps; i++) {
-		if (sndrnd) {
-			pos = eccsize + i * (eccsize + chunk);
-			if (mtd->writesize > 512)
-				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
-			else
-				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
-		} else
-			sndrnd = 1;
-		toread = min_t(int, length, chunk);
-		chip->read_buf(mtd, bufpoi, toread);
-		bufpoi += toread;
-		length -= toread;
-	}
-	if (length > 0)
-		chip->read_buf(mtd, bufpoi, length);
-
-	return 0;
-}
-EXPORT_SYMBOL(nand_read_oob_syndrome);
-
-/**
- * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
- */
-int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
-{
-	int status = 0;
-	const uint8_t *buf = chip->oob_poi;
-	int length = mtd->oobsize;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
-	chip->write_buf(mtd, buf, length);
-	/* Send command to program the OOB data */
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-EXPORT_SYMBOL(nand_write_oob_std);
-
-/**
- * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
- *			     with syndrome - only for large page flash
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
- */
-int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
-			    int page)
-{
-	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
-	int eccsize = chip->ecc.size, length = mtd->oobsize;
-	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
-	const uint8_t *bufpoi = chip->oob_poi;
-
-	/*
-	 * data-ecc-data-ecc ... ecc-oob
-	 * or
-	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
-	 */
-	if (!chip->ecc.prepad && !chip->ecc.postpad) {
-		pos = steps * (eccsize + chunk);
-		steps = 0;
-	} else
-		pos = eccsize;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
-	for (i = 0; i < steps; i++) {
-		if (sndcmd) {
-			if (mtd->writesize <= 512) {
-				uint32_t fill = 0xFFFFFFFF;
-
-				len = eccsize;
-				while (len > 0) {
-					int num = min_t(int, len, 4);
-					chip->write_buf(mtd, (uint8_t *)&fill,
-							num);
-					len -= num;
-				}
-			} else {
-				pos = eccsize + i * (eccsize + chunk);
-				chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
-			}
-		} else
-			sndcmd = 1;
-		len = min_t(int, length, chunk);
-		chip->write_buf(mtd, bufpoi, len);
-		bufpoi += len;
-		length -= len;
-	}
-	if (length > 0)
-		chip->write_buf(mtd, bufpoi, length);
-
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-EXPORT_SYMBOL(nand_write_oob_syndrome);
-
-/**
- * nand_do_read_oob - [INTERN] NAND read out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operations description structure
- *
- * NAND read out-of-band data from the spare area.
- */
-static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
-			    struct mtd_oob_ops *ops)
-{
-	int page, realpage, chipnr;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtd_ecc_stats stats;
-	int readlen = ops->ooblen;
-	int len;
-	uint8_t *buf = ops->oobbuf;
-	int ret = 0;
-
-	pr_debug("%s: from = 0x%08Lx, len = %i\n",
-			__func__, (unsigned long long)from, readlen);
-
-	stats = mtd->ecc_stats;
-
-	len = mtd_oobavail(mtd, ops);
-
-	if (unlikely(ops->ooboffs >= len)) {
-		pr_debug("%s: attempt to start read outside oob\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	/* Do not allow reads past end of device */
-	if (unlikely(from >= mtd->size ||
-		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
-					(from >> chip->page_shift)) * len)) {
-		pr_debug("%s: attempt to read beyond end of device\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	chipnr = (int)(from >> chip->chip_shift);
-	chip->select_chip(mtd, chipnr);
-
-	/* Shift to get page */
-	realpage = (int)(from >> chip->page_shift);
-	page = realpage & chip->pagemask;
-
-	while (1) {
-		if (ops->mode == MTD_OPS_RAW)
-			ret = chip->ecc.read_oob_raw(mtd, chip, page);
-		else
-			ret = chip->ecc.read_oob(mtd, chip, page);
-
-		if (ret < 0)
-			break;
-
-		len = min(len, readlen);
-		buf = nand_transfer_oob(mtd, buf, ops, len);
-
-		if (chip->options & NAND_NEED_READRDY) {
-			/* Apply delay or wait for ready/busy pin */
-			if (!chip->dev_ready)
-				udelay(chip->chip_delay);
-			else
-				nand_wait_ready(mtd);
-		}
-
-		readlen -= len;
-		if (!readlen)
-			break;
-
-		/* Increment page address */
-		realpage++;
-
-		page = realpage & chip->pagemask;
-		/* Check, if we cross a chip boundary */
-		if (!page) {
-			chipnr++;
-			chip->select_chip(mtd, -1);
-			chip->select_chip(mtd, chipnr);
-		}
-	}
-	chip->select_chip(mtd, -1);
-
-	ops->oobretlen = ops->ooblen - readlen;
-
-	if (ret < 0)
-		return ret;
-
-	if (mtd->ecc_stats.failed - stats.failed)
-		return -EBADMSG;
-
-	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
-}
-
-/**
- * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operation description structure
- *
- * NAND read data and/or out-of-band data.
- */
-static int nand_read_oob(struct mtd_info *mtd, loff_t from,
-			 struct mtd_oob_ops *ops)
-{
-	int ret;
-
-	ops->retlen = 0;
-
-	/* Do not allow reads past end of device */
-	if (ops->datbuf && (from + ops->len) > mtd->size) {
-		pr_debug("%s: attempt to read beyond end of device\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	if (ops->mode != MTD_OPS_PLACE_OOB &&
-	    ops->mode != MTD_OPS_AUTO_OOB &&
-	    ops->mode != MTD_OPS_RAW)
-		return -ENOTSUPP;
-
-	nand_get_device(mtd, FL_READING);
-
-	if (!ops->datbuf)
-		ret = nand_do_read_oob(mtd, from, ops);
-	else
-		ret = nand_do_read_ops(mtd, from, ops);
-
-	nand_release_device(mtd);
-	return ret;
-}
-
-
-/**
- * nand_write_page_raw - [INTERN] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- *
- * Not for syndrome calculating ECC controllers, which use a special oob layout.
- */
-static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
-			       const uint8_t *buf, int oob_required, int page)
-{
-	chip->write_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-/**
- * nand_write_page_raw_syndrome - [INTERN] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- *
- * We need a special oob layout and handling even when ECC isn't checked.
- */
-static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
-					struct nand_chip *chip,
-					const uint8_t *buf, int oob_required,
-					int page)
-{
-	int eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	uint8_t *oob = chip->oob_poi;
-	int steps, size;
-
-	for (steps = chip->ecc.steps; steps > 0; steps--) {
-		chip->write_buf(mtd, buf, eccsize);
-		buf += eccsize;
-
-		if (chip->ecc.prepad) {
-			chip->write_buf(mtd, oob, chip->ecc.prepad);
-			oob += chip->ecc.prepad;
-		}
-
-		chip->write_buf(mtd, oob, eccbytes);
-		oob += eccbytes;
-
-		if (chip->ecc.postpad) {
-			chip->write_buf(mtd, oob, chip->ecc.postpad);
-			oob += chip->ecc.postpad;
-		}
-	}
-
-	size = mtd->oobsize - (oob - chip->oob_poi);
-	if (size)
-		chip->write_buf(mtd, oob, size);
-
-	return 0;
-}
-/**
- * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- */
-static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
-				 const uint8_t *buf, int oob_required,
-				 int page)
-{
-	int i, eccsize = chip->ecc.size, ret;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	const uint8_t *p = buf;
-
-	/* Software ECC calculation */
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
-		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-
-	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
-}
-
-/**
- * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- */
-static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				  const uint8_t *buf, int oob_required,
-				  int page)
-{
-	int i, eccsize = chip->ecc.size, ret;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	const uint8_t *p = buf;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
-		chip->write_buf(mtd, p, eccsize);
-		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
-	}
-
-	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-
-/**
- * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
- * @mtd:	mtd info structure
- * @chip:	nand chip info structure
- * @offset:	column address of subpage within the page
- * @data_len:	data length
- * @buf:	data buffer
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- */
-static int nand_write_subpage_hwecc(struct mtd_info *mtd,
-				struct nand_chip *chip, uint32_t offset,
-				uint32_t data_len, const uint8_t *buf,
-				int oob_required, int page)
-{
-	uint8_t *oob_buf  = chip->oob_poi;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	int ecc_size      = chip->ecc.size;
-	int ecc_bytes     = chip->ecc.bytes;
-	int ecc_steps     = chip->ecc.steps;
-	uint32_t start_step = offset / ecc_size;
-	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
-	int oob_bytes       = mtd->oobsize / ecc_steps;
-	int step, ret;
-
-	for (step = 0; step < ecc_steps; step++) {
-		/* configure controller for WRITE access */
-		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
-
-		/* write data (untouched subpages already masked by 0xFF) */
-		chip->write_buf(mtd, buf, ecc_size);
-
-		/* mask ECC of un-touched subpages by padding 0xFF */
-		if ((step < start_step) || (step > end_step))
-			memset(ecc_calc, 0xff, ecc_bytes);
-		else
-			chip->ecc.calculate(mtd, buf, ecc_calc);
-
-		/* mask OOB of un-touched subpages by padding 0xFF */
-		/* if oob_required, preserve OOB metadata of written subpage */
-		if (!oob_required || (step < start_step) || (step > end_step))
-			memset(oob_buf, 0xff, oob_bytes);
-
-		buf += ecc_size;
-		ecc_calc += ecc_bytes;
-		oob_buf  += oob_bytes;
-	}
-
-	/* copy calculated ECC for whole page to chip->buffer->oob */
-	/* this includes masked values (0xFF) for unwritten subpages */
-	ecc_calc = chip->buffers->ecccalc;
-	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	/* write OOB buffer to NAND device */
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-
-/**
- * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- *
- * The hw generator calculates the error syndrome automatically. Therefore we
- * need a special oob layout and handling.
- */
-static int nand_write_page_syndrome(struct mtd_info *mtd,
-				    struct nand_chip *chip,
-				    const uint8_t *buf, int oob_required,
-				    int page)
-{
-	int i, eccsize = chip->ecc.size;
-	int eccbytes = chip->ecc.bytes;
-	int eccsteps = chip->ecc.steps;
-	const uint8_t *p = buf;
-	uint8_t *oob = chip->oob_poi;
-
-	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
-
-		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
-		chip->write_buf(mtd, p, eccsize);
-
-		if (chip->ecc.prepad) {
-			chip->write_buf(mtd, oob, chip->ecc.prepad);
-			oob += chip->ecc.prepad;
-		}
-
-		chip->ecc.calculate(mtd, p, oob);
-		chip->write_buf(mtd, oob, eccbytes);
-		oob += eccbytes;
-
-		if (chip->ecc.postpad) {
-			chip->write_buf(mtd, oob, chip->ecc.postpad);
-			oob += chip->ecc.postpad;
-		}
-	}
-
-	/* Calculate remaining oob bytes */
-	i = mtd->oobsize - (oob - chip->oob_poi);
-	if (i)
-		chip->write_buf(mtd, oob, i);
-
-	return 0;
-}
-
-/**
- * nand_write_page - [REPLACEABLE] write one page
- * @mtd: MTD device structure
- * @chip: NAND chip descriptor
- * @offset: address offset within the page
- * @data_len: length of actual data to be written
- * @buf: the data to write
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- * @cached: cached programming
- * @raw: use _raw version of write_page
- */
-static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-		uint32_t offset, int data_len, const uint8_t *buf,
-		int oob_required, int page, int cached, int raw)
-{
-	int status, subpage;
-
-	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
-		chip->ecc.write_subpage)
-		subpage = offset || (data_len < mtd->writesize);
-	else
-		subpage = 0;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
-
-	if (unlikely(raw))
-		status = chip->ecc.write_page_raw(mtd, chip, buf,
-						  oob_required, page);
-	else if (subpage)
-		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
-						 buf, oob_required, page);
-	else
-		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
-					      page);
-
-	if (status < 0)
-		return status;
-
-	/*
-	 * Cached programming disabled for now. Not sure if it's worth the
-	 * trouble. The speed gain is not very impressive (2.3->2.6 MiB/s).
-	 */
-	cached = 0;
-
-	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
-
-		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-		status = chip->waitfunc(mtd, chip);
-		/*
-		 * See if operation failed and additional status checks are
-		 * available.
-		 */
-		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
-			status = chip->errstat(mtd, chip, FL_WRITING, status,
-					       page);
-
-		if (status & NAND_STATUS_FAIL)
-			return -EIO;
-	} else {
-		chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
-		status = chip->waitfunc(mtd, chip);
-	}
-
-	return 0;
-}
-
-/**
- * nand_fill_oob - [INTERN] Transfer client buffer to oob
- * @mtd: MTD device structure
- * @oob: oob data buffer
- * @len: oob data write length
- * @ops: oob ops structure
- */
-static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
-			      struct mtd_oob_ops *ops)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret;
-
-	/*
-	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
-	 * data from a previous OOB read.
-	 */
-	memset(chip->oob_poi, 0xff, mtd->oobsize);
-
-	switch (ops->mode) {
-
-	case MTD_OPS_PLACE_OOB:
-	case MTD_OPS_RAW:
-		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
-		return oob + len;
-
-	case MTD_OPS_AUTO_OOB:
-		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
-						  ops->ooboffs, len);
-		BUG_ON(ret);
-		return oob + len;
-
-	default:
-		BUG();
-	}
-	return NULL;
-}
-
-#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
-
-/**
- * nand_do_write_ops - [INTERN] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operations description structure
- *
- * NAND write with ECC.
- */
-static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
-			     struct mtd_oob_ops *ops)
-{
-	int chipnr, realpage, page, blockmask, column;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	uint32_t writelen = ops->len;
-
-	uint32_t oobwritelen = ops->ooblen;
-	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
-
-	uint8_t *oob = ops->oobbuf;
-	uint8_t *buf = ops->datbuf;
-	int ret;
-	int oob_required = oob ? 1 : 0;
-
-	ops->retlen = 0;
-	if (!writelen)
-		return 0;
-
-	/* Reject writes which are not page aligned */
-	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
-		pr_notice("%s: attempt to write non page aligned data\n",
-			   __func__);
-		return -EINVAL;
-	}
-
-	column = to & (mtd->writesize - 1);
-
-	chipnr = (int)(to >> chip->chip_shift);
-	chip->select_chip(mtd, chipnr);
-
-	/* Check, if it is write protected */
-	if (nand_check_wp(mtd)) {
-		ret = -EIO;
-		goto err_out;
-	}
-
-	realpage = (int)(to >> chip->page_shift);
-	page = realpage & chip->pagemask;
-	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
-
-	/* Invalidate the page cache when we write to the cached page */
-	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
-	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
-		chip->pagebuf = -1;
-
-	/* Don't allow multipage oob writes with offset */
-	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
-		ret = -EINVAL;
-		goto err_out;
-	}
-
-	while (1) {
-		int bytes = mtd->writesize;
-		int cached = writelen > bytes && page != blockmask;
-		uint8_t *wbuf = buf;
-		int use_bufpoi;
-		int part_pagewr = (column || writelen < mtd->writesize);
-
-		if (part_pagewr)
-			use_bufpoi = 1;
-		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
-			use_bufpoi = !virt_addr_valid(buf);
-		else
-			use_bufpoi = 0;
-
-		/* Partial page write, or need to use bounce buffer? */
-		if (use_bufpoi) {
-			pr_debug("%s: using write bounce buffer for buf@%p\n",
-					 __func__, buf);
-			cached = 0;
-			if (part_pagewr)
-				bytes = min_t(int, bytes - column, writelen);
-			chip->pagebuf = -1;
-			memset(chip->buffers->databuf, 0xff, mtd->writesize);
-			memcpy(&chip->buffers->databuf[column], buf, bytes);
-			wbuf = chip->buffers->databuf;
-		}
-
-		if (unlikely(oob)) {
-			size_t len = min(oobwritelen, oobmaxlen);
-			oob = nand_fill_oob(mtd, oob, len, ops);
-			oobwritelen -= len;
-		} else {
-			/* We still need to erase leftover OOB data */
-			memset(chip->oob_poi, 0xff, mtd->oobsize);
-		}
-		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
-					oob_required, page, cached,
-					(ops->mode == MTD_OPS_RAW));
-		if (ret)
-			break;
-
-		writelen -= bytes;
-		if (!writelen)
-			break;
-
-		column = 0;
-		buf += bytes;
-		realpage++;
-
-		page = realpage & chip->pagemask;
-		/* Check, if we cross a chip boundary */
-		if (!page) {
-			chipnr++;
-			chip->select_chip(mtd, -1);
-			chip->select_chip(mtd, chipnr);
-		}
-	}
-
-	ops->retlen = ops->len - writelen;
-	if (unlikely(oob))
-		ops->oobretlen = ops->ooblen;
-
-err_out:
-	chip->select_chip(mtd, -1);
-	return ret;
-}
-
-/**
- * panic_nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC. Used when performing writes in interrupt context, this
- * may for example be called by mtdoops when writing an oops while in panic.
- */
-static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
-			    size_t *retlen, const uint8_t *buf)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct mtd_oob_ops ops;
-	int ret;
-
-	/* Wait for the device to get ready */
-	panic_nand_wait(mtd, chip, 400);
-
-	/* Grab the device */
-	panic_nand_get_device(chip, mtd, FL_WRITING);
-
-	memset(&ops, 0, sizeof(ops));
-	ops.len = len;
-	ops.datbuf = (uint8_t *)buf;
-	ops.mode = MTD_OPS_PLACE_OOB;
-
-	ret = nand_do_write_ops(mtd, to, &ops);
-
-	*retlen = ops.retlen;
-	return ret;
-}
-
-/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
- */
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
-			  size_t *retlen, const uint8_t *buf)
-{
-	struct mtd_oob_ops ops;
-	int ret;
-
-	nand_get_device(mtd, FL_WRITING);
-	memset(&ops, 0, sizeof(ops));
-	ops.len = len;
-	ops.datbuf = (uint8_t *)buf;
-	ops.mode = MTD_OPS_PLACE_OOB;
-	ret = nand_do_write_ops(mtd, to, &ops);
-	*retlen = ops.retlen;
-	nand_release_device(mtd);
-	return ret;
-}
-
-/**
- * nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- *
- * NAND write out-of-band.
- */
-static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
-			     struct mtd_oob_ops *ops)
-{
-	int chipnr, page, status, len;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	pr_debug("%s: to = 0x%08x, len = %i\n",
-			 __func__, (unsigned int)to, (int)ops->ooblen);
-
-	len = mtd_oobavail(mtd, ops);
-
-	/* Do not allow write past end of page */
-	if ((ops->ooboffs + ops->ooblen) > len) {
-		pr_debug("%s: attempt to write past end of page\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	if (unlikely(ops->ooboffs >= len)) {
-		pr_debug("%s: attempt to start write outside oob\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	/* Do not allow write past end of device */
-	if (unlikely(to >= mtd->size ||
-		     ops->ooboffs + ops->ooblen >
-			((mtd->size >> chip->page_shift) -
-			 (to >> chip->page_shift)) * len)) {
-		pr_debug("%s: attempt to write beyond end of device\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	chipnr = (int)(to >> chip->chip_shift);
-	chip->select_chip(mtd, chipnr);
-
-	/* Shift to get page */
-	page = (int)(to >> chip->page_shift);
-
-	/*
-	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
-	 * of my DiskOnChip 2000 test units) will clear the whole data page too
-	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
-	 * it in the doc2000 driver in August 1999.  dwmw2.
-	 */
-	nand_reset(chip);
-
-	/* Check, if it is write protected */
-	if (nand_check_wp(mtd)) {
-		chip->select_chip(mtd, -1);
-		return -EROFS;
-	}
-
-	/* Invalidate the page cache, if we write to the cached page */
-	if (page == chip->pagebuf)
-		chip->pagebuf = -1;
-
-	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
-
-	if (ops->mode == MTD_OPS_RAW)
-		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
-	else
-		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
-
-	chip->select_chip(mtd, -1);
-
-	if (status)
-		return status;
-
-	ops->oobretlen = ops->ooblen;
-
-	return 0;
-}
-
-/**
- * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- */
-static int nand_write_oob(struct mtd_info *mtd, loff_t to,
-			  struct mtd_oob_ops *ops)
-{
-	int ret = -ENOTSUPP;
-
-	ops->retlen = 0;
-
-	/* Do not allow writes past end of device */
-	if (ops->datbuf && (to + ops->len) > mtd->size) {
-		pr_debug("%s: attempt to write beyond end of device\n",
-				__func__);
-		return -EINVAL;
-	}
-
-	nand_get_device(mtd, FL_WRITING);
-
-	switch (ops->mode) {
-	case MTD_OPS_PLACE_OOB:
-	case MTD_OPS_AUTO_OOB:
-	case MTD_OPS_RAW:
-		break;
-
-	default:
-		goto out;
-	}
-
-	if (!ops->datbuf)
-		ret = nand_do_write_oob(mtd, to, ops);
-	else
-		ret = nand_do_write_ops(mtd, to, ops);
-
-out:
-	nand_release_device(mtd);
-	return ret;
-}
-
-/**
- * single_erase - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * Standard erase command for NAND chips. Returns NAND status.
- */
-static int single_erase(struct mtd_info *mtd, int page)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	/* Send commands to erase a block */
-	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
-	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
-
-	return chip->waitfunc(mtd, chip);
-}
-
-/**
- * nand_erase - [MTD Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- *
- * Erase one or more blocks.
- */
-static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
-{
-	return nand_erase_nand(mtd, instr, 0);
-}
-
-/**
- * nand_erase_nand - [INTERN] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- * @allowbbt: allow erasing the bbt area
- *
- * Erase one or more blocks.
- */
-int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
-		    int allowbbt)
-{
-	int page, status, pages_per_block, ret, chipnr;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	loff_t len;
-
-	pr_debug("%s: start = 0x%012llx, len = %llu\n",
-			__func__, (unsigned long long)instr->addr,
-			(unsigned long long)instr->len);
-
-	if (check_offs_len(mtd, instr->addr, instr->len))
-		return -EINVAL;
-
-	/* Grab the lock and see if the device is available */
-	nand_get_device(mtd, FL_ERASING);
-
-	/* Shift to get first page */
-	page = (int)(instr->addr >> chip->page_shift);
-	chipnr = (int)(instr->addr >> chip->chip_shift);
-
-	/* Calculate pages in each block */
-	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
-
-	/* Select the NAND device */
-	chip->select_chip(mtd, chipnr);
-
-	/* Check, if it is write protected */
-	if (nand_check_wp(mtd)) {
-		pr_debug("%s: device is write protected!\n",
-				__func__);
-		instr->state = MTD_ERASE_FAILED;
-		goto erase_exit;
-	}
-
-	/* Loop through the pages */
-	len = instr->len;
-
-	instr->state = MTD_ERASING;
-
-	while (len) {
-		/* Check if we have a bad block; we do not erase bad blocks! */
-		if (nand_block_checkbad(mtd, ((loff_t) page) <<
-					chip->page_shift, allowbbt)) {
-			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
-				    __func__, page);
-			instr->state = MTD_ERASE_FAILED;
-			goto erase_exit;
-		}
-
-		/*
-		 * Invalidate the page cache, if we erase the block which
-		 * contains the current cached page.
-		 */
-		if (page <= chip->pagebuf && chip->pagebuf <
-		    (page + pages_per_block))
-			chip->pagebuf = -1;
-
-		status = chip->erase(mtd, page & chip->pagemask);
-
-		/*
-		 * See if operation failed and additional status checks are
-		 * available
-		 */
-		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
-			status = chip->errstat(mtd, chip, FL_ERASING,
-					       status, page);
-
-		/* See if block erase succeeded */
-		if (status & NAND_STATUS_FAIL) {
-			pr_debug("%s: failed erase, page 0x%08x\n",
-					__func__, page);
-			instr->state = MTD_ERASE_FAILED;
-			instr->fail_addr =
-				((loff_t)page << chip->page_shift);
-			goto erase_exit;
-		}
-
-		/* Increment page address and decrement length */
-		len -= (1ULL << chip->phys_erase_shift);
-		page += pages_per_block;
-
-		/* Check, if we cross a chip boundary */
-		if (len && !(page & chip->pagemask)) {
-			chipnr++;
-			chip->select_chip(mtd, -1);
-			chip->select_chip(mtd, chipnr);
-		}
-	}
-	instr->state = MTD_ERASE_DONE;
-
-erase_exit:
-
-	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
-
-	/* Deselect and wake up anyone waiting on the device */
-	chip->select_chip(mtd, -1);
-	nand_release_device(mtd);
-
-	/* Do call back function */
-	if (!ret)
-		mtd_erase_callback(instr);
-
-	/* Return more or less happy */
-	return ret;
-}
-
-/**
- * nand_sync - [MTD Interface] sync
- * @mtd: MTD device structure
- *
- * Sync is actually a wait for chip ready function.
- */
-static void nand_sync(struct mtd_info *mtd)
-{
-	pr_debug("%s: called\n", __func__);
-
-	/* Grab the lock and see if the device is available */
-	nand_get_device(mtd, FL_SYNCING);
-	/* Release it and go back */
-	nand_release_device(mtd);
-}
-
-/**
- * nand_block_isbad - [MTD Interface] Check if block at offset is bad
- * @mtd: MTD device structure
- * @offs: offset relative to mtd start
- */
-static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int chipnr = (int)(offs >> chip->chip_shift);
-	int ret;
-
-	/* Select the NAND device */
-	nand_get_device(mtd, FL_READING);
-	chip->select_chip(mtd, chipnr);
-
-	ret = nand_block_checkbad(mtd, offs, 0);
-
-	chip->select_chip(mtd, -1);
-	nand_release_device(mtd);
-
-	return ret;
-}
-
-/**
- * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- */
-static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	int ret;
-
-	ret = nand_block_isbad(mtd, ofs);
-	if (ret) {
-		/* If it was bad already, return success and do nothing */
-		if (ret > 0)
-			return 0;
-		return ret;
-	}
-
-	return nand_block_markbad_lowlevel(mtd, ofs);
-}
-
-/**
- * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four-byte array.
- */
-static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
-			int addr, uint8_t *subfeature_param)
-{
-	int status;
-	int i;
-
-	if (!chip->onfi_version ||
-	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
-	      & ONFI_OPT_CMD_SET_GET_FEATURES))
-		return -EINVAL;
-
-	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
-	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
-		chip->write_byte(mtd, subfeature_param[i]);
-
-	status = chip->waitfunc(mtd, chip);
-	if (status & NAND_STATUS_FAIL)
-		return -EIO;
-	return 0;
-}
-
-/**
- * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four-byte array.
- */
-static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
-			int addr, uint8_t *subfeature_param)
-{
-	int i;
-
-	if (!chip->onfi_version ||
-	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
-	      & ONFI_OPT_CMD_SET_GET_FEATURES))
-		return -EINVAL;
-
-	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
-	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
-		*subfeature_param++ = chip->read_byte(mtd);
-	return 0;
-}
-
-/**
- * nand_suspend - [MTD Interface] Suspend the NAND flash
- * @mtd: MTD device structure
- */
-static int nand_suspend(struct mtd_info *mtd)
-{
-	return nand_get_device(mtd, FL_PM_SUSPENDED);
-}
-
-/**
- * nand_resume - [MTD Interface] Resume the NAND flash
- * @mtd: MTD device structure
- */
-static void nand_resume(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (chip->state == FL_PM_SUSPENDED)
-		nand_release_device(mtd);
-	else
-		pr_err("%s called for a chip which is not in suspended state\n",
-			__func__);
-}
-
-/**
- * nand_shutdown - [MTD Interface] Finish the current NAND operation and
- *                 prevent further operations
- * @mtd: MTD device structure
- */
-static void nand_shutdown(struct mtd_info *mtd)
-{
-	nand_get_device(mtd, FL_PM_SUSPENDED);
-}
-
-/* Set default functions */
-static void nand_set_defaults(struct nand_chip *chip, int busw)
-{
-	/* check for proper chip_delay setup, set 20us if not */
-	if (!chip->chip_delay)
-		chip->chip_delay = 20;
-
-	/* check if a user-supplied command function was given */
-	if (chip->cmdfunc == NULL)
-		chip->cmdfunc = nand_command;
-
-	/* check if a user-supplied wait function was given */
-	if (chip->waitfunc == NULL)
-		chip->waitfunc = nand_wait;
-
-	if (!chip->select_chip)
-		chip->select_chip = nand_select_chip;
-
-	/* set for ONFI nand */
-	if (!chip->onfi_set_features)
-		chip->onfi_set_features = nand_onfi_set_features;
-	if (!chip->onfi_get_features)
-		chip->onfi_get_features = nand_onfi_get_features;
-
-	/* If called twice, pointers that depend on busw may need to be reset */
-	if (!chip->read_byte || chip->read_byte == nand_read_byte)
-		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
-	if (!chip->read_word)
-		chip->read_word = nand_read_word;
-	if (!chip->block_bad)
-		chip->block_bad = nand_block_bad;
-	if (!chip->block_markbad)
-		chip->block_markbad = nand_default_block_markbad;
-	if (!chip->write_buf || chip->write_buf == nand_write_buf)
-		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
-	if (!chip->write_byte || chip->write_byte == nand_write_byte)
-		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
-	if (!chip->read_buf || chip->read_buf == nand_read_buf)
-		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
-	if (!chip->scan_bbt)
-		chip->scan_bbt = nand_default_bbt;
-
-	if (!chip->controller) {
-		chip->controller = &chip->hwcontrol;
-		nand_hw_control_init(chip->controller);
-	}
-
-}
-
-/* Sanitize ONFI strings so we can safely print them */
-static void sanitize_string(uint8_t *s, size_t len)
-{
-	ssize_t i;
-
-	/* Null terminate */
-	s[len - 1] = 0;
-
-	/* Remove non-printable chars */
-	for (i = 0; i < len - 1; i++) {
-		if (s[i] < ' ' || s[i] > 127)
-			s[i] = '?';
-	}
-
-	/* Remove trailing spaces */
-	strim(s);
-}
-
-static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
-{
-	int i;
-	while (len--) {
-		crc ^= *p++ << 8;
-		for (i = 0; i < 8; i++)
-			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
-	}
-
-	return crc;
-}
-
-/* Parse the Extended Parameter Page. */
-static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
-		struct nand_chip *chip, struct nand_onfi_params *p)
-{
-	struct onfi_ext_param_page *ep;
-	struct onfi_ext_section *s;
-	struct onfi_ext_ecc_info *ecc;
-	uint8_t *cursor;
-	int ret = -EINVAL;
-	int len;
-	int i;
-
-	len = le16_to_cpu(p->ext_param_page_length) * 16;
-	ep = kmalloc(len, GFP_KERNEL);
-	if (!ep)
-		return -ENOMEM;
-
-	/* Send our own NAND_CMD_PARAM. */
-	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
-
-	/* Use the Change Read Column command to skip the ONFI param pages. */
-	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
-			sizeof(*p) * p->num_of_param_pages, -1);
-
-	/* Read out the Extended Parameter Page. */
-	chip->read_buf(mtd, (uint8_t *)ep, len);
-	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
-		!= le16_to_cpu(ep->crc))) {
-		pr_debug("fail in the CRC.\n");
-		goto ext_out;
-	}
-
-	/*
-	 * Check the signature.
-	 * This does not strictly follow the ONFI spec; it may change in the future.
-	 */
-	if (strncmp(ep->sig, "EPPS", 4)) {
-		pr_debug("The signature is invalid.\n");
-		goto ext_out;
-	}
-
-	/* find the ECC section. */
-	cursor = (uint8_t *)(ep + 1);
-	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
-		s = ep->sections + i;
-		if (s->type == ONFI_SECTION_TYPE_2)
-			break;
-		cursor += s->length * 16;
-	}
-	if (i == ONFI_EXT_SECTION_MAX) {
-		pr_debug("We can not find the ECC section.\n");
-		goto ext_out;
-	}
-
-	/* get the info we want. */
-	ecc = (struct onfi_ext_ecc_info *)cursor;
-
-	if (!ecc->codeword_size) {
-		pr_debug("Invalid codeword size\n");
-		goto ext_out;
-	}
-
-	chip->ecc_strength_ds = ecc->ecc_bits;
-	chip->ecc_step_ds = 1 << ecc->codeword_size;
-	ret = 0;
-
-ext_out:
-	kfree(ep);
-	return ret;
-}
-
-static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
-
-	return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
-			feature);
-}
-
-/*
- * Configure chip properties from Micron vendor-specific ONFI table
- */
-static void nand_onfi_detect_micron(struct nand_chip *chip,
-		struct nand_onfi_params *p)
-{
-	struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
-
-	if (le16_to_cpu(p->vendor_revision) < 1)
-		return;
-
-	chip->read_retries = micron->read_retry_options;
-	chip->setup_read_retry = nand_setup_read_retry_micron;
-}
-
-/*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
- */
-static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
-					int *busw)
-{
-	struct nand_onfi_params *p = &chip->onfi_params;
-	int i, j;
-	int val;
-
-	/* Try ONFI for unknown chip or LP */
-	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
-	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
-		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
-		return 0;
-
-	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < sizeof(*p); j++)
-			((uint8_t *)p)[j] = chip->read_byte(mtd);
-		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
-				le16_to_cpu(p->crc)) {
-			break;
-		}
-	}
-
-	if (i == 3) {
-		pr_err("Could not find valid ONFI parameter page; aborting\n");
-		return 0;
-	}
-
-	/* Check version */
-	val = le16_to_cpu(p->revision);
-	if (val & (1 << 5))
-		chip->onfi_version = 23;
-	else if (val & (1 << 4))
-		chip->onfi_version = 22;
-	else if (val & (1 << 3))
-		chip->onfi_version = 21;
-	else if (val & (1 << 2))
-		chip->onfi_version = 20;
-	else if (val & (1 << 1))
-		chip->onfi_version = 10;
-
-	if (!chip->onfi_version) {
-		pr_info("unsupported ONFI version: %d\n", val);
-		return 0;
-	}
-
-	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
-	sanitize_string(p->model, sizeof(p->model));
-	if (!mtd->name)
-		mtd->name = p->model;
-
-	mtd->writesize = le32_to_cpu(p->byte_per_page);
-
-	/*
-	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
-	 * (don't ask me who thought of this...). MTD assumes that these
-	 * dimensions will be power-of-2, so just truncate the remaining area.
-	 */
-	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
-	mtd->erasesize *= mtd->writesize;
-
-	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-
-	/* See erasesize comment */
-	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
-	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
-	chip->bits_per_cell = p->bits_per_cell;
-
-	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
-		*busw = NAND_BUSWIDTH_16;
-	else
-		*busw = 0;
-
-	if (p->ecc_bits != 0xff) {
-		chip->ecc_strength_ds = p->ecc_bits;
-		chip->ecc_step_ds = 512;
-	} else if (chip->onfi_version >= 21 &&
-		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
-
-		/*
-		 * nand_flash_detect_ext_param_page() uses the Change Read
-		 * Column command, which may not be supported by chip->cmdfunc.
-		 * So try to update chip->cmdfunc now. We do not replace a
-		 * user-supplied command function.
-		 */
-		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
-			chip->cmdfunc = nand_command_lp;
-
-		/* The Extended Parameter Page is supported since ONFI 2.1. */
-		if (nand_flash_detect_ext_param_page(mtd, chip, p))
-			pr_warn("Failed to detect ONFI extended param page\n");
-	} else {
-		pr_warn("Could not retrieve ONFI ECC requirements\n");
-	}
-
-	if (p->jedec_id == NAND_MFR_MICRON)
-		nand_onfi_detect_micron(chip, p);
-
-	return 1;
-}
-
-/*
- * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
- */
-static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
-					int *busw)
-{
-	struct nand_jedec_params *p = &chip->jedec_params;
-	struct jedec_ecc_info *ecc;
-	int val;
-	int i, j;
-
-	/* Try JEDEC for unknown chip or LP */
-	chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
-	if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
-		chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
-		chip->read_byte(mtd) != 'C')
-		return 0;
-
-	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
-	for (i = 0; i < 3; i++) {
-		for (j = 0; j < sizeof(*p); j++)
-			((uint8_t *)p)[j] = chip->read_byte(mtd);
-
-		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
-				le16_to_cpu(p->crc))
-			break;
-	}
-
-	if (i == 3) {
-		pr_err("Could not find valid JEDEC parameter page; aborting\n");
-		return 0;
-	}
-
-	/* Check version */
-	val = le16_to_cpu(p->revision);
-	if (val & (1 << 2))
-		chip->jedec_version = 10;
-	else if (val & (1 << 1))
-		chip->jedec_version = 1; /* vendor specific version */
-
-	if (!chip->jedec_version) {
-		pr_info("unsupported JEDEC version: %d\n", val);
-		return 0;
-	}
-
-	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
-	sanitize_string(p->model, sizeof(p->model));
-	if (!mtd->name)
-		mtd->name = p->model;
-
-	mtd->writesize = le32_to_cpu(p->byte_per_page);
-
-	/* Please refer to the comment in nand_flash_detect_onfi(). */
-	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
-	mtd->erasesize *= mtd->writesize;
-
-	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
-
-	/* Please refer to the comment in nand_flash_detect_onfi(). */
-	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
-	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
-	chip->bits_per_cell = p->bits_per_cell;
-
-	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
-		*busw = NAND_BUSWIDTH_16;
-	else
-		*busw = 0;
-
-	/* ECC info */
-	ecc = &p->ecc_info[0];
-
-	if (ecc->codeword_size >= 9) {
-		chip->ecc_strength_ds = ecc->ecc_bits;
-		chip->ecc_step_ds = 1 << ecc->codeword_size;
-	} else {
-		pr_warn("Invalid codeword size\n");
-	}
-
-	return 1;
-}
-
-/*
- * nand_id_has_period - Check if an ID string has a given wraparound period
- * @id_data: the ID string
- * @arrlen: the length of the @id_data array
- * @period: the period of repetition
- *
- * Check if an ID string is repeated within a given sequence of bytes at a
- * specific repetition interval (e.g., {0x20,0x01,0x7F,0x20} has a
- * period of 3). This is a helper function for nand_id_len(). Returns non-zero
- * if the repetition has a period of @period; otherwise, returns zero.
- */
-static int nand_id_has_period(u8 *id_data, int arrlen, int period)
-{
-	int i, j;
-	for (i = 0; i < period; i++)
-		for (j = i + period; j < arrlen; j += period)
-			if (id_data[i] != id_data[j])
-				return 0;
-	return 1;
-}
-
-/*
- * nand_id_len - Get the length of an ID string returned by CMD_READID
- * @id_data: the ID string
- * @arrlen: the length of the @id_data array
- *
- * Returns the length of the ID string, according to known wraparound/trailing
- * zero patterns. If no pattern exists, returns the length of the array.
- */
-static int nand_id_len(u8 *id_data, int arrlen)
-{
-	int last_nonzero, period;
-
-	/* Find last non-zero byte */
-	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
-		if (id_data[last_nonzero])
-			break;
-
-	/* All zeros */
-	if (last_nonzero < 0)
-		return 0;
-
-	/* Calculate wraparound period */
-	for (period = 1; period < arrlen; period++)
-		if (nand_id_has_period(id_data, arrlen, period))
-			break;
-
-	/* There's a repeated pattern */
-	if (period < arrlen)
-		return period;
-
-	/* There are trailing zeros */
-	if (last_nonzero < arrlen - 1)
-		return last_nonzero + 1;
-
-	/* No pattern detected */
-	return arrlen;
-}
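To illustrate the wraparound/trailing-zero rules implemented above, here are a few hand-made ID strings and the length nand_id_len() would report for them (the byte values are invented for the example, not taken from this patch):

	/*
	 * {0x2c, 0xdc, 0x90, 0xa6, 0x54, 0x00, 0x00, 0x00}
	 *	-> no repeating pattern, three trailing zeros: length 5
	 * {0x20, 0x01, 0x7f, 0x20, 0x01, 0x7f, 0x20, 0x01}
	 *	-> wraps around with period 3: length 3
	 * {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x03, 0x30}
	 *	-> no pattern detected: length 8 (the full array)
	 */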
-
-/* Extract the number of bits per cell from the 3rd byte of the extended ID */
-static int nand_get_bits_per_cell(u8 cellinfo)
-{
-	int bits;
-
-	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
-	bits >>= NAND_CI_CELLTYPE_SHIFT;
-	return bits + 1;
-}
-
-/*
- * Many new NAND chips share similar device ID codes, which represent the size of the
- * chip. The rest of the parameters must be decoded according to generic or
- * manufacturer-specific "extended ID" decoding patterns.
- */
-static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
-				u8 id_data[8], int *busw)
-{
-	int extid, id_len;
-	/* The 3rd id byte holds MLC / multichip data */
-	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
-	/* The 4th id byte is the important one */
-	extid = id_data[3];
-
-	id_len = nand_id_len(id_data, 8);
-
-	/*
-	 * Field definitions are in the following datasheets:
-	 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
-	 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
-	 * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
-	 *
-	 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
-	 * ID to decide what to do.
-	 */
-	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
-			!nand_is_slc(chip) && id_data[5] != 0x00) {
-		/* Calc pagesize */
-		mtd->writesize = 2048 << (extid & 0x03);
-		extid >>= 2;
-		/* Calc oobsize */
-		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
-		case 1:
-			mtd->oobsize = 128;
-			break;
-		case 2:
-			mtd->oobsize = 218;
-			break;
-		case 3:
-			mtd->oobsize = 400;
-			break;
-		case 4:
-			mtd->oobsize = 436;
-			break;
-		case 5:
-			mtd->oobsize = 512;
-			break;
-		case 6:
-			mtd->oobsize = 640;
-			break;
-		case 7:
-		default: /* Other cases are "reserved" (unknown) */
-			mtd->oobsize = 1024;
-			break;
-		}
-		extid >>= 2;
-		/* Calc blocksize */
-		mtd->erasesize = (128 * 1024) <<
-			(((extid >> 1) & 0x04) | (extid & 0x03));
-		*busw = 0;
-	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
-			!nand_is_slc(chip)) {
-		unsigned int tmp;
-
-		/* Calc pagesize */
-		mtd->writesize = 2048 << (extid & 0x03);
-		extid >>= 2;
-		/* Calc oobsize */
-		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
-		case 0:
-			mtd->oobsize = 128;
-			break;
-		case 1:
-			mtd->oobsize = 224;
-			break;
-		case 2:
-			mtd->oobsize = 448;
-			break;
-		case 3:
-			mtd->oobsize = 64;
-			break;
-		case 4:
-			mtd->oobsize = 32;
-			break;
-		case 5:
-			mtd->oobsize = 16;
-			break;
-		default:
-			mtd->oobsize = 640;
-			break;
-		}
-		extid >>= 2;
-		/* Calc blocksize */
-		tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
-		if (tmp < 0x03)
-			mtd->erasesize = (128 * 1024) << tmp;
-		else if (tmp == 0x03)
-			mtd->erasesize = 768 * 1024;
-		else
-			mtd->erasesize = (64 * 1024) << tmp;
-		*busw = 0;
-	} else {
-		/* Calc pagesize */
-		mtd->writesize = 1024 << (extid & 0x03);
-		extid >>= 2;
-		/* Calc oobsize */
-		mtd->oobsize = (8 << (extid & 0x01)) *
-			(mtd->writesize >> 9);
-		extid >>= 2;
-		/* Calc blocksize. Blocksize is multiples of 64KiB */
-		mtd->erasesize = (64 * 1024) << (extid & 0x03);
-		extid >>= 2;
-		/* Get buswidth information */
-		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
-
-		/*
-		 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
-		 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
-		 * follows:
-		 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
-		 *                         110b -> 24nm
-		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
-		 */
-		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
-				nand_is_slc(chip) &&
-				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
-				!(id_data[4] & 0x80) /* !BENAND */) {
-			mtd->oobsize = 32 * mtd->writesize >> 9;
-		}
-
-	}
-}
-
-/*
- * Old devices have chip data hardcoded in the device ID table. nand_decode_id
- * decodes a matching ID table entry and assigns the MTD size parameters for
- * the chip.
- */
-static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
-				struct nand_flash_dev *type, u8 id_data[8],
-				int *busw)
-{
-	int maf_id = id_data[0];
-
-	mtd->erasesize = type->erasesize;
-	mtd->writesize = type->pagesize;
-	mtd->oobsize = mtd->writesize / 32;
-	*busw = type->options & NAND_BUSWIDTH_16;
-
-	/* All legacy ID NAND are small-page, SLC */
-	chip->bits_per_cell = 1;
-
-	/*
-	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
-	 * some Spansion chips have erasesize that conflicts with size
-	 * listed in nand_ids table.
-	 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
-	 */
-	if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
-			&& id_data[6] == 0x00 && id_data[7] == 0x00
-			&& mtd->writesize == 512) {
-		mtd->erasesize = 128 * 1024;
-		mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
-	}
-}
-
-/*
- * Set the bad block marker/indicator (BBM/BBI) patterns according to
- * heuristics based on various detected parameters (e.g., manufacturer,
- * page size, cell-type information).
- */
-static void nand_decode_bbm_options(struct mtd_info *mtd,
-				    struct nand_chip *chip, u8 id_data[8])
-{
-	int maf_id = id_data[0];
-
-	/* Set the bad block position */
-	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
-		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
-	else
-		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
-
-	/*
-	 * Bad block marker is stored in the last page of each block on Samsung
-	 * and Hynix MLC devices; stored in first two pages of each block on
-	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
-	 * AMD/Spansion, and Macronix.  All others scan only the first page.
-	 */
-	if (!nand_is_slc(chip) &&
-			(maf_id == NAND_MFR_SAMSUNG ||
-			 maf_id == NAND_MFR_HYNIX))
-		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
-	else if ((nand_is_slc(chip) &&
-				(maf_id == NAND_MFR_SAMSUNG ||
-				 maf_id == NAND_MFR_HYNIX ||
-				 maf_id == NAND_MFR_TOSHIBA ||
-				 maf_id == NAND_MFR_AMD ||
-				 maf_id == NAND_MFR_MACRONIX)) ||
-			(mtd->writesize == 2048 &&
-			 maf_id == NAND_MFR_MICRON))
-		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
-}
-
-static inline bool is_full_id_nand(struct nand_flash_dev *type)
-{
-	return type->id_len;
-}
-
-static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
-		   struct nand_flash_dev *type, u8 *id_data, int *busw)
-{
-	if (!strncmp(type->id, id_data, type->id_len)) {
-		mtd->writesize = type->pagesize;
-		mtd->erasesize = type->erasesize;
-		mtd->oobsize = type->oobsize;
-
-		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
-		chip->chipsize = (uint64_t)type->chipsize << 20;
-		chip->options |= type->options;
-		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
-		chip->ecc_step_ds = NAND_ECC_STEP(type);
-		chip->onfi_timing_mode_default =
-					type->onfi_timing_mode_default;
-
-		*busw = type->options & NAND_BUSWIDTH_16;
-
-		if (!mtd->name)
-			mtd->name = type->name;
-
-		return true;
-	}
-	return false;
-}
-
-/*
- * Get the flash and manufacturer id and look up whether the type is supported.
- */
-static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
-						  struct nand_chip *chip,
-						  int *maf_id, int *dev_id,
-						  struct nand_flash_dev *type)
-{
-	int busw;
-	int i, maf_idx;
-	u8 id_data[8];
-
-	/* Select the device */
-	chip->select_chip(mtd, 0);
-
-	/*
-	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
-	 * after power-up.
-	 */
-	nand_reset(chip);
-
-	/* Send the command for reading device ID */
-	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
-	/* Read manufacturer and device IDs */
-	*maf_id = chip->read_byte(mtd);
-	*dev_id = chip->read_byte(mtd);
-
-	/*
-	 * Try again to make sure, as on some systems bus-hold or other
-	 * interface concerns can cause random data which looks like a
-	 * possibly credible NAND flash to appear. If the two results do
-	 * not match, ignore the device completely.
-	 */
-
-	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
-	/* Read entire ID string */
-	for (i = 0; i < 8; i++)
-		id_data[i] = chip->read_byte(mtd);
-
-	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
-		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
-			*maf_id, *dev_id, id_data[0], id_data[1]);
-		return ERR_PTR(-ENODEV);
-	}
-
-	if (!type)
-		type = nand_flash_ids;
-
-	for (; type->name != NULL; type++) {
-		if (is_full_id_nand(type)) {
-			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
-				goto ident_done;
-		} else if (*dev_id == type->dev_id) {
-			break;
-		}
-	}
-
-	chip->onfi_version = 0;
-	if (!type->name || !type->pagesize) {
-		/* Check if the chip is ONFI compliant */
-		if (nand_flash_detect_onfi(mtd, chip, &busw))
-			goto ident_done;
-
-		/* Check if the chip is JEDEC compliant */
-		if (nand_flash_detect_jedec(mtd, chip, &busw))
-			goto ident_done;
-	}
-
-	if (!type->name)
-		return ERR_PTR(-ENODEV);
-
-	if (!mtd->name)
-		mtd->name = type->name;
-
-	chip->chipsize = (uint64_t)type->chipsize << 20;
-
-	if (!type->pagesize) {
-		/* Decode parameters from extended ID */
-		nand_decode_ext_id(mtd, chip, id_data, &busw);
-	} else {
-		nand_decode_id(mtd, chip, type, id_data, &busw);
-	}
-	/* Get chip options */
-	chip->options |= type->options;
-
-	/*
-	 * Check if chip is not a Samsung device. Do not clear the
-	 * options for chips which do not have an extended id.
-	 */
-	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
-		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
-ident_done:
-
-	/* Try to identify manufacturer */
-	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
-		if (nand_manuf_ids[maf_idx].id == *maf_id)
-			break;
-	}
-
-	if (chip->options & NAND_BUSWIDTH_AUTO) {
-		WARN_ON(chip->options & NAND_BUSWIDTH_16);
-		chip->options |= busw;
-		nand_set_defaults(chip, busw);
-	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
-		/*
-		 * Check if the buswidth is correct. Hardware drivers should
-		 * set up the chip correctly!
-		 */
-		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
-			*maf_id, *dev_id);
-		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
-		pr_warn("bus width %d instead %d bit\n",
-			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
-			   busw ? 16 : 8);
-		return ERR_PTR(-EINVAL);
-	}
-
-	nand_decode_bbm_options(mtd, chip, id_data);
-
-	/* Calculate the address shift from the page size */
-	chip->page_shift = ffs(mtd->writesize) - 1;
-	/* Convert chipsize to number of pages per chip -1 */
-	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
-
-	chip->bbt_erase_shift = chip->phys_erase_shift =
-		ffs(mtd->erasesize) - 1;
-	if (chip->chipsize & 0xffffffff)
-		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
-	else {
-		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
-		chip->chip_shift += 32 - 1;
-	}
-
-	chip->badblockbits = 8;
-	chip->erase = single_erase;
-
-	/* Do not replace user-supplied command function! */
-	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
-		chip->cmdfunc = nand_command_lp;
-
-	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
-		*maf_id, *dev_id);
-
-	if (chip->onfi_version)
-		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
-				chip->onfi_params.model);
-	else if (chip->jedec_version)
-		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
-				chip->jedec_params.model);
-	else
-		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
-				type->name);
-
-	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
-		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
-		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
-	return type;
-}
-
-static const char * const nand_ecc_modes[] = {
-	[NAND_ECC_NONE]		= "none",
-	[NAND_ECC_SOFT]		= "soft",
-	[NAND_ECC_HW]		= "hw",
-	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
-	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
-};
-
-static int of_get_nand_ecc_mode(struct device_node *np)
-{
-	const char *pm;
-	int err, i;
-
-	err = of_property_read_string(np, "nand-ecc-mode", &pm);
-	if (err < 0)
-		return err;
-
-	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
-		if (!strcasecmp(pm, nand_ecc_modes[i]))
-			return i;
-
-	/*
-	 * For backward compatibility we support a few obsolete values that no
-	 * longer have mappings into nand_ecc_modes_t (they were merged with
-	 * other enums).
-	 */
-	if (!strcasecmp(pm, "soft_bch"))
-		return NAND_ECC_SOFT;
-
-	return -ENODEV;
-}
-
-static const char * const nand_ecc_algos[] = {
-	[NAND_ECC_HAMMING]	= "hamming",
-	[NAND_ECC_BCH]		= "bch",
-};
-
-static int of_get_nand_ecc_algo(struct device_node *np)
-{
-	const char *pm;
-	int err, i;
-
-	err = of_property_read_string(np, "nand-ecc-algo", &pm);
-	if (!err) {
-		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
-			if (!strcasecmp(pm, nand_ecc_algos[i]))
-				return i;
-		return -ENODEV;
-	}
-
-	/*
-	 * For backward compatibility we also read "nand-ecc-mode", checking
-	 * for some obsolete values that used to specify the ECC algorithm.
-	 */
-	err = of_property_read_string(np, "nand-ecc-mode", &pm);
-	if (err < 0)
-		return err;
-
-	if (!strcasecmp(pm, "soft"))
-		return NAND_ECC_HAMMING;
-	else if (!strcasecmp(pm, "soft_bch"))
-		return NAND_ECC_BCH;
-
-	return -ENODEV;
-}
-
-static int of_get_nand_ecc_step_size(struct device_node *np)
-{
-	int ret;
-	u32 val;
-
-	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
-	return ret ? ret : val;
-}
-
-static int of_get_nand_ecc_strength(struct device_node *np)
-{
-	int ret;
-	u32 val;
-
-	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
-	return ret ? ret : val;
-}
-
-static int of_get_nand_bus_width(struct device_node *np)
-{
-	u32 val;
-
-	if (of_property_read_u32(np, "nand-bus-width", &val))
-		return 8;
-
-	switch (val) {
-	case 8:
-	case 16:
-		return val;
-	default:
-		return -EIO;
-	}
-}
-
-static bool of_get_nand_on_flash_bbt(struct device_node *np)
-{
-	return of_property_read_bool(np, "nand-on-flash-bbt");
-}
-
-static int nand_dt_init(struct nand_chip *chip)
-{
-	struct device_node *dn = nand_get_flash_node(chip);
-	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
-
-	if (!dn)
-		return 0;
-
-	if (of_get_nand_bus_width(dn) == 16)
-		chip->options |= NAND_BUSWIDTH_16;
-
-	if (of_get_nand_on_flash_bbt(dn))
-		chip->bbt_options |= NAND_BBT_USE_FLASH;
-
-	ecc_mode = of_get_nand_ecc_mode(dn);
-	ecc_algo = of_get_nand_ecc_algo(dn);
-	ecc_strength = of_get_nand_ecc_strength(dn);
-	ecc_step = of_get_nand_ecc_step_size(dn);
-
-	if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
-	    (!(ecc_step >= 0) && ecc_strength >= 0)) {
-		pr_err("must set both strength and step size in DT\n");
-		return -EINVAL;
-	}
-
-	if (ecc_mode >= 0)
-		chip->ecc.mode = ecc_mode;
-
-	if (ecc_algo >= 0)
-		chip->ecc.algo = ecc_algo;
-
-	if (ecc_strength >= 0)
-		chip->ecc.strength = ecc_strength;
-
-	if (ecc_step > 0)
-		chip->ecc.size = ecc_step;
-
-	if (of_property_read_bool(dn, "nand-ecc-maximize"))
-		chip->ecc.options |= NAND_ECC_MAXIMIZE;
-
-	return 0;
-}
-
-/**
- * nand_scan_ident - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: number of chips to scan for
- * @table: alternative NAND ID table
- *
- * This is the first phase of the normal nand_scan() function. It reads the
- * flash ID and sets up MTD fields accordingly.
- *
- */
-int nand_scan_ident(struct mtd_info *mtd, int maxchips,
-		    struct nand_flash_dev *table)
-{
-	int i, nand_maf_id, nand_dev_id;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_flash_dev *type;
-	int ret;
-
-	ret = nand_dt_init(chip);
-	if (ret)
-		return ret;
-
-	if (!mtd->name && mtd->dev.parent)
-		mtd->name = dev_name(mtd->dev.parent);
-
-	if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
-		/*
-		 * Default functions assigned for select_chip() and
-		 * cmdfunc() both expect cmd_ctrl() to be populated,
-		 * so we need to check that that's the case
-		 */
-		pr_err("chip.cmd_ctrl() callback is not provided");
-		return -EINVAL;
-	}
-	/* Set the default functions */
-	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
-
-	/* Read the flash type */
-	type = nand_get_flash_type(mtd, chip, &nand_maf_id,
-				   &nand_dev_id, table);
-
-	if (IS_ERR(type)) {
-		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
-			pr_warn("No NAND device found\n");
-		chip->select_chip(mtd, -1);
-		return PTR_ERR(type);
-	}
-
-	ret = nand_init_data_interface(chip);
-	if (ret)
-		return ret;
-
-	chip->select_chip(mtd, -1);
-
-	/* Check for a chip array */
-	for (i = 1; i < maxchips; i++) {
-		chip->select_chip(mtd, i);
-		/* See comment in nand_get_flash_type for reset */
-		nand_reset(chip);
-		/* Send the command for reading device ID */
-		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-		/* Read manufacturer and device IDs */
-		if (nand_maf_id != chip->read_byte(mtd) ||
-		    nand_dev_id != chip->read_byte(mtd)) {
-			chip->select_chip(mtd, -1);
-			break;
-		}
-		chip->select_chip(mtd, -1);
-	}
-	if (i > 1)
-		pr_info("%d chips detected\n", i);
-
-	/* Store the number of chips and calc total size for mtd */
-	chip->numchips = i;
-	mtd->size = i * chip->chipsize;
-
-	return 0;
-}
-EXPORT_SYMBOL(nand_scan_ident);
-
-static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
-		return -EINVAL;
-
-	switch (ecc->algo) {
-	case NAND_ECC_HAMMING:
-		ecc->calculate = nand_calculate_ecc;
-		ecc->correct = nand_correct_data;
-		ecc->read_page = nand_read_page_swecc;
-		ecc->read_subpage = nand_read_subpage;
-		ecc->write_page = nand_write_page_swecc;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
-		ecc->read_oob = nand_read_oob_std;
-		ecc->write_oob = nand_write_oob_std;
-		if (!ecc->size)
-			ecc->size = 256;
-		ecc->bytes = 3;
-		ecc->strength = 1;
-		return 0;
-	case NAND_ECC_BCH:
-		if (!mtd_nand_has_bch()) {
-			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
-			return -EINVAL;
-		}
-		ecc->calculate = nand_bch_calculate_ecc;
-		ecc->correct = nand_bch_correct_data;
-		ecc->read_page = nand_read_page_swecc;
-		ecc->read_subpage = nand_read_subpage;
-		ecc->write_page = nand_write_page_swecc;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
-		ecc->read_oob = nand_read_oob_std;
-		ecc->write_oob = nand_write_oob_std;
-
-		/*
-		 * Board driver should supply ecc.size and ecc.strength
-		 * values to select how many bits are correctable.
-		 * Otherwise, default to 4 bits for large page devices.
-		 */
-		if (!ecc->size && (mtd->oobsize >= 64)) {
-			ecc->size = 512;
-			ecc->strength = 4;
-		}
-
-		/*
-		 * If no ECC placement scheme was provided, pick up the
-		 * default large page one.
-		 */
-		if (!mtd->ooblayout) {
-			/* handle large page devices only */
-			if (mtd->oobsize < 64) {
-				WARN(1, "OOB layout is required when using software BCH on small pages\n");
-				return -EINVAL;
-			}
-
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-
-		}
-
-		/*
-		 * We can only maximize ECC config when the default layout is
-		 * used, otherwise we don't know how many bytes can really be
-		 * used.
-		 */
-		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
-		    ecc->options & NAND_ECC_MAXIMIZE) {
-			int steps, bytes;
-
-			/* Always prefer 1k blocks over 512-byte ones */
-			ecc->size = 1024;
-			steps = mtd->writesize / ecc->size;
-
-			/* Reserve 2 bytes for the BBM */
-			bytes = (mtd->oobsize - 2) / steps;
-			ecc->strength = bytes * 8 / fls(8 * ecc->size);
-		}
-
-		/* See nand_bch_init() for details. */
-		ecc->bytes = 0;
-		ecc->priv = nand_bch_init(mtd);
-		if (!ecc->priv) {
-			WARN(1, "BCH ECC initialization failed!\n");
-			return -EINVAL;
-		}
-		return 0;
-	default:
-		WARN(1, "Unsupported ECC algorithm!\n");
-		return -EINVAL;
-	}
-}
-
-/*
- * Check if the chip configuration meets the datasheet requirements.
- *
- * If our configuration corrects A bits per B bytes and the minimum
- * required correction level is X bits per Y bytes, then we must ensure
- * both of the following are true:
- *
- * (1) A / B >= X / Y
- * (2) A >= X
- *
- * Requirement (1) ensures we can correct for the required bitflip density.
- * Requirement (2) ensures we can correct even when all bitflips are clumped
- * in the same sector.
- */
-static bool nand_ecc_strength_good(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int corr, ds_corr;
-
-	if (ecc->size == 0 || chip->ecc_step_ds == 0)
-		/* Not enough information */
-		return true;
-
-	/*
-	 * We get the number of corrected bits per page to compare
-	 * the correction density.
-	 */
-	corr = (mtd->writesize * ecc->strength) / ecc->size;
-	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
-
-	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
-}
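A worked example of the two requirements above, using hypothetical numbers that do not come from this patch:

	/*
	 * Datasheet:  4 bits per 512 bytes   -> ecc_strength_ds = 4, ecc_step_ds = 512
	 * Controller: 8 bits per 1024 bytes  -> ecc->strength = 8, ecc->size = 1024
	 * Page size:  4096 bytes
	 *
	 * corr    = (4096 * 8) / 1024 = 32 correctable bits per page
	 * ds_corr = (4096 * 4) /  512 = 32 required bits per page
	 *
	 * corr >= ds_corr and 8 >= 4, so the configuration is accepted. A
	 * controller limited to 2 bits per 512 bytes would fail both checks
	 * and trigger the "too weak" warning in nand_scan_tail().
	 */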
-
-/**
- * nand_scan_tail - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- *
- * This is the second phase of the normal nand_scan() function. It fills out
- * all the uninitialized function pointers with the defaults and scans for a
- * bad block table if appropriate.
- */
-int nand_scan_tail(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	struct nand_buffers *nbuf;
-	int ret;
-
-	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
-	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
-		   !(chip->bbt_options & NAND_BBT_USE_FLASH)))
-		return -EINVAL;
-
-	if (!(chip->options & NAND_OWN_BUFFERS)) {
-		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
-				+ mtd->oobsize * 3, GFP_KERNEL);
-		if (!nbuf)
-			return -ENOMEM;
-		nbuf->ecccalc = (uint8_t *)(nbuf + 1);
-		nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
-		nbuf->databuf = nbuf->ecccode + mtd->oobsize;
-
-		chip->buffers = nbuf;
-	} else {
-		if (!chip->buffers)
-			return -ENOMEM;
-	}
-
-	/* Set the internal oob buffer location, just after the page data */
-	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
-
-	/*
-	 * If no default placement scheme is given, select an appropriate one.
-	 */
-	if (!mtd->ooblayout &&
-	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
-		switch (mtd->oobsize) {
-		case 8:
-		case 16:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
-			break;
-		case 64:
-		case 128:
-			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
-			break;
-		default:
-			WARN(1, "No oob scheme defined for oobsize %d\n",
-				mtd->oobsize);
-			ret = -EINVAL;
-			goto err_free;
-		}
-	}
-
-	if (!chip->write_page)
-		chip->write_page = nand_write_page;
-
-	/*
-	 * Check the ECC mode: if 3-byte/512-byte hardware ECC is selected and
-	 * we have a 256-byte page size, fall back to software ECC.
-	 */
-
-	switch (ecc->mode) {
-	case NAND_ECC_HW_OOB_FIRST:
-		/* Similar to NAND_ECC_HW, but a separate read_page handler */
-		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
-			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
-			ret = -EINVAL;
-			goto err_free;
-		}
-		if (!ecc->read_page)
-			ecc->read_page = nand_read_page_hwecc_oob_first;
-
-	case NAND_ECC_HW:
-		/* Use standard hwecc read page function? */
-		if (!ecc->read_page)
-			ecc->read_page = nand_read_page_hwecc;
-		if (!ecc->write_page)
-			ecc->write_page = nand_write_page_hwecc;
-		if (!ecc->read_page_raw)
-			ecc->read_page_raw = nand_read_page_raw;
-		if (!ecc->write_page_raw)
-			ecc->write_page_raw = nand_write_page_raw;
-		if (!ecc->read_oob)
-			ecc->read_oob = nand_read_oob_std;
-		if (!ecc->write_oob)
-			ecc->write_oob = nand_write_oob_std;
-		if (!ecc->read_subpage)
-			ecc->read_subpage = nand_read_subpage;
-		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
-			ecc->write_subpage = nand_write_subpage_hwecc;
-
-	case NAND_ECC_HW_SYNDROME:
-		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
-		    (!ecc->read_page ||
-		     ecc->read_page == nand_read_page_hwecc ||
-		     !ecc->write_page ||
-		     ecc->write_page == nand_write_page_hwecc)) {
-			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
-			ret = -EINVAL;
-			goto err_free;
-		}
-		/* Use standard syndrome read/write page function? */
-		if (!ecc->read_page)
-			ecc->read_page = nand_read_page_syndrome;
-		if (!ecc->write_page)
-			ecc->write_page = nand_write_page_syndrome;
-		if (!ecc->read_page_raw)
-			ecc->read_page_raw = nand_read_page_raw_syndrome;
-		if (!ecc->write_page_raw)
-			ecc->write_page_raw = nand_write_page_raw_syndrome;
-		if (!ecc->read_oob)
-			ecc->read_oob = nand_read_oob_syndrome;
-		if (!ecc->write_oob)
-			ecc->write_oob = nand_write_oob_syndrome;
-
-		if (mtd->writesize >= ecc->size) {
-			if (!ecc->strength) {
-				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
-				ret = -EINVAL;
-				goto err_free;
-			}
-			break;
-		}
-		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
-			ecc->size, mtd->writesize);
-		ecc->mode = NAND_ECC_SOFT;
-		ecc->algo = NAND_ECC_HAMMING;
-
-	case NAND_ECC_SOFT:
-		ret = nand_set_ecc_soft_ops(mtd);
-		if (ret) {
-			ret = -EINVAL;
-			goto err_free;
-		}
-		break;
-
-	case NAND_ECC_NONE:
-		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
-		ecc->read_page = nand_read_page_raw;
-		ecc->write_page = nand_write_page_raw;
-		ecc->read_oob = nand_read_oob_std;
-		ecc->read_page_raw = nand_read_page_raw;
-		ecc->write_page_raw = nand_write_page_raw;
-		ecc->write_oob = nand_write_oob_std;
-		ecc->size = mtd->writesize;
-		ecc->bytes = 0;
-		ecc->strength = 0;
-		break;
-
-	default:
-		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
-		ret = -EINVAL;
-		goto err_free;
-	}
-
-	/* For many systems, the standard OOB write also works for raw */
-	if (!ecc->read_oob_raw)
-		ecc->read_oob_raw = ecc->read_oob;
-	if (!ecc->write_oob_raw)
-		ecc->write_oob_raw = ecc->write_oob;
-
-	/* propagate ecc info to mtd_info */
-	mtd->ecc_strength = ecc->strength;
-	mtd->ecc_step_size = ecc->size;
-
-	/*
-	 * Set the number of read / write steps for one page depending on ECC
-	 * mode.
-	 */
-	ecc->steps = mtd->writesize / ecc->size;
-	if (ecc->steps * ecc->size != mtd->writesize) {
-		WARN(1, "Invalid ECC parameters\n");
-		ret = -EINVAL;
-		goto err_free;
-	}
-	ecc->total = ecc->steps * ecc->bytes;
-
-	/*
-	 * The number of bytes available for a client to place data into
-	 * the out of band area.
-	 */
-	ret = mtd_ooblayout_count_freebytes(mtd);
-	if (ret < 0)
-		ret = 0;
-
-	mtd->oobavail = ret;
-
-	/* ECC sanity check: warn if it's too weak */
-	if (!nand_ecc_strength_good(mtd))
-		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
-			mtd->name);
-
-	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
-	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
-		switch (ecc->steps) {
-		case 2:
-			mtd->subpage_sft = 1;
-			break;
-		case 4:
-		case 8:
-		case 16:
-			mtd->subpage_sft = 2;
-			break;
-		}
-	}
-	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
-
-	/* Initialize state */
-	chip->state = FL_READY;
-
-	/* Invalidate the pagebuffer reference */
-	chip->pagebuf = -1;
-
-	/* Large page NAND with SOFT_ECC should support subpage reads */
-	switch (ecc->mode) {
-	case NAND_ECC_SOFT:
-		if (chip->page_shift > 9)
-			chip->options |= NAND_SUBPAGE_READ;
-		break;
-
-	default:
-		break;
-	}
-
-	/* Fill in remaining MTD driver data */
-	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
-	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
-						MTD_CAP_NANDFLASH;
-	mtd->_erase = nand_erase;
-	mtd->_point = NULL;
-	mtd->_unpoint = NULL;
-	mtd->_read = nand_read;
-	mtd->_write = nand_write;
-	mtd->_panic_write = panic_nand_write;
-	mtd->_read_oob = nand_read_oob;
-	mtd->_write_oob = nand_write_oob;
-	mtd->_sync = nand_sync;
-	mtd->_lock = NULL;
-	mtd->_unlock = NULL;
-	mtd->_suspend = nand_suspend;
-	mtd->_resume = nand_resume;
-	mtd->_reboot = nand_shutdown;
-	mtd->_block_isreserved = nand_block_isreserved;
-	mtd->_block_isbad = nand_block_isbad;
-	mtd->_block_markbad = nand_block_markbad;
-	mtd->writebufsize = mtd->writesize;
-
-	/*
-	 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
-	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
-	 * properly set.
-	 */
-	if (!mtd->bitflip_threshold)
-		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
-
-	/* Check, if we should skip the bad block table scan */
-	if (chip->options & NAND_SKIP_BBTSCAN)
-		return 0;
-
-	/* Build bad block table */
-	return chip->scan_bbt(mtd);
-err_free:
-	if (!(chip->options & NAND_OWN_BUFFERS))
-		kfree(chip->buffers);
-	return ret;
-}
-EXPORT_SYMBOL(nand_scan_tail);
-
-/*
- * is_module_text_address() isn't exported, and it's mostly a pointless
- * test if this is a module _anyway_ -- they'd have to try _really_ hard
- * to call us from in-kernel code if the core NAND support is modular.
- */
-#ifdef MODULE
-#define caller_is_module() (1)
-#else
-#define caller_is_module() \
-	is_module_text_address((unsigned long)__builtin_return_address(0))
-#endif
-
-/**
- * nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: number of chips to scan for
- *
- * This fills out all the uninitialized function pointers with the defaults.
- * The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values.
- */
-int nand_scan(struct mtd_info *mtd, int maxchips)
-{
-	int ret;
-
-	ret = nand_scan_ident(mtd, maxchips, NULL);
-	if (!ret)
-		ret = nand_scan_tail(mtd);
-	return ret;
-}
-EXPORT_SYMBOL(nand_scan);
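For reference, a controller driver either calls nand_scan() directly or splits the two phases so it can tune chip->ecc between identification and the final setup. A minimal sketch of the split form follows; the function name, the ECC choice and the registration call are illustrative only (they assume the usual MTD/NAND headers) and are not part of this patch:

	static int example_nand_probe(struct mtd_info *mtd, struct nand_chip *chip)
	{
		int ret;

		/* Phase 1: reset and identify the chip, fill in the geometry */
		ret = nand_scan_ident(mtd, 1, NULL);
		if (ret)
			return ret;

		/* Driver-specific ECC choice; software Hamming here for brevity */
		chip->ecc.mode = NAND_ECC_SOFT;
		chip->ecc.algo = NAND_ECC_HAMMING;

		/* Phase 2: hook up ECC/OOB handling and scan the bad block table */
		ret = nand_scan_tail(mtd);
		if (ret)
			return ret;

		return mtd_device_register(mtd, NULL, 0);
	}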
-
-/**
- * nand_cleanup - [NAND Interface] Free resources held by the NAND device
- * @chip: NAND chip object
- */
-void nand_cleanup(struct nand_chip *chip)
-{
-	if (chip->ecc.mode == NAND_ECC_SOFT &&
-	    chip->ecc.algo == NAND_ECC_BCH)
-		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
-
-	nand_release_data_interface(chip);
-
-	/* Free bad block table memory */
-	kfree(chip->bbt);
-	if (!(chip->options & NAND_OWN_BUFFERS))
-		kfree(chip->buffers);
-
-	/* Free bad block descriptor memory */
-	if (chip->badblock_pattern && chip->badblock_pattern->options
-			& NAND_BBT_DYNAMICSTRUCT)
-		kfree(chip->badblock_pattern);
-}
-EXPORT_SYMBOL_GPL(nand_cleanup);
-
-/**
- * nand_release - [NAND Interface] Unregister the MTD device and free resources
- *		  held by the NAND device
- * @mtd: MTD device structure
- */
-void nand_release(struct mtd_info *mtd)
-{
-	mtd_device_unregister(mtd);
-	nand_cleanup(mtd_to_nand(mtd));
-}
-EXPORT_SYMBOL_GPL(nand_release);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
deleted file mode 100644
index 2915b6739bf8..000000000000
--- a/drivers/mtd/nand/nand_bbt.c
+++ /dev/null
@@ -1,1452 +0,0 @@ 
-/*
- *  Overview:
- *   Bad block table support for the NAND driver
- *
- *  Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Description:
- *
- * When nand_scan_bbt is called, it tries to find the bad block table
- * depending on the options in the BBT descriptor(s). If no flash based BBT
- * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
- * marked good / bad blocks. This information is used to create a memory BBT.
- * Once a new bad block is discovered, the "factory" information is updated
- * on the device.
- * If a flash based BBT is specified then the function first tries to find the
- * BBT on flash. If a BBT is found then the contents are read and the memory
- * based BBT is created. If a mirrored BBT is selected then the mirror is
- * searched too and the versions are compared. If the mirror has a greater
- * version number, then the mirror BBT is used to build the memory based BBT.
- * If the tables are not versioned, then we "or" the bad block information.
- * If one of the BBTs is out of date or does not exist it is (re)created.
- * If no BBT exists at all then the device is scanned for factory marked
- * good / bad blocks and the bad block tables are created.
- *
- * For manufacturer-created BBTs, like the one found on M-SYS DOC devices,
- * the BBT is searched and read but never created.
- *
- * The auto-generated bad block table is located in the last good blocks
- * of the device. The table is mirrored, so it can be updated eventually.
- * The table is marked in the OOB area with an ident pattern and a version
- * number which indicates which of the two tables is more up to date. If the NAND
- * controller needs the complete OOB area for the ECC information then the
- * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
- * course): it moves the ident pattern and the version byte into the data area
- * and the OOB area will remain untouched.
- *
- * The table uses 2 bits per block
- * 11b:		block is good
- * 00b:		block is factory marked bad
- * 01b, 10b:	block is marked bad due to wear
- *
- * The memory bad block table uses the following scheme:
- * 00b:		block is good
- * 01b:		block is marked bad due to wear
- * 10b:		block is reserved (to protect the bbt area)
- * 11b:		block is factory marked bad
- *
- * Multichip devices like DOC store the bad block info per floor.
- *
- * The following assumptions are made:
- * - bbts start at a page boundary, if autolocated on a block boundary
- * - the space necessary for a bbt in FLASH does not exceed a block boundary
- *
- */
-
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/bbm.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/bitops.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <linux/export.h>
-#include <linux/string.h>
-
-#define BBT_BLOCK_GOOD		0x00
-#define BBT_BLOCK_WORN		0x01
-#define BBT_BLOCK_RESERVED	0x02
-#define BBT_BLOCK_FACTORY_BAD	0x03
-
-#define BBT_ENTRY_MASK		0x03
-#define BBT_ENTRY_SHIFT		2
-
-static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
-
-static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
-{
-	uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
-	entry >>= (block & BBT_ENTRY_MASK) * 2;
-	return entry & BBT_ENTRY_MASK;
-}
-
-static inline void bbt_mark_entry(struct nand_chip *chip, int block,
-		uint8_t mark)
-{
-	uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
-	chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
-}
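/*
 * A minimal standalone sketch (hypothetical, not part of the kernel sources)
 * of the 2-bits-per-block packing implemented by bbt_get_entry() and
 * bbt_mark_entry() above: four block states fit in one byte.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t bbt[1] = { 0 };	/* room for 4 blocks, all BBT_BLOCK_GOOD (0x00) */
	int block = 2;

	/* Mark block 2 worn (0x01): shift the mark left by (block & 3) * 2 = 4 bits. */
	bbt[block >> 2] |= 0x01 << ((block & 0x03) * 2);

	/* Read it back: expect 0x01 (worn). */
	printf("block %d state: 0x%02x\n", block,
	       (bbt[block >> 2] >> ((block & 0x03) * 2)) & 0x03);
	return 0;
}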
-
-static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
-{
-	if (memcmp(buf, td->pattern, td->len))
-		return -1;
-	return 0;
-}
-
-/**
- * check_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @len: the length of buffer to search
- * @paglen: the pagelength
- * @td: search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block tables and
- * good / bad block identifiers.
- */
-static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
-{
-	if (td->options & NAND_BBT_NO_OOB)
-		return check_pattern_no_oob(buf, td);
-
-	/* Compare the pattern */
-	if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
-		return -1;
-
-	return 0;
-}
-
-/**
- * check_short_pattern - [GENERIC] check if a pattern is in the buffer
- * @buf: the buffer to search
- * @td:	search pattern descriptor
- *
- * Check for a pattern at the given place. Used to search bad block tables and
- * good / bad block identifiers. Same as check_pattern, but no optional empty
- * check.
- */
-static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
-{
-	/* Compare the pattern */
-	if (memcmp(buf + td->offs, td->pattern, td->len))
-		return -1;
-	return 0;
-}
-
-/**
- * add_marker_len - compute the length of the marker in data area
- * @td: BBT descriptor used for computation
- *
- * The length will be 0 if the marker is located in OOB area.
- */
-static u32 add_marker_len(struct nand_bbt_descr *td)
-{
-	u32 len;
-
-	if (!(td->options & NAND_BBT_NO_OOB))
-		return 0;
-
-	len = td->len;
-	if (td->options & NAND_BBT_VERSION)
-		len++;
-	return len;
-}
-
-/**
- * read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @page: the starting page
- * @num: the number of bbt descriptors to read
- * @td: the bbt description table
- * @offs: block number offset in the table
- *
- * Read the bad block table starting from page.
- */
-static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
-		struct nand_bbt_descr *td, int offs)
-{
-	int res, ret = 0, i, j, act = 0;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	size_t retlen, len, totlen;
-	loff_t from;
-	int bits = td->options & NAND_BBT_NRBITS_MSK;
-	uint8_t msk = (uint8_t)((1 << bits) - 1);
-	u32 marker_len;
-	int reserved_block_code = td->reserved_block_code;
-
-	totlen = (num * bits) >> 3;
-	marker_len = add_marker_len(td);
-	from = ((loff_t)page) << this->page_shift;
-
-	while (totlen) {
-		len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
-		if (marker_len) {
-			/*
-			 * In case the BBT marker is not in the OOB area it
-			 * will be just in the first page.
-			 */
-			len -= marker_len;
-			from += marker_len;
-			marker_len = 0;
-		}
-		res = mtd_read(mtd, from, len, &retlen, buf);
-		if (res < 0) {
-			if (mtd_is_eccerr(res)) {
-				pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
-					from & ~(loff_t)(mtd->writesize - 1));
-				return res;
-			} else if (mtd_is_bitflip(res)) {
-				pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
-					from & ~(loff_t)(mtd->writesize - 1));
-				ret = res;
-			} else {
-				pr_info("nand_bbt: error reading BBT\n");
-				return res;
-			}
-		}
-
-		/* Analyse data */
-		for (i = 0; i < len; i++) {
-			uint8_t dat = buf[i];
-			for (j = 0; j < 8; j += bits, act++) {
-				uint8_t tmp = (dat >> j) & msk;
-				if (tmp == msk)
-					continue;
-				if (reserved_block_code && (tmp == reserved_block_code)) {
-					pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
-						 (loff_t)(offs + act) <<
-						 this->bbt_erase_shift);
-					bbt_mark_entry(this, offs + act,
-							BBT_BLOCK_RESERVED);
-					mtd->ecc_stats.bbtblocks++;
-					continue;
-				}
-				/*
-				 * Leave it for now, if it's matured we can
-				 * move this message to pr_debug.
-				 */
-				pr_info("nand_read_bbt: bad block at 0x%012llx\n",
-					 (loff_t)(offs + act) <<
-					 this->bbt_erase_shift);
-				/* Factory marked bad or worn out? */
-				if (tmp == 0)
-					bbt_mark_entry(this, offs + act,
-							BBT_BLOCK_FACTORY_BAD);
-				else
-					bbt_mark_entry(this, offs + act,
-							BBT_BLOCK_WORN);
-				mtd->ecc_stats.badblocks++;
-			}
-		}
-		totlen -= len;
-		from += len;
-	}
-	return ret;
-}
-
-/**
- * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @chip: read the table for a specific chip, -1 read all chips; applies only if
- *        NAND_BBT_PERCHIP option is set
- *
- * Read the bad block table for all chips starting at a given page. We assume
- * that the bbt bits are in consecutive order.
- */
-static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int res = 0, i;
-
-	if (td->options & NAND_BBT_PERCHIP) {
-		int offs = 0;
-		for (i = 0; i < this->numchips; i++) {
-			if (chip == -1 || chip == i)
-				res = read_bbt(mtd, buf, td->pages[i],
-					this->chipsize >> this->bbt_erase_shift,
-					td, offs);
-			if (res)
-				return res;
-			offs += this->chipsize >> this->bbt_erase_shift;
-		}
-	} else {
-		res = read_bbt(mtd, buf, td->pages[0],
-				mtd->size >> this->bbt_erase_shift, td, 0);
-		if (res)
-			return res;
-	}
-	return 0;
-}
-
-/* BBT marker is in the first page, no OOB */
-static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
-			 struct nand_bbt_descr *td)
-{
-	size_t retlen;
-	size_t len;
-
-	len = td->len;
-	if (td->options & NAND_BBT_VERSION)
-		len++;
-
-	return mtd_read(mtd, offs, len, &retlen, buf);
-}
-
-/**
- * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @offs: offset at which to scan
- * @len: length of data region to read
- *
- * Scan read data from data+OOB. May traverse multiple pages, interleaving
- * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
- * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
- */
-static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
-			 size_t len)
-{
-	struct mtd_oob_ops ops;
-	int res, ret = 0;
-
-	ops.mode = MTD_OPS_PLACE_OOB;
-	ops.ooboffs = 0;
-	ops.ooblen = mtd->oobsize;
-
-	while (len > 0) {
-		ops.datbuf = buf;
-		ops.len = min(len, (size_t)mtd->writesize);
-		ops.oobbuf = buf + ops.len;
-
-		res = mtd_read_oob(mtd, offs, &ops);
-		if (res) {
-			if (!mtd_is_bitflip_or_eccerr(res))
-				return res;
-			else if (mtd_is_eccerr(res) || !ret)
-				ret = res;
-		}
-
-		buf += mtd->oobsize + mtd->writesize;
-		len -= mtd->writesize;
-		offs += mtd->writesize;
-	}
-	return ret;
-}
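/*
 * A minimal sketch (hypothetical helper, not from the kernel sources) of the
 * buffer layout produced by scan_read_oob() above: data and OOB are
 * interleaved as [page 0][oob 0][page 1][oob 1]...
 */
#include <stddef.h>

static inline size_t scan_buf_oob_offset(size_t page_idx, size_t writesize,
					 size_t oobsize)
{
	/* OOB bytes of page page_idx start right after that page's data. */
	return page_idx * (writesize + oobsize) + writesize;
}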
-
-static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
-			 size_t len, struct nand_bbt_descr *td)
-{
-	if (td->options & NAND_BBT_NO_OOB)
-		return scan_read_data(mtd, buf, offs, td);
-	else
-		return scan_read_oob(mtd, buf, offs, len);
-}
-
-/* Scan write data with oob to flash */
-static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
-			  uint8_t *buf, uint8_t *oob)
-{
-	struct mtd_oob_ops ops;
-
-	ops.mode = MTD_OPS_PLACE_OOB;
-	ops.ooboffs = 0;
-	ops.ooblen = mtd->oobsize;
-	ops.datbuf = buf;
-	ops.oobbuf = oob;
-	ops.len = len;
-
-	return mtd_write_oob(mtd, offs, &ops);
-}
-
-static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
-{
-	u32 ver_offs = td->veroffs;
-
-	if (!(td->options & NAND_BBT_NO_OOB))
-		ver_offs += mtd->writesize;
-	return ver_offs;
-}
-
-/**
- * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md:	descriptor for the bad block table mirror
- *
- * Read the bad block table(s) for all chips starting at a given page. We
- * assume that the bbt bits are in consecutive order.
- */
-static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
-			  struct nand_bbt_descr *td, struct nand_bbt_descr *md)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	/* Read the primary version, if available */
-	if (td->options & NAND_BBT_VERSION) {
-		scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
-			      mtd->writesize, td);
-		td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
-		pr_info("Bad block table at page %d, version 0x%02X\n",
-			 td->pages[0], td->version[0]);
-	}
-
-	/* Read the mirror version, if available */
-	if (md && (md->options & NAND_BBT_VERSION)) {
-		scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
-			      mtd->writesize, md);
-		md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
-		pr_info("Bad block table at page %d, version 0x%02X\n",
-			 md->pages[0], md->version[0]);
-	}
-}
-
-/* Scan a given block partially */
-static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
-			   loff_t offs, uint8_t *buf, int numpages)
-{
-	struct mtd_oob_ops ops;
-	int j, ret;
-
-	ops.ooblen = mtd->oobsize;
-	ops.oobbuf = buf;
-	ops.ooboffs = 0;
-	ops.datbuf = NULL;
-	ops.mode = MTD_OPS_PLACE_OOB;
-
-	for (j = 0; j < numpages; j++) {
-		/*
-		 * Read the full oob until read_oob is fixed to handle single
-		 * byte reads for 16 bit buswidth.
-		 */
-		ret = mtd_read_oob(mtd, offs, &ops);
-		/* Ignore ECC errors when checking for BBM */
-		if (ret && !mtd_is_bitflip_or_eccerr(ret))
-			return ret;
-
-		if (check_short_pattern(buf, bd))
-			return 1;
-
-		offs += mtd->writesize;
-	}
-	return 0;
-}
-
-/**
- * create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- * @chip: create the table for a specific chip, -1 read all chips; applies only
- *        if NAND_BBT_PERCHIP option is set
- *
- * Create a bad block table by scanning the device for the given good/bad block
- * identify pattern.
- */
-static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
-	struct nand_bbt_descr *bd, int chip)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int i, numblocks, numpages;
-	int startblock;
-	loff_t from;
-
-	pr_info("Scanning device for bad blocks\n");
-
-	if (bd->options & NAND_BBT_SCAN2NDPAGE)
-		numpages = 2;
-	else
-		numpages = 1;
-
-	if (chip == -1) {
-		numblocks = mtd->size >> this->bbt_erase_shift;
-		startblock = 0;
-		from = 0;
-	} else {
-		if (chip >= this->numchips) {
-			pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
-			       chip + 1, this->numchips);
-			return -EINVAL;
-		}
-		numblocks = this->chipsize >> this->bbt_erase_shift;
-		startblock = chip * numblocks;
-		numblocks += startblock;
-		from = (loff_t)startblock << this->bbt_erase_shift;
-	}
-
-	if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
-		from += mtd->erasesize - (mtd->writesize * numpages);
-
-	for (i = startblock; i < numblocks; i++) {
-		int ret;
-
-		BUG_ON(bd->options & NAND_BBT_NO_OOB);
-
-		ret = scan_block_fast(mtd, bd, from, buf, numpages);
-		if (ret < 0)
-			return ret;
-
-		if (ret) {
-			bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
-			pr_warn("Bad eraseblock %d at 0x%012llx\n",
-				i, (unsigned long long)from);
-			mtd->ecc_stats.badblocks++;
-		}
-
-		from += (1 << this->bbt_erase_shift);
-	}
-	return 0;
-}
-
-/**
- * search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- *
- * Read the bad block table by searching for a given ident pattern. Search is
- * performed either from the beginning up or from the end of the device
- * downwards. The search always starts at the start of a block. If the option
- * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
- * the bad block information of this chip. This is necessary to provide support
- * for certain DOC devices.
- *
- * The bbt ident pattern resides in the oob area of the first page in a block.
- */
-static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int i, chips;
-	int startblock, block, dir;
-	int scanlen = mtd->writesize + mtd->oobsize;
-	int bbtblocks;
-	int blocktopage = this->bbt_erase_shift - this->page_shift;
-
-	/* Search direction top -> down? */
-	if (td->options & NAND_BBT_LASTBLOCK) {
-		startblock = (mtd->size >> this->bbt_erase_shift) - 1;
-		dir = -1;
-	} else {
-		startblock = 0;
-		dir = 1;
-	}
-
-	/* Do we have a bbt per chip? */
-	if (td->options & NAND_BBT_PERCHIP) {
-		chips = this->numchips;
-		bbtblocks = this->chipsize >> this->bbt_erase_shift;
-		startblock &= bbtblocks - 1;
-	} else {
-		chips = 1;
-		bbtblocks = mtd->size >> this->bbt_erase_shift;
-	}
-
-	for (i = 0; i < chips; i++) {
-		/* Reset version information */
-		td->version[i] = 0;
-		td->pages[i] = -1;
-		/* Scan the maximum number of blocks */
-		for (block = 0; block < td->maxblocks; block++) {
-
-			int actblock = startblock + dir * block;
-			loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
-
-			/* Read first page */
-			scan_read(mtd, buf, offs, mtd->writesize, td);
-			if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
-				td->pages[i] = actblock << blocktopage;
-				if (td->options & NAND_BBT_VERSION) {
-					offs = bbt_get_ver_offs(mtd, td);
-					td->version[i] = buf[offs];
-				}
-				break;
-			}
-		}
-		startblock += this->chipsize >> this->bbt_erase_shift;
-	}
-	/* Check, if we found a bbt for each requested chip */
-	for (i = 0; i < chips; i++) {
-		if (td->pages[i] == -1)
-			pr_warn("Bad block table not found for chip %d\n", i);
-		else
-			pr_info("Bad block table found at page %d, version 0x%02X\n",
-				td->pages[i], td->version[i]);
-	}
-	return 0;
-}
-
-/**
- * search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- *
- * Search and read the bad block table(s).
- */
-static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
-			     struct nand_bbt_descr *td,
-			     struct nand_bbt_descr *md)
-{
-	/* Search the primary table */
-	search_bbt(mtd, buf, td);
-
-	/* Search the mirror table */
-	if (md)
-		search_bbt(mtd, buf, md);
-}
-
-/**
- * get_bbt_block - Get the first valid eraseblock suitable to store a BBT
- * @this: the NAND device
- * @td: the BBT description
- * @md: the mirror BBT descriptor
- * @chip: the CHIP selector
- *
- * This function returns a positive block number pointing to a valid eraseblock
- * suitable to store a BBT (i.e. in the range reserved for BBT), or -ENOSPC if
- * all blocks are already used or marked bad. If td->pages[chip] was already
- * pointing to a valid block we re-use it, otherwise we search for the next
- * valid one.
- */
-static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
-			 struct nand_bbt_descr *md, int chip)
-{
-	int startblock, dir, page, numblocks, i;
-
-	/*
-	 * There was already a version of the table, reuse the page. This
-	 * applies for absolute placement too, as we have the page number in
-	 * td->pages.
-	 */
-	if (td->pages[chip] != -1)
-		return td->pages[chip] >>
-				(this->bbt_erase_shift - this->page_shift);
-
-	numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
-	if (!(td->options & NAND_BBT_PERCHIP))
-		numblocks *= this->numchips;
-
-	/*
-	 * Automatic placement of the bad block table. Search direction
-	 * top -> down?
-	 */
-	if (td->options & NAND_BBT_LASTBLOCK) {
-		startblock = numblocks * (chip + 1) - 1;
-		dir = -1;
-	} else {
-		startblock = chip * numblocks;
-		dir = 1;
-	}
-
-	for (i = 0; i < td->maxblocks; i++) {
-		int block = startblock + dir * i;
-
-		/* Check, if the block is bad */
-		switch (bbt_get_entry(this, block)) {
-		case BBT_BLOCK_WORN:
-		case BBT_BLOCK_FACTORY_BAD:
-			continue;
-		}
-
-		page = block << (this->bbt_erase_shift - this->page_shift);
-
-		/* Check, if the block is used by the mirror table */
-		if (!md || md->pages[chip] != page)
-			return block;
-	}
-
-	return -ENOSPC;
-}
-
-/**
- * mark_bbt_block_bad - Mark one of the blocks reserved for BBT bad
- * @this: the NAND device
- * @td: the BBT description
- * @chip: the CHIP selector
- * @block: the BBT block to mark
- *
- * Blocks reserved for BBT can become bad. This function is a helper to mark
- * such blocks as bad. It takes care of updating the in-memory BBT, marking the
- * block as bad using a bad block marker and invalidating the associated
- * td->pages[] entry.
- */
-static void mark_bbt_block_bad(struct nand_chip *this,
-			       struct nand_bbt_descr *td,
-			       int chip, int block)
-{
-	struct mtd_info *mtd = nand_to_mtd(this);
-	loff_t to;
-	int res;
-
-	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
-
-	to = (loff_t)block << this->bbt_erase_shift;
-	res = this->block_markbad(mtd, to);
-	if (res)
-		pr_warn("nand_bbt: error %d while marking block %d bad\n",
-			res, block);
-
-	td->pages[chip] = -1;
-}
-
-/**
- * write_bbt - [GENERIC] (Re)write the bad block table
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @td: descriptor for the bad block table
- * @md: descriptor for the bad block table mirror
- * @chipsel: selector for a specific chip, -1 for all
- *
- * (Re)write the bad block table.
- */
-static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
-		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
-		     int chipsel)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct erase_info einfo;
-	int i, res, chip = 0;
-	int bits, page, offs, numblocks, sft, sftmsk;
-	int nrchips, pageoffs, ooboffs;
-	uint8_t msk[4];
-	uint8_t rcode = td->reserved_block_code;
-	size_t retlen, len = 0;
-	loff_t to;
-	struct mtd_oob_ops ops;
-
-	ops.ooblen = mtd->oobsize;
-	ops.ooboffs = 0;
-	ops.datbuf = NULL;
-	ops.mode = MTD_OPS_PLACE_OOB;
-
-	if (!rcode)
-		rcode = 0xff;
-	/* Write bad block table per chip rather than per device? */
-	if (td->options & NAND_BBT_PERCHIP) {
-		numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
-		/* Full device write or specific chip? */
-		if (chipsel == -1) {
-			nrchips = this->numchips;
-		} else {
-			nrchips = chipsel + 1;
-			chip = chipsel;
-		}
-	} else {
-		numblocks = (int)(mtd->size >> this->bbt_erase_shift);
-		nrchips = 1;
-	}
-
-	/* Loop through the chips */
-	while (chip < nrchips) {
-		int block;
-
-		block = get_bbt_block(this, td, md, chip);
-		if (block < 0) {
-			pr_err("No space left to write bad block table\n");
-			res = block;
-			goto outerr;
-		}
-
-		/*
-		 * get_bbt_block() returns a block number, shift the value to
-		 * get a page number.
-		 */
-		page = block << (this->bbt_erase_shift - this->page_shift);
-
-		/* Set up shift count and masks for the flash table */
-		bits = td->options & NAND_BBT_NRBITS_MSK;
-		msk[2] = ~rcode;
-		switch (bits) {
-		case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
-			msk[3] = 0x01;
-			break;
-		case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
-			msk[3] = 0x03;
-			break;
-		case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
-			msk[3] = 0x0f;
-			break;
-		case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
-			msk[3] = 0xff;
-			break;
-		default: return -EINVAL;
-		}
-
-		to = ((loff_t)page) << this->page_shift;
-
-		/* Must we save the block contents? */
-		if (td->options & NAND_BBT_SAVECONTENT) {
-			/* Make it block aligned */
-			to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
-			len = 1 << this->bbt_erase_shift;
-			res = mtd_read(mtd, to, len, &retlen, buf);
-			if (res < 0) {
-				if (retlen != len) {
-					pr_info("nand_bbt: error reading block for writing the bad block table\n");
-					return res;
-				}
-				pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
-			}
-			/* Read oob data */
-			ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
-			ops.oobbuf = &buf[len];
-			res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
-			if (res < 0 || ops.oobretlen != ops.ooblen)
-				goto outerr;
-
-			/* Calc the byte offset in the buffer */
-			pageoffs = page - (int)(to >> this->page_shift);
-			offs = pageoffs << this->page_shift;
-			/* Preset the bbt area with 0xff */
-			memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
-			ooboffs = len + (pageoffs * mtd->oobsize);
-
-		} else if (td->options & NAND_BBT_NO_OOB) {
-			ooboffs = 0;
-			offs = td->len;
-			/* The version byte */
-			if (td->options & NAND_BBT_VERSION)
-				offs++;
-			/* Calc length */
-			len = (size_t)(numblocks >> sft);
-			len += offs;
-			/* Make it page aligned! */
-			len = ALIGN(len, mtd->writesize);
-			/* Preset the buffer with 0xff */
-			memset(buf, 0xff, len);
-			/* Pattern is located at the beginning of the first page */
-			memcpy(buf, td->pattern, td->len);
-		} else {
-			/* Calc length */
-			len = (size_t)(numblocks >> sft);
-			/* Make it page aligned! */
-			len = ALIGN(len, mtd->writesize);
-			/* Preset the buffer with 0xff */
-			memset(buf, 0xff, len +
-			       (len >> this->page_shift) * mtd->oobsize);
-			offs = 0;
-			ooboffs = len;
-			/* Pattern is located in oob area of first page */
-			memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
-		}
-
-		if (td->options & NAND_BBT_VERSION)
-			buf[ooboffs + td->veroffs] = td->version[chip];
-
-		/* Walk through the memory table */
-		for (i = 0; i < numblocks; i++) {
-			uint8_t dat;
-			int sftcnt = (i << (3 - sft)) & sftmsk;
-			dat = bbt_get_entry(this, chip * numblocks + i);
-			/* Do not store the reserved bbt blocks! */
-			buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
-		}
-
-		memset(&einfo, 0, sizeof(einfo));
-		einfo.mtd = mtd;
-		einfo.addr = to;
-		einfo.len = 1 << this->bbt_erase_shift;
-		res = nand_erase_nand(mtd, &einfo, 1);
-		if (res < 0) {
-			pr_warn("nand_bbt: error %d while erasing BBT block\n",
-				res);
-			mark_bbt_block_bad(this, td, chip, block);
-			continue;
-		}
-
-		res = scan_write_bbt(mtd, to, len, buf,
-				td->options & NAND_BBT_NO_OOB ? NULL :
-				&buf[len]);
-		if (res < 0) {
-			pr_warn("nand_bbt: error %d while writing BBT block\n",
-				res);
-			mark_bbt_block_bad(this, td, chip, block);
-			continue;
-		}
-
-		pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
-			 (unsigned long long)to, td->version[chip]);
-
-		/* Mark it as used */
-		td->pages[chip++] = page;
-	}
-	return 0;
-
- outerr:
-	pr_warn("nand_bbt: error %d while writing bad block table\n", res);
-	return res;
-}
-
-/**
- * nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function creates a memory based bbt by scanning the device for
- * manufacturer / software marked good / bad blocks.
- */
-static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-
-	return create_bbt(mtd, this->buffers->databuf, bd, -1);
-}
-
-/**
- * check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
- * @buf: temporary buffer
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function checks the results of the previous call to read_bbt and creates
- * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
- * for the chip/device. Update is necessary if one of the tables is missing or
- * the version number of one table is less than that of the other.
- */
-static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
-{
-	int i, chips, writeops, create, chipsel, res, res2;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct nand_bbt_descr *td = this->bbt_td;
-	struct nand_bbt_descr *md = this->bbt_md;
-	struct nand_bbt_descr *rd, *rd2;
-
-	/* Do we have a bbt per chip? */
-	if (td->options & NAND_BBT_PERCHIP)
-		chips = this->numchips;
-	else
-		chips = 1;
-
-	for (i = 0; i < chips; i++) {
-		writeops = 0;
-		create = 0;
-		rd = NULL;
-		rd2 = NULL;
-		res = res2 = 0;
-		/* Per chip or per device? */
-		chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
-		/* Mirrored table available? */
-		if (md) {
-			if (td->pages[i] == -1 && md->pages[i] == -1) {
-				create = 1;
-				writeops = 0x03;
-			} else if (td->pages[i] == -1) {
-				rd = md;
-				writeops = 0x01;
-			} else if (md->pages[i] == -1) {
-				rd = td;
-				writeops = 0x02;
-			} else if (td->version[i] == md->version[i]) {
-				rd = td;
-				if (!(td->options & NAND_BBT_VERSION))
-					rd2 = md;
-			} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
-				rd = td;
-				writeops = 0x02;
-			} else {
-				rd = md;
-				writeops = 0x01;
-			}
-		} else {
-			if (td->pages[i] == -1) {
-				create = 1;
-				writeops = 0x01;
-			} else {
-				rd = td;
-			}
-		}
-
-		if (create) {
-			/* Create the bad block table by scanning the device? */
-			if (!(td->options & NAND_BBT_CREATE))
-				continue;
-
-			/* Create the table in memory by scanning the chip(s) */
-			if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
-				create_bbt(mtd, buf, bd, chipsel);
-
-			td->version[i] = 1;
-			if (md)
-				md->version[i] = 1;
-		}
-
-		/* Read back first? */
-		if (rd) {
-			res = read_abs_bbt(mtd, buf, rd, chipsel);
-			if (mtd_is_eccerr(res)) {
-				/* Mark table as invalid */
-				rd->pages[i] = -1;
-				rd->version[i] = 0;
-				i--;
-				continue;
-			}
-		}
-		/* If they weren't versioned, read both */
-		if (rd2) {
-			res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
-			if (mtd_is_eccerr(res2)) {
-				/* Mark table as invalid */
-				rd2->pages[i] = -1;
-				rd2->version[i] = 0;
-				i--;
-				continue;
-			}
-		}
-
-		/* Scrub the flash table(s)? */
-		if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
-			writeops = 0x03;
-
-		/* Update version numbers before writing */
-		if (md) {
-			td->version[i] = max(td->version[i], md->version[i]);
-			md->version[i] = td->version[i];
-		}
-
-		/* Write the bad block table to the device? */
-		if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
-			res = write_bbt(mtd, buf, td, md, chipsel);
-			if (res < 0)
-				return res;
-		}
-
-		/* Write the mirror bad block table to the device? */
-		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
-			res = write_bbt(mtd, buf, md, td, chipsel);
-			if (res < 0)
-				return res;
-		}
-	}
-	return 0;
-}
-
-/**
- * mark_bbt_region - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
- * @td: bad block table descriptor
- *
- * The bad block table regions are marked as "bad" to prevent accidental
- * erasures / writes. The regions are identified by the mark 0x02.
- */
-static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int i, j, chips, block, nrblocks, update;
-	uint8_t oldval;
-
-	/* Do we have a bbt per chip? */
-	if (td->options & NAND_BBT_PERCHIP) {
-		chips = this->numchips;
-		nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
-	} else {
-		chips = 1;
-		nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
-	}
-
-	for (i = 0; i < chips; i++) {
-		if ((td->options & NAND_BBT_ABSPAGE) ||
-		    !(td->options & NAND_BBT_WRITE)) {
-			if (td->pages[i] == -1)
-				continue;
-			block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
-			oldval = bbt_get_entry(this, block);
-			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
-			if ((oldval != BBT_BLOCK_RESERVED) &&
-					td->reserved_block_code)
-				nand_update_bbt(mtd, (loff_t)block <<
-						this->bbt_erase_shift);
-			continue;
-		}
-		update = 0;
-		if (td->options & NAND_BBT_LASTBLOCK)
-			block = ((i + 1) * nrblocks) - td->maxblocks;
-		else
-			block = i * nrblocks;
-		for (j = 0; j < td->maxblocks; j++) {
-			oldval = bbt_get_entry(this, block);
-			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
-			if (oldval != BBT_BLOCK_RESERVED)
-				update = 1;
-			block++;
-		}
-		/*
-		 * If we want reserved blocks to be recorded to flash, and some
-		 * new ones have been marked, then we need to update the stored
-		 * bbts.  This should only happen once.
-		 */
-		if (update && td->reserved_block_code)
-			nand_update_bbt(mtd, (loff_t)(block - 1) <<
-					this->bbt_erase_shift);
-	}
-}
-
-/**
- * verify_bbt_descr - verify the bad block description
- * @mtd: MTD device structure
- * @bd: the table to verify
- *
- * This function performs a few sanity checks on the bad block description
- * table.
- */
-static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	u32 pattern_len;
-	u32 bits;
-	u32 table_size;
-
-	if (!bd)
-		return;
-
-	pattern_len = bd->len;
-	bits = bd->options & NAND_BBT_NRBITS_MSK;
-
-	BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
-			!(this->bbt_options & NAND_BBT_USE_FLASH));
-	BUG_ON(!bits);
-
-	if (bd->options & NAND_BBT_VERSION)
-		pattern_len++;
-
-	if (bd->options & NAND_BBT_NO_OOB) {
-		BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
-		BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
-		BUG_ON(bd->offs);
-		if (bd->options & NAND_BBT_VERSION)
-			BUG_ON(bd->veroffs != bd->len);
-		BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
-	}
-
-	if (bd->options & NAND_BBT_PERCHIP)
-		table_size = this->chipsize >> this->bbt_erase_shift;
-	else
-		table_size = mtd->size >> this->bbt_erase_shift;
-	table_size >>= 3;
-	table_size *= bits;
-	if (bd->options & NAND_BBT_NO_OOB)
-		table_size += pattern_len;
-	BUG_ON(table_size > (1 << this->bbt_erase_shift));
-}
-
-/**
- * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
- * @bd: descriptor for the good/bad block search pattern
- *
- * The function checks if bad block table(s) are already available. If
- * not, it scans the device for manufacturer marked good / bad blocks and writes
- * the bad block table(s) to the selected place.
- *
- * The bad block table memory is allocated here. It must be freed by calling
- * the nand_free_bbt function.
- */
-static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int len, res;
-	uint8_t *buf;
-	struct nand_bbt_descr *td = this->bbt_td;
-	struct nand_bbt_descr *md = this->bbt_md;
-
-	len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
-	/*
-	 * Allocate memory (2 bits per block) and clear the memory bad block
-	 * table.
-	 */
-	this->bbt = kzalloc(len, GFP_KERNEL);
-	if (!this->bbt)
-		return -ENOMEM;
-
-	/*
-	 * If no primary table descriptor is given, scan the device to build a
-	 * memory based bad block table.
-	 */
-	if (!td) {
-		if ((res = nand_memory_bbt(mtd, bd))) {
-			pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
-			goto err;
-		}
-		return 0;
-	}
-	verify_bbt_descr(mtd, td);
-	verify_bbt_descr(mtd, md);
-
-	/* Allocate a temporary buffer for one eraseblock incl. oob */
-	len = (1 << this->bbt_erase_shift);
-	len += (len >> this->page_shift) * mtd->oobsize;
-	buf = vmalloc(len);
-	if (!buf) {
-		res = -ENOMEM;
-		goto err;
-	}
-
-	/* Is the bbt at a given page? */
-	if (td->options & NAND_BBT_ABSPAGE) {
-		read_abs_bbts(mtd, buf, td, md);
-	} else {
-		/* Search the bad block table using a pattern in oob */
-		search_read_bbts(mtd, buf, td, md);
-	}
-
-	res = check_create(mtd, buf, bd);
-	if (res)
-		goto err;
-
-	/* Prevent the bbt regions from erasing / writing */
-	mark_bbt_region(mtd, td);
-	if (md)
-		mark_bbt_region(mtd, md);
-
-	vfree(buf);
-	return 0;
-
-err:
-	kfree(this->bbt);
-	this->bbt = NULL;
-	return res;
-}
-
-/**
- * nand_update_bbt - update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
- *
- * The function updates the bad block table(s).
- */
-static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int len, res = 0;
-	int chip, chipsel;
-	uint8_t *buf;
-	struct nand_bbt_descr *td = this->bbt_td;
-	struct nand_bbt_descr *md = this->bbt_md;
-
-	if (!this->bbt || !td)
-		return -EINVAL;
-
-	/* Allocate a temporary buffer for one eraseblock incl. oob */
-	len = (1 << this->bbt_erase_shift);
-	len += (len >> this->page_shift) * mtd->oobsize;
-	buf = kmalloc(len, GFP_KERNEL);
-	if (!buf)
-		return -ENOMEM;
-
-	/* Do we have a bbt per chip? */
-	if (td->options & NAND_BBT_PERCHIP) {
-		chip = (int)(offs >> this->chip_shift);
-		chipsel = chip;
-	} else {
-		chip = 0;
-		chipsel = -1;
-	}
-
-	td->version[chip]++;
-	if (md)
-		md->version[chip]++;
-
-	/* Write the bad block table to the device? */
-	if (td->options & NAND_BBT_WRITE) {
-		res = write_bbt(mtd, buf, td, md, chipsel);
-		if (res < 0)
-			goto out;
-	}
-	/* Write the mirror bad block table to the device? */
-	if (md && (md->options & NAND_BBT_WRITE)) {
-		res = write_bbt(mtd, buf, md, td, chipsel);
-	}
-
- out:
-	kfree(buf);
-	return res;
-}
-
-/*
- * Define some generic bad / good block scan patterns which are used
- * while scanning a device for factory marked good / bad blocks.
- */
-static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-
-/* Generic flash bbt descriptors */
-static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
-static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
-
-static struct nand_bbt_descr bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs =	8,
-	.len = 4,
-	.veroffs = 12,
-	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
-	.pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
-	.offs =	8,
-	.len = 4,
-	.veroffs = 12,
-	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
-	.pattern = mirror_pattern
-};
-
-static struct nand_bbt_descr bbt_main_no_oob_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
-		| NAND_BBT_NO_OOB,
-	.len = 4,
-	.veroffs = 4,
-	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
-	.pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
-		| NAND_BBT_NO_OOB,
-	.len = 4,
-	.veroffs = 4,
-	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
-	.pattern = mirror_pattern
-};
-
-#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
-/**
- * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
- * @this: NAND chip to create descriptor for
- *
- * This function allocates and initializes a nand_bbt_descr for BBM detection
- * based on the properties of @this. The new descriptor is stored in
- * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
- * passed to this function.
- */
-static int nand_create_badblock_pattern(struct nand_chip *this)
-{
-	struct nand_bbt_descr *bd;
-	if (this->badblock_pattern) {
-		pr_warn("Bad block pattern already allocated; not replacing\n");
-		return -EINVAL;
-	}
-	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
-	if (!bd)
-		return -ENOMEM;
-	bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
-	bd->offs = this->badblockpos;
-	bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
-	bd->pattern = scan_ff_pattern;
-	bd->options |= NAND_BBT_DYNAMICSTRUCT;
-	this->badblock_pattern = bd;
-	return 0;
-}
-
-/**
- * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
- *
- * This function selects the default bad block table support for the device and
- * calls the nand_scan_bbt function.
- */
-int nand_default_bbt(struct mtd_info *mtd)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int ret;
-
-	/* Is a flash based bad block table requested? */
-	if (this->bbt_options & NAND_BBT_USE_FLASH) {
-		/* Use the default pattern descriptors */
-		if (!this->bbt_td) {
-			if (this->bbt_options & NAND_BBT_NO_OOB) {
-				this->bbt_td = &bbt_main_no_oob_descr;
-				this->bbt_md = &bbt_mirror_no_oob_descr;
-			} else {
-				this->bbt_td = &bbt_main_descr;
-				this->bbt_md = &bbt_mirror_descr;
-			}
-		}
-	} else {
-		this->bbt_td = NULL;
-		this->bbt_md = NULL;
-	}
-
-	if (!this->badblock_pattern) {
-		ret = nand_create_badblock_pattern(this);
-		if (ret)
-			return ret;
-	}
-
-	return nand_scan_bbt(mtd, this->badblock_pattern);
-}
-
-/**
- * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
- * @mtd: MTD device structure
- * @offs: offset in the device
- */
-int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int block;
-
-	block = (int)(offs >> this->bbt_erase_shift);
-	return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
-}
-
-/**
- * nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
- * @offs: offset in the device
- * @allowbbt: allow access to bad block table region
- */
-int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int block, res;
-
-	block = (int)(offs >> this->bbt_erase_shift);
-	res = bbt_get_entry(this, block);
-
-	pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
-		 (unsigned int)offs, block, res);
-
-	switch (res) {
-	case BBT_BLOCK_GOOD:
-		return 0;
-	case BBT_BLOCK_WORN:
-		return 1;
-	case BBT_BLOCK_RESERVED:
-		return allowbbt ? 0 : 1;
-	}
-	return 1;
-}
-
-/**
- * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
- * @mtd: MTD device structure
- * @offs: offset of the bad block
- */
-int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	int block, ret = 0;
-
-	block = (int)(offs >> this->bbt_erase_shift);
-
-	/* Mark bad block in memory */
-	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
-
-	/* Update flash-based bad block table */
-	if (this->bbt_options & NAND_BBT_USE_FLASH)
-		ret = nand_update_bbt(mtd, offs);
-
-	return ret;
-}
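For context, here is a minimal sketch (hypothetical driver code, not part of
this patch) of how a controller driver opts in to the flash-based BBT
implemented above: it only needs to set bbt_options before nand_scan(), so
that nand_default_bbt() picks one of the descriptor pairs defined at the end
of the file.

#include <linux/mtd/rawnand.h>

static void example_enable_flash_bbt(struct nand_chip *chip)
{
	/*
	 * Store the BBT in flash. With NAND_BBT_NO_OOB the ident pattern and
	 * version byte are kept in the data area, so nand_default_bbt()
	 * selects bbt_main_no_oob_descr / bbt_mirror_no_oob_descr.
	 */
	chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
}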
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
deleted file mode 100644
index 505441c9373b..000000000000
--- a/drivers/mtd/nand/nand_bch.c
+++ /dev/null
@@ -1,234 +0,0 @@ 
-/*
- * This file provides ECC correction for more than 1 bit per block of data,
- * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
- *
- * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
- *
- * This file is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 or (at your option) any
- * later version.
- *
- * This file is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this file; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_bch.h>
-#include <linux/bch.h>
-
-/**
- * struct nand_bch_control - private NAND BCH control structure
- * @bch:       BCH control structure
- * @errloc:    error location array
- * @eccmask:   XOR ecc mask, allows erased pages to be decoded as valid
- */
-struct nand_bch_control {
-	struct bch_control   *bch;
-	unsigned int         *errloc;
-	unsigned char        *eccmask;
-};
-
-/**
- * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
- * @mtd:	MTD block structure
- * @buf:	input buffer with raw data
- * @code:	output buffer with ECC
- */
-int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
-			   unsigned char *code)
-{
-	const struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_bch_control *nbc = chip->ecc.priv;
-	unsigned int i;
-
-	memset(code, 0, chip->ecc.bytes);
-	encode_bch(nbc->bch, buf, chip->ecc.size, code);
-
-	/* apply mask so that an erased page is a valid codeword */
-	for (i = 0; i < chip->ecc.bytes; i++)
-		code[i] ^= nbc->eccmask[i];
-
-	return 0;
-}
-EXPORT_SYMBOL(nand_bch_calculate_ecc);
-
-/**
- * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd:	MTD block structure
- * @buf:	raw data read from the chip
- * @read_ecc:	ECC from the chip
- * @calc_ecc:	the ECC calculated from raw data
- *
- * Detect and correct bit errors for a data byte block
- */
-int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
-			  unsigned char *read_ecc, unsigned char *calc_ecc)
-{
-	const struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_bch_control *nbc = chip->ecc.priv;
-	unsigned int *errloc = nbc->errloc;
-	int i, count;
-
-	count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
-			   NULL, errloc);
-	if (count > 0) {
-		for (i = 0; i < count; i++) {
-			if (errloc[i] < (chip->ecc.size*8))
-				/* error is located in data, correct it */
-				buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
-			/* else error in ecc, no action needed */
-
-			pr_debug("%s: corrected bitflip %u\n", __func__,
-					errloc[i]);
-		}
-	} else if (count < 0) {
-		printk(KERN_ERR "ecc unrecoverable error\n");
-		count = -EBADMSG;
-	}
-	return count;
-}
-EXPORT_SYMBOL(nand_bch_correct_data);
-
-/**
- * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
- * @mtd:	MTD block structure
- *
- * Returns:
- *  a pointer to a new NAND BCH control structure, or NULL upon failure
- *
- * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
- * are used to compute BCH parameters m (Galois field order) and t (error
- * correction capability). @eccbytes should be equal to the number of bytes
- * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
- *
- * Example: to configure 4 bit correction per 512 bytes, you should pass
- * @eccsize = 512  (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
- * @eccbytes = 7   (7 bytes are required to store m*t = 13*4 = 52 bits)
- */
-struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	unsigned int m, t, eccsteps, i;
-	struct nand_bch_control *nbc = NULL;
-	unsigned char *erased_page;
-	unsigned int eccsize = nand->ecc.size;
-	unsigned int eccbytes = nand->ecc.bytes;
-	unsigned int eccstrength = nand->ecc.strength;
-
-	if (!eccbytes && eccstrength) {
-		eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
-		nand->ecc.bytes = eccbytes;
-	}
-
-	if (!eccsize || !eccbytes) {
-		printk(KERN_WARNING "ecc parameters not supplied\n");
-		goto fail;
-	}
-
-	m = fls(1+8*eccsize);
-	t = (eccbytes*8)/m;
-
-	nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
-	if (!nbc)
-		goto fail;
-
-	nbc->bch = init_bch(m, t, 0);
-	if (!nbc->bch)
-		goto fail;
-
-	/* verify that eccbytes has the expected value */
-	if (nbc->bch->ecc_bytes != eccbytes) {
-		printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
-		       eccbytes, nbc->bch->ecc_bytes);
-		goto fail;
-	}
-
-	eccsteps = mtd->writesize/eccsize;
-
-	/* Check that we have an oob layout description. */
-	if (!mtd->ooblayout) {
-		pr_warn("missing oob scheme\n");
-		goto fail;
-	}
-
-	/* sanity checks */
-	if (8*(eccsize+eccbytes) >= (1 << m)) {
-		printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
-		goto fail;
-	}
-
-	/*
-	 * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
-	 * which is called by mtd_ooblayout_count_eccbytes().
-	 * Make sure they are properly initialized before calling
-	 * mtd_ooblayout_count_eccbytes().
-	 * FIXME: we should probably rework the sequencing in nand_scan_tail()
-	 * to avoid setting those fields twice.
-	 */
-	nand->ecc.steps = eccsteps;
-	nand->ecc.total = eccsteps * eccbytes;
-	if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
-		printk(KERN_WARNING "invalid ecc layout\n");
-		goto fail;
-	}
-
-	nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
-	nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
-	if (!nbc->eccmask || !nbc->errloc)
-		goto fail;
-	/*
-	 * compute and store the inverted ecc of an erased ecc block
-	 */
-	erased_page = kmalloc(eccsize, GFP_KERNEL);
-	if (!erased_page)
-		goto fail;
-
-	memset(erased_page, 0xff, eccsize);
-	memset(nbc->eccmask, 0, eccbytes);
-	encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
-	kfree(erased_page);
-
-	for (i = 0; i < eccbytes; i++)
-		nbc->eccmask[i] ^= 0xff;
-
-	if (!eccstrength)
-		nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
-
-	return nbc;
-fail:
-	nand_bch_free(nbc);
-	return NULL;
-}
-EXPORT_SYMBOL(nand_bch_init);
-
-/**
- * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
- * @nbc:	NAND BCH control structure
- */
-void nand_bch_free(struct nand_bch_control *nbc)
-{
-	if (nbc) {
-		free_bch(nbc->bch);
-		kfree(nbc->errloc);
-		kfree(nbc->eccmask);
-		kfree(nbc);
-	}
-}
-EXPORT_SYMBOL(nand_bch_free);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
-MODULE_DESCRIPTION("NAND software BCH ECC support");
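As a cross-check of the parameter arithmetic in nand_bch_init(), here is a
small standalone sketch (fls_sketch() is a stand-in for the kernel's fls())
reproducing the 4-bit-per-512-byte example from the kerneldoc above.

#include <stdio.h>

static int fls_sketch(unsigned int x)	/* same semantics as the kernel fls() */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned int eccsize = 512, eccbytes = 7;
	unsigned int m = fls_sketch(1 + 8 * eccsize);	/* fls(4097) = 13 */
	unsigned int t = (eccbytes * 8) / m;		/* 56 / 13 = 4 */

	printf("m=%u t=%u (ecc_bytes expected: %u)\n", m, t, (m * t + 7) / 8);
	return 0;
}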
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
deleted file mode 100644
index 7613a0388044..000000000000
--- a/drivers/mtd/nand/nand_ecc.c
+++ /dev/null
@@ -1,533 +0,0 @@ 
-/*
- * This file contains an ECC algorithm that detects and corrects 1 bit
- * errors in a 256 byte block of data.
- *
- * drivers/mtd/nand/nand_ecc.c
- *
- * Copyright © 2008 Koninklijke Philips Electronics NV.
- *                  Author: Frans Meulenbroeks
- *
- * Completely replaces the previous ECC implementation which was written by:
- *   Steven J. Hill (sjhill@realitydiluted.com)
- *   Thomas Gleixner (tglx@linutronix.de)
- *
- * Information on how this algorithm works and how it was developed
- * can be found in Documentation/mtd/nand_ecc.txt
- *
- * This file is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 or (at your option) any
- * later version.
- *
- * This file is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this file; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- */
-
-/*
- * The STANDALONE macro is useful when running the code outside the kernel,
- * e.g. when running the code in a testbed or a benchmark program.
- * When STANDALONE is defined, the module related macros are defined as no-ops
- * and the Linux include files are left out.
- * Instead a private declaration of mtd_info is given to satisfy the compiler
- * (the code does not use mtd_info, so the code does not care).
- */
-#ifndef STANDALONE
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <asm/byteorder.h>
-#else
-#include <stdint.h>
-struct mtd_info;
-#define EXPORT_SYMBOL(x)  /* x */
-
-#define MODULE_LICENSE(x)	/* x */
-#define MODULE_AUTHOR(x)	/* x */
-#define MODULE_DESCRIPTION(x)	/* x */
-
-#define pr_err printf
-#endif
-
-/*
- * invparity is a 256 byte table that contains the odd parity
- * for each byte. So if the number of bits in a byte is even,
- * the array element is 1, and when the number of bits is odd
- * the array element is 0.
- */
-static const char invparity[256] = {
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
-	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
-};
-
-/*
- * bitsperbyte contains the number of bits per byte;
- * this is only used for testing and repairing parity
- * (a precalculated value slightly improves performance)
- */
-static const char bitsperbyte[256] = {
-	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
-	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
-	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
-	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
-	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
-};
-
-/*
- * addressbits is a lookup table to filter out the bits from the xor-ed
- * ECC data that identify the faulty location.
- * this is only used for repairing parity;
- * see the comments in nand_correct_data for more details.
- */
-static const char addressbits[256] = {
-	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
-	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
-	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
-	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
-	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
-	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
-	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
-	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
-	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
-	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
-	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
-	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
-	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
-	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
-	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
-	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
-	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
-	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
-	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
-	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
-	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
-	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
-	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
-	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
-	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
-	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
-	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
-	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
-	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
-	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
-	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
-	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
-};
-
-/**
- * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
- *			 block
- * @buf:	input buffer with raw data
- * @eccsize:	data bytes per ECC step (256 or 512)
- * @code:	output buffer with ECC
- */
-void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
-		       unsigned char *code)
-{
-	int i;
-	const uint32_t *bp = (uint32_t *)buf;
-	/* 256 or 512 bytes/ecc  */
-	const uint32_t eccsize_mult = eccsize >> 8;
-	uint32_t cur;		/* current value in buffer */
-	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
-	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
-	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
-	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
-	uint32_t par;		/* the cumulative parity for all data */
-	uint32_t tmppar;	/* the cumulative parity for this iteration;
-				   for rp12, rp14 and rp16 at the end of the
-				   loop */
-
-	par = 0;
-	rp4 = 0;
-	rp6 = 0;
-	rp8 = 0;
-	rp10 = 0;
-	rp12 = 0;
-	rp14 = 0;
-	rp16 = 0;
-
-	/*
-	 * The loop is unrolled a number of times;
-	 * this avoids if statements to decide which rp value to update.
-	 * Also we process the data by longwords.
-	 * Note: passing unaligned data might give a performance penalty.
-	 * It is assumed that the buffers are aligned.
-	 * tmppar is the cumulative sum of this iteration; it is needed for
-	 * calculating rp12, rp14, rp16 and par, and is also used as a
-	 * performance improvement for rp6, rp8 and rp10.
-	 */
-	for (i = 0; i < eccsize_mult << 2; i++) {
-		cur = *bp++;
-		tmppar = cur;
-		rp4 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp6 ^= tmppar;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp8 ^= tmppar;
-
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		rp6 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp6 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp10 ^= tmppar;
-
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		rp6 ^= cur;
-		rp8 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp6 ^= cur;
-		rp8 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		rp8 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp8 ^= cur;
-
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		rp6 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp6 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-		rp4 ^= cur;
-		cur = *bp++;
-		tmppar ^= cur;
-
-		par ^= tmppar;
-		if ((i & 0x1) == 0)
-			rp12 ^= tmppar;
-		if ((i & 0x2) == 0)
-			rp14 ^= tmppar;
-		if (eccsize_mult == 2 && (i & 0x4) == 0)
-			rp16 ^= tmppar;
-	}
-
-	/*
-	 * Handle the fact that we use longword operations: we'll bring
-	 * rp4..rp14..rp16 back to single byte entities by shifting and
-	 * xoring. First fold the upper and lower 16 bits, then the upper
-	 * and lower 8 bits.
-	 */
-	rp4 ^= (rp4 >> 16);
-	rp4 ^= (rp4 >> 8);
-	rp4 &= 0xff;
-	rp6 ^= (rp6 >> 16);
-	rp6 ^= (rp6 >> 8);
-	rp6 &= 0xff;
-	rp8 ^= (rp8 >> 16);
-	rp8 ^= (rp8 >> 8);
-	rp8 &= 0xff;
-	rp10 ^= (rp10 >> 16);
-	rp10 ^= (rp10 >> 8);
-	rp10 &= 0xff;
-	rp12 ^= (rp12 >> 16);
-	rp12 ^= (rp12 >> 8);
-	rp12 &= 0xff;
-	rp14 ^= (rp14 >> 16);
-	rp14 ^= (rp14 >> 8);
-	rp14 &= 0xff;
-	if (eccsize_mult == 2) {
-		rp16 ^= (rp16 >> 16);
-		rp16 ^= (rp16 >> 8);
-		rp16 &= 0xff;
-	}
-
-	/*
-	 * we also need to calculate the row parity for rp0..rp3
-	 * This is present in par, because par is now
-	 * rp3 rp3 rp2 rp2 in little endian and
-	 * rp2 rp2 rp3 rp3 in big endian
-	 * as well as
-	 * rp1 rp0 rp1 rp0 in little endian and
-	 * rp0 rp1 rp0 rp1 in big endian
-	 * First calculate rp2 and rp3
-	 */
-#ifdef __BIG_ENDIAN
-	rp2 = (par >> 16);
-	rp2 ^= (rp2 >> 8);
-	rp2 &= 0xff;
-	rp3 = par & 0xffff;
-	rp3 ^= (rp3 >> 8);
-	rp3 &= 0xff;
-#else
-	rp3 = (par >> 16);
-	rp3 ^= (rp3 >> 8);
-	rp3 &= 0xff;
-	rp2 = par & 0xffff;
-	rp2 ^= (rp2 >> 8);
-	rp2 &= 0xff;
-#endif
-
-	/* reduce par to 16 bits then calculate rp1 and rp0 */
-	par ^= (par >> 16);
-#ifdef __BIG_ENDIAN
-	rp0 = (par >> 8) & 0xff;
-	rp1 = (par & 0xff);
-#else
-	rp1 = (par >> 8) & 0xff;
-	rp0 = (par & 0xff);
-#endif
-
-	/* finally reduce par to 8 bits */
-	par ^= (par >> 8);
-	par &= 0xff;
-
-	/*
-	 * and calculate rp5..rp15..rp17
-	 * note that par = rp4 ^ rp5 and due to the commutative property
-	 * of the ^ operator we can say:
-	 * rp5 = (par ^ rp4);
-	 * The & 0xff seems superfluous, but benchmarking showed that
-	 * leaving it out gives slightly worse results. No idea why; probably
-	 * it has to do with the way the Pentium pipeline is organized.
-	 */
-	rp5 = (par ^ rp4) & 0xff;
-	rp7 = (par ^ rp6) & 0xff;
-	rp9 = (par ^ rp8) & 0xff;
-	rp11 = (par ^ rp10) & 0xff;
-	rp13 = (par ^ rp12) & 0xff;
-	rp15 = (par ^ rp14) & 0xff;
-	if (eccsize_mult == 2)
-		rp17 = (par ^ rp16) & 0xff;
-
-	/*
-	 * Finally calculate the ECC bits.
-	 * Again it might seem that further performance optimisations are
-	 * possible, but benchmarks showed that, on the system this was
-	 * developed on, the code below is the fastest.
-	 */
-#ifdef CONFIG_MTD_NAND_ECC_SMC
-	code[0] =
-	    (invparity[rp7] << 7) |
-	    (invparity[rp6] << 6) |
-	    (invparity[rp5] << 5) |
-	    (invparity[rp4] << 4) |
-	    (invparity[rp3] << 3) |
-	    (invparity[rp2] << 2) |
-	    (invparity[rp1] << 1) |
-	    (invparity[rp0]);
-	code[1] =
-	    (invparity[rp15] << 7) |
-	    (invparity[rp14] << 6) |
-	    (invparity[rp13] << 5) |
-	    (invparity[rp12] << 4) |
-	    (invparity[rp11] << 3) |
-	    (invparity[rp10] << 2) |
-	    (invparity[rp9] << 1)  |
-	    (invparity[rp8]);
-#else
-	code[1] =
-	    (invparity[rp7] << 7) |
-	    (invparity[rp6] << 6) |
-	    (invparity[rp5] << 5) |
-	    (invparity[rp4] << 4) |
-	    (invparity[rp3] << 3) |
-	    (invparity[rp2] << 2) |
-	    (invparity[rp1] << 1) |
-	    (invparity[rp0]);
-	code[0] =
-	    (invparity[rp15] << 7) |
-	    (invparity[rp14] << 6) |
-	    (invparity[rp13] << 5) |
-	    (invparity[rp12] << 4) |
-	    (invparity[rp11] << 3) |
-	    (invparity[rp10] << 2) |
-	    (invparity[rp9] << 1)  |
-	    (invparity[rp8]);
-#endif
-	if (eccsize_mult == 1)
-		code[2] =
-		    (invparity[par & 0xf0] << 7) |
-		    (invparity[par & 0x0f] << 6) |
-		    (invparity[par & 0xcc] << 5) |
-		    (invparity[par & 0x33] << 4) |
-		    (invparity[par & 0xaa] << 3) |
-		    (invparity[par & 0x55] << 2) |
-		    3;
-	else
-		code[2] =
-		    (invparity[par & 0xf0] << 7) |
-		    (invparity[par & 0x0f] << 6) |
-		    (invparity[par & 0xcc] << 5) |
-		    (invparity[par & 0x33] << 4) |
-		    (invparity[par & 0xaa] << 3) |
-		    (invparity[par & 0x55] << 2) |
-		    (invparity[rp17] << 1) |
-		    (invparity[rp16] << 0);
-}
-EXPORT_SYMBOL(__nand_calculate_ecc);
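
The folding used above in __nand_calculate_ecc() on rp4..rp16 (and on par) is
worth calling out: each rp accumulator is built up as a 32-bit XOR of
longwords, and the pair of shifts reduces it to the parity pattern of a single
byte. A minimal stand-alone sketch of the same trick, compilable in user space
(the names and the sample value are illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	/* Fold a 32-bit XOR accumulator down to one byte, exactly as done
	 * for rp4..rp16 above: XOR the upper half onto the lower half,
	 * then repeat for the remaining 16 bits. */
	static uint8_t fold32_to_byte(uint32_t acc)
	{
		acc ^= acc >> 16;
		acc ^= acc >> 8;
		return acc & 0xff;
	}

	int main(void)
	{
		uint32_t acc = 0x12345678;	/* accumulator built from 4 data bytes */

		/* 0x12 ^ 0x34 ^ 0x56 ^ 0x78 == 0x08 */
		printf("folded = 0x%02x\n", fold32_to_byte(acc));
		return 0;
	}
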
-
-/**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
- *			 block
- * @mtd:	MTD block structure
- * @buf:	input buffer with raw data
- * @code:	output buffer with ECC
- */
-int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
-		       unsigned char *code)
-{
-	__nand_calculate_ecc(buf,
-			mtd_to_nand(mtd)->ecc.size, code);
-
-	return 0;
-}
-EXPORT_SYMBOL(nand_calculate_ecc);
-
-/**
- * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @buf:	raw data read from the chip
- * @read_ecc:	ECC from the chip
- * @calc_ecc:	the ECC calculated from raw data
- * @eccsize:	data bytes per ECC step (256 or 512)
- *
- * Detect and correct a single bit error for an eccsize-byte block
- */
-int __nand_correct_data(unsigned char *buf,
-			unsigned char *read_ecc, unsigned char *calc_ecc,
-			unsigned int eccsize)
-{
-	unsigned char b0, b1, b2, bit_addr;
-	unsigned int byte_addr;
-	/* 256 or 512 bytes/ecc  */
-	const uint32_t eccsize_mult = eccsize >> 8;
-
-	/*
-	 * b0 to b2 indicate which bit is faulty (if any).
-	 * We might need the xor results more than once,
-	 * so keep them in local vars.
-	 */
-#ifdef CONFIG_MTD_NAND_ECC_SMC
-	b0 = read_ecc[0] ^ calc_ecc[0];
-	b1 = read_ecc[1] ^ calc_ecc[1];
-#else
-	b0 = read_ecc[1] ^ calc_ecc[1];
-	b1 = read_ecc[0] ^ calc_ecc[0];
-#endif
-	b2 = read_ecc[2] ^ calc_ecc[2];
-
-	/* check if there are any bitfaults */
-
-	/* repeated if statements are slightly more efficient than a switch */
-	/* the checks are ordered by likelihood */
-
-	if ((b0 | b1 | b2) == 0)
-		return 0;	/* no error */
-
-	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
-	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
-	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
-	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
-	/* single bit error */
-		/*
-		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
-		 * byte, cp 5/3/1 indicate the faulty bit.
-		 * A lookup table (called addressbits) is used to filter
-		 * the bits from the byte they are in.
-		 * A marginal optimisation is possible by having three
-		 * different lookup tables: one as we have now (for b0),
-		 * one for b2 (that would avoid the >> 1), and one for b1
-		 * (with all values << 4). However it was felt that
-		 * introducing two more tables would hardly justify the gain.
-		 *
-		 * The b2 shift is there to get rid of the lowest two bits.
-		 * We could also do addressbits[b2] >> 1, but performance-wise
-		 * it does not make any difference.
-		 */
-		if (eccsize_mult == 1)
-			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
-		else
-			byte_addr = (addressbits[b2 & 0x3] << 8) +
-				    (addressbits[b1] << 4) + addressbits[b0];
-		bit_addr = addressbits[b2 >> 2];
-		/* flip the bit */
-		buf[byte_addr] ^= (1 << bit_addr);
-		return 1;
-
-	}
-	/* count nr of bits; use table lookup, faster than calculating it */
-	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
-		return 1;	/* error in ECC data; no action needed */
-
-	pr_err("%s: uncorrectable ECC error\n", __func__);
-	return -EBADMSG;
-}
-EXPORT_SYMBOL(__nand_correct_data);
-
-/**
- * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd:	MTD block structure
- * @buf:	raw data read from the chip
- * @read_ecc:	ECC from the chip
- * @calc_ecc:	the ECC calculated from raw data
- *
- * Detect and correct a single bit error for a 256/512-byte block
- */
-int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
-		      unsigned char *read_ecc, unsigned char *calc_ecc)
-{
-	return __nand_correct_data(buf, read_ecc, calc_ecc,
-				   mtd_to_nand(mtd)->ecc.size);
-}
-EXPORT_SYMBOL(nand_correct_data);
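
A hedged usage sketch of the two exported helpers above, as a software-ECC
driver would call them; the buffer contents and the injected bit flip are
illustrative only:

	unsigned char data[256];
	unsigned char ecc_stored[3], ecc_calc[3];
	int ret;

	memset(data, 0xa5, sizeof(data));

	/* ECC computed when the page was written (normally stored in OOB). */
	__nand_calculate_ecc(data, 256, ecc_stored);

	/* Simulate a single bit flip on read-back. */
	data[10] ^= 0x04;

	/* ECC recomputed over the data actually read. */
	__nand_calculate_ecc(data, 256, ecc_calc);

	/*
	 * 0: no error, 1: single bit error corrected (or the error was in
	 * the ECC bytes themselves), -EBADMSG: uncorrectable.
	 */
	ret = __nand_correct_data(data, ecc_stored, ecc_calc, 256);
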
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
-MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
deleted file mode 100644
index 80550dbf9467..000000000000
--- a/drivers/mtd/nand/nand_ids.c
+++ /dev/null
@@ -1,193 +0,0 @@ 
-/*
- *  Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/sizes.h>
-
-#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
-#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
-
-#define SP_OPTIONS NAND_NEED_READRDY
-#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
-
-/*
- * The chip ID list:
- *    name, device ID, page size, chip size in MiB, eraseblock size, options
- *
- * If page size and eraseblock size are 0, the sizes are taken from the
- * extended chip ID.
- */
-struct nand_flash_dev nand_flash_ids[] = {
-	/*
-	 * Some incompatible NAND chips share device ID's and so must be
-	 * listed by full ID. We list them first so that we can easily identify
-	 * the most specific match.
-	 */
-	{"TC58NVG0S3E 1G 3.3V 8-bit",
-		{ .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
-		  SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
-		  2 },
-	{"TC58NVG2S0F 4G 3.3V 8-bit",
-		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
-		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
-	{"TC58NVG3S0F 8G 3.3V 8-bit",
-		{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
-		  SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
-	{"TC58NVG5D2 32G 3.3V 8-bit",
-		{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
-		  SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
-	{"TC58NVG6D2 64G 3.3V 8-bit",
-		{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
-		  SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
-	{"SDTNRGAMA 64G 3.3V 8-bit",
-		{ .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
-		  SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
-	{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
-		{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
-		  SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
-		  NAND_ECC_INFO(40, SZ_1K), 4 },
-
-	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
-
-	LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit",  0x33, 16, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit",  0x73, 16, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
-	LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
-
-	LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit",  0x35, 32, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit",  0x75, 32, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
-	LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
-
-	LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit",  0x36, 64, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit",  0x76, 64, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
-	LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
-
-	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x78, 128, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x39, 128, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit",  0x79, 128, SZ_16K, SP_OPTIONS),
-	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
-	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
-	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
-	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
-
-	LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
-
-	/*
-	 * These are the new chips with large page size. Their page size and
-	 * eraseblock size are determined from the extended ID bytes.
-	 */
-
-	/* 512 Megabit */
-	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA2,  64, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA0,  64, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF2,  64, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xD0,  64, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF0,  64, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2,  64, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0,  64, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2,  64, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0,  64, LP_OPTIONS16),
-
-	/* 1 Gigabit */
-	EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit",  0xA1, 128, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xF1, 128, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xD1, 128, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
-
-	/* 2 Gigabit */
-	EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit",  0xAA, 256, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit",  0xDA, 256, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
-
-	/* 4 Gigabit */
-	EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit",  0xAC, 512, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit",  0xDC, 512, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
-
-	/* 8 Gigabit */
-	EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit",  0xA3, 1024, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit",  0xD3, 1024, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
-
-	/* 16 Gigabit */
-	EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit",  0xA5, 2048, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit",  0xD5, 2048, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
-
-	/* 32 Gigabit */
-	EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit",  0xA7, 4096, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit",  0xD7, 4096, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
-
-	/* 64 Gigabit */
-	EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit",  0xAE, 8192, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit",  0xDE, 8192, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
-
-	/* 128 Gigabit */
-	EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit",  0x1A, 16384, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit",  0x3A, 16384, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
-
-	/* 256 Gigabit */
-	EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit",  0x1C, 32768, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit",  0x3C, 32768, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
-
-	/* 512 Gigabit */
-	EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit",  0x1E, 65536, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit",  0x3E, 65536, LP_OPTIONS),
-	EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
-	EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
-
-	{NULL}
-};
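
For the EXTENDED_ID_NAND() entries above, page, OOB and eraseblock sizes are
not stored in the table; they are decoded from the extended ID bytes at
detection time. A simplified sketch of that decoding for the 4th ID byte,
roughly what the core detection code does for non-ONFI large-page chips
(treat the exact bit layout as illustrative; it follows the common
Samsung-style encoding):

	/* ext is the 4th byte returned by the READ ID command. */
	static void decode_ext_id(unsigned char ext, unsigned int *writesize,
				  unsigned int *oobsize, unsigned int *erasesize,
				  int *buswidth16)
	{
		*writesize = 1024 << (ext & 0x03);		/* page size */
		ext >>= 2;
		*oobsize = (8 << (ext & 0x01)) * (*writesize >> 9); /* OOB per 512B */
		ext >>= 2;
		*erasesize = (64 * 1024) << (ext & 0x03);	/* eraseblock size */
		ext >>= 2;
		*buswidth16 = ext & 0x01;			/* x16 bus if set */
	}
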
-
-/* Manufacturer IDs */
-struct nand_manufacturers nand_manuf_ids[] = {
-	{NAND_MFR_TOSHIBA, "Toshiba"},
-	{NAND_MFR_ESMT, "ESMT"},
-	{NAND_MFR_SAMSUNG, "Samsung"},
-	{NAND_MFR_FUJITSU, "Fujitsu"},
-	{NAND_MFR_NATIONAL, "National"},
-	{NAND_MFR_RENESAS, "Renesas"},
-	{NAND_MFR_STMICRO, "ST Micro"},
-	{NAND_MFR_HYNIX, "Hynix"},
-	{NAND_MFR_MICRON, "Micron"},
-	{NAND_MFR_AMD, "AMD/Spansion"},
-	{NAND_MFR_MACRONIX, "Macronix"},
-	{NAND_MFR_EON, "Eon"},
-	{NAND_MFR_SANDISK, "SanDisk"},
-	{NAND_MFR_INTEL, "Intel"},
-	{NAND_MFR_ATO, "ATO"},
-	{0x0, "Unknown"}
-};
-
-EXPORT_SYMBOL(nand_manuf_ids);
-EXPORT_SYMBOL(nand_flash_ids);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
deleted file mode 100644
index 5cf237268284..000000000000
--- a/drivers/mtd/nand/nand_timings.c
+++ /dev/null
@@ -1,311 +0,0 @@ 
-/*
- *  Copyright (C) 2014 Free Electrons
- *
- *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/export.h>
-#include <linux/mtd/rawnand.h>
-
-static const struct nand_data_interface onfi_sdr_timings[] = {
-	/* Mode 0 */
-	{
-		.type = NAND_SDR_IFACE,
-		.timings.sdr = {
-			.tADL_min = 400000,
-			.tALH_min = 20000,
-			.tALS_min = 50000,
-			.tAR_min = 25000,
-			.tCEA_max = 100000,
-			.tCEH_min = 20000,
-			.tCH_min = 20000,
-			.tCHZ_max = 100000,
-			.tCLH_min = 20000,
-			.tCLR_min = 20000,
-			.tCLS_min = 50000,
-			.tCOH_min = 0,
-			.tCS_min = 70000,
-			.tDH_min = 20000,
-			.tDS_min = 40000,
-			.tFEAT_max = 1000000,
-			.tIR_min = 10000,
-			.tITC_max = 1000000,
-			.tRC_min = 100000,
-			.tREA_max = 40000,
-			.tREH_min = 30000,
-			.tRHOH_min = 0,
-			.tRHW_min = 200000,
-			.tRHZ_max = 200000,
-			.tRLOH_min = 0,
-			.tRP_min = 50000,
-			.tRR_min = 40000,
-			.tRST_max = 250000000000ULL,
-			.tWB_max = 200000,
-			.tWC_min = 100000,
-			.tWH_min = 30000,
-			.tWHR_min = 120000,
-			.tWP_min = 50000,
-			.tWW_min = 100000,
-		},
-	},
-	/* Mode 1 */
-	{
-		.type = NAND_SDR_IFACE,
-		.timings.sdr = {
-			.tADL_min = 400000,
-			.tALH_min = 10000,
-			.tALS_min = 25000,
-			.tAR_min = 10000,
-			.tCEA_max = 45000,
-			.tCEH_min = 20000,
-			.tCH_min = 10000,
-			.tCHZ_max = 50000,
-			.tCLH_min = 10000,
-			.tCLR_min = 10000,
-			.tCLS_min = 25000,
-			.tCOH_min = 15000,
-			.tCS_min = 35000,
-			.tDH_min = 10000,
-			.tDS_min = 20000,
-			.tFEAT_max = 1000000,
-			.tIR_min = 0,
-			.tITC_max = 1000000,
-			.tRC_min = 50000,
-			.tREA_max = 30000,
-			.tREH_min = 15000,
-			.tRHOH_min = 15000,
-			.tRHW_min = 100000,
-			.tRHZ_max = 100000,
-			.tRLOH_min = 0,
-			.tRP_min = 25000,
-			.tRR_min = 20000,
-			.tRST_max = 500000000,
-			.tWB_max = 100000,
-			.tWC_min = 45000,
-			.tWH_min = 15000,
-			.tWHR_min = 80000,
-			.tWP_min = 25000,
-			.tWW_min = 100000,
-		},
-	},
-	/* Mode 2 */
-	{
-		.type = NAND_SDR_IFACE,
-		.timings.sdr = {
-			.tADL_min = 400000,
-			.tALH_min = 10000,
-			.tALS_min = 15000,
-			.tAR_min = 10000,
-			.tCEA_max = 30000,
-			.tCEH_min = 20000,
-			.tCH_min = 10000,
-			.tCHZ_max = 50000,
-			.tCLH_min = 10000,
-			.tCLR_min = 10000,
-			.tCLS_min = 15000,
-			.tCOH_min = 15000,
-			.tCS_min = 25000,
-			.tDH_min = 5000,
-			.tDS_min = 15000,
-			.tFEAT_max = 1000000,
-			.tIR_min = 0,
-			.tITC_max = 1000000,
-			.tRC_min = 35000,
-			.tREA_max = 25000,
-			.tREH_min = 15000,
-			.tRHOH_min = 15000,
-			.tRHW_min = 100000,
-			.tRHZ_max = 100000,
-			.tRLOH_min = 0,
-			.tRR_min = 20000,
-			.tRST_max = 500000000,
-			.tWB_max = 100000,
-			.tRP_min = 17000,
-			.tWC_min = 35000,
-			.tWH_min = 15000,
-			.tWHR_min = 80000,
-			.tWP_min = 17000,
-			.tWW_min = 100000,
-		},
-	},
-	/* Mode 3 */
-	{
-		.type = NAND_SDR_IFACE,
-		.timings.sdr = {
-			.tADL_min = 400000,
-			.tALH_min = 5000,
-			.tALS_min = 10000,
-			.tAR_min = 10000,
-			.tCEA_max = 25000,
-			.tCEH_min = 20000,
-			.tCH_min = 5000,
-			.tCHZ_max = 50000,
-			.tCLH_min = 5000,
-			.tCLR_min = 10000,
-			.tCLS_min = 10000,
-			.tCOH_min = 15000,
-			.tCS_min = 25000,
-			.tDH_min = 5000,
-			.tDS_min = 10000,
-			.tFEAT_max = 1000000,
-			.tIR_min = 0,
-			.tITC_max = 1000000,
-			.tRC_min = 30000,
-			.tREA_max = 20000,
-			.tREH_min = 10000,
-			.tRHOH_min = 15000,
-			.tRHW_min = 100000,
-			.tRHZ_max = 100000,
-			.tRLOH_min = 0,
-			.tRP_min = 15000,
-			.tRR_min = 20000,
-			.tRST_max = 500000000,
-			.tWB_max = 100000,
-			.tWC_min = 30000,
-			.tWH_min = 10000,
-			.tWHR_min = 80000,
-			.tWP_min = 15000,
-			.tWW_min = 100000,
-		},
-	},
-	/* Mode 4 */
-	{
-		.type = NAND_SDR_IFACE,
-		.timings.sdr = {
-			.tADL_min = 400000,
-			.tALH_min = 5000,
-			.tALS_min = 10000,
-			.tAR_min = 10000,
-			.tCEA_max = 25000,
-			.tCEH_min = 20000,
-			.tCH_min = 5000,
-			.tCHZ_max = 30000,
-			.tCLH_min = 5000,
-			.tCLR_min = 10000,
-			.tCLS_min = 10000,
-			.tCOH_min = 15000,
-			.tCS_min = 20000,
-			.tDH_min = 5000,
-			.tDS_min = 10000,
-			.tFEAT_max = 1000000,
-			.tIR_min = 0,
-			.tITC_max = 1000000,
-			.tRC_min = 25000,
-			.tREA_max = 20000,
-			.tREH_min = 10000,
-			.tRHOH_min = 15000,
-			.tRHW_min = 100000,
-			.tRHZ_max = 100000,
-			.tRLOH_min = 5000,
-			.tRP_min = 12000,
-			.tRR_min = 20000,
-			.tRST_max = 500000000,
-			.tWB_max = 100000,
-			.tWC_min = 25000,
-			.tWH_min = 10000,
-			.tWHR_min = 80000,
-			.tWP_min = 12000,
-			.tWW_min = 100000,
-		},
-	},
-	/* Mode 5 */
-	{
-		.type = NAND_SDR_IFACE,
-		.timings.sdr = {
-			.tADL_min = 400000,
-			.tALH_min = 5000,
-			.tALS_min = 10000,
-			.tAR_min = 10000,
-			.tCEA_max = 25000,
-			.tCEH_min = 20000,
-			.tCH_min = 5000,
-			.tCHZ_max = 30000,
-			.tCLH_min = 5000,
-			.tCLR_min = 10000,
-			.tCLS_min = 10000,
-			.tCOH_min = 15000,
-			.tCS_min = 15000,
-			.tDH_min = 5000,
-			.tDS_min = 7000,
-			.tFEAT_max = 1000000,
-			.tIR_min = 0,
-			.tITC_max = 1000000,
-			.tRC_min = 20000,
-			.tREA_max = 16000,
-			.tREH_min = 7000,
-			.tRHOH_min = 15000,
-			.tRHW_min = 100000,
-			.tRHZ_max = 100000,
-			.tRLOH_min = 5000,
-			.tRP_min = 10000,
-			.tRR_min = 20000,
-			.tRST_max = 500000000,
-			.tWB_max = 100000,
-			.tWC_min = 20000,
-			.tWH_min = 7000,
-			.tWHR_min = 80000,
-			.tWP_min = 10000,
-			.tWW_min = 100000,
-		},
-	},
-};
-
-/**
- * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
- * timings according to the given ONFI timing mode
- * @mode: ONFI timing mode
- */
-const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
-{
-	if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
-		return ERR_PTR(-EINVAL);
-
-	return &onfi_sdr_timings[mode].timings.sdr;
-}
-EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
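
All values in the table above are expressed in picoseconds (e.g. mode 5's
tWP_min of 10000 ps is the ONFI 10 ns minimum write pulse). A hedged sketch of
how a controller driver typically consumes these timings, converting a
minimum duration into controller clock cycles; clk_period_ps and mode are
assumed driver-local variables:

	const struct nand_sdr_timings *sdr;
	unsigned int wp_cycles;

	sdr = onfi_async_timing_mode_to_sdr_timings(mode);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Round up so the WE pulse is never shorter than tWP_min. */
	wp_cycles = DIV_ROUND_UP(sdr->tWP_min, clk_period_ps);
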
-
-/**
- * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
- * a given ONFI mode
- * @chip: The NAND chip
- * @iface: The data interface to be initialized
- * @type: The data interface type (only NAND_SDR_IFACE is supported)
- * @timing_mode: The ONFI timing mode
- */
-int onfi_init_data_interface(struct nand_chip *chip,
-			     struct nand_data_interface *iface,
-			     enum nand_data_interface_type type,
-			     int timing_mode)
-{
-	if (type != NAND_SDR_IFACE)
-		return -EINVAL;
-
-	if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
-		return -EINVAL;
-
-	*iface = onfi_sdr_timings[timing_mode];
-
-	/*
-	 * TODO: initialize timings that cannot be deduced from timing mode:
-	 * tR, tPROG, tCCS, ...
-	 * This information is part of the ONFI parameter page.
-	 */
-
-	return 0;
-}
-EXPORT_SYMBOL(onfi_init_data_interface);
-
-/**
- * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
- * data interface for mode 0. This is used as the default timing after
- * reset.
- */
-const struct nand_data_interface *nand_get_default_data_interface(void)
-{
-	return &onfi_sdr_timings[0];
-}
-EXPORT_SYMBOL(nand_get_default_data_interface);
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
deleted file mode 100644
index 9c16635b5338..000000000000
--- a/drivers/mtd/nand/nandsim.c
+++ /dev/null
@@ -1,2431 +0,0 @@ 
-/*
- * NAND flash simulator.
- *
- * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
- *
- * Copyright (C) 2004 Nokia Corporation
- *
- * Note: NS means "NAND Simulator".
- * Note: Input means input TO flash chip, output means output FROM chip.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any later
- * version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
- * Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
- */
-
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/vmalloc.h>
-#include <linux/math64.h>
-#include <linux/slab.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_bch.h>
-#include <linux/mtd/partitions.h>
-#include <linux/delay.h>
-#include <linux/list.h>
-#include <linux/random.h>
-#include <linux/sched.h>
-#include <linux/fs.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-#include <linux/debugfs.h>
-
-/* Default simulator parameters values */
-#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
-    !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
-    !defined(CONFIG_NANDSIM_THIRD_ID_BYTE)  || \
-    !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
-#define CONFIG_NANDSIM_FIRST_ID_BYTE  0x98
-#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
-#define CONFIG_NANDSIM_THIRD_ID_BYTE  0xFF /* No byte */
-#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
-#endif
-
-#ifndef CONFIG_NANDSIM_ACCESS_DELAY
-#define CONFIG_NANDSIM_ACCESS_DELAY 25
-#endif
-#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
-#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
-#endif
-#ifndef CONFIG_NANDSIM_ERASE_DELAY
-#define CONFIG_NANDSIM_ERASE_DELAY 2
-#endif
-#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
-#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
-#endif
-#ifndef CONFIG_NANDSIM_INPUT_CYCLE
-#define CONFIG_NANDSIM_INPUT_CYCLE  50
-#endif
-#ifndef CONFIG_NANDSIM_BUS_WIDTH
-#define CONFIG_NANDSIM_BUS_WIDTH  8
-#endif
-#ifndef CONFIG_NANDSIM_DO_DELAYS
-#define CONFIG_NANDSIM_DO_DELAYS  0
-#endif
-#ifndef CONFIG_NANDSIM_LOG
-#define CONFIG_NANDSIM_LOG        0
-#endif
-#ifndef CONFIG_NANDSIM_DBG
-#define CONFIG_NANDSIM_DBG        0
-#endif
-#ifndef CONFIG_NANDSIM_MAX_PARTS
-#define CONFIG_NANDSIM_MAX_PARTS  32
-#endif
-
-static uint access_delay   = CONFIG_NANDSIM_ACCESS_DELAY;
-static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
-static uint erase_delay    = CONFIG_NANDSIM_ERASE_DELAY;
-static uint output_cycle   = CONFIG_NANDSIM_OUTPUT_CYCLE;
-static uint input_cycle    = CONFIG_NANDSIM_INPUT_CYCLE;
-static uint bus_width      = CONFIG_NANDSIM_BUS_WIDTH;
-static uint do_delays      = CONFIG_NANDSIM_DO_DELAYS;
-static uint log            = CONFIG_NANDSIM_LOG;
-static uint dbg            = CONFIG_NANDSIM_DBG;
-static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
-static unsigned int parts_num;
-static char *badblocks = NULL;
-static char *weakblocks = NULL;
-static char *weakpages = NULL;
-static unsigned int bitflips = 0;
-static char *gravepages = NULL;
-static unsigned int overridesize = 0;
-static char *cache_file = NULL;
-static unsigned int bbt;
-static unsigned int bch;
-static u_char id_bytes[8] = {
-	[0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
-	[1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
-	[2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
-	[3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
-	[4 ... 7] = 0xFF,
-};
-
-module_param_array(id_bytes, byte, NULL, 0400);
-module_param_named(first_id_byte, id_bytes[0], byte, 0400);
-module_param_named(second_id_byte, id_bytes[1], byte, 0400);
-module_param_named(third_id_byte, id_bytes[2], byte, 0400);
-module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
-module_param(access_delay,   uint, 0400);
-module_param(programm_delay, uint, 0400);
-module_param(erase_delay,    uint, 0400);
-module_param(output_cycle,   uint, 0400);
-module_param(input_cycle,    uint, 0400);
-module_param(bus_width,      uint, 0400);
-module_param(do_delays,      uint, 0400);
-module_param(log,            uint, 0400);
-module_param(dbg,            uint, 0400);
-module_param_array(parts, ulong, &parts_num, 0400);
-module_param(badblocks,      charp, 0400);
-module_param(weakblocks,     charp, 0400);
-module_param(weakpages,      charp, 0400);
-module_param(bitflips,       uint, 0400);
-module_param(gravepages,     charp, 0400);
-module_param(overridesize,   uint, 0400);
-module_param(cache_file,     charp, 0400);
-module_param(bbt,	     uint, 0400);
-module_param(bch,	     uint, 0400);
-
-MODULE_PARM_DESC(id_bytes,       "The ID bytes returned by NAND Flash 'read ID' command");
-MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
-MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
-MODULE_PARM_DESC(third_id_byte,  "The third byte returned by NAND Flash 'read ID' command (obsolete)");
-MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
-MODULE_PARM_DESC(access_delay,   "Initial page access delay (microseconds)");
-MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
-MODULE_PARM_DESC(erase_delay,    "Sector erase delay (milliseconds)");
-MODULE_PARM_DESC(output_cycle,   "Word output (from flash) time (nanoseconds)");
-MODULE_PARM_DESC(input_cycle,    "Word input (to flash) time (nanoseconds)");
-MODULE_PARM_DESC(bus_width,      "Chip's bus width (8- or 16-bit)");
-MODULE_PARM_DESC(do_delays,      "Simulate NAND delays using busy-waits if not zero");
-MODULE_PARM_DESC(log,            "Perform logging if not zero");
-MODULE_PARM_DESC(dbg,            "Output debug information if not zero");
-MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by commas");
-/* Page and erase block positions for the following parameters are independent of any partitions */
-MODULE_PARM_DESC(badblocks,      "Erase blocks that are initially marked bad, separated by commas");
-MODULE_PARM_DESC(weakblocks,     "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
-				 " separated by commas e.g. 113:2 means eb 113"
-				 " can be erased only twice before failing");
-MODULE_PARM_DESC(weakpages,      "Weak pages [: maximum writes (defaults to 3)]"
-				 " separated by commas e.g. 1401:2 means page 1401"
-				 " can be written only twice before failing");
-MODULE_PARM_DESC(bitflips,       "Maximum number of random bit flips per page (zero by default)");
-MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (defaults to 3)]"
-				 " separated by commas e.g. 1401:2 means page 1401"
-				 " can be read only twice before failing");
-MODULE_PARM_DESC(overridesize,   "Specifies the NAND Flash size overriding the ID bytes. "
-				 "The size is specified in erase blocks and as the exponent of a power of two"
-				 " e.g. 5 means a size of 32 erase blocks");
-MODULE_PARM_DESC(cache_file,     "File to use to cache nand pages instead of memory");
-MODULE_PARM_DESC(bbt,		 "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
-MODULE_PARM_DESC(bch,		 "Enable BCH ecc and set how many bits should "
-				 "be correctable in 512-byte blocks");
-
-/* The largest possible page size */
-#define NS_LARGEST_PAGE_SIZE	4096
-
-/* The prefix for simulator output */
-#define NS_OUTPUT_PREFIX "[nandsim]"
-
-/* Simulator's output macros (logging, debugging, warning, error) */
-#define NS_LOG(args...) \
-	do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
-#define NS_DBG(args...) \
-	do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
-#define NS_WARN(args...) \
-	do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
-#define NS_ERR(args...) \
-	do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
-#define NS_INFO(args...) \
-	do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
-
-/* Busy-wait delay macros (microseconds, milliseconds) */
-#define NS_UDELAY(us) \
-        do { if (do_delays) udelay(us); } while(0)
-#define NS_MDELAY(us) \
-        do { if (do_delays) mdelay(us); } while(0)
-
-/* Is the nandsim structure initialized? */
-#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
-
-/* Good operation completion status */
-#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
-
-/* Operation failed completion status */
-#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
-
-/* Calculate the page offset in flash RAM image by (row, column) address */
-#define NS_RAW_OFFSET(ns) \
-	(((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
-
-/* Calculate the OOB offset in flash RAM image by (row, column) address */
-#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
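
A worked example of the two macros above, assuming a 2048-byte page with a
64-byte OOB area (pgszoob = 2112): with regs.row = 3 and regs.column = 16,
NS_RAW_OFFSET() gives 3 * 2112 + 16 = 6352, and NS_RAW_OFFSET_OOB() adds pgsz
to land at 6352 + 2048 = 8400, i.e. byte 16 of page 3's OOB area in the flash
RAM image.
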
-
-/* After a command is input, the simulator goes to one of the following states */
-#define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
-#define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
-#define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
-#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
-#define STATE_CMD_READOOB      0x00000005 /* read OOB area */
-#define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
-#define STATE_CMD_STATUS       0x00000007 /* read status */
-#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
-#define STATE_CMD_READID       0x0000000A /* read ID */
-#define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
-#define STATE_CMD_RESET        0x0000000C /* reset */
-#define STATE_CMD_RNDOUT       0x0000000D /* random output command */
-#define STATE_CMD_RNDOUTSTART  0x0000000E /* random output start command */
-#define STATE_CMD_MASK         0x0000000F /* command states mask */
-
-/* After an address is input, the simulator goes to one of these states */
-#define STATE_ADDR_PAGE        0x00000010 /* full (row, column) address is accepted */
-#define STATE_ADDR_SEC         0x00000020 /* sector address was accepted */
-#define STATE_ADDR_COLUMN      0x00000030 /* column address was accepted */
-#define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
-#define STATE_ADDR_MASK        0x00000070 /* address states mask */
-
-/* During data input/output the simulator is in these states */
-#define STATE_DATAIN           0x00000100 /* waiting for data input */
-#define STATE_DATAIN_MASK      0x00000100 /* data input states mask */
-
-#define STATE_DATAOUT          0x00001000 /* waiting for page data output */
-#define STATE_DATAOUT_ID       0x00002000 /* waiting for ID bytes output */
-#define STATE_DATAOUT_STATUS   0x00003000 /* waiting for status output */
-#define STATE_DATAOUT_MASK     0x00007000 /* data output states mask */
-
-/* Previous operation is done, ready to accept new requests */
-#define STATE_READY            0x00000000
-
-/* This state is used to mark that the next state isn't known yet */
-#define STATE_UNKNOWN          0x10000000
-
-/* Simulator's actions bit masks */
-#define ACTION_CPY       0x00100000 /* copy page/OOB to the internal buffer */
-#define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
-#define ACTION_SECERASE  0x00300000 /* erase sector */
-#define ACTION_ZEROOFF   0x00400000 /* don't add any offset to address */
-#define ACTION_HALFOFF   0x00500000 /* add to address half of page */
-#define ACTION_OOBOFF    0x00600000 /* add to address OOB offset */
-#define ACTION_MASK      0x00700000 /* action mask */
-
-#define NS_OPER_NUM      13 /* Number of operations supported by the simulator */
-#define NS_OPER_STATES   6  /* Maximum number of states in operation */
-
-#define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
-#define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
-#define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
-#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
-#define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
-#define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
-#define OPT_SMALLPAGE    (OPT_PAGE512) /* 512-byte page chips */
-
-/* Remove action bits from state */
-#define NS_STATE(x) ((x) & ~ACTION_MASK)
-
-/*
- * Maximum number of previous states which need to be saved. Currently
- * saving is only needed for a page program operation preceded by a read
- * command (which is only valid for 512-byte pages).
- */
-#define NS_MAX_PREVSTATES 1
-
-/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
-#define NS_MAX_HELD_PAGES 16
-
-struct nandsim_debug_info {
-	struct dentry *dfs_root;
-	struct dentry *dfs_wear_report;
-};
-
-/*
- * A union to represent flash memory contents and flash buffer.
- */
-union ns_mem {
-	u_char *byte;    /* for byte access */
-	uint16_t *word;  /* for 16-bit word access */
-};
-
-/*
- * The structure which describes all the internal simulator data.
- */
-struct nandsim {
-	struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
-	unsigned int nbparts;
-
-	uint busw;              /* flash chip bus width (8 or 16) */
-	u_char ids[8];          /* chip's ID bytes */
-	uint32_t options;       /* chip's characteristic bits */
-	uint32_t state;         /* current chip state */
-	uint32_t nxstate;       /* next expected state */
-
-	uint32_t *op;           /* current operation, NULL if operation isn't known yet */
-	uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
-	uint16_t npstates;      /* number of previous states saved */
-	uint16_t stateidx;      /* current state index */
-
-	/* The simulated NAND flash pages array */
-	union ns_mem *pages;
-
-	/* Slab allocator for nand pages */
-	struct kmem_cache *nand_pages_slab;
-
-	/* Internal buffer of page + OOB size bytes */
-	union ns_mem buf;
-
-	/* NAND flash "geometry" */
-	struct {
-		uint64_t totsz;     /* total flash size, bytes */
-		uint32_t secsz;     /* flash sector (erase block) size, bytes */
-		uint pgsz;          /* NAND flash page size, bytes */
-		uint oobsz;         /* page OOB area size, bytes */
-		uint64_t totszoob;  /* total flash size including OOB, bytes */
-		uint pgszoob;       /* page size including OOB, bytes */
-		uint secszoob;      /* sector size including OOB, bytes */
-		uint pgnum;         /* total number of pages */
-		uint pgsec;         /* number of pages per sector */
-		uint secshift;      /* bits number in sector size */
-		uint pgshift;       /* bits number in page size */
-		uint pgaddrbytes;   /* bytes per page address */
-		uint secaddrbytes;  /* bytes per sector address */
-		uint idbytes;       /* the number of ID bytes that this chip outputs */
-	} geom;
-
-	/* NAND flash internal registers */
-	struct {
-		unsigned command; /* the command register */
-		u_char   status;  /* the status register */
-		uint     row;     /* the page number */
-		uint     column;  /* the offset within page */
-		uint     count;   /* internal counter */
-		uint     num;     /* number of bytes which must be processed */
-		uint     off;     /* fixed page offset */
-	} regs;
-
-	/* NAND flash lines state */
-	struct {
-		int ce;  /* Chip Enable */
-		int cle; /* Command Latch Enable */
-		int ale; /* Address Latch Enable */
-		int wp;  /* Write Protect */
-	} lines;
-
-	/* Fields needed when using a cache file */
-	struct file *cfile; /* Open file */
-	unsigned long *pages_written; /* Which pages have been written */
-	void *file_buf;
-	struct page *held_pages[NS_MAX_HELD_PAGES];
-	int held_cnt;
-
-	struct nandsim_debug_info dbg;
-};
-
-/*
- * Operations array. To perform any operation the simulator must pass
- * through the correspondent states chain.
- */
-static struct nandsim_operations {
-	uint32_t reqopts;  /* options which are required to perform the operation */
-	uint32_t states[NS_OPER_STATES]; /* operation's states */
-} ops[NS_OPER_NUM] = {
-	/* Read page + OOB from the beginning */
-	{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
-			STATE_DATAOUT, STATE_READY}},
-	/* Read page + OOB from the second half */
-	{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
-			STATE_DATAOUT, STATE_READY}},
-	/* Read OOB */
-	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
-			STATE_DATAOUT, STATE_READY}},
-	/* Program page starting from the beginning */
-	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
-			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Program page starting from the beginning */
-	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
-			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Program page starting from the second half */
-	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
-			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Program OOB */
-	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
-			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
-	/* Erase sector */
-	{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
-	/* Read status */
-	{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
-	/* Read ID */
-	{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
-	/* Large page devices read page */
-	{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
-			       STATE_DATAOUT, STATE_READY}},
-	/* Large page devices random page read */
-	{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
-			       STATE_DATAOUT, STATE_READY}},
-};
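
Each entry above is a chain of expected states with optional ACTION_* bits;
as commands, addresses and data arrive, the simulator advances along whichever
chains still match. A hedged illustration of the matching idea (this helper is
not part of the driver, and the real lookup additionally handles saved
previous states and partially-known operations):

	static int chain_matches(const struct nandsim_operations *op,
				 const uint32_t *received, int nreceived)
	{
		int i;

		/* Compare only the state part; ACTION_* bits are stripped. */
		for (i = 0; i < nreceived; i++)
			if (NS_STATE(op->states[i]) != NS_STATE(received[i]))
				return 0;
		return 1;
	}
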
-
-struct weak_block {
-	struct list_head list;
-	unsigned int erase_block_no;
-	unsigned int max_erases;
-	unsigned int erases_done;
-};
-
-static LIST_HEAD(weak_blocks);
-
-struct weak_page {
-	struct list_head list;
-	unsigned int page_no;
-	unsigned int max_writes;
-	unsigned int writes_done;
-};
-
-static LIST_HEAD(weak_pages);
-
-struct grave_page {
-	struct list_head list;
-	unsigned int page_no;
-	unsigned int max_reads;
-	unsigned int reads_done;
-};
-
-static LIST_HEAD(grave_pages);
-
-static unsigned long *erase_block_wear = NULL;
-static unsigned int wear_eb_count = 0;
-static unsigned long total_wear = 0;
-
-/* MTD structure for NAND controller */
-static struct mtd_info *nsmtd;
-
-static int nandsim_debugfs_show(struct seq_file *m, void *private)
-{
-	unsigned long wmin = -1, wmax = 0, avg;
-	unsigned long deciles[10], decile_max[10], tot = 0;
-	unsigned int i;
-
-	/* Calc wear stats */
-	for (i = 0; i < wear_eb_count; ++i) {
-		unsigned long wear = erase_block_wear[i];
-		if (wear < wmin)
-			wmin = wear;
-		if (wear > wmax)
-			wmax = wear;
-		tot += wear;
-	}
-
-	for (i = 0; i < 9; ++i) {
-		deciles[i] = 0;
-		decile_max[i] = (wmax * (i + 1) + 5) / 10;
-	}
-	deciles[9] = 0;
-	decile_max[9] = wmax;
-	for (i = 0; i < wear_eb_count; ++i) {
-		int d;
-		unsigned long wear = erase_block_wear[i];
-		for (d = 0; d < 10; ++d)
-			if (wear <= decile_max[d]) {
-				deciles[d] += 1;
-				break;
-			}
-	}
-	avg = tot / wear_eb_count;
-
-	/* Output wear report */
-	seq_printf(m, "Total numbers of erases:  %lu\n", tot);
-	seq_printf(m, "Number of erase blocks:   %u\n", wear_eb_count);
-	seq_printf(m, "Average number of erases: %lu\n", avg);
-	seq_printf(m, "Maximum number of erases: %lu\n", wmax);
-	seq_printf(m, "Minimum number of erases: %lu\n", wmin);
-	for (i = 0; i < 10; ++i) {
-		unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
-		if (from > decile_max[i])
-			continue;
-		seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
-			from,
-			decile_max[i],
-			deciles[i]);
-	}
-
-	return 0;
-}
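
As a worked example of the bucketing above: with wmax = 100, the integer
arithmetic yields decile_max = {10, 20, 30, ..., 90, 100}, so the report
counts erase blocks with 0-10 erases, then 11-20, and so on up to 91-100.
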
-
-static int nandsim_debugfs_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, nandsim_debugfs_show, inode->i_private);
-}
-
-static const struct file_operations dfs_fops = {
-	.open		= nandsim_debugfs_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-
-/**
- * nandsim_debugfs_create - initialize debugfs
- * @dev: nandsim device description object
- *
- * This function creates all debugfs files for the nandsim device @dev.
- * Returns zero in case of success and a negative error code in case of
- * failure.
- */
-static int nandsim_debugfs_create(struct nandsim *dev)
-{
-	struct nandsim_debug_info *dbg = &dev->dbg;
-	struct dentry *dent;
-	int err;
-
-	if (!IS_ENABLED(CONFIG_DEBUG_FS))
-		return 0;
-
-	dent = debugfs_create_dir("nandsim", NULL);
-	if (IS_ERR_OR_NULL(dent)) {
-		int err = dent ? -ENODEV : PTR_ERR(dent);
-
-		NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
-			err);
-		return err;
-	}
-	dbg->dfs_root = dent;
-
-	dent = debugfs_create_file("wear_report", S_IRUSR,
-				   dbg->dfs_root, dev, &dfs_fops);
-	if (IS_ERR_OR_NULL(dent))
-		goto out_remove;
-	dbg->dfs_wear_report = dent;
-
-	return 0;
-
-out_remove:
-	debugfs_remove_recursive(dbg->dfs_root);
-	err = dent ? PTR_ERR(dent) : -ENODEV;
-	return err;
-}
-
-/**
- * nandsim_debugfs_remove - destroy all debugfs files
- * @ns: nandsim device description object
- */
-static void nandsim_debugfs_remove(struct nandsim *ns)
-{
-	if (IS_ENABLED(CONFIG_DEBUG_FS))
-		debugfs_remove_recursive(ns->dbg.dfs_root);
-}
-
-/*
- * Allocate the array of page pointers, create a slab cache for the pages
- * and initialize the array with NULL pointers.
- *
- * RETURNS: 0 on success, a negative error code otherwise.
- */
-static int __init alloc_device(struct nandsim *ns)
-{
-	struct file *cfile;
-	int i, err;
-
-	if (cache_file) {
-		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
-		if (IS_ERR(cfile))
-			return PTR_ERR(cfile);
-		if (!(cfile->f_mode & FMODE_CAN_READ)) {
-			NS_ERR("alloc_device: cache file not readable\n");
-			err = -EINVAL;
-			goto err_close;
-		}
-		if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
-			NS_ERR("alloc_device: cache file not writeable\n");
-			err = -EINVAL;
-			goto err_close;
-		}
-		ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
-					    sizeof(unsigned long));
-		if (!ns->pages_written) {
-			NS_ERR("alloc_device: unable to allocate pages written array\n");
-			err = -ENOMEM;
-			goto err_close;
-		}
-		ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
-		if (!ns->file_buf) {
-			NS_ERR("alloc_device: unable to allocate file buf\n");
-			err = -ENOMEM;
-			goto err_free;
-		}
-		ns->cfile = cfile;
-		return 0;
-	}
-
-	ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
-	if (!ns->pages) {
-		NS_ERR("alloc_device: unable to allocate page array\n");
-		return -ENOMEM;
-	}
-	for (i = 0; i < ns->geom.pgnum; i++) {
-		ns->pages[i].byte = NULL;
-	}
-	ns->nand_pages_slab = kmem_cache_create("nandsim",
-						ns->geom.pgszoob, 0, 0, NULL);
-	if (!ns->nand_pages_slab) {
-		NS_ERR("cache_create: unable to create kmem_cache\n");
-		return -ENOMEM;
-	}
-
-	return 0;
-
-err_free:
-	vfree(ns->pages_written);
-err_close:
-	filp_close(cfile, NULL);
-	return err;
-}
-
-/*
- * Free any allocated pages, and free the array of page pointers.
- */
-static void free_device(struct nandsim *ns)
-{
-	int i;
-
-	if (ns->cfile) {
-		kfree(ns->file_buf);
-		vfree(ns->pages_written);
-		filp_close(ns->cfile, NULL);
-		return;
-	}
-
-	if (ns->pages) {
-		for (i = 0; i < ns->geom.pgnum; i++) {
-			if (ns->pages[i].byte)
-				kmem_cache_free(ns->nand_pages_slab,
-						ns->pages[i].byte);
-		}
-		kmem_cache_destroy(ns->nand_pages_slab);
-		vfree(ns->pages);
-	}
-}
-
-static char __init *get_partition_name(int i)
-{
-	return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
-}
-
-/*
- * Initialize the nandsim structure.
- *
- * RETURNS: 0 if success, -ERRNO if failure.
- */
-static int __init init_nandsim(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nandsim   *ns   = nand_get_controller_data(chip);
-	int i, ret = 0;
-	uint64_t remains;
-	uint64_t next_offset;
-
-	if (NS_IS_INITIALIZED(ns)) {
-		NS_ERR("init_nandsim: nandsim is already initialized\n");
-		return -EIO;
-	}
-
-	/* Force mtd to not do delays */
-	chip->chip_delay = 0;
-
-	/* Initialize the NAND flash parameters */
-	ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
-	ns->geom.totsz    = mtd->size;
-	ns->geom.pgsz     = mtd->writesize;
-	ns->geom.oobsz    = mtd->oobsize;
-	ns->geom.secsz    = mtd->erasesize;
-	ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
-	ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
-	ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
-	ns->geom.secshift = ffs(ns->geom.secsz) - 1;
-	ns->geom.pgshift  = chip->page_shift;
-	ns->geom.pgsec    = ns->geom.secsz / ns->geom.pgsz;
-	ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
-	ns->options = 0;
-
-	if (ns->geom.pgsz == 512) {
-		ns->options |= OPT_PAGE512;
-		if (ns->busw == 8)
-			ns->options |= OPT_PAGE512_8BIT;
-	} else if (ns->geom.pgsz == 2048) {
-		ns->options |= OPT_PAGE2048;
-	} else if (ns->geom.pgsz == 4096) {
-		ns->options |= OPT_PAGE4096;
-	} else {
-		NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
-		return -EIO;
-	}
-
-	if (ns->options & OPT_SMALLPAGE) {
-		if (ns->geom.totsz <= (32 << 20)) {
-			ns->geom.pgaddrbytes  = 3;
-			ns->geom.secaddrbytes = 2;
-		} else {
-			ns->geom.pgaddrbytes  = 4;
-			ns->geom.secaddrbytes = 3;
-		}
-	} else {
-		if (ns->geom.totsz <= (128 << 20)) {
-			ns->geom.pgaddrbytes  = 4;
-			ns->geom.secaddrbytes = 2;
-		} else {
-			ns->geom.pgaddrbytes  = 5;
-			ns->geom.secaddrbytes = 3;
-		}
-	}
-
-	/* Fill the partition_info structure */
-	if (parts_num > ARRAY_SIZE(ns->partitions)) {
-		NS_ERR("too many partitions.\n");
-		return -EINVAL;
-	}
-	remains = ns->geom.totsz;
-	next_offset = 0;
-	for (i = 0; i < parts_num; ++i) {
-		uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
-
-		if (!part_sz || part_sz > remains) {
-			NS_ERR("bad partition size.\n");
-			return -EINVAL;
-		}
-		ns->partitions[i].name   = get_partition_name(i);
-		if (!ns->partitions[i].name) {
-			NS_ERR("unable to allocate memory.\n");
-			return -ENOMEM;
-		}
-		ns->partitions[i].offset = next_offset;
-		ns->partitions[i].size   = part_sz;
-		next_offset += ns->partitions[i].size;
-		remains -= ns->partitions[i].size;
-	}
-	ns->nbparts = parts_num;
-	if (remains) {
-		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
-			NS_ERR("too many partitions.\n");
-			return -EINVAL;
-		}
-		ns->partitions[i].name   = get_partition_name(i);
-		if (!ns->partitions[i].name) {
-			NS_ERR("unable to allocate memory.\n");
-			return -ENOMEM;
-		}
-		ns->partitions[i].offset = next_offset;
-		ns->partitions[i].size   = remains;
-		ns->nbparts += 1;
-	}
-
-	if (ns->busw == 16)
-		NS_WARN("16-bit flash support hasn't been tested\n");
-
-	printk("flash size: %llu MiB\n",
-			(unsigned long long)ns->geom.totsz >> 20);
-	printk("page size: %u bytes\n",         ns->geom.pgsz);
-	printk("OOB area size: %u bytes\n",     ns->geom.oobsz);
-	printk("sector size: %u KiB\n",         ns->geom.secsz >> 10);
-	printk("pages number: %u\n",            ns->geom.pgnum);
-	printk("pages per sector: %u\n",        ns->geom.pgsec);
-	printk("bus width: %u\n",               ns->busw);
-	printk("bits in sector size: %u\n",     ns->geom.secshift);
-	printk("bits in page size: %u\n",       ns->geom.pgshift);
-	printk("bits in OOB size: %u\n",	ffs(ns->geom.oobsz) - 1);
-	printk("flash size with OOB: %llu KiB\n",
-			(unsigned long long)ns->geom.totszoob >> 10);
-	printk("page address bytes: %u\n",      ns->geom.pgaddrbytes);
-	printk("sector address bytes: %u\n",    ns->geom.secaddrbytes);
-	printk("options: %#x\n",                ns->options);
-
-	if ((ret = alloc_device(ns)) != 0)
-		return ret;
-
-	/* Allocate / initialize the internal buffer */
-	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
-	if (!ns->buf.byte) {
-		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
-			ns->geom.pgszoob);
-		return -ENOMEM;
-	}
-	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
-
-	return 0;
-}
-
-/*
- * Free the nandsim structure.
- */
-static void free_nandsim(struct nandsim *ns)
-{
-	kfree(ns->buf.byte);
-	free_device(ns);
-
-	return;
-}
-
-static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
-{
-	char *w;
-	int zero_ok;
-	unsigned int erase_block_no;
-	loff_t offset;
-
-	if (!badblocks)
-		return 0;
-	w = badblocks;
-	do {
-		zero_ok = (*w == '0' ? 1 : 0);
-		erase_block_no = simple_strtoul(w, &w, 0);
-		if (!zero_ok && !erase_block_no) {
-			NS_ERR("invalid badblocks.\n");
-			return -EINVAL;
-		}
-		offset = (loff_t)erase_block_no * ns->geom.secsz;
-		if (mtd_block_markbad(mtd, offset)) {
-			NS_ERR("invalid badblocks.\n");
-			return -EINVAL;
-		}
-		if (*w == ',')
-			w += 1;
-	} while (*w);
-	return 0;
-}
-
-static int parse_weakblocks(void)
-{
-	char *w;
-	int zero_ok;
-	unsigned int erase_block_no;
-	unsigned int max_erases;
-	struct weak_block *wb;
-
-	if (!weakblocks)
-		return 0;
-	w = weakblocks;
-	do {
-		zero_ok = (*w == '0' ? 1 : 0);
-		erase_block_no = simple_strtoul(w, &w, 0);
-		if (!zero_ok && !erase_block_no) {
-			NS_ERR("invalid weakblocks.\n");
-			return -EINVAL;
-		}
-		max_erases = 3;
-		if (*w == ':') {
-			w += 1;
-			max_erases = simple_strtoul(w, &w, 0);
-		}
-		if (*w == ',')
-			w += 1;
-		wb = kzalloc(sizeof(*wb), GFP_KERNEL);
-		if (!wb) {
-			NS_ERR("unable to allocate memory.\n");
-			return -ENOMEM;
-		}
-		wb->erase_block_no = erase_block_no;
-		wb->max_erases = max_erases;
-		list_add(&wb->list, &weak_blocks);
-	} while (*w);
-	return 0;
-}
-
-static int erase_error(unsigned int erase_block_no)
-{
-	struct weak_block *wb;
-
-	list_for_each_entry(wb, &weak_blocks, list)
-		if (wb->erase_block_no == erase_block_no) {
-			if (wb->erases_done >= wb->max_erases)
-				return 1;
-			wb->erases_done += 1;
-			return 0;
-		}
-	return 0;
-}
-
-static int parse_weakpages(void)
-{
-	char *w;
-	int zero_ok;
-	unsigned int page_no;
-	unsigned int max_writes;
-	struct weak_page *wp;
-
-	if (!weakpages)
-		return 0;
-	w = weakpages;
-	do {
-		zero_ok = (*w == '0' ? 1 : 0);
-		page_no = simple_strtoul(w, &w, 0);
-		if (!zero_ok && !page_no) {
-			NS_ERR("invalid weakpages.\n");
-			return -EINVAL;
-		}
-		max_writes = 3;
-		if (*w == ':') {
-			w += 1;
-			max_writes = simple_strtoul(w, &w, 0);
-		}
-		if (*w == ',')
-			w += 1;
-		wp = kzalloc(sizeof(*wp), GFP_KERNEL);
-		if (!wp) {
-			NS_ERR("unable to allocate memory.\n");
-			return -ENOMEM;
-		}
-		wp->page_no = page_no;
-		wp->max_writes = max_writes;
-		list_add(&wp->list, &weak_pages);
-	} while (*w);
-	return 0;
-}
-
-static int write_error(unsigned int page_no)
-{
-	struct weak_page *wp;
-
-	list_for_each_entry(wp, &weak_pages, list)
-		if (wp->page_no == page_no) {
-			if (wp->writes_done >= wp->max_writes)
-				return 1;
-			wp->writes_done += 1;
-			return 0;
-		}
-	return 0;
-}
-
-static int parse_gravepages(void)
-{
-	char *g;
-	int zero_ok;
-	unsigned int page_no;
-	unsigned int max_reads;
-	struct grave_page *gp;
-
-	if (!gravepages)
-		return 0;
-	g = gravepages;
-	do {
-		zero_ok = (*g == '0' ? 1 : 0);
-		page_no = simple_strtoul(g, &g, 0);
-		if (!zero_ok && !page_no) {
-			NS_ERR("invalid gravepages.\n");
-			return -EINVAL;
-		}
-		max_reads = 3;
-		if (*g == ':') {
-			g += 1;
-			max_reads = simple_strtoul(g, &g, 0);
-		}
-		if (*g == ',')
-			g += 1;
-		gp = kzalloc(sizeof(*gp), GFP_KERNEL);
-		if (!gp) {
-			NS_ERR("unable to allocate memory.\n");
-			return -ENOMEM;
-		}
-		gp->page_no = page_no;
-		gp->max_reads = max_reads;
-		list_add(&gp->list, &grave_pages);
-	} while (*g);
-	return 0;
-}
-
-static int read_error(unsigned int page_no)
-{
-	struct grave_page *gp;
-
-	list_for_each_entry(gp, &grave_pages, list)
-		if (gp->page_no == page_no) {
-			if (gp->reads_done >= gp->max_reads)
-				return 1;
-			gp->reads_done += 1;
-			return 0;
-		}
-	return 0;
-}
-
-static void free_lists(void)
-{
-	struct list_head *pos, *n;
-	list_for_each_safe(pos, n, &weak_blocks) {
-		list_del(pos);
-		kfree(list_entry(pos, struct weak_block, list));
-	}
-	list_for_each_safe(pos, n, &weak_pages) {
-		list_del(pos);
-		kfree(list_entry(pos, struct weak_page, list));
-	}
-	list_for_each_safe(pos, n, &grave_pages) {
-		list_del(pos);
-		kfree(list_entry(pos, struct grave_page, list));
-	}
-	kfree(erase_block_wear);
-}
-
-static int setup_wear_reporting(struct mtd_info *mtd)
-{
-	size_t mem;
-
-	wear_eb_count = div_u64(mtd->size, mtd->erasesize);
-	mem = wear_eb_count * sizeof(unsigned long);
-	if (mem / sizeof(unsigned long) != wear_eb_count) {
-		NS_ERR("Too many erase blocks for wear reporting\n");
-		return -ENOMEM;
-	}
-	erase_block_wear = kzalloc(mem, GFP_KERNEL);
-	if (!erase_block_wear) {
-		NS_ERR("Too many erase blocks for wear reporting\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-static void update_wear(unsigned int erase_block_no)
-{
-	if (!erase_block_wear)
-		return;
-	total_wear += 1;
-	/*
-	 * TODO: Notify this through a debugfs entry,
-	 * instead of showing an error message.
-	 */
-	if (total_wear == 0)
-		NS_ERR("Erase counter total overflow\n");
-	erase_block_wear[erase_block_no] += 1;
-	if (erase_block_wear[erase_block_no] == 0)
-		NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
-}
-
-/*
- * Return the string representation of state 'state'.
- */
-static char *get_state_name(uint32_t state)
-{
-	switch (NS_STATE(state)) {
-		case STATE_CMD_READ0:
-			return "STATE_CMD_READ0";
-		case STATE_CMD_READ1:
-			return "STATE_CMD_READ1";
-		case STATE_CMD_PAGEPROG:
-			return "STATE_CMD_PAGEPROG";
-		case STATE_CMD_READOOB:
-			return "STATE_CMD_READOOB";
-		case STATE_CMD_READSTART:
-			return "STATE_CMD_READSTART";
-		case STATE_CMD_ERASE1:
-			return "STATE_CMD_ERASE1";
-		case STATE_CMD_STATUS:
-			return "STATE_CMD_STATUS";
-		case STATE_CMD_SEQIN:
-			return "STATE_CMD_SEQIN";
-		case STATE_CMD_READID:
-			return "STATE_CMD_READID";
-		case STATE_CMD_ERASE2:
-			return "STATE_CMD_ERASE2";
-		case STATE_CMD_RESET:
-			return "STATE_CMD_RESET";
-		case STATE_CMD_RNDOUT:
-			return "STATE_CMD_RNDOUT";
-		case STATE_CMD_RNDOUTSTART:
-			return "STATE_CMD_RNDOUTSTART";
-		case STATE_ADDR_PAGE:
-			return "STATE_ADDR_PAGE";
-		case STATE_ADDR_SEC:
-			return "STATE_ADDR_SEC";
-		case STATE_ADDR_ZERO:
-			return "STATE_ADDR_ZERO";
-		case STATE_ADDR_COLUMN:
-			return "STATE_ADDR_COLUMN";
-		case STATE_DATAIN:
-			return "STATE_DATAIN";
-		case STATE_DATAOUT:
-			return "STATE_DATAOUT";
-		case STATE_DATAOUT_ID:
-			return "STATE_DATAOUT_ID";
-		case STATE_DATAOUT_STATUS:
-			return "STATE_DATAOUT_STATUS";
-		case STATE_READY:
-			return "STATE_READY";
-		case STATE_UNKNOWN:
-			return "STATE_UNKNOWN";
-	}
-
-	NS_ERR("get_state_name: unknown state, BUG\n");
-	return NULL;
-}
-
-/*
- * Check if command is valid.
- *
- * RETURNS: 1 if wrong command, 0 if right.
- */
-static int check_command(int cmd)
-{
-	switch (cmd) {
-
-	case NAND_CMD_READ0:
-	case NAND_CMD_READ1:
-	case NAND_CMD_READSTART:
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_READOOB:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_STATUS:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_READID:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_RESET:
-	case NAND_CMD_RNDOUT:
-	case NAND_CMD_RNDOUTSTART:
-		return 0;
-
-	default:
-		return 1;
-	}
-}
-
-/*
- * Returns state after command is accepted by command number.
- */
-static uint32_t get_state_by_command(unsigned command)
-{
-	switch (command) {
-		case NAND_CMD_READ0:
-			return STATE_CMD_READ0;
-		case NAND_CMD_READ1:
-			return STATE_CMD_READ1;
-		case NAND_CMD_PAGEPROG:
-			return STATE_CMD_PAGEPROG;
-		case NAND_CMD_READSTART:
-			return STATE_CMD_READSTART;
-		case NAND_CMD_READOOB:
-			return STATE_CMD_READOOB;
-		case NAND_CMD_ERASE1:
-			return STATE_CMD_ERASE1;
-		case NAND_CMD_STATUS:
-			return STATE_CMD_STATUS;
-		case NAND_CMD_SEQIN:
-			return STATE_CMD_SEQIN;
-		case NAND_CMD_READID:
-			return STATE_CMD_READID;
-		case NAND_CMD_ERASE2:
-			return STATE_CMD_ERASE2;
-		case NAND_CMD_RESET:
-			return STATE_CMD_RESET;
-		case NAND_CMD_RNDOUT:
-			return STATE_CMD_RNDOUT;
-		case NAND_CMD_RNDOUTSTART:
-			return STATE_CMD_RNDOUTSTART;
-	}
-
-	NS_ERR("get_state_by_command: unknown command, BUG\n");
-	return 0;
-}
-
-/*
- * Move an address byte to the corresponding internal register.
- */
-static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
-{
-	uint byte = (uint)bt;
-
-	if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
-		ns->regs.column |= (byte << 8 * ns->regs.count);
-	else {
-		ns->regs.row |= (byte << 8 * (ns->regs.count -
-						ns->geom.pgaddrbytes +
-						ns->geom.secaddrbytes));
-	}
-
-	return;
-}
-
-/*
- * Switch to STATE_READY state.
- */
-static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
-{
-	NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
-
-	ns->state       = STATE_READY;
-	ns->nxstate     = STATE_UNKNOWN;
-	ns->op          = NULL;
-	ns->npstates    = 0;
-	ns->stateidx    = 0;
-	ns->regs.num    = 0;
-	ns->regs.count  = 0;
-	ns->regs.off    = 0;
-	ns->regs.row    = 0;
-	ns->regs.column = 0;
-	ns->regs.status = status;
-}
-
-/*
- * If the operation isn't known yet, try to find it in the global array
- * of supported operations.
- *
- * Operation can be unknown because of the following.
- *   1. A new command was accepted and this is the first call to find the
- *      corresponding states chain. In this case ns->npstates = 0;
- *   2. There are several operations which begin with the same command(s)
- *      (for example program from the second half and read from the
- *      second half operations both begin with the READ1 command). In this
- *      case the ns->pstates[] array contains previous states.
- *
- * Thus, the function tries to find operation containing the following
- * states (if the 'flag' parameter is 0):
- *    ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
- *
- * If (one and only one) matching operation is found, it is accepted (
- * ns->ops, ns->state, ns->nxstate are initialized, ns->npstate is
- * zeroed).
- *
- * If there are several matches, the current state is pushed to the
- * ns->pstates.
- *
- * The operation can be unknown only while commands are input to the chip.
- * As soon as an address command is accepted, the operation must be known.
- * In such a situation the function is called with 'flag' != 0, and the
- * operation is searched using the following pattern:
- *     ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
- *
- * It is supposed that this pattern must either match one operation or
- * none. There can't be ambiguity in that case.
- *
- * If no matches found, the function does the following:
- *   1. if there are saved states present, try to ignore them and search
- *      again only using the last command. If nothing was found, switch
- *      to the STATE_READY state.
- *   2. if there are no saved states, switch to the STATE_READY state.
- *
- * RETURNS: -2 - no matched operations found.
- *          -1 - several matches.
- *           0 - operation is found.
- */
-static int find_operation(struct nandsim *ns, uint32_t flag)
-{
-	int opsfound = 0;
-	int i, j, idx = 0;
-
-	for (i = 0; i < NS_OPER_NUM; i++) {
-
-		int found = 1;
-
-		if (!(ns->options & ops[i].reqopts))
-			/* Ignore operations we can't perform */
-			continue;
-
-		if (flag) {
-			if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
-				continue;
-		} else {
-			if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
-				continue;
-		}
-
-		for (j = 0; j < ns->npstates; j++)
-			if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
-				&& (ns->options & ops[idx].reqopts)) {
-				found = 0;
-				break;
-			}
-
-		if (found) {
-			idx = i;
-			opsfound += 1;
-		}
-	}
-
-	if (opsfound == 1) {
-		/* Exact match */
-		ns->op = &ops[idx].states[0];
-		if (flag) {
-			/*
-			 * In this case the find_operation function was
-			 * called when the address has just begun to be input. But it isn't
-			 * yet fully input and the current state must
-			 * not be one of STATE_ADDR_*, but the STATE_ADDR_*
-			 * state must be the next state (ns->nxstate).
-			 */
-			ns->stateidx = ns->npstates - 1;
-		} else {
-			ns->stateidx = ns->npstates;
-		}
-		ns->npstates = 0;
-		ns->state = ns->op[ns->stateidx];
-		ns->nxstate = ns->op[ns->stateidx + 1];
-		NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
-				idx, get_state_name(ns->state), get_state_name(ns->nxstate));
-		return 0;
-	}
-
-	if (opsfound == 0) {
-		/* Nothing was found. Try to ignore previous commands (if any) and search again */
-		if (ns->npstates != 0) {
-			NS_DBG("find_operation: no operation found, try again with state %s\n",
-					get_state_name(ns->state));
-			ns->npstates = 0;
-			return find_operation(ns, 0);
-
-		}
-		NS_DBG("find_operation: no operations found\n");
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-		return -2;
-	}
-
-	if (flag) {
-		/* This shouldn't happen */
-		NS_DBG("find_operation: BUG, operation must be known if address is input\n");
-		return -2;
-	}
-
-	NS_DBG("find_operation: there is still ambiguity\n");
-
-	ns->pstates[ns->npstates++] = ns->state;
-
-	return -1;
-}
-
-static void put_pages(struct nandsim *ns)
-{
-	int i;
-
-	for (i = 0; i < ns->held_cnt; i++)
-		put_page(ns->held_pages[i]);
-}
-
-/* Get page cache pages in advance to provide NOFS memory allocation */
-static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
-{
-	pgoff_t index, start_index, end_index;
-	struct page *page;
-	struct address_space *mapping = file->f_mapping;
-
-	start_index = pos >> PAGE_SHIFT;
-	end_index = (pos + count - 1) >> PAGE_SHIFT;
-	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
-		return -EINVAL;
-	ns->held_cnt = 0;
-	for (index = start_index; index <= end_index; index++) {
-		page = find_get_page(mapping, index);
-		if (page == NULL) {
-			page = find_or_create_page(mapping, index, GFP_NOFS);
-			if (page == NULL) {
-				write_inode_now(mapping->host, 1);
-				page = find_or_create_page(mapping, index, GFP_NOFS);
-			}
-			if (page == NULL) {
-				put_pages(ns);
-				return -ENOMEM;
-			}
-			unlock_page(page);
-		}
-		ns->held_pages[ns->held_cnt++] = page;
-	}
-	return 0;
-}
-
-static int set_memalloc(void)
-{
-	if (current->flags & PF_MEMALLOC)
-		return 0;
-	current->flags |= PF_MEMALLOC;
-	return 1;
-}
-
-static void clear_memalloc(int memalloc)
-{
-	if (memalloc)
-		current->flags &= ~PF_MEMALLOC;
-}
-
-static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
-{
-	ssize_t tx;
-	int err, memalloc;
-
-	err = get_pages(ns, file, count, pos);
-	if (err)
-		return err;
-	memalloc = set_memalloc();
-	tx = kernel_read(file, pos, buf, count);
-	clear_memalloc(memalloc);
-	put_pages(ns);
-	return tx;
-}
-
-static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
-{
-	ssize_t tx;
-	int err, memalloc;
-
-	err = get_pages(ns, file, count, pos);
-	if (err)
-		return err;
-	memalloc = set_memalloc();
-	tx = kernel_write(file, buf, count, pos);
-	clear_memalloc(memalloc);
-	put_pages(ns);
-	return tx;
-}
-
-/*
- * Returns a pointer to the current page.
- */
-static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
-{
-	return &(ns->pages[ns->regs.row]);
-}
-
-/*
- * Returns a pointer to the current byte within the current page.
- */
-static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
-{
-	return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
-}
-
-static int do_read_error(struct nandsim *ns, int num)
-{
-	unsigned int page_no = ns->regs.row;
-
-	if (read_error(page_no)) {
-		prandom_bytes(ns->buf.byte, num);
-		NS_WARN("simulating read error in page %u\n", page_no);
-		return 1;
-	}
-	return 0;
-}
-
-static void do_bit_flips(struct nandsim *ns, int num)
-{
-	if (bitflips && prandom_u32() < (1 << 22)) {
-		int flips = 1;
-		if (bitflips > 1)
-			flips = (prandom_u32() % (int) bitflips) + 1;
-		while (flips--) {
-			int pos = prandom_u32() % (num * 8);
-			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
-			NS_WARN("read_page: flipping bit %d in page %d "
-				"reading from %d ecc: corrected=%u failed=%u\n",
-				pos, ns->regs.row, ns->regs.column + ns->regs.off,
-				nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
-		}
-	}
-}
-
-/*
- * Fill the NAND buffer with data read from the specified page.
- */
-static void read_page(struct nandsim *ns, int num)
-{
-	union ns_mem *mypage;
-
-	if (ns->cfile) {
-		if (!test_bit(ns->regs.row, ns->pages_written)) {
-			NS_DBG("read_page: page %d not written\n", ns->regs.row);
-			memset(ns->buf.byte, 0xFF, num);
-		} else {
-			loff_t pos;
-			ssize_t tx;
-
-			NS_DBG("read_page: page %d written, reading from %d\n",
-				ns->regs.row, ns->regs.column + ns->regs.off);
-			if (do_read_error(ns, num))
-				return;
-			pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
-			tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
-			if (tx != num) {
-				NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
-				return;
-			}
-			do_bit_flips(ns, num);
-		}
-		return;
-	}
-
-	mypage = NS_GET_PAGE(ns);
-	if (mypage->byte == NULL) {
-		NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
-		memset(ns->buf.byte, 0xFF, num);
-	} else {
-		NS_DBG("read_page: page %d allocated, reading from %d\n",
-			ns->regs.row, ns->regs.column + ns->regs.off);
-		if (do_read_error(ns, num))
-			return;
-		memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
-		do_bit_flips(ns, num);
-	}
-}
-
-/*
- * Erase all pages in the specified sector.
- */
-static void erase_sector(struct nandsim *ns)
-{
-	union ns_mem *mypage;
-	int i;
-
-	if (ns->cfile) {
-		for (i = 0; i < ns->geom.pgsec; i++)
-			if (__test_and_clear_bit(ns->regs.row + i,
-						 ns->pages_written)) {
-				NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
-			}
-		return;
-	}
-
-	mypage = NS_GET_PAGE(ns);
-	for (i = 0; i < ns->geom.pgsec; i++) {
-		if (mypage->byte != NULL) {
-			NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
-			kmem_cache_free(ns->nand_pages_slab, mypage->byte);
-			mypage->byte = NULL;
-		}
-		mypage++;
-	}
-}
-
-/*
- * Program the specified page with the contents from the NAND buffer.
- */
-static int prog_page(struct nandsim *ns, int num)
-{
-	int i;
-	union ns_mem *mypage;
-	u_char *pg_off;
-
-	if (ns->cfile) {
-		loff_t off;
-		ssize_t tx;
-		int all;
-
-		NS_DBG("prog_page: writing page %d\n", ns->regs.row);
-		pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
-		off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
-		if (!test_bit(ns->regs.row, ns->pages_written)) {
-			all = 1;
-			memset(ns->file_buf, 0xff, ns->geom.pgszoob);
-		} else {
-			all = 0;
-			tx = read_file(ns, ns->cfile, pg_off, num, off);
-			if (tx != num) {
-				NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
-				return -1;
-			}
-		}
-		for (i = 0; i < num; i++)
-			pg_off[i] &= ns->buf.byte[i];
-		if (all) {
-			loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
-			tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
-			if (tx != ns->geom.pgszoob) {
-				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
-				return -1;
-			}
-			__set_bit(ns->regs.row, ns->pages_written);
-		} else {
-			tx = write_file(ns, ns->cfile, pg_off, num, off);
-			if (tx != num) {
-				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
-				return -1;
-			}
-		}
-		return 0;
-	}
-
-	mypage = NS_GET_PAGE(ns);
-	if (mypage->byte == NULL) {
-		NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
-		/*
-		 * We allocate memory with GFP_NOFS because a flash FS may
-		 * utilize this. If it is holding an FS lock, then gets here,
-		 * then kernel memory alloc runs writeback which goes to the FS
-		 * again and deadlocks. This was seen in practice.
-		 */
-		mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
-		if (mypage->byte == NULL) {
-			NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
-			return -1;
-		}
-		memset(mypage->byte, 0xFF, ns->geom.pgszoob);
-	}
-
-	pg_off = NS_PAGE_BYTE_OFF(ns);
-	for (i = 0; i < num; i++)
-		pg_off[i] &= ns->buf.byte[i];
-
-	return 0;
-}
-
-/*
- * If state has any action bit, perform this action.
- *
- * RETURNS: 0 if success, -1 if error.
- */
-static int do_state_action(struct nandsim *ns, uint32_t action)
-{
-	int num;
-	int busdiv = ns->busw == 8 ? 1 : 2;
-	unsigned int erase_block_no, page_no;
-
-	action &= ACTION_MASK;
-
-	/* Check that page address input is correct */
-	if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
-		NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
-		return -1;
-	}
-
-	switch (action) {
-
-	case ACTION_CPY:
-		/*
-		 * Copy page data to the internal buffer.
-		 */
-
-		/* Column shouldn't be very large */
-		if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
-			NS_ERR("do_state_action: column number is too large\n");
-			break;
-		}
-		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
-		read_page(ns, num);
-
-		NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
-			num, NS_RAW_OFFSET(ns) + ns->regs.off);
-
-		if (ns->regs.off == 0)
-			NS_LOG("read page %d\n", ns->regs.row);
-		else if (ns->regs.off < ns->geom.pgsz)
-			NS_LOG("read page %d (second half)\n", ns->regs.row);
-		else
-			NS_LOG("read OOB of page %d\n", ns->regs.row);
-
-		NS_UDELAY(access_delay);
-		NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
-
-		break;
-
-	case ACTION_SECERASE:
-		/*
-		 * Erase sector.
-		 */
-
-		if (ns->lines.wp) {
-			NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
-			return -1;
-		}
-
-		if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
-			|| (ns->regs.row & ~(ns->geom.secsz - 1))) {
-			NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
-			return -1;
-		}
-
-		ns->regs.row = (ns->regs.row <<
-				8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
-		ns->regs.column = 0;
-
-		erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
-
-		NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
-				ns->regs.row, NS_RAW_OFFSET(ns));
-		NS_LOG("erase sector %u\n", erase_block_no);
-
-		erase_sector(ns);
-
-		NS_MDELAY(erase_delay);
-
-		if (erase_block_wear)
-			update_wear(erase_block_no);
-
-		if (erase_error(erase_block_no)) {
-			NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
-			return -1;
-		}
-
-		break;
-
-	case ACTION_PRGPAGE:
-		/*
-		 * Program page - move internal buffer data to the page.
-		 */
-
-		if (ns->lines.wp) {
-			NS_WARN("do_state_action: device is write-protected, ignore page program\n");
-			return -1;
-		}
-
-		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
-		if (num != ns->regs.count) {
-			NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
-					ns->regs.count, num);
-			return -1;
-		}
-
-		if (prog_page(ns, num) == -1)
-			return -1;
-
-		page_no = ns->regs.row;
-
-		NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
-			num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
-		NS_LOG("program page %d\n", ns->regs.row);
-
-		NS_UDELAY(programm_delay);
-		NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
-
-		if (write_error(page_no)) {
-			NS_WARN("simulating write failure in page %u\n", page_no);
-			return -1;
-		}
-
-		break;
-
-	case ACTION_ZEROOFF:
-		NS_DBG("do_state_action: set internal offset to 0\n");
-		ns->regs.off = 0;
-		break;
-
-	case ACTION_HALFOFF:
-		if (!(ns->options & OPT_PAGE512_8BIT)) {
-			NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
-				"byte page size 8x chips\n");
-			return -1;
-		}
-		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
-		ns->regs.off = ns->geom.pgsz/2;
-		break;
-
-	case ACTION_OOBOFF:
-		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
-		ns->regs.off = ns->geom.pgsz;
-		break;
-
-	default:
-		NS_DBG("do_state_action: BUG! unknown action\n");
-	}
-
-	return 0;
-}
-
-/*
- * Switch simulator's state.
- */
-static void switch_state(struct nandsim *ns)
-{
-	if (ns->op) {
-		/*
-		 * The current operation has already been identified.
-		 * Just follow the states chain.
-		 */
-
-		ns->stateidx += 1;
-		ns->state = ns->nxstate;
-		ns->nxstate = ns->op[ns->stateidx + 1];
-
-		NS_DBG("switch_state: operation is known, switch to the next state, "
-			"state: %s, nxstate: %s\n",
-			get_state_name(ns->state), get_state_name(ns->nxstate));
-
-		/* See, whether we need to do some action */
-		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-			return;
-		}
-
-	} else {
-		/*
-		 * We don't yet know which operation we perform.
-		 * Try to identify it.
-		 */
-
-		/*
-		 *  The only event causing the switch_state function to
-		 *  be called with yet unknown operation is new command.
-		 */
-		ns->state = get_state_by_command(ns->regs.command);
-
-		NS_DBG("switch_state: operation is unknown, try to find it\n");
-
-		if (find_operation(ns, 0) != 0)
-			return;
-
-		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-			return;
-		}
-	}
-
-	/* For 16x devices column means the page offset in words */
-	if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
-		NS_DBG("switch_state: double the column number for 16x device\n");
-		ns->regs.column <<= 1;
-	}
-
-	if (NS_STATE(ns->nxstate) == STATE_READY) {
-		/*
-		 * The current state is the last. Return to STATE_READY
-		 */
-
-		u_char status = NS_STATUS_OK(ns);
-
-		/* In case of data states, see if all bytes were input/output */
-		if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
-			&& ns->regs.count != ns->regs.num) {
-			NS_WARN("switch_state: not all bytes were processed, %d left\n",
-					ns->regs.num - ns->regs.count);
-			status = NS_STATUS_FAILED(ns);
-		}
-
-		NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
-
-		switch_to_ready_state(ns, status);
-
-		return;
-	} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
-		/*
-		 * If the next state is data input/output, switch to it now
-		 */
-
-		ns->state      = ns->nxstate;
-		ns->nxstate    = ns->op[++ns->stateidx + 1];
-		ns->regs.num   = ns->regs.count = 0;
-
-		NS_DBG("switch_state: the next state is data I/O, switch, "
-			"state: %s, nxstate: %s\n",
-			get_state_name(ns->state), get_state_name(ns->nxstate));
-
-		/*
-		 * Set the internal register to the count of bytes which
-		 * are expected to be input or output
-		 */
-		switch (NS_STATE(ns->state)) {
-			case STATE_DATAIN:
-			case STATE_DATAOUT:
-				ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
-				break;
-
-			case STATE_DATAOUT_ID:
-				ns->regs.num = ns->geom.idbytes;
-				break;
-
-			case STATE_DATAOUT_STATUS:
-				ns->regs.count = ns->regs.num = 0;
-				break;
-
-			default:
-				NS_ERR("switch_state: BUG! unknown data state\n");
-		}
-
-	} else if (ns->nxstate & STATE_ADDR_MASK) {
-		/*
-		 * If the next state is address input, set the internal
-		 * register to the number of expected address bytes
-		 */
-
-		ns->regs.count = 0;
-
-		switch (NS_STATE(ns->nxstate)) {
-			case STATE_ADDR_PAGE:
-				ns->regs.num = ns->geom.pgaddrbytes;
-
-				break;
-			case STATE_ADDR_SEC:
-				ns->regs.num = ns->geom.secaddrbytes;
-				break;
-
-			case STATE_ADDR_ZERO:
-				ns->regs.num = 1;
-				break;
-
-			case STATE_ADDR_COLUMN:
-				/* Column address is always 2 bytes */
-				ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
-				break;
-
-			default:
-				NS_ERR("switch_state: BUG! unknown address state\n");
-		}
-	} else {
-		/*
-		 * Just reset internal counters.
-		 */
-
-		ns->regs.num = 0;
-		ns->regs.count = 0;
-	}
-}
-
-static u_char ns_nand_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nandsim *ns = nand_get_controller_data(chip);
-	u_char outb = 0x00;
-
-	/* Sanity and correctness checks */
-	if (!ns->lines.ce) {
-		NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
-		return outb;
-	}
-	if (ns->lines.ale || ns->lines.cle) {
-		NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
-		return outb;
-	}
-	if (!(ns->state & STATE_DATAOUT_MASK)) {
-		NS_WARN("read_byte: unexpected data output cycle, state is %s "
-			"return %#x\n", get_state_name(ns->state), (uint)outb);
-		return outb;
-	}
-
-	/* Status register may be read as many times as it is wanted */
-	if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
-		NS_DBG("read_byte: return %#x status\n", ns->regs.status);
-		return ns->regs.status;
-	}
-
-	/* Check if there is any data in the internal buffer which may be read */
-	if (ns->regs.count == ns->regs.num) {
-		NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
-		return outb;
-	}
-
-	switch (NS_STATE(ns->state)) {
-		case STATE_DATAOUT:
-			if (ns->busw == 8) {
-				outb = ns->buf.byte[ns->regs.count];
-				ns->regs.count += 1;
-			} else {
-				outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
-				ns->regs.count += 2;
-			}
-			break;
-		case STATE_DATAOUT_ID:
-			NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
-			outb = ns->ids[ns->regs.count];
-			ns->regs.count += 1;
-			break;
-		default:
-			BUG();
-	}
-
-	if (ns->regs.count == ns->regs.num) {
-		NS_DBG("read_byte: all bytes were read\n");
-
-		if (NS_STATE(ns->nxstate) == STATE_READY)
-			switch_state(ns);
-	}
-
-	return outb;
-}
-
-static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nandsim *ns = nand_get_controller_data(chip);
-
-	/* Sanity and correctness checks */
-	if (!ns->lines.ce) {
-		NS_ERR("write_byte: chip is disabled, ignore write\n");
-		return;
-	}
-	if (ns->lines.ale && ns->lines.cle) {
-		NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
-		return;
-	}
-
-	if (ns->lines.cle == 1) {
-		/*
-		 * The byte written is a command.
-		 */
-
-		if (byte == NAND_CMD_RESET) {
-			NS_LOG("reset chip\n");
-			switch_to_ready_state(ns, NS_STATUS_OK(ns));
-			return;
-		}
-
-		/* Check that the command byte is correct */
-		if (check_command(byte)) {
-			NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
-			return;
-		}
-
-		if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
-			|| NS_STATE(ns->state) == STATE_DATAOUT) {
-			int row = ns->regs.row;
-
-			switch_state(ns);
-			if (byte == NAND_CMD_RNDOUT)
-				ns->regs.row = row;
-		}
-
-		/* Check if chip is expecting command */
-		if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
-			/* Do not warn if only 2 id bytes are read */
-			if (!(ns->regs.command == NAND_CMD_READID &&
-			    NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
-				/*
-				 * We are in a situation where something other than a command
-				 * was expected, but a command was input. In this case ignore
-				 * previous command(s)/state(s) and accept the last one.
-				 */
-				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
-					"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
-			}
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-		}
-
-		NS_DBG("command byte corresponding to %s state accepted\n",
-			get_state_name(get_state_by_command(byte)));
-		ns->regs.command = byte;
-		switch_state(ns);
-
-	} else if (ns->lines.ale == 1) {
-		/*
-		 * The byte written is an address.
-		 */
-
-		if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
-
-			NS_DBG("write_byte: operation isn't known yet, identify it\n");
-
-			if (find_operation(ns, 1) < 0)
-				return;
-
-			if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
-				switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-				return;
-			}
-
-			ns->regs.count = 0;
-			switch (NS_STATE(ns->nxstate)) {
-				case STATE_ADDR_PAGE:
-					ns->regs.num = ns->geom.pgaddrbytes;
-					break;
-				case STATE_ADDR_SEC:
-					ns->regs.num = ns->geom.secaddrbytes;
-					break;
-				case STATE_ADDR_ZERO:
-					ns->regs.num = 1;
-					break;
-				default:
-					BUG();
-			}
-		}
-
-		/* Check that chip is expecting address */
-		if (!(ns->nxstate & STATE_ADDR_MASK)) {
-			NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
-				"switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-			return;
-		}
-
-		/* Check if this is expected byte */
-		if (ns->regs.count == ns->regs.num) {
-			NS_ERR("write_byte: no more address bytes expected\n");
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-			return;
-		}
-
-		accept_addr_byte(ns, byte);
-
-		ns->regs.count += 1;
-
-		NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
-				(uint)byte, ns->regs.count, ns->regs.num);
-
-		if (ns->regs.count == ns->regs.num) {
-			NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
-			switch_state(ns);
-		}
-
-	} else {
-		/*
-		 * The byte written is input data.
-		 */
-
-		/* Check that chip is expecting data input */
-		if (!(ns->state & STATE_DATAIN_MASK)) {
-			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
-				"switch to %s\n", (uint)byte,
-				get_state_name(ns->state), get_state_name(STATE_READY));
-			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-			return;
-		}
-
-		/* Check if this is expected byte */
-		if (ns->regs.count == ns->regs.num) {
-			NS_WARN("write_byte: %u input bytes have already been accepted, ignore write\n",
-					ns->regs.num);
-			return;
-		}
-
-		if (ns->busw == 8) {
-			ns->buf.byte[ns->regs.count] = byte;
-			ns->regs.count += 1;
-		} else {
-			ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
-			ns->regs.count += 2;
-		}
-	}
-
-	return;
-}
-
-static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nandsim *ns = nand_get_controller_data(chip);
-
-	ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
-	ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
-	ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
-
-	if (cmd != NAND_CMD_NONE)
-		ns_nand_write_byte(mtd, cmd);
-}
-
-static int ns_device_ready(struct mtd_info *mtd)
-{
-	NS_DBG("device_ready\n");
-	return 1;
-}
-
-static uint16_t ns_nand_read_word(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	NS_DBG("read_word\n");
-
-	return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
-}
-
-static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nandsim *ns = nand_get_controller_data(chip);
-
-	/* Check that chip is expecting data input */
-	if (!(ns->state & STATE_DATAIN_MASK)) {
-		NS_ERR("write_buf: data input isn't expected, state is %s, "
-			"switch to STATE_READY\n", get_state_name(ns->state));
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-		return;
-	}
-
-	/* Check if these are expected bytes */
-	if (ns->regs.count + len > ns->regs.num) {
-		NS_ERR("write_buf: too many input bytes\n");
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-		return;
-	}
-
-	memcpy(ns->buf.byte + ns->regs.count, buf, len);
-	ns->regs.count += len;
-
-	if (ns->regs.count == ns->regs.num) {
-		NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
-	}
-}
-
-static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nandsim *ns = nand_get_controller_data(chip);
-
-	/* Sanity and correctness checks */
-	if (!ns->lines.ce) {
-		NS_ERR("read_buf: chip is disabled\n");
-		return;
-	}
-	if (ns->lines.ale || ns->lines.cle) {
-		NS_ERR("read_buf: ALE or CLE pin is high\n");
-		return;
-	}
-	if (!(ns->state & STATE_DATAOUT_MASK)) {
-		NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
-			get_state_name(ns->state));
-		return;
-	}
-
-	if (NS_STATE(ns->state) != STATE_DATAOUT) {
-		int i;
-
-		for (i = 0; i < len; i++)
-			buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
-
-		return;
-	}
-
-	/* Check if these are expected bytes */
-	if (ns->regs.count + len > ns->regs.num) {
-		NS_ERR("read_buf: too many bytes to read\n");
-		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
-		return;
-	}
-
-	memcpy(buf, ns->buf.byte + ns->regs.count, len);
-	ns->regs.count += len;
-
-	if (ns->regs.count == ns->regs.num) {
-		if (NS_STATE(ns->nxstate) == STATE_READY)
-			switch_state(ns);
-	}
-
-	return;
-}
-
-/*
- * Module initialization function
- */
-static int __init ns_init_module(void)
-{
-	struct nand_chip *chip;
-	struct nandsim *nand;
-	int retval = -ENOMEM, i;
-
-	if (bus_width != 8 && bus_width != 16) {
-		NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
-		return -EINVAL;
-	}
-
-	/* Allocate and initialize mtd_info, nand_chip and nandsim structures */
-	chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
-		       GFP_KERNEL);
-	if (!chip) {
-		NS_ERR("unable to allocate core structures.\n");
-		return -ENOMEM;
-	}
-	nsmtd       = nand_to_mtd(chip);
-	nand        = (struct nandsim *)(chip + 1);
-	nand_set_controller_data(chip, (void *)nand);
-
-	/*
-	 * Register simulator's callbacks.
-	 */
-	chip->cmd_ctrl	 = ns_hwcontrol;
-	chip->read_byte  = ns_nand_read_byte;
-	chip->dev_ready  = ns_device_ready;
-	chip->write_buf  = ns_nand_write_buf;
-	chip->read_buf   = ns_nand_read_buf;
-	chip->read_word  = ns_nand_read_word;
-	chip->ecc.mode   = NAND_ECC_SOFT;
-	chip->ecc.algo   = NAND_ECC_HAMMING;
-	/* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */
-	/* and 'badblocks' parameters to work */
-	chip->options   |= NAND_SKIP_BBTSCAN;
-
-	switch (bbt) {
-	case 2:
-		 chip->bbt_options |= NAND_BBT_NO_OOB;
-	case 1:
-		 chip->bbt_options |= NAND_BBT_USE_FLASH;
-	case 0:
-		break;
-	default:
-		NS_ERR("bbt has to be 0..2\n");
-		retval = -EINVAL;
-		goto error;
-	}
-	/*
-	 * Perform minimum nandsim structure initialization to handle
-	 * the initial ID read command correctly
-	 */
-	if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
-		nand->geom.idbytes = 8;
-	else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
-		nand->geom.idbytes = 6;
-	else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
-		nand->geom.idbytes = 4;
-	else
-		nand->geom.idbytes = 2;
-	nand->regs.status = NS_STATUS_OK(nand);
-	nand->nxstate = STATE_UNKNOWN;
-	nand->options |= OPT_PAGE512; /* temporary value */
-	memcpy(nand->ids, id_bytes, sizeof(nand->ids));
-	if (bus_width == 16) {
-		nand->busw = 16;
-		chip->options |= NAND_BUSWIDTH_16;
-	}
-
-	nsmtd->owner = THIS_MODULE;
-
-	if ((retval = parse_weakblocks()) != 0)
-		goto error;
-
-	if ((retval = parse_weakpages()) != 0)
-		goto error;
-
-	if ((retval = parse_gravepages()) != 0)
-		goto error;
-
-	retval = nand_scan_ident(nsmtd, 1, NULL);
-	if (retval) {
-		NS_ERR("cannot scan NAND Simulator device\n");
-		if (retval > 0)
-			retval = -ENXIO;
-		goto error;
-	}
-
-	if (bch) {
-		unsigned int eccsteps, eccbytes;
-		if (!mtd_nand_has_bch()) {
-			NS_ERR("BCH ECC support is disabled\n");
-			retval = -EINVAL;
-			goto error;
-		}
-		/* use 512-byte ecc blocks */
-		eccsteps = nsmtd->writesize/512;
-		eccbytes = (bch*13+7)/8;
-		/* do not bother supporting small page devices */
-		if ((nsmtd->oobsize < 64) || !eccsteps) {
-			NS_ERR("bch not available on small page devices\n");
-			retval = -EINVAL;
-			goto error;
-		}
-		if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
-			NS_ERR("invalid bch value %u\n", bch);
-			retval = -EINVAL;
-			goto error;
-		}
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_BCH;
-		chip->ecc.size = 512;
-		chip->ecc.strength = bch;
-		chip->ecc.bytes = eccbytes;
-		NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
-	}
-
-	retval = nand_scan_tail(nsmtd);
-	if (retval) {
-		NS_ERR("can't register NAND Simulator\n");
-		if (retval > 0)
-			retval = -ENXIO;
-		goto error;
-	}
-
-	if (overridesize) {
-		uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
-		if (new_size >> overridesize != nsmtd->erasesize) {
-			NS_ERR("overridesize is too big\n");
-			retval = -EINVAL;
-			goto err_exit;
-		}
-		/* N.B. This relies on nand_scan not doing anything with the size before we change it */
-		nsmtd->size = new_size;
-		chip->chipsize = new_size;
-		chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
-		chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
-	}
-
-	if ((retval = setup_wear_reporting(nsmtd)) != 0)
-		goto err_exit;
-
-	if ((retval = nandsim_debugfs_create(nand)) != 0)
-		goto err_exit;
-
-	if ((retval = init_nandsim(nsmtd)) != 0)
-		goto err_exit;
-
-	if ((retval = chip->scan_bbt(nsmtd)) != 0)
-		goto err_exit;
-
-	if ((retval = parse_badblocks(nand, nsmtd)) != 0)
-		goto err_exit;
-
-	/* Register NAND partitions */
-	retval = mtd_device_register(nsmtd, &nand->partitions[0],
-				     nand->nbparts);
-	if (retval != 0)
-		goto err_exit;
-
-	return 0;
-
-err_exit:
-	free_nandsim(nand);
-	nand_release(nsmtd);
-	for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
-		kfree(nand->partitions[i].name);
-error:
-	kfree(chip);
-	free_lists();
-
-	return retval;
-}
-
-module_init(ns_init_module);
-
-/*
- * Module clean-up function
- */
-static void __exit ns_cleanup_module(void)
-{
-	struct nand_chip *chip = mtd_to_nand(nsmtd);
-	struct nandsim *ns = nand_get_controller_data(chip);
-	int i;
-
-	nandsim_debugfs_remove(ns);
-	free_nandsim(ns);    /* Free nandsim private resources */
-	nand_release(nsmtd); /* Unregister driver */
-	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
-		kfree(ns->partitions[i].name);
-	kfree(mtd_to_nand(nsmtd));        /* Free other structures */
-	free_lists();
-}
-
-module_exit(ns_cleanup_module);
-
-MODULE_LICENSE ("GPL");
-MODULE_AUTHOR ("Artem B. Bityuckiy");
-MODULE_DESCRIPTION ("The NAND flash simulator");
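
[Editorial aside, not part of the patch: in the nandsim code moved above, the weakpages= and gravepages= module parameters share one small grammar, a comma-separated list of "page[:count]" entries with the count defaulting to 3, as parsed by parse_weakpages()/parse_gravepages(). The following is a hypothetical, stand-alone user-space sketch of that parsing loop, using strtoul() in place of simple_strtoul():]

#include <stdio.h>
#include <stdlib.h>

/* Sketch of the "page[:count],page[:count],..." grammar accepted by
 * nandsim's weakpages=/gravepages= parameters. */
static int parse_pagelist(char *w)
{
	if (!w || !*w)
		return 0;
	do {
		/* A leading '0' is the only way page number 0 is valid input. */
		int zero_ok = (*w == '0');
		unsigned long page_no = strtoul(w, &w, 0);
		unsigned long count = 3;	/* default max writes/reads */

		if (!zero_ok && !page_no)
			return -1;		/* invalid entry */
		if (*w == ':')
			count = strtoul(w + 1, &w, 0);
		if (*w == ',')
			w++;
		printf("page %lu, count %lu\n", page_no, count);
	} while (*w);
	return 0;
}

int main(void)
{
	char list[] = "0:2,17,42:5";

	return parse_pagelist(list) ? 1 : 0;
}

[Running it on "0:2,17,42:5" prints three entries, matching how the simulator builds its weak_pages/grave_pages lists before write_error()/read_error() consult them.]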
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
deleted file mode 100644
index d8a806894937..000000000000
--- a/drivers/mtd/nand/ndfc.c
+++ /dev/null
@@ -1,286 +0,0 @@ 
-/*
- *  Overview:
- *   Platform independent driver for NDFC (NanD Flash Controller)
- *   integrated into EP440 cores
- *
- *   Ported to an OF platform driver by Sean MacLennan
- *
- *   The NDFC supports multiple chips, but this driver only supports a
- *   single chip since I do not have access to any boards with
- *   multiple chips.
- *
- *  Author: Thomas Gleixner
- *
- *  Copyright 2006 IBM
- *  Copyright 2008 PIKA Technologies
- *    Sean MacLennan <smaclennan@pikatech.com>
- *
- *  This program is free software; you can redistribute	 it and/or modify it
- *  under  the terms of	 the GNU General  Public License as published by the
- *  Free Software Foundation;  either version 2 of the	License, or (at your
- *  option) any later version.
- *
- */
-#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/ndfc.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <asm/io.h>
-
-#define NDFC_MAX_CS    4
-
-struct ndfc_controller {
-	struct platform_device *ofdev;
-	void __iomem *ndfcbase;
-	struct nand_chip chip;
-	int chip_select;
-	struct nand_hw_control ndfc_control;
-};
-
-static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
-
-static void ndfc_select_chip(struct mtd_info *mtd, int chip)
-{
-	uint32_t ccr;
-	struct nand_chip *nchip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(nchip);
-
-	ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
-	if (chip >= 0) {
-		ccr &= ~NDFC_CCR_BS_MASK;
-		ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
-	} else
-		ccr |= NDFC_CCR_RESET_CE;
-	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
-}
-
-static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
-	else
-		writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
-}
-
-static int ndfc_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
-
-	return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
-}
-
-static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	uint32_t ccr;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
-
-	ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
-	ccr |= NDFC_CCR_RESET_ECC;
-	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
-	wmb();
-}
-
-static int ndfc_calculate_ecc(struct mtd_info *mtd,
-			      const u_char *dat, u_char *ecc_code)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
-	uint32_t ecc;
-	uint8_t *p = (uint8_t *)&ecc;
-
-	wmb();
-	ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
-	/* The NDFC uses Smart Media (SMC) byte order */
-	ecc_code[0] = p[1];
-	ecc_code[1] = p[2];
-	ecc_code[2] = p[3];
-
-	return 0;
-}
-
-/*
- * Speedups for buffer read/write/verify
- *
- * NDFC allows 32bit read/write of data. So we can speed up the buffer
- * functions. No further checking, as nand_base will always read/write
- * page aligned.
- */
-static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
-	uint32_t *p = (uint32_t *) buf;
-
-	for(;len > 0; len -= 4)
-		*p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
-}
-
-static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
-	uint32_t *p = (uint32_t *) buf;
-
-	for(;len > 0; len -= 4)
-		out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
-}
-
-/*
- * Initialize chip structure
- */
-static int ndfc_chip_init(struct ndfc_controller *ndfc,
-			  struct device_node *node)
-{
-	struct device_node *flash_np;
-	struct nand_chip *chip = &ndfc->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	int ret;
-
-	chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
-	chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
-	chip->cmd_ctrl = ndfc_hwcontrol;
-	chip->dev_ready = ndfc_ready;
-	chip->select_chip = ndfc_select_chip;
-	chip->chip_delay = 50;
-	chip->controller = &ndfc->ndfc_control;
-	chip->read_buf = ndfc_read_buf;
-	chip->write_buf = ndfc_write_buf;
-	chip->ecc.correct = nand_correct_data;
-	chip->ecc.hwctl = ndfc_enable_hwecc;
-	chip->ecc.calculate = ndfc_calculate_ecc;
-	chip->ecc.mode = NAND_ECC_HW;
-	chip->ecc.size = 256;
-	chip->ecc.bytes = 3;
-	chip->ecc.strength = 1;
-	nand_set_controller_data(chip, ndfc);
-
-	mtd->dev.parent = &ndfc->ofdev->dev;
-
-	flash_np = of_get_next_child(node, NULL);
-	if (!flash_np)
-		return -ENODEV;
-	nand_set_flash_node(chip, flash_np);
-
-	mtd->name = kasprintf(GFP_KERNEL, "%s.%s", dev_name(&ndfc->ofdev->dev),
-			      flash_np->name);
-	if (!mtd->name) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	ret = nand_scan(mtd, 1);
-	if (ret)
-		goto err;
-
-	ret = mtd_device_register(mtd, NULL, 0);
-
-err:
-	of_node_put(flash_np);
-	if (ret)
-		kfree(mtd->name);
-	return ret;
-}
-
-static int ndfc_probe(struct platform_device *ofdev)
-{
-	struct ndfc_controller *ndfc;
-	const __be32 *reg;
-	u32 ccr;
-	u32 cs;
-	int err, len;
-
-	/* Read the reg property to get the chip select */
-	reg = of_get_property(ofdev->dev.of_node, "reg", &len);
-	if (reg == NULL || len != 12) {
-		dev_err(&ofdev->dev, "unable to read reg property (%d)\n", len);
-		return -ENOENT;
-	}
-
-	cs = be32_to_cpu(reg[0]);
-	if (cs >= NDFC_MAX_CS) {
-		dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
-		return -EINVAL;
-	}
-
-	ndfc = &ndfc_ctrl[cs];
-	ndfc->chip_select = cs;
-
-	nand_hw_control_init(&ndfc->ndfc_control);
-	ndfc->ofdev = ofdev;
-	dev_set_drvdata(&ofdev->dev, ndfc);
-
-	ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
-	if (!ndfc->ndfcbase) {
-		dev_err(&ofdev->dev, "failed to get memory\n");
-		return -EIO;
-	}
-
-	ccr = NDFC_CCR_BS(ndfc->chip_select);
-
-	/* It is ok if ccr does not exist - just default to 0 */
-	reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
-	if (reg)
-		ccr |= be32_to_cpup(reg);
-
-	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
-
-	/* Set the bank settings if given */
-	reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
-	if (reg) {
-		int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
-		out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
-	}
-
-	err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
-	if (err) {
-		iounmap(ndfc->ndfcbase);
-		return err;
-	}
-
-	return 0;
-}
-
-static int ndfc_remove(struct platform_device *ofdev)
-{
-	struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
-	struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
-
-	nand_release(mtd);
-	kfree(mtd->name);
-
-	return 0;
-}
-
-static const struct of_device_id ndfc_match[] = {
-	{ .compatible = "ibm,ndfc", },
-	{}
-};
-MODULE_DEVICE_TABLE(of, ndfc_match);
-
-static struct platform_driver ndfc_driver = {
-	.driver = {
-		.name = "ndfc",
-		.of_match_table = ndfc_match,
-	},
-	.probe = ndfc_probe,
-	.remove = ndfc_remove,
-};
-
-module_platform_driver(ndfc_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
-MODULE_DESCRIPTION("OF Platform driver for NDFC");
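
[Editorial aside, not part of the patch: ndfc_calculate_ecc() above reads the 32-bit NDFC_ECC register and emits bytes p[1]..p[3] in Smart Media order, skipping p[0]. A minimal, endian-explicit sketch of that reordering, assuming the big-endian 440 cores this controller is integrated into (so p[1]..p[3] are the low three bytes, most significant first):]

#include <stdint.h>
#include <stdio.h>

/* Equivalent of the byte reordering in ndfc_calculate_ecc(), written
 * with shifts instead of a byte pointer over the register value. */
static void ndfc_ecc_to_smc_order(uint32_t ecc, uint8_t ecc_code[3])
{
	ecc_code[0] = (ecc >> 16) & 0xff;	/* p[1] on a big-endian CPU */
	ecc_code[1] = (ecc >> 8) & 0xff;	/* p[2] */
	ecc_code[2] = ecc & 0xff;		/* p[3] */
}

int main(void)
{
	uint8_t code[3];

	ndfc_ecc_to_smc_order(0x00123456, code);
	printf("%02x %02x %02x\n", code[0], code[1], code[2]);
	return 0;
}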
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
deleted file mode 100644
index 7bb4d2ea9342..000000000000
--- a/drivers/mtd/nand/nuc900_nand.c
+++ /dev/null
@@ -1,306 +0,0 @@ 
-/*
- * Copyright © 2009 Nuvoton technology corporation.
- *
- * Wan ZongShun <mcuos.com@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation;version 2 of the License.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-#define REG_FMICSR   	0x00
-#define REG_SMCSR    	0xa0
-#define REG_SMISR    	0xac
-#define REG_SMCMD    	0xb0
-#define REG_SMADDR   	0xb4
-#define REG_SMDATA   	0xb8
-
-#define RESET_FMI	0x01
-#define NAND_EN		0x08
-#define READYBUSY	(0x01 << 18)
-
-#define SWRST		0x01
-#define PSIZE		(0x01 << 3)
-#define DMARWEN		(0x03 << 1)
-#define BUSWID		(0x01 << 4)
-#define ECC4EN		(0x01 << 5)
-#define WP		(0x01 << 24)
-#define NANDCS		(0x01 << 25)
-#define ENDADDR		(0x01 << 31)
-
-#define read_data_reg(dev)		\
-	__raw_readl((dev)->reg + REG_SMDATA)
-
-#define write_data_reg(dev, val)	\
-	__raw_writel((val), (dev)->reg + REG_SMDATA)
-
-#define write_cmd_reg(dev, val)		\
-	__raw_writel((val), (dev)->reg + REG_SMCMD)
-
-#define write_addr_reg(dev, val)	\
-	__raw_writel((val), (dev)->reg + REG_SMADDR)
-
-struct nuc900_nand {
-	struct nand_chip chip;
-	void __iomem *reg;
-	struct clk *clk;
-	spinlock_t lock;
-};
-
-static inline struct nuc900_nand *mtd_to_nuc900(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct nuc900_nand, chip);
-}
-
-static const struct mtd_partition partitions[] = {
-	{
-	 .name = "NAND FS 0",
-	 .offset = 0,
-	 .size = 8 * 1024 * 1024
-	},
-	{
-	 .name = "NAND FS 1",
-	 .offset = MTDPART_OFS_APPEND,
-	 .size = MTDPART_SIZ_FULL
-	}
-};
-
-static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
-{
-	unsigned char ret;
-	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
-
-	ret = (unsigned char)read_data_reg(nand);
-
-	return ret;
-}
-
-static void nuc900_nand_read_buf(struct mtd_info *mtd,
-				 unsigned char *buf, int len)
-{
-	int i;
-	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
-
-	for (i = 0; i < len; i++)
-		buf[i] = (unsigned char)read_data_reg(nand);
-}
-
-static void nuc900_nand_write_buf(struct mtd_info *mtd,
-				  const unsigned char *buf, int len)
-{
-	int i;
-	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
-
-	for (i = 0; i < len; i++)
-		write_data_reg(nand, buf[i]);
-}
-
-static int nuc900_check_rb(struct nuc900_nand *nand)
-{
-	unsigned int val;
-	spin_lock(&nand->lock);
-	val = __raw_readl(nand->reg + REG_SMISR);
-	val &= READYBUSY;
-	spin_unlock(&nand->lock);
-
-	return val;
-}
-
-static int nuc900_nand_devready(struct mtd_info *mtd)
-{
-	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
-	int ready;
-
-	ready = (nuc900_check_rb(nand)) ? 1 : 0;
-	return ready;
-}
-
-static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
-				   int column, int page_addr)
-{
-	register struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
-
-	if (command == NAND_CMD_READOOB) {
-		column += mtd->writesize;
-		command = NAND_CMD_READ0;
-	}
-
-	write_cmd_reg(nand, command & 0xff);
-
-	if (column != -1 || page_addr != -1) {
-
-		if (column != -1) {
-			if (chip->options & NAND_BUSWIDTH_16 &&
-					!nand_opcode_8bits(command))
-				column >>= 1;
-			write_addr_reg(nand, column);
-			write_addr_reg(nand, column >> 8 | ENDADDR);
-		}
-		if (page_addr != -1) {
-			write_addr_reg(nand, page_addr);
-
-			if (chip->chipsize > (128 << 20)) {
-				write_addr_reg(nand, page_addr >> 8);
-				write_addr_reg(nand, page_addr >> 16 | ENDADDR);
-			} else {
-				write_addr_reg(nand, page_addr >> 8 | ENDADDR);
-			}
-		}
-	}
-
-	switch (command) {
-	case NAND_CMD_CACHEDPROG:
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-	case NAND_CMD_ERASE2:
-	case NAND_CMD_SEQIN:
-	case NAND_CMD_RNDIN:
-	case NAND_CMD_STATUS:
-		return;
-
-	case NAND_CMD_RESET:
-		if (chip->dev_ready)
-			break;
-		udelay(chip->chip_delay);
-
-		write_cmd_reg(nand, NAND_CMD_STATUS);
-		write_cmd_reg(nand, command);
-
-		while (!nuc900_check_rb(nand))
-			;
-
-		return;
-
-	case NAND_CMD_RNDOUT:
-		write_cmd_reg(nand, NAND_CMD_RNDOUTSTART);
-		return;
-
-	case NAND_CMD_READ0:
-
-		write_cmd_reg(nand, NAND_CMD_READSTART);
-	default:
-
-		if (!chip->dev_ready) {
-			udelay(chip->chip_delay);
-			return;
-		}
-	}
-
-	/* Apply this short delay always to ensure that we do wait tWB in
-	 * any case on any machine. */
-	ndelay(100);
-
-	while (!chip->dev_ready(mtd))
-		;
-}
-
-
-static void nuc900_nand_enable(struct nuc900_nand *nand)
-{
-	unsigned int val;
-	spin_lock(&nand->lock);
-	__raw_writel(RESET_FMI, (nand->reg + REG_FMICSR));
-
-	val = __raw_readl(nand->reg + REG_FMICSR);
-
-	if (!(val & NAND_EN))
-		__raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
-
-	val = __raw_readl(nand->reg + REG_SMCSR);
-
-	val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS);
-	val |= WP;
-
-	__raw_writel(val, nand->reg + REG_SMCSR);
-
-	spin_unlock(&nand->lock);
-}
-
-static int nuc900_nand_probe(struct platform_device *pdev)
-{
-	struct nuc900_nand *nuc900_nand;
-	struct nand_chip *chip;
-	struct mtd_info *mtd;
-	struct resource *res;
-
-	nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
-				   GFP_KERNEL);
-	if (!nuc900_nand)
-		return -ENOMEM;
-	chip = &(nuc900_nand->chip);
-	mtd = nand_to_mtd(chip);
-
-	mtd->dev.parent		= &pdev->dev;
-	spin_lock_init(&nuc900_nand->lock);
-
-	nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(nuc900_nand->clk))
-		return -ENOENT;
-	clk_enable(nuc900_nand->clk);
-
-	chip->cmdfunc		= nuc900_nand_command_lp;
-	chip->dev_ready		= nuc900_nand_devready;
-	chip->read_byte		= nuc900_nand_read_byte;
-	chip->write_buf		= nuc900_nand_write_buf;
-	chip->read_buf		= nuc900_nand_read_buf;
-	chip->chip_delay	= 50;
-	chip->options		= 0;
-	chip->ecc.mode		= NAND_ECC_SOFT;
-	chip->ecc.algo		= NAND_ECC_HAMMING;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(nuc900_nand->reg))
-		return PTR_ERR(nuc900_nand->reg);
-
-	nuc900_nand_enable(nuc900_nand);
-
-	if (nand_scan(mtd, 1))
-		return -ENXIO;
-
-	mtd_device_register(mtd, partitions, ARRAY_SIZE(partitions));
-
-	platform_set_drvdata(pdev, nuc900_nand);
-
-	return 0;
-}
-
-static int nuc900_nand_remove(struct platform_device *pdev)
-{
-	struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
-
-	nand_release(nand_to_mtd(&nuc900_nand->chip));
-	clk_disable(nuc900_nand->clk);
-
-	return 0;
-}
-
-static struct platform_driver nuc900_nand_driver = {
-	.probe		= nuc900_nand_probe,
-	.remove		= nuc900_nand_remove,
-	.driver		= {
-		.name	= "nuc900-fmi",
-	},
-};
-
-module_platform_driver(nuc900_nand_driver);
-
-MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
-MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:nuc900-fmi");
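
[Editorial aside, not part of the patch: in nuc900_nand_command_lp() above, the address phase is easiest to follow as the sequence of writes to REG_SMADDR, with ENDADDR (bit 31) flagging the last cycle of the column and row groups. A hypothetical user-space sketch that only prints that sequence (the 16-bit column halving is omitted):]

#include <stdint.h>
#include <stdio.h>

#define ENDADDR	(0x01u << 31)

/* Mirror of the write_addr_reg() calls issued for one command. */
static void emit_addr_cycles(int column, int page_addr, uint64_t chipsize)
{
	if (column != -1) {
		printf("addr cycle: 0x%08x\n", (unsigned int)column);
		printf("addr cycle: 0x%08x\n", (unsigned int)(column >> 8) | ENDADDR);
	}
	if (page_addr != -1) {
		printf("addr cycle: 0x%08x\n", (unsigned int)page_addr);
		if (chipsize > (128ULL << 20)) {
			/* chips above 128MiB need a third row-address cycle */
			printf("addr cycle: 0x%08x\n", (unsigned int)(page_addr >> 8));
			printf("addr cycle: 0x%08x\n", (unsigned int)(page_addr >> 16) | ENDADDR);
		} else {
			printf("addr cycle: 0x%08x\n", (unsigned int)(page_addr >> 8) | ENDADDR);
		}
	}
}

int main(void)
{
	/* e.g. a 256 MiB chip: three row-address cycles, ENDADDR on the last */
	emit_addr_cycles(0x40, 0x1234, 256ULL << 20);
	return 0;
}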
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
deleted file mode 100644
index ebfa1751051d..000000000000
--- a/drivers/mtd/nand/omap2.c
+++ /dev/null
@@ -1,2214 +0,0 @@ 
-/*
- * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
- * Copyright © 2004 Micron Technology Inc.
- * Copyright © 2004 David Brownell
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/platform_device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/jiffies.h>
-#include <linux/sched.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/omap-dma.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-
-#include <linux/mtd/nand_bch.h>
-#include <linux/platform_data/elm.h>
-
-#include <linux/omap-gpmc.h>
-#include <linux/platform_data/mtd-nand-omap2.h>
-
-#define	DRIVER_NAME	"omap2-nand"
-#define	OMAP_NAND_TIMEOUT_MS	5000
-
-#define NAND_Ecc_P1e		(1 << 0)
-#define NAND_Ecc_P2e		(1 << 1)
-#define NAND_Ecc_P4e		(1 << 2)
-#define NAND_Ecc_P8e		(1 << 3)
-#define NAND_Ecc_P16e		(1 << 4)
-#define NAND_Ecc_P32e		(1 << 5)
-#define NAND_Ecc_P64e		(1 << 6)
-#define NAND_Ecc_P128e		(1 << 7)
-#define NAND_Ecc_P256e		(1 << 8)
-#define NAND_Ecc_P512e		(1 << 9)
-#define NAND_Ecc_P1024e		(1 << 10)
-#define NAND_Ecc_P2048e		(1 << 11)
-
-#define NAND_Ecc_P1o		(1 << 16)
-#define NAND_Ecc_P2o		(1 << 17)
-#define NAND_Ecc_P4o		(1 << 18)
-#define NAND_Ecc_P8o		(1 << 19)
-#define NAND_Ecc_P16o		(1 << 20)
-#define NAND_Ecc_P32o		(1 << 21)
-#define NAND_Ecc_P64o		(1 << 22)
-#define NAND_Ecc_P128o		(1 << 23)
-#define NAND_Ecc_P256o		(1 << 24)
-#define NAND_Ecc_P512o		(1 << 25)
-#define NAND_Ecc_P1024o		(1 << 26)
-#define NAND_Ecc_P2048o		(1 << 27)
-
-#define TF(value)	(value ? 1 : 0)
-
-#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
-#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
-#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
-#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
-#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
-#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
-#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
-#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)
-
-#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
-#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
-#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
-#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
-#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
-#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
-#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
-#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)
-
-#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
-#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
-#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
-#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
-#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
-#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
-#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
-#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)
-
-#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
-#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
-#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
-#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
-#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
-#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
-#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
-#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)
-
-#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
-#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
-
-#define	PREFETCH_CONFIG1_CS_SHIFT	24
-#define	ECC_CONFIG_CS_SHIFT		1
-#define	CS_MASK				0x7
-#define	ENABLE_PREFETCH			(0x1 << 7)
-#define	DMA_MPU_MODE_SHIFT		2
-#define	ECCSIZE0_SHIFT			12
-#define	ECCSIZE1_SHIFT			22
-#define	ECC1RESULTSIZE			0x1
-#define	ECCCLEAR			0x100
-#define	ECC1				0x1
-#define	PREFETCH_FIFOTHRESHOLD_MAX	0x40
-#define	PREFETCH_FIFOTHRESHOLD(val)	((val) << 8)
-#define	PREFETCH_STATUS_COUNT(val)	(val & 0x00003fff)
-#define	PREFETCH_STATUS_FIFO_CNT(val)	((val >> 24) & 0x7F)
-#define	STATUS_BUFF_EMPTY		0x00000001
-
-#define SECTOR_BYTES		512
-/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
-#define BCH4_BIT_PAD		4
-
-/* GPMC ecc engine settings for read */
-#define BCH_WRAPMODE_1		1	/* BCH wrap mode 1 */
-#define BCH8R_ECC_SIZE0		0x1a	/* ecc_size0 = 26 */
-#define BCH8R_ECC_SIZE1		0x2	/* ecc_size1 = 2 */
-#define BCH4R_ECC_SIZE0		0xd	/* ecc_size0 = 13 */
-#define BCH4R_ECC_SIZE1		0x3	/* ecc_size1 = 3 */
-
-/* GPMC ecc engine settings for write */
-#define BCH_WRAPMODE_6		6	/* BCH wrap mode 6 */
-#define BCH_ECC_SIZE0		0x0	/* ecc_size0 = 0, no oob protection */
-#define BCH_ECC_SIZE1		0x20	/* ecc_size1 = 32 */
-
-#define BADBLOCK_MARKER_LENGTH		2
-
-static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
-				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
-				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
-				0x07, 0x0e};
-static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
-	0xac, 0x6b, 0xff, 0x99, 0x7b};
-static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
-
-/* Shared among all NAND instances to synchronize access to the ECC Engine */
-static struct nand_hw_control omap_gpmc_controller = {
-	.lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
-	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
-};
-
-struct omap_nand_info {
-	struct nand_chip		nand;
-	struct platform_device		*pdev;
-
-	int				gpmc_cs;
-	bool				dev_ready;
-	enum nand_io			xfer_type;
-	int				devsize;
-	enum omap_ecc			ecc_opt;
-	struct device_node		*elm_of_node;
-
-	unsigned long			phys_base;
-	struct completion		comp;
-	struct dma_chan			*dma;
-	int				gpmc_irq_fifo;
-	int				gpmc_irq_count;
-	enum {
-		OMAP_NAND_IO_READ = 0,	/* read */
-		OMAP_NAND_IO_WRITE,	/* write */
-	} iomode;
-	u_char				*buf;
-	int					buf_len;
-	/* Interface to GPMC */
-	struct gpmc_nand_regs		reg;
-	struct gpmc_nand_ops		*ops;
-	bool				flash_bbt;
-	/* fields specific for BCHx_HW ECC scheme */
-	struct device			*elm_dev;
-	/* NAND ready gpio */
-	struct gpio_desc		*ready_gpiod;
-};
-
-static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
-}
-
-/**
- * omap_prefetch_enable - configures and starts prefetch transfer
- * @cs: cs (chip select) number
- * @fifo_th: fifo threshold to be used for read/ write
- * @dma_mode: dma mode enable (1) or disable (0)
- * @u32_count: number of bytes to be transferred
- * @is_write: prefetch read (0) or post-write (1) mode
- * @info: NAND device structure
- */
-static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
-	unsigned int u32_count, int is_write, struct omap_nand_info *info)
-{
-	u32 val;
-
-	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
-		return -1;
-
-	if (readl(info->reg.gpmc_prefetch_control))
-		return -EBUSY;
-
-	/* Set the amount of bytes to be prefetched */
-	writel(u32_count, info->reg.gpmc_prefetch_config2);
-
-	/* Set dma/mpu mode and the prefetch read/post-write direction,
-	 * enable the engine, and record which CS has requested the transfer.
-	 */
-	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
-		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
-		(dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
-	writel(val, info->reg.gpmc_prefetch_config1);
-
-	/*  Start the prefetch engine */
-	writel(0x1, info->reg.gpmc_prefetch_control);
-
-	return 0;
-}
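-
-/*
- * As a worked example (values assumed): a polled read on CS0 with the
- * maximum FIFO threshold, omap_prefetch_enable(0, 0x40, 0, len, 0, info),
- * programs PREFETCH_CONFIG1 with
- *	(0 << 24) | (0x40 << 8) | (0x1 << 7) | (0 << 2) | 0 = 0x4080,
- * while the DMA write case (dma_mode = 1, is_write = 1) yields 0x4085.
- */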
-
-/**
- * omap_prefetch_reset - disables and stops the prefetch engine
- */
-static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
-{
-	u32 config1;
-
-	/* check if the same module/cs is trying to reset */
-	config1 = readl(info->reg.gpmc_prefetch_config1);
-	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
-		return -EINVAL;
-
-	/* Stop the PFPW engine */
-	writel(0x0, info->reg.gpmc_prefetch_control);
-
-	/* Reset/disable the PFPW engine */
-	writel(0x0, info->reg.gpmc_prefetch_config1);
-
-	return 0;
-}
-
-/**
- * omap_hwcontrol - hardware specific access to control-lines
- * @mtd: MTD device structure
- * @cmd: command to device
- * @ctrl: control line bits:
- * NAND_NCE: bit 0 -> don't care
- * NAND_CLE: bit 1 -> Command Latch
- * NAND_ALE: bit 2 -> Address Latch
- *
- * NOTE: boards may use different bits for these!!
- */
-static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-
-	if (cmd != NAND_CMD_NONE) {
-		if (ctrl & NAND_CLE)
-			writeb(cmd, info->reg.gpmc_nand_command);
-
-		else if (ctrl & NAND_ALE)
-			writeb(cmd, info->reg.gpmc_nand_address);
-
-		else /* NAND_NCE */
-			writeb(cmd, info->reg.gpmc_nand_data);
-	}
-}
-
-/**
- * omap_read_buf8 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- */
-static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-
-	ioread8_rep(nand->IO_ADDR_R, buf, len);
-}
-
-/**
- * omap_write_buf8 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	u_char *p = (u_char *)buf;
-	bool status;
-
-	while (len--) {
-		iowrite8(*p++, info->nand.IO_ADDR_W);
-		/* wait until buffer is available for write */
-		do {
-			status = info->ops->nand_writebuffer_empty();
-		} while (!status);
-	}
-}
-
-/**
- * omap_read_buf16 - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- */
-static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-
-	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
-}
-
-/**
- * omap_write_buf16 - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	u16 *p = (u16 *) buf;
-	bool status;
-	/* FIXME try bursts of writesw() or DMA ... */
-	len >>= 1;
-
-	while (len--) {
-		iowrite16(*p++, info->nand.IO_ADDR_W);
-		/* wait until buffer is available for write */
-		do {
-			status = info->ops->nand_writebuffer_empty();
-		} while (!status);
-	}
-}
-
-/**
- * omap_read_buf_pref - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- */
-static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	uint32_t r_count = 0;
-	int ret = 0;
-	u32 *p = (u32 *)buf;
-
-	/* take care of subpage reads */
-	if (len % 4) {
-		if (info->nand.options & NAND_BUSWIDTH_16)
-			omap_read_buf16(mtd, buf, len % 4);
-		else
-			omap_read_buf8(mtd, buf, len % 4);
-		p = (u32 *) (buf + len % 4);
-		len -= len % 4;
-	}
-
-	/* configure and start prefetch transfer */
-	ret = omap_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
-	if (ret) {
-		/* PFPW engine is busy, use cpu copy method */
-		if (info->nand.options & NAND_BUSWIDTH_16)
-			omap_read_buf16(mtd, (u_char *)p, len);
-		else
-			omap_read_buf8(mtd, (u_char *)p, len);
-	} else {
-		do {
-			r_count = readl(info->reg.gpmc_prefetch_status);
-			r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
-			r_count = r_count >> 2;
-			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
-			p += r_count;
-			len -= r_count << 2;
-		} while (len);
-		/* disable and stop the PFPW engine */
-		omap_prefetch_reset(info->gpmc_cs, info);
-	}
-}
-
-/**
- * omap_write_buf_pref - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf_pref(struct mtd_info *mtd,
-					const u_char *buf, int len)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	uint32_t w_count = 0;
-	int i = 0, ret = 0;
-	u16 *p = (u16 *)buf;
-	unsigned long tim, limit;
-	u32 val;
-
-	/* take care of subpage writes */
-	if (len % 2 != 0) {
-		writeb(*buf, info->nand.IO_ADDR_W);
-		p = (u16 *)(buf + 1);
-		len--;
-	}
-
-	/*  configure and start prefetch transfer */
-	ret = omap_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
-	if (ret) {
-		/* PFPW engine is busy, use cpu copy method */
-		if (info->nand.options & NAND_BUSWIDTH_16)
-			omap_write_buf16(mtd, (u_char *)p, len);
-		else
-			omap_write_buf8(mtd, (u_char *)p, len);
-	} else {
-		while (len) {
-			w_count = readl(info->reg.gpmc_prefetch_status);
-			w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
-			w_count = w_count >> 1;
-			for (i = 0; (i < w_count) && len; i++, len -= 2)
-				iowrite16(*p++, info->nand.IO_ADDR_W);
-		}
-		/* wait for data to be flushed out before resetting the prefetch */
-		tim = 0;
-		limit = (loops_per_jiffy *
-					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-		do {
-			cpu_relax();
-			val = readl(info->reg.gpmc_prefetch_status);
-			val = PREFETCH_STATUS_COUNT(val);
-		} while (val && (tim++ < limit));
-
-		/* disable and stop the PFPW engine */
-		omap_prefetch_reset(info->gpmc_cs, info);
-	}
-}
-
-/*
- * omap_nand_dma_callback: callback on the completion of dma transfer
- * @data: pointer to completion data structure
- */
-static void omap_nand_dma_callback(void *data)
-{
-	complete((struct completion *) data);
-}
-
-/*
- * omap_nand_dma_transfer: configure and start dma transfer
- * @mtd: MTD device structure
- * @addr: virtual address in RAM of source/destination
- * @len: number of data bytes to be transferred
- * @is_write: flag for read/write operation
- */
-static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
-					unsigned int len, int is_write)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	struct dma_async_tx_descriptor *tx;
-	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
-							DMA_FROM_DEVICE;
-	struct scatterlist sg;
-	unsigned long tim, limit;
-	unsigned n;
-	int ret;
-	u32 val;
-
-	if (!virt_addr_valid(addr))
-		goto out_copy;
-
-	sg_init_one(&sg, addr, len);
-	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
-	if (n == 0) {
-		dev_err(&info->pdev->dev,
-			"Couldn't DMA map a %d byte buffer\n", len);
-		goto out_copy;
-	}
-
-	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
-		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
-		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-	if (!tx)
-		goto out_copy_unmap;
-
-	tx->callback = omap_nand_dma_callback;
-	tx->callback_param = &info->comp;
-	dmaengine_submit(tx);
-
-	init_completion(&info->comp);
-
-	/* setup and start DMA using dma_addr */
-	dma_async_issue_pending(info->dma);
-
-	/*  configure and start prefetch transfer */
-	ret = omap_prefetch_enable(info->gpmc_cs,
-		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
-	if (ret)
-		/* PFPW engine is busy, use cpu copy method */
-		goto out_copy_unmap;
-
-	wait_for_completion(&info->comp);
-	tim = 0;
-	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-
-	do {
-		cpu_relax();
-		val = readl(info->reg.gpmc_prefetch_status);
-		val = PREFETCH_STATUS_COUNT(val);
-	} while (val && (tim++ < limit));
-
-	/* disable and stop the PFPW engine */
-	omap_prefetch_reset(info->gpmc_cs, info);
-
-	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-	return 0;
-
-out_copy_unmap:
-	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
-out_copy:
-	if (info->nand.options & NAND_BUSWIDTH_16)
-		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
-			: omap_write_buf16(mtd, (u_char *) addr, len);
-	else
-		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
-			: omap_write_buf8(mtd, (u_char *) addr, len);
-	return 0;
-}
-
-/**
- * omap_read_buf_dma_pref - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- */
-static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
-{
-	if (len <= mtd->oobsize)
-		omap_read_buf_pref(mtd, buf, len);
-	else
-		/* start transfer in DMA mode */
-		omap_nand_dma_transfer(mtd, buf, len, 0x0);
-}
-
-/**
- * omap_write_buf_dma_pref - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf_dma_pref(struct mtd_info *mtd,
-					const u_char *buf, int len)
-{
-	if (len <= mtd->oobsize)
-		omap_write_buf_pref(mtd, buf, len);
-	else
-		/* start transfer in DMA mode */
-		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
-}
-
-/*
- * omap_nand_irq - GPMC irq handler
- * @this_irq: gpmc irq number
- * @dev: omap_nand_info structure pointer is passed here
- */
-static irqreturn_t omap_nand_irq(int this_irq, void *dev)
-{
-	struct omap_nand_info *info = (struct omap_nand_info *) dev;
-	u32 bytes;
-
-	bytes = readl(info->reg.gpmc_prefetch_status);
-	bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
-	bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
-	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
-		if (this_irq == info->gpmc_irq_count)
-			goto done;
-
-		if (info->buf_len && (info->buf_len < bytes))
-			bytes = info->buf_len;
-		else if (!info->buf_len)
-			bytes = 0;
-		iowrite32_rep(info->nand.IO_ADDR_W,
-						(u32 *)info->buf, bytes >> 2);
-		info->buf = info->buf + bytes;
-		info->buf_len -= bytes;
-
-	} else {
-		ioread32_rep(info->nand.IO_ADDR_R,
-						(u32 *)info->buf, bytes >> 2);
-		info->buf = info->buf + bytes;
-
-		if (this_irq == info->gpmc_irq_count)
-			goto done;
-	}
-
-	return IRQ_HANDLED;
-
-done:
-	complete(&info->comp);
-
-	disable_irq_nosync(info->gpmc_irq_fifo);
-	disable_irq_nosync(info->gpmc_irq_count);
-
-	return IRQ_HANDLED;
-}
-
-/*
- * omap_read_buf_irq_pref - read data from NAND controller into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store data
- * @len: number of bytes to read
- */
-static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	int ret = 0;
-
-	if (len <= mtd->oobsize) {
-		omap_read_buf_pref(mtd, buf, len);
-		return;
-	}
-
-	info->iomode = OMAP_NAND_IO_READ;
-	info->buf = buf;
-	init_completion(&info->comp);
-
-	/*  configure and start prefetch transfer */
-	ret = omap_prefetch_enable(info->gpmc_cs,
-			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
-	if (ret)
-		/* PFPW engine is busy, use cpu copy method */
-		goto out_copy;
-
-	info->buf_len = len;
-
-	enable_irq(info->gpmc_irq_count);
-	enable_irq(info->gpmc_irq_fifo);
-
-	/* waiting for read to complete */
-	wait_for_completion(&info->comp);
-
-	/* disable and stop the PFPW engine */
-	omap_prefetch_reset(info->gpmc_cs, info);
-	return;
-
-out_copy:
-	if (info->nand.options & NAND_BUSWIDTH_16)
-		omap_read_buf16(mtd, buf, len);
-	else
-		omap_read_buf8(mtd, buf, len);
-}
-
-/*
- * omap_write_buf_irq_pref - write buffer to NAND controller
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
- */
-static void omap_write_buf_irq_pref(struct mtd_info *mtd,
-					const u_char *buf, int len)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	int ret = 0;
-	unsigned long tim, limit;
-	u32 val;
-
-	if (len <= mtd->oobsize) {
-		omap_write_buf_pref(mtd, buf, len);
-		return;
-	}
-
-	info->iomode = OMAP_NAND_IO_WRITE;
-	info->buf = (u_char *) buf;
-	init_completion(&info->comp);
-
-	/* configure and start prefetch transfer : size=24 */
-	ret = omap_prefetch_enable(info->gpmc_cs,
-		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
-	if (ret)
-		/* PFPW engine is busy, use cpu copy method */
-		goto out_copy;
-
-	info->buf_len = len;
-
-	enable_irq(info->gpmc_irq_count);
-	enable_irq(info->gpmc_irq_fifo);
-
-	/* waiting for write to complete */
-	wait_for_completion(&info->comp);
-
-	/* wait for data to be flushed out before resetting the prefetch */
-	tim = 0;
-	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
-	do {
-		val = readl(info->reg.gpmc_prefetch_status);
-		val = PREFETCH_STATUS_COUNT(val);
-		cpu_relax();
-	} while (val && (tim++ < limit));
-
-	/* disable and stop the PFPW engine */
-	omap_prefetch_reset(info->gpmc_cs, info);
-	return;
-
-out_copy:
-	if (info->nand.options & NAND_BUSWIDTH_16)
-		omap_write_buf16(mtd, buf, len);
-	else
-		omap_write_buf8(mtd, buf, len);
-}
-
-/**
- * gen_true_ecc - Generate the true ECC value
- * @ecc_buf: buffer to store ecc code
- *
- * The generated true ECC value can be used when correcting
- * data read from the NAND flash memory core
- */
-static void gen_true_ecc(u8 *ecc_buf)
-{
-	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
-		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
-
-	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
-			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
-	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
-			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
-	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
-			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
-}
-
-/**
- * omap_compare_ecc - Detect (up to 2-bit) and correct (1-bit) errors in data
- * @ecc_data1:  ecc code from nand spare area
- * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
- * @page_data:  page data
- *
- * This function compares two ECCs and indicates whether there is an error.
- * If the error can be corrected, it is corrected in the page buffer.
- * If there is no error, %0 is returned. If there is an error but it
- * was corrected, %1 is returned. Otherwise, %-EBADMSG is returned.
- */
-static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
-			    u8 *ecc_data2,	/* read from register */
-			    u8 *page_data)
-{
-	uint	i;
-	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
-	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
-	u8	ecc_bit[24];
-	u8	ecc_sum = 0;
-	u8	find_bit = 0;
-	uint	find_byte = 0;
-	int	isEccFF;
-
-	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
-
-	gen_true_ecc(ecc_data1);
-	gen_true_ecc(ecc_data2);
-
-	for (i = 0; i <= 2; i++) {
-		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
-		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
-	}
-
-	for (i = 0; i < 8; i++) {
-		tmp0_bit[i]     = *ecc_data1 % 2;
-		*ecc_data1	= *ecc_data1 / 2;
-	}
-
-	for (i = 0; i < 8; i++) {
-		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
-		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
-	}
-
-	for (i = 0; i < 8; i++) {
-		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
-		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
-	}
-
-	for (i = 0; i < 8; i++) {
-		comp0_bit[i]     = *ecc_data2 % 2;
-		*ecc_data2       = *ecc_data2 / 2;
-	}
-
-	for (i = 0; i < 8; i++) {
-		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
-		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
-	}
-
-	for (i = 0; i < 8; i++) {
-		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
-		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
-	}
-
-	for (i = 0; i < 6; i++)
-		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
-
-	for (i = 0; i < 8; i++)
-		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
-
-	for (i = 0; i < 8; i++)
-		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
-
-	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
-	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
-
-	for (i = 0; i < 24; i++)
-		ecc_sum += ecc_bit[i];
-
-	switch (ecc_sum) {
-	case 0:
-		/* Not reached because this function is not called if
-		 *  ECC values are equal
-		 */
-		return 0;
-
-	case 1:
-		/* Uncorrectable error */
-		pr_debug("ECC UNCORRECTED_ERROR 1\n");
-		return -EBADMSG;
-
-	case 11:
-		/* Uncorrectable error */
-		pr_debug("ECC UNCORRECTED_ERROR B\n");
-		return -EBADMSG;
-
-	case 12:
-		/* Correctable error */
-		find_byte = (ecc_bit[23] << 8) +
-			    (ecc_bit[21] << 7) +
-			    (ecc_bit[19] << 6) +
-			    (ecc_bit[17] << 5) +
-			    (ecc_bit[15] << 4) +
-			    (ecc_bit[13] << 3) +
-			    (ecc_bit[11] << 2) +
-			    (ecc_bit[9]  << 1) +
-			    ecc_bit[7];
-
-		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
-
-		pr_debug("Correcting single bit ECC error at offset: "
-				"%d, bit: %d\n", find_byte, find_bit);
-
-		page_data[find_byte] ^= (1 << find_bit);
-
-		return 1;
-	default:
-		if (isEccFF) {
-			if (ecc_data2[0] == 0 &&
-			    ecc_data2[1] == 0 &&
-			    ecc_data2[2] == 0)
-				return 0;
-		}
-		pr_debug("UNCORRECTED_ERROR default\n");
-		return -EBADMSG;
-	}
-}
-
-/**
- * omap_correct_data - Compares the ECC read with HW generated ECC
- * @mtd: MTD device structure
- * @dat: page data
- * @read_ecc: ecc read from nand flash
- * @calc_ecc: ecc read from HW ECC registers
- *
- * Compares the ECC read from the NAND spare area with the ECC register
- * values and, if the ECCs mismatch, calls omap_compare_ecc() for error
- * detection and correction. If there are no errors, %0 is returned. If
- * there were errors and all of them were corrected, the number of
- * corrected errors is returned. If uncorrectable errors exist, a negative
- * error code is returned.
- */
-static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
-				u_char *read_ecc, u_char *calc_ecc)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	int blockCnt = 0, i = 0, ret = 0;
-	int stat = 0;
-
-	/* Ex NAND_ECC_HW12_2048 */
-	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
-			(info->nand.ecc.size  == 2048))
-		blockCnt = 4;
-	else
-		blockCnt = 1;
-
-	for (i = 0; i < blockCnt; i++) {
-		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
-			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
-			if (ret < 0)
-				return ret;
-			/* keep track of the number of corrected errors */
-			stat += ret;
-		}
-		read_ecc += 3;
-		calc_ecc += 3;
-		dat      += 512;
-	}
-	return stat;
-}
-
-/**
- * omap_calculate_ecc - Generate non-inverted ECC bytes.
- * @mtd: MTD device structure
- * @dat: The pointer to data on which ecc is computed
- * @ecc_code: The ecc_code buffer
- *
- * Using non-inverted ECC can be considered ugly since writing a blank
- * page, i.e. padding, will clear the ECC bytes. This is no problem as
- * long as nobody tries to write data to the seemingly unused page.
- * Reading an erased page will produce an ECC mismatch between generated
- * and read ECC bytes that has to be dealt with separately.
- */
-static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-				u_char *ecc_code)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	u32 val;
-
-	val = readl(info->reg.gpmc_ecc_config);
-	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
-		return -EINVAL;
-
-	/* read ecc result */
-	val = readl(info->reg.gpmc_ecc1_result);
-	*ecc_code++ = val;          /* P128e, ..., P1e */
-	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
-	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
-	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
-
-	return 0;
-}
-
-/**
- * omap_enable_hwecc - This function enables the hardware ecc functionality
- * @mtd: MTD device structure
- * @mode: Read/Write mode
- */
-static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
-	u32 val;
-
-	/* clear ecc and enable bits */
-	val = ECCCLEAR | ECC1;
-	writel(val, info->reg.gpmc_ecc_control);
-
-	/* program ecc and result sizes */
-	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
-			 ECC1RESULTSIZE);
-	writel(val, info->reg.gpmc_ecc_size_config);
-
-	switch (mode) {
-	case NAND_ECC_READ:
-	case NAND_ECC_WRITE:
-		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
-		break;
-	case NAND_ECC_READSYN:
-		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
-		break;
-	default:
-		dev_info(&info->pdev->dev,
-			"error: unrecognized Mode[%d]!\n", mode);
-		break;
-	}
-
-	/* (ECC 16 or 8 bit col) | ( CS  )  | ECC Enable */
-	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
-	writel(val, info->reg.gpmc_ecc_config);
-}
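-
-/*
- * As a worked example (values assumed): for the 1-bit Hamming scheme with
- * ecc.size = 512 on an 8-bit chip at CS0, the code above programs
- *	gpmc_ecc_size_config = (((512 >> 1) - 1) << 22) | 0x1 = 0x3fc00001
- *	gpmc_ecc_config      = (0 << 7) | (0 << 1) | 0x1 = 0x00000001
- */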
-
-/**
- * omap_wait - wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND Chip structure
- *
- * The wait function is called during program and erase operations. Given
- * the way it is called from the MTD layer, we should wait until the NAND
- * chip is ready after the program/erase operation has completed.
- *
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
- */
-static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	unsigned long timeo = jiffies;
-	int status, state = this->state;
-
-	if (state == FL_ERASING)
-		timeo += msecs_to_jiffies(400);
-	else
-		timeo += msecs_to_jiffies(20);
-
-	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
-	while (time_before(jiffies, timeo)) {
-		status = readb(info->reg.gpmc_nand_data);
-		if (status & NAND_STATUS_READY)
-			break;
-		cond_resched();
-	}
-
-	status = readb(info->reg.gpmc_nand_data);
-	return status;
-}
-
-/**
- * omap_dev_ready - checks the NAND Ready GPIO line
- * @mtd: MTD device structure
- *
- * Returns true if ready and false if busy.
- */
-static int omap_dev_ready(struct mtd_info *mtd)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-
-	return gpiod_get_value(info->ready_gpiod);
-}
-
-/**
- * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
- * @mtd: MTD device structure
- * @mode: Read/Write mode
- *
- * When using BCH with SW correction (i.e. no ELM), sector size is set
- * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
- * for both reading and writing with:
- * eccsize0 = 0  (no additional protected byte in spare area)
- * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
- */
-static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
-{
-	unsigned int bch_type;
-	unsigned int dev_width, nsectors;
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	enum omap_ecc ecc_opt = info->ecc_opt;
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	u32 val, wr_mode;
-	unsigned int ecc_size1, ecc_size0;
-
-	/* GPMC configurations for calculating ECC */
-	switch (ecc_opt) {
-	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-		bch_type = 0;
-		nsectors = 1;
-		wr_mode	  = BCH_WRAPMODE_6;
-		ecc_size0 = BCH_ECC_SIZE0;
-		ecc_size1 = BCH_ECC_SIZE1;
-		break;
-	case OMAP_ECC_BCH4_CODE_HW:
-		bch_type = 0;
-		nsectors = chip->ecc.steps;
-		if (mode == NAND_ECC_READ) {
-			wr_mode	  = BCH_WRAPMODE_1;
-			ecc_size0 = BCH4R_ECC_SIZE0;
-			ecc_size1 = BCH4R_ECC_SIZE1;
-		} else {
-			wr_mode   = BCH_WRAPMODE_6;
-			ecc_size0 = BCH_ECC_SIZE0;
-			ecc_size1 = BCH_ECC_SIZE1;
-		}
-		break;
-	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-		bch_type = 1;
-		nsectors = 1;
-		wr_mode	  = BCH_WRAPMODE_6;
-		ecc_size0 = BCH_ECC_SIZE0;
-		ecc_size1 = BCH_ECC_SIZE1;
-		break;
-	case OMAP_ECC_BCH8_CODE_HW:
-		bch_type = 1;
-		nsectors = chip->ecc.steps;
-		if (mode == NAND_ECC_READ) {
-			wr_mode	  = BCH_WRAPMODE_1;
-			ecc_size0 = BCH8R_ECC_SIZE0;
-			ecc_size1 = BCH8R_ECC_SIZE1;
-		} else {
-			wr_mode   = BCH_WRAPMODE_6;
-			ecc_size0 = BCH_ECC_SIZE0;
-			ecc_size1 = BCH_ECC_SIZE1;
-		}
-		break;
-	case OMAP_ECC_BCH16_CODE_HW:
-		bch_type = 0x2;
-		nsectors = chip->ecc.steps;
-		if (mode == NAND_ECC_READ) {
-			wr_mode	  = 0x01;
-			ecc_size0 = 52; /* ECC bits in nibbles per sector */
-			ecc_size1 = 0;  /* non-ECC bits in nibbles per sector */
-		} else {
-			wr_mode	  = 0x01;
-			ecc_size0 = 0;  /* extra bits in nibbles per sector */
-			ecc_size1 = 52; /* OOB bits in nibbles per sector */
-		}
-		break;
-	default:
-		return;
-	}
-
-	writel(ECC1, info->reg.gpmc_ecc_control);
-
-	/* Configure ecc size for BCH */
-	val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
-	writel(val, info->reg.gpmc_ecc_size_config);
-
-	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
-
-	/* BCH configuration */
-	val = ((1                        << 16) | /* enable BCH */
-	       (bch_type		 << 12) | /* BCH4/BCH8/BCH16 */
-	       (wr_mode                  <<  8) | /* wrap mode */
-	       (dev_width                <<  7) | /* bus width */
-	       (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
-	       (info->gpmc_cs            <<  1) | /* ECC CS */
-	       (0x1));                            /* enable ECC */
-
-	writel(val, info->reg.gpmc_ecc_config);
-
-	/* Clear ecc and enable bits */
-	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
-}
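-
-/*
- * As a worked example (values assumed): writing with OMAP_ECC_BCH8_CODE_HW
- * on an 8-bit chip at CS0 with 2KiB pages (4 sectors) selects bch_type = 1,
- * wr_mode = 6, ecc_size0 = 0 and ecc_size1 = 32, so the code above writes
- *	gpmc_ecc_size_config = (32 << 22) | (0 << 12) = 0x08000000
- *	gpmc_ecc_config      = (1 << 16) | (1 << 12) | (6 << 8) | (0 << 7) |
- *			       (3 << 4) | (0 << 1) | 0x1 = 0x00011631
- */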
-
-static u8  bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
-static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
-				0x97, 0x79, 0xe5, 0x24, 0xb5};
-
-/**
- * omap_calculate_ecc_bch - Generate BCH ECC bytes
- * @mtd:	MTD device structure
- * @dat:	The pointer to data on which ecc is computed
- * @ecc_calc:	Buffer to store the calculated ECC bytes
- *
- * Supports calculating BCH4/8/16 ECC vectors for the page
- */
-static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
-					const u_char *dat, u_char *ecc_calc)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	int eccbytes	= info->nand.ecc.bytes;
-	struct gpmc_nand_regs	*gpmc_regs = &info->reg;
-	u8 *ecc_code;
-	unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
-	u32 val;
-	int i, j;
-
-	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
-	for (i = 0; i < nsectors; i++) {
-		ecc_code = ecc_calc;
-		switch (info->ecc_opt) {
-		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-		case OMAP_ECC_BCH8_CODE_HW:
-			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-			bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
-			bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
-			*ecc_code++ = (bch_val4 & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val3 & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val2 & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
-			*ecc_code++ = (bch_val1 & 0xFF);
-			break;
-		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-		case OMAP_ECC_BCH4_CODE_HW:
-			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
-			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
-			*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
-			*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
-			*ecc_code++ = ((bch_val2 & 0xF) << 4) |
-				((bch_val1 >> 28) & 0xF);
-			*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
-			*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
-			*ecc_code++ = ((bch_val1 & 0xF) << 4);
-			break;
-		case OMAP_ECC_BCH16_CODE_HW:
-			val = readl(gpmc_regs->gpmc_bch_result6[i]);
-			ecc_code[0]  = ((val >>  8) & 0xFF);
-			ecc_code[1]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result5[i]);
-			ecc_code[2]  = ((val >> 24) & 0xFF);
-			ecc_code[3]  = ((val >> 16) & 0xFF);
-			ecc_code[4]  = ((val >>  8) & 0xFF);
-			ecc_code[5]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result4[i]);
-			ecc_code[6]  = ((val >> 24) & 0xFF);
-			ecc_code[7]  = ((val >> 16) & 0xFF);
-			ecc_code[8]  = ((val >>  8) & 0xFF);
-			ecc_code[9]  = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result3[i]);
-			ecc_code[10] = ((val >> 24) & 0xFF);
-			ecc_code[11] = ((val >> 16) & 0xFF);
-			ecc_code[12] = ((val >>  8) & 0xFF);
-			ecc_code[13] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result2[i]);
-			ecc_code[14] = ((val >> 24) & 0xFF);
-			ecc_code[15] = ((val >> 16) & 0xFF);
-			ecc_code[16] = ((val >>  8) & 0xFF);
-			ecc_code[17] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result1[i]);
-			ecc_code[18] = ((val >> 24) & 0xFF);
-			ecc_code[19] = ((val >> 16) & 0xFF);
-			ecc_code[20] = ((val >>  8) & 0xFF);
-			ecc_code[21] = ((val >>  0) & 0xFF);
-			val = readl(gpmc_regs->gpmc_bch_result0[i]);
-			ecc_code[22] = ((val >> 24) & 0xFF);
-			ecc_code[23] = ((val >> 16) & 0xFF);
-			ecc_code[24] = ((val >>  8) & 0xFF);
-			ecc_code[25] = ((val >>  0) & 0xFF);
-			break;
-		default:
-			return -EINVAL;
-		}
-
-		/* ECC scheme specific syndrome customizations */
-		switch (info->ecc_opt) {
-		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-			/* Add constant polynomial to remainder, so that
-			 * ECC of blank pages results in 0x0 on reading back */
-			for (j = 0; j < eccbytes; j++)
-				ecc_calc[j] ^= bch4_polynomial[j];
-			break;
-		case OMAP_ECC_BCH4_CODE_HW:
-			/* Set  8th ECC byte as 0x0 for ROM compatibility */
-			ecc_calc[eccbytes - 1] = 0x0;
-			break;
-		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-			/* Add constant polynomial to remainder, so that
-			 * ECC of blank pages results in 0x0 on reading back */
-			for (j = 0; j < eccbytes; j++)
-				ecc_calc[j] ^= bch8_polynomial[j];
-			break;
-		case OMAP_ECC_BCH8_CODE_HW:
-			/* Set 14th ECC byte as 0x0 for ROM compatibility */
-			ecc_calc[eccbytes - 1] = 0x0;
-			break;
-		case OMAP_ECC_BCH16_CODE_HW:
-			break;
-		default:
-			return -EINVAL;
-		}
-
-	ecc_calc += eccbytes;
-	}
-
-	return 0;
-}
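-
-/*
- * A note on the BCH4 packing above: the code treats gpmc_bch_result1[19:0]
- * as the upper 20 bits and gpmc_bch_result0[31:0] as the lower 32 bits of
- * the 52-bit remainder and serialises them MSB-first into 7 bytes, leaving
- * the low nibble of the last byte as the 4 bits of zero padding that
- * BCH4_BIT_PAD accounts for (56 = 52 + 4).
- */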
-
-/**
- * erased_sector_bitflips - count bit flips
- * @data:	data sector buffer
- * @oob:	oob buffer
- * @info:	omap_nand_info
- *
- * Check whether the number of bit flips in an erased page falls below the
- * correctable level. If it does, report the page as erased with correctable
- * bit flips; otherwise report it as an uncorrectable page.
- */
-static int erased_sector_bitflips(u_char *data, u_char *oob,
-		struct omap_nand_info *info)
-{
-	int flip_bits = 0, i;
-
-	for (i = 0; i < info->nand.ecc.size; i++) {
-		flip_bits += hweight8(~data[i]);
-		if (flip_bits > info->nand.ecc.strength)
-			return 0;
-	}
-
-	for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
-		flip_bits += hweight8(~oob[i]);
-		if (flip_bits > info->nand.ecc.strength)
-			return 0;
-	}
-
-	/*
-	 * Bit flips fall within the correctable level.
-	 * Fill the data area with 0xFF.
-	 */
-	if (flip_bits) {
-		memset(data, 0xFF, info->nand.ecc.size);
-		memset(oob, 0xFF, info->nand.ecc.bytes);
-	}
-
-	return flip_bits;
-}
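-
-/*
- * As a worked example (values assumed): with OMAP_ECC_BCH8_CODE_HW
- * (ecc.size = 512, ecc.strength = 8), a nominally erased sector that reads
- * back with three stray 0 bits across data and OOB gives flip_bits = 3, so
- * both buffers are rewritten to 0xFF and 3 is returned as the corrected
- * bitflip count; nine or more 0 bits make this helper return 0 and the
- * sector is then handed to the ELM for a real decode.
- */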
-
-/**
- * omap_elm_correct_data - corrects page data area in case error reported
- * @mtd:	MTD device structure
- * @data:	page data
- * @read_ecc:	ecc read from nand flash
- * @calc_ecc:	ecc read from HW ECC registers
- *
- * The calculated ECC vector is reported as zero for error-free pages.
- * For a non-zero ECC vector, first filter out erased pages, and then
- * process the data via the ELM to detect bit-flips.
- */
-static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
-				u_char *read_ecc, u_char *calc_ecc)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	struct nand_ecc_ctrl *ecc = &info->nand.ecc;
-	int eccsteps = info->nand.ecc.steps;
-	int i , j, stat = 0;
-	int eccflag, actual_eccbytes;
-	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
-	u_char *ecc_vec = calc_ecc;
-	u_char *spare_ecc = read_ecc;
-	u_char *erased_ecc_vec;
-	u_char *buf;
-	int bitflip_count;
-	bool is_error_reported = false;
-	u32 bit_pos, byte_pos, error_max, pos;
-	int err;
-
-	switch (info->ecc_opt) {
-	case OMAP_ECC_BCH4_CODE_HW:
-		/* omit  7th ECC byte reserved for ROM code compatibility */
-		actual_eccbytes = ecc->bytes - 1;
-		erased_ecc_vec = bch4_vector;
-		break;
-	case OMAP_ECC_BCH8_CODE_HW:
-		/* omit 14th ECC byte reserved for ROM code compatibility */
-		actual_eccbytes = ecc->bytes - 1;
-		erased_ecc_vec = bch8_vector;
-		break;
-	case OMAP_ECC_BCH16_CODE_HW:
-		actual_eccbytes = ecc->bytes;
-		erased_ecc_vec = bch16_vector;
-		break;
-	default:
-		dev_err(&info->pdev->dev, "invalid driver configuration\n");
-		return -EINVAL;
-	}
-
-	/* Initialize elm error vector to zero */
-	memset(err_vec, 0, sizeof(err_vec));
-
-	for (i = 0; i < eccsteps ; i++) {
-		eccflag = 0;	/* initialize eccflag */
-
-		/*
-		 * Check whether any error was reported; in case of an
-		 * error, a non-zero ECC is reported.
-		 */
-		for (j = 0; j < actual_eccbytes; j++) {
-			if (calc_ecc[j] != 0) {
-				eccflag = 1; /* non zero ecc, error present */
-				break;
-			}
-		}
-
-		if (eccflag == 1) {
-			if (memcmp(calc_ecc, erased_ecc_vec,
-						actual_eccbytes) == 0) {
-				/*
-				 * calc_ecc[] matches the ECC pattern of an
-				 * all-0xff page, so this is definitely an
-				 * erased page
-				 */
-			} else {
-				buf = &data[info->nand.ecc.size * i];
-				/*
-				 * count number of 0-bits in read_buf.
-				 * This check can be removed once a similar
-				 * check is introduced in generic NAND driver
-				 */
-				bitflip_count = erased_sector_bitflips(
-						buf, read_ecc, info);
-				if (bitflip_count) {
-					/*
-					 * The number of 0-bits is within ECC
-					 * limits, so this may be an erased page
-					 */
-					stat += bitflip_count;
-				} else {
-					/*
-					 * Too many 0-bits. It may be either
-					 * - a programmed page, OR
-					 * - an erased page with many bit-flips.
-					 * So this page requires a check by the ELM
-					 */
-					err_vec[i].error_reported = true;
-					is_error_reported = true;
-				}
-			}
-		}
-
-		/* Update the ecc vector */
-		calc_ecc += ecc->bytes;
-		read_ecc += ecc->bytes;
-	}
-
-	/* Check if any error reported */
-	if (!is_error_reported)
-		return stat;
-
-	/* Decode BCH error using ELM module */
-	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
-
-	err = 0;
-	for (i = 0; i < eccsteps; i++) {
-		if (err_vec[i].error_uncorrectable) {
-			dev_err(&info->pdev->dev,
-				"uncorrectable bit-flips found\n");
-			err = -EBADMSG;
-		} else if (err_vec[i].error_reported) {
-			for (j = 0; j < err_vec[i].error_count; j++) {
-				switch (info->ecc_opt) {
-				case OMAP_ECC_BCH4_CODE_HW:
-					/* Add 4 bits to take care of padding */
-					pos = err_vec[i].error_loc[j] +
-						BCH4_BIT_PAD;
-					break;
-				case OMAP_ECC_BCH8_CODE_HW:
-				case OMAP_ECC_BCH16_CODE_HW:
-					pos = err_vec[i].error_loc[j];
-					break;
-				default:
-					return -EINVAL;
-				}
-				error_max = (ecc->size + actual_eccbytes) * 8;
-				/* Calculate bit position of error */
-				bit_pos = pos % 8;
-
-				/* Calculate byte position of error */
-				byte_pos = (error_max - pos - 1) / 8;
-
-				if (pos < error_max) {
-					if (byte_pos < 512) {
-						pr_debug("bitflip@dat[%d]=%x\n",
-						     byte_pos, data[byte_pos]);
-						data[byte_pos] ^= 1 << bit_pos;
-					} else {
-						pr_debug("bitflip@oob[%d]=%x\n",
-							(byte_pos - 512),
-						     spare_ecc[byte_pos - 512]);
-						spare_ecc[byte_pos - 512] ^=
-							1 << bit_pos;
-					}
-				} else {
-					dev_err(&info->pdev->dev,
-						"invalid bit-flip @ %d:%d\n",
-						byte_pos, bit_pos);
-					err = -EBADMSG;
-				}
-			}
-		}
-
-		/* Update number of correctable errors */
-		stat += err_vec[i].error_count;
-
-		/* Update page data with sector size */
-		data += ecc->size;
-		spare_ecc += ecc->bytes;
-	}
-
-	return (err) ? err : stat;
-}
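-
-/*
- * As a worked example (values assumed): for OMAP_ECC_BCH8_CODE_HW,
- * ecc->size = 512 and actual_eccbytes = 13, so error_max = (512 + 13) * 8
- * = 4200. An ELM-reported error_loc of 4195 gives bit_pos = 4195 % 8 = 3
- * and byte_pos = (4200 - 4195 - 1) / 8 = 0, i.e. bit 3 of data[0] is
- * flipped back; positions that map to byte_pos >= 512 land in the ECC
- * bytes of the spare area instead.
- */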
-
-/**
- * omap_write_page_bch - BCH ecc based write page function for entire page
- * @mtd:		mtd info structure
- * @chip:		nand chip info structure
- * @buf:		data buffer
- * @oob_required:	must write chip->oob_poi to OOB
- * @page:		page number to write
- *
- * Custom write-page method evolved to support multi-sector writing in one shot
- */
-static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
-			       const uint8_t *buf, int oob_required, int page)
-{
-	int ret;
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-
-	/* Enable GPMC ecc engine */
-	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
-
-	/* Write data */
-	chip->write_buf(mtd, buf, mtd->writesize);
-
-	/* Update ecc vector from GPMC result registers */
-	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
-
-	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	/* Write ecc vector to OOB area */
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-/**
- * omap_read_page_bch - BCH ecc based page read function for entire page
- * @mtd:		mtd info structure
- * @chip:		nand chip info structure
- * @buf:		buffer to store read data
- * @oob_required:	caller requires OOB data read to chip->oob_poi
- * @page:		page number to read
- *
- * For the BCH ECC scheme, the GPMC is used for syndrome calculation and
- * the ELM module for error correction.
- * Custom method evolved to support ELM error correction and multi-sector
- * reading. On reading, the page data area is read along with the OOB data
- * with the ECC engine enabled; the ECC vector is updated after the OOB
- * data has been read. For error-free pages the ECC vector is reported as zero.
- */
-static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	uint8_t *ecc_calc = chip->buffers->ecccalc;
-	uint8_t *ecc_code = chip->buffers->ecccode;
-	int stat, ret;
-	unsigned int max_bitflips = 0;
-
-	/* Enable GPMC ecc engine */
-	chip->ecc.hwctl(mtd, NAND_ECC_READ);
-
-	/* Read data */
-	chip->read_buf(mtd, buf, mtd->writesize);
-
-	/* Read oob bytes */
-	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
-		      mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
-	chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
-		       chip->ecc.total);
-
-	/* Calculate ecc bytes */
-	chip->ecc.calculate(mtd, buf, ecc_calc);
-
-	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
-					 chip->ecc.total);
-	if (ret)
-		return ret;
-
-	stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
-
-	if (stat < 0) {
-		mtd->ecc_stats.failed++;
-	} else {
-		mtd->ecc_stats.corrected += stat;
-		max_bitflips = max_t(unsigned int, max_bitflips, stat);
-	}
-
-	return max_bitflips;
-}
-
-/**
- * is_elm_present - checks for presence of ELM module by scanning DT nodes
- * @info: NAND device structure containing platform data
- * @elm_node: ELM device tree node
- */
-static bool is_elm_present(struct omap_nand_info *info,
-			   struct device_node *elm_node)
-{
-	struct platform_device *pdev;
-
-	/* check whether elm-id is passed via DT */
-	if (!elm_node) {
-		dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
-		return false;
-	}
-	pdev = of_find_device_by_node(elm_node);
-	/* check whether ELM device is registered */
-	if (!pdev) {
-		dev_err(&info->pdev->dev, "ELM device not found\n");
-		return false;
-	}
-	/* ELM module available, now configure it */
-	info->elm_dev = &pdev->dev;
-	return true;
-}
-
-static bool omap2_nand_ecc_check(struct omap_nand_info *info,
-				 struct omap_nand_platform_data	*pdata)
-{
-	bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
-
-	switch (info->ecc_opt) {
-	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-		ecc_needs_omap_bch = false;
-		ecc_needs_bch = true;
-		ecc_needs_elm = false;
-		break;
-	case OMAP_ECC_BCH4_CODE_HW:
-	case OMAP_ECC_BCH8_CODE_HW:
-	case OMAP_ECC_BCH16_CODE_HW:
-		ecc_needs_omap_bch = true;
-		ecc_needs_bch = false;
-		ecc_needs_elm = true;
-		break;
-	default:
-		ecc_needs_omap_bch = false;
-		ecc_needs_bch = false;
-		ecc_needs_elm = false;
-		break;
-	}
-
-	if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_BCH)) {
-		dev_err(&info->pdev->dev,
-			"CONFIG_MTD_NAND_ECC_BCH not enabled\n");
-		return false;
-	}
-	if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
-		dev_err(&info->pdev->dev,
-			"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
-		return false;
-	}
-	if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
-		dev_err(&info->pdev->dev, "ELM not available\n");
-		return false;
-	}
-
-	return true;
-}
-
-static const char * const nand_xfer_types[] = {
-	[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
-	[NAND_OMAP_POLLED] = "polled",
-	[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
-	[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
-};
-
-static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
-{
-	struct device_node *child = dev->of_node;
-	int i;
-	const char *s;
-	u32 cs;
-
-	if (of_property_read_u32(child, "reg", &cs) < 0) {
-		dev_err(dev, "reg not found in DT\n");
-		return -EINVAL;
-	}
-
-	info->gpmc_cs = cs;
-
-	/* detect availability of ELM module. Won't be present pre-OMAP4 */
-	info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
-	if (!info->elm_of_node) {
-		info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
-		if (!info->elm_of_node)
-			dev_dbg(dev, "ti,elm-id not in DT\n");
-	}
-
-	/* select ecc-scheme for NAND */
-	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
-		dev_err(dev, "ti,nand-ecc-opt not found\n");
-		return -EINVAL;
-	}
-
-	if (!strcmp(s, "sw")) {
-		info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
-	} else if (!strcmp(s, "ham1") ||
-		   !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
-		info->ecc_opt =	OMAP_ECC_HAM1_CODE_HW;
-	} else if (!strcmp(s, "bch4")) {
-		if (info->elm_of_node)
-			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
-		else
-			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
-	} else if (!strcmp(s, "bch8")) {
-		if (info->elm_of_node)
-			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
-		else
-			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
-	} else if (!strcmp(s, "bch16")) {
-		info->ecc_opt =	OMAP_ECC_BCH16_CODE_HW;
-	} else {
-		dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
-		return -EINVAL;
-	}
-
-	/* select data transfer mode */
-	if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
-		for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
-			if (!strcasecmp(s, nand_xfer_types[i])) {
-				info->xfer_type = i;
-				return 0;
-			}
-		}
-
-		dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
-		return -EINVAL;
-	}
-
-	return 0;
-}
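-
-/*
- * A minimal device tree fragment accepted by this parser (property names
- * are taken from the driver, the node and values below are assumed):
- *
- *	nand@0,0 {
- *		compatible = "ti,omap2-nand";
- *		reg = <0 0 4>;
- *		ti,nand-ecc-opt = "bch8";
- *		ti,nand-xfer-type = "prefetch-dma";
- *		ti,elm-id = <&elm>;
- *		rb-gpios = <&gpmc 0 GPIO_ACTIVE_HIGH>;
- *	};
- */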
-
-static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
-			      struct mtd_oob_region *oobregion)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	struct nand_chip *chip = &info->nand;
-	int off = BADBLOCK_MARKER_LENGTH;
-
-	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
-	    !(chip->options & NAND_BUSWIDTH_16))
-		off = 1;
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = off;
-	oobregion->length = chip->ecc.total;
-
-	return 0;
-}
-
-static int omap_ooblayout_free(struct mtd_info *mtd, int section,
-			       struct mtd_oob_region *oobregion)
-{
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	struct nand_chip *chip = &info->nand;
-	int off = BADBLOCK_MARKER_LENGTH;
-
-	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
-	    !(chip->options & NAND_BUSWIDTH_16))
-		off = 1;
-
-	if (section)
-		return -ERANGE;
-
-	off += chip->ecc.total;
-	if (off >= mtd->oobsize)
-		return -ERANGE;
-
-	oobregion->offset = off;
-	oobregion->length = mtd->oobsize - off;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
-	.ecc = omap_ooblayout_ecc,
-	.free = omap_ooblayout_free,
-};
-
-static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int off = BADBLOCK_MARKER_LENGTH;
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	/*
-	 * When SW correction is employed, one OMAP specific marker byte is
-	 * reserved after each ECC step.
-	 */
-	oobregion->offset = off + (section * (chip->ecc.bytes + 1));
-	oobregion->length = chip->ecc.bytes;
-
-	return 0;
-}
-
-static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int off = BADBLOCK_MARKER_LENGTH;
-
-	if (section)
-		return -ERANGE;
-
-	/*
-	 * When SW correction is employed, one OMAP specific marker byte is
-	 * reserved after each ECC step.
-	 */
-	off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
-	if (off >= mtd->oobsize)
-		return -ERANGE;
-
-	oobregion->offset = off;
-	oobregion->length = mtd->oobsize - off;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
-	.ecc = omap_sw_ooblayout_ecc,
-	.free = omap_sw_ooblayout_free,
-};
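-
-/*
- * As a worked example (values assumed): with
- * OMAP_ECC_BCH8_CODE_HW_DETECTION_SW on a 2KiB page with 64 OOB bytes
- * (ecc.bytes = 13, ecc.steps = 4), the ECC regions sit at offsets 2, 16,
- * 30 and 44 (13 bytes each, one marker byte following each step) and the
- * free region covers offsets 58-63.
- */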
-
-static int omap_nand_probe(struct platform_device *pdev)
-{
-	struct omap_nand_info		*info;
-	struct omap_nand_platform_data	*pdata = NULL;
-	struct mtd_info			*mtd;
-	struct nand_chip		*nand_chip;
-	int				err;
-	dma_cap_mask_t			mask;
-	struct resource			*res;
-	struct device			*dev = &pdev->dev;
-	int				min_oobbytes = BADBLOCK_MARKER_LENGTH;
-	int				oobbytes_per_step;
-
-	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
-				GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->pdev = pdev;
-
-	if (dev->of_node) {
-		if (omap_get_dt_info(dev, info))
-			return -EINVAL;
-	} else {
-		pdata = dev_get_platdata(&pdev->dev);
-		if (!pdata) {
-			dev_err(&pdev->dev, "platform data missing\n");
-			return -EINVAL;
-		}
-
-		info->gpmc_cs = pdata->cs;
-		info->reg = pdata->reg;
-		info->ecc_opt = pdata->ecc_opt;
-		if (pdata->dev_ready)
-			dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
-
-		info->xfer_type = pdata->xfer_type;
-		info->devsize = pdata->devsize;
-		info->elm_of_node = pdata->elm_of_node;
-		info->flash_bbt = pdata->flash_bbt;
-	}
-
-	platform_set_drvdata(pdev, info);
-	info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
-	if (!info->ops) {
-		dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
-		return -ENODEV;
-	}
-
-	nand_chip		= &info->nand;
-	mtd			= nand_to_mtd(nand_chip);
-	mtd->dev.parent		= &pdev->dev;
-	nand_chip->ecc.priv	= NULL;
-	nand_set_flash_node(nand_chip, dev->of_node);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(nand_chip->IO_ADDR_R))
-		return PTR_ERR(nand_chip->IO_ADDR_R);
-
-	info->phys_base = res->start;
-
-	nand_chip->controller = &omap_gpmc_controller;
-
-	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
-	nand_chip->cmd_ctrl  = omap_hwcontrol;
-
-	info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
-						    GPIOD_IN);
-	if (IS_ERR(info->ready_gpiod)) {
-		dev_err(dev, "failed to get ready gpio\n");
-		return PTR_ERR(info->ready_gpiod);
-	}
-
-	/*
-	 * If the RDY/BSY line is connected to the OMAP, use the omap ready
-	 * function and the generic nand_wait function, which reads the status
-	 * register after monitoring the RDY/BSY line. Otherwise use a standard
-	 * chip delay, which is slightly more than tR (AC timing) of the NAND
-	 * device, and read the status register until you get a failure or success.
-	 */
-	if (info->ready_gpiod) {
-		nand_chip->dev_ready = omap_dev_ready;
-		nand_chip->chip_delay = 0;
-	} else {
-		nand_chip->waitfunc = omap_wait;
-		nand_chip->chip_delay = 50;
-	}
-
-	if (info->flash_bbt)
-		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
-
-	/* scan NAND device connected to chip controller */
-	nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		dev_err(&info->pdev->dev,
-			"scan failed, may be bus-width mismatch\n");
-		err = -ENXIO;
-		goto return_error;
-	}
-
-	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
-		nand_chip->bbt_options |= NAND_BBT_NO_OOB;
-	else
-		nand_chip->options |= NAND_SKIP_BBTSCAN;
-
-	/* re-populate low-level callbacks based on xfer modes */
-	switch (info->xfer_type) {
-	case NAND_OMAP_PREFETCH_POLLED:
-		nand_chip->read_buf   = omap_read_buf_pref;
-		nand_chip->write_buf  = omap_write_buf_pref;
-		break;
-
-	case NAND_OMAP_POLLED:
-		/* Use nand_base defaults for {read,write}_buf */
-		break;
-
-	case NAND_OMAP_PREFETCH_DMA:
-		dma_cap_zero(mask);
-		dma_cap_set(DMA_SLAVE, mask);
-		info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
-
-		if (IS_ERR(info->dma)) {
-			dev_err(&pdev->dev, "DMA engine request failed\n");
-			err = PTR_ERR(info->dma);
-			goto return_error;
-		} else {
-			struct dma_slave_config cfg;
-
-			memset(&cfg, 0, sizeof(cfg));
-			cfg.src_addr = info->phys_base;
-			cfg.dst_addr = info->phys_base;
-			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-			cfg.src_maxburst = 16;
-			cfg.dst_maxburst = 16;
-			err = dmaengine_slave_config(info->dma, &cfg);
-			if (err) {
-				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
-					err);
-				goto return_error;
-			}
-			nand_chip->read_buf   = omap_read_buf_dma_pref;
-			nand_chip->write_buf  = omap_write_buf_dma_pref;
-		}
-		break;
-
-	case NAND_OMAP_PREFETCH_IRQ:
-		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
-		if (info->gpmc_irq_fifo <= 0) {
-			dev_err(&pdev->dev, "error getting fifo irq\n");
-			err = -ENODEV;
-			goto return_error;
-		}
-		err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
-					omap_nand_irq, IRQF_SHARED,
-					"gpmc-nand-fifo", info);
-		if (err) {
-			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
-						info->gpmc_irq_fifo, err);
-			info->gpmc_irq_fifo = 0;
-			goto return_error;
-		}
-
-		info->gpmc_irq_count = platform_get_irq(pdev, 1);
-		if (info->gpmc_irq_count <= 0) {
-			dev_err(&pdev->dev, "error getting count irq\n");
-			err = -ENODEV;
-			goto return_error;
-		}
-		err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
-					omap_nand_irq, IRQF_SHARED,
-					"gpmc-nand-count", info);
-		if (err) {
-			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
-						info->gpmc_irq_count, err);
-			info->gpmc_irq_count = 0;
-			goto return_error;
-		}
-
-		nand_chip->read_buf  = omap_read_buf_irq_pref;
-		nand_chip->write_buf = omap_write_buf_irq_pref;
-
-		break;
-
-	default:
-		dev_err(&pdev->dev,
-			"xfer_type(%d) not supported!\n", info->xfer_type);
-		err = -EINVAL;
-		goto return_error;
-	}
-
-	if (!omap2_nand_ecc_check(info, pdata)) {
-		err = -EINVAL;
-		goto return_error;
-	}
-
-	/*
-	 * Bail out earlier to let NAND_ECC_SOFT code create its own
-	 * ooblayout instead of using ours.
-	 */
-	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
-		nand_chip->ecc.mode = NAND_ECC_SOFT;
-		nand_chip->ecc.algo = NAND_ECC_HAMMING;
-		goto scan_tail;
-	}
-
-	/* populate MTD interface based on ECC scheme */
-	switch (info->ecc_opt) {
-	case OMAP_ECC_HAM1_CODE_HW:
-		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
-		nand_chip->ecc.mode             = NAND_ECC_HW;
-		nand_chip->ecc.bytes            = 3;
-		nand_chip->ecc.size             = 512;
-		nand_chip->ecc.strength         = 1;
-		nand_chip->ecc.calculate        = omap_calculate_ecc;
-		nand_chip->ecc.hwctl            = omap_enable_hwecc;
-		nand_chip->ecc.correct          = omap_correct_data;
-		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
-		oobbytes_per_step		= nand_chip->ecc.bytes;
-
-		if (!(nand_chip->options & NAND_BUSWIDTH_16))
-			min_oobbytes		= 1;
-
-		break;
-
-	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
-		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
-		nand_chip->ecc.mode		= NAND_ECC_HW;
-		nand_chip->ecc.size		= 512;
-		nand_chip->ecc.bytes		= 7;
-		nand_chip->ecc.strength		= 4;
-		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
-		nand_chip->ecc.correct		= nand_bch_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
-		/* Reserve one byte for the OMAP marker */
-		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
-		/* software bch library is used for locating errors */
-		nand_chip->ecc.priv		= nand_bch_init(mtd);
-		if (!nand_chip->ecc.priv) {
-			dev_err(&info->pdev->dev, "unable to use BCH library\n");
-			err = -EINVAL;
-			goto return_error;
-		}
-		break;
-
-	case OMAP_ECC_BCH4_CODE_HW:
-		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
-		nand_chip->ecc.mode		= NAND_ECC_HW;
-		nand_chip->ecc.size		= 512;
-		/* 14th bit is kept reserved for ROM-code compatibility */
-		nand_chip->ecc.bytes		= 7 + 1;
-		nand_chip->ecc.strength		= 4;
-		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
-		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		nand_chip->ecc.read_page	= omap_read_page_bch;
-		nand_chip->ecc.write_page	= omap_write_page_bch;
-		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
-		oobbytes_per_step		= nand_chip->ecc.bytes;
-
-		err = elm_config(info->elm_dev, BCH4_ECC,
-				 mtd->writesize / nand_chip->ecc.size,
-				 nand_chip->ecc.size, nand_chip->ecc.bytes);
-		if (err < 0)
-			goto return_error;
-		break;
-
-	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
-		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
-		nand_chip->ecc.mode		= NAND_ECC_HW;
-		nand_chip->ecc.size		= 512;
-		nand_chip->ecc.bytes		= 13;
-		nand_chip->ecc.strength		= 8;
-		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
-		nand_chip->ecc.correct		= nand_bch_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
-		/* Reserve one byte for the OMAP marker */
-		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
-		/* software bch library is used for locating errors */
-		nand_chip->ecc.priv		= nand_bch_init(mtd);
-		if (!nand_chip->ecc.priv) {
-			dev_err(&info->pdev->dev, "unable to use BCH library\n");
-			err = -EINVAL;
-			goto return_error;
-		}
-		break;
-
-	case OMAP_ECC_BCH8_CODE_HW:
-		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
-		nand_chip->ecc.mode		= NAND_ECC_HW;
-		nand_chip->ecc.size		= 512;
-		/* 14th bit is kept reserved for ROM-code compatibility */
-		nand_chip->ecc.bytes		= 13 + 1;
-		nand_chip->ecc.strength		= 8;
-		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
-		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		nand_chip->ecc.read_page	= omap_read_page_bch;
-		nand_chip->ecc.write_page	= omap_write_page_bch;
-		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
-		oobbytes_per_step		= nand_chip->ecc.bytes;
-
-		err = elm_config(info->elm_dev, BCH8_ECC,
-				 mtd->writesize / nand_chip->ecc.size,
-				 nand_chip->ecc.size, nand_chip->ecc.bytes);
-		if (err < 0)
-			goto return_error;
-
-		break;
-
-	case OMAP_ECC_BCH16_CODE_HW:
-		pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
-		nand_chip->ecc.mode		= NAND_ECC_HW;
-		nand_chip->ecc.size		= 512;
-		nand_chip->ecc.bytes		= 26;
-		nand_chip->ecc.strength		= 16;
-		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
-		nand_chip->ecc.correct		= omap_elm_correct_data;
-		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
-		nand_chip->ecc.read_page	= omap_read_page_bch;
-		nand_chip->ecc.write_page	= omap_write_page_bch;
-		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
-		oobbytes_per_step		= nand_chip->ecc.bytes;
-
-		err = elm_config(info->elm_dev, BCH16_ECC,
-				 mtd->writesize / nand_chip->ecc.size,
-				 nand_chip->ecc.size, nand_chip->ecc.bytes);
-		if (err < 0)
-			goto return_error;
-
-		break;
-	default:
-		dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
-		err = -EINVAL;
-		goto return_error;
-	}
-
-	/* check if NAND device's OOB is enough to store ECC signatures */
-	min_oobbytes += (oobbytes_per_step *
-			 (mtd->writesize / nand_chip->ecc.size));
-	if (mtd->oobsize < min_oobbytes) {
-		dev_err(&info->pdev->dev,
-			"not enough OOB bytes required = %d, available=%d\n",
-			min_oobbytes, mtd->oobsize);
-		err = -EINVAL;
-		goto return_error;
-	}
-
-scan_tail:
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
-		goto return_error;
-	}
-
-	if (dev->of_node)
-		mtd_device_register(mtd, NULL, 0);
-	else
-		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
-
-	platform_set_drvdata(pdev, mtd);
-
-	return 0;
-
-return_error:
-	if (info->dma)
-		dma_release_channel(info->dma);
-	if (nand_chip->ecc.priv) {
-		nand_bch_free(nand_chip->ecc.priv);
-		nand_chip->ecc.priv = NULL;
-	}
-	return err;
-}
-
-static int omap_nand_remove(struct platform_device *pdev)
-{
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct omap_nand_info *info = mtd_to_omap(mtd);
-	if (nand_chip->ecc.priv) {
-		nand_bch_free(nand_chip->ecc.priv);
-		nand_chip->ecc.priv = NULL;
-	}
-	if (info->dma)
-		dma_release_channel(info->dma);
-	nand_release(mtd);
-	return 0;
-}
-
-static const struct of_device_id omap_nand_ids[] = {
-	{ .compatible = "ti,omap2-nand", },
-	{},
-};
-
-static struct platform_driver omap_nand_driver = {
-	.probe		= omap_nand_probe,
-	.remove		= omap_nand_remove,
-	.driver		= {
-		.name	= DRIVER_NAME,
-		.of_match_table = of_match_ptr(omap_nand_ids),
-	},
-};
-
-module_platform_driver(omap_nand_driver);
-
-MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
deleted file mode 100644
index a3f32f939cc1..000000000000
--- a/drivers/mtd/nand/omap_elm.c
+++ /dev/null
@@ -1,578 +0,0 @@ 
-/*
- * Error Location Module
- *
- * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define DRIVER_NAME	"omap-elm"
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/sched.h>
-#include <linux/pm_runtime.h>
-#include <linux/platform_data/elm.h>
-
-#define ELM_SYSCONFIG			0x010
-#define ELM_IRQSTATUS			0x018
-#define ELM_IRQENABLE			0x01c
-#define ELM_LOCATION_CONFIG		0x020
-#define ELM_PAGE_CTRL			0x080
-#define ELM_SYNDROME_FRAGMENT_0		0x400
-#define ELM_SYNDROME_FRAGMENT_1		0x404
-#define ELM_SYNDROME_FRAGMENT_2		0x408
-#define ELM_SYNDROME_FRAGMENT_3		0x40c
-#define ELM_SYNDROME_FRAGMENT_4		0x410
-#define ELM_SYNDROME_FRAGMENT_5		0x414
-#define ELM_SYNDROME_FRAGMENT_6		0x418
-#define ELM_LOCATION_STATUS		0x800
-#define ELM_ERROR_LOCATION_0		0x880
-
-/* ELM Interrupt Status Register */
-#define INTR_STATUS_PAGE_VALID		BIT(8)
-
-/* ELM Interrupt Enable Register */
-#define INTR_EN_PAGE_MASK		BIT(8)
-
-/* ELM Location Configuration Register */
-#define ECC_BCH_LEVEL_MASK		0x3
-
-/* ELM syndrome */
-#define ELM_SYNDROME_VALID		BIT(16)
-
-/* ELM_LOCATION_STATUS Register */
-#define ECC_CORRECTABLE_MASK		BIT(8)
-#define ECC_NB_ERRORS_MASK		0x1f
-
-/* ELM_ERROR_LOCATION_0-15 Registers */
-#define ECC_ERROR_LOCATION_MASK		0x1fff
-
-#define ELM_ECC_SIZE			0x7ff
-
-#define SYNDROME_FRAGMENT_REG_SIZE	0x40
-#define ERROR_LOCATION_SIZE		0x100
-
-struct elm_registers {
-	u32 elm_irqenable;
-	u32 elm_sysconfig;
-	u32 elm_location_config;
-	u32 elm_page_ctrl;
-	u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
-	u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
-	u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
-	u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
-	u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
-	u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
-	u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
-};
-
-struct elm_info {
-	struct device *dev;
-	void __iomem *elm_base;
-	struct completion elm_completion;
-	struct list_head list;
-	enum bch_ecc bch_type;
-	struct elm_registers elm_regs;
-	int ecc_steps;
-	int ecc_syndrome_size;
-};
-
-static LIST_HEAD(elm_devices);
-
-static void elm_write_reg(struct elm_info *info, int offset, u32 val)
-{
-	writel(val, info->elm_base + offset);
-}
-
-static u32 elm_read_reg(struct elm_info *info, int offset)
-{
-	return readl(info->elm_base + offset);
-}
-
-/**
- * elm_config - Configure ELM module
- * @dev:	ELM device
- * @bch_type:	Type of BCH ecc
- */
-int elm_config(struct device *dev, enum bch_ecc bch_type,
-	int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
-{
-	u32 reg_val;
-	struct elm_info *info = dev_get_drvdata(dev);
-
-	if (!info) {
-		dev_err(dev, "Unable to configure elm - device not probed?\n");
-		return -EPROBE_DEFER;
-	}
-	/* ELM cannot detect ECC errors for chunks > 1KB */
-	if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
-		dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
-		return -EINVAL;
-	}
-	/* ELM supports processing at most ERROR_VECTOR_MAX (8) error syndromes */
-	if (ecc_steps > ERROR_VECTOR_MAX) {
-		dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
-		return -EINVAL;
-	}
-
-	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
-	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
-	info->bch_type		= bch_type;
-	info->ecc_steps		= ecc_steps;
-	info->ecc_syndrome_size	= ecc_syndrome_size;
-
-	return 0;
-}
-EXPORT_SYMBOL(elm_config);
-
-/**
- * elm_configure_page_mode - Enable/Disable page mode
- * @info:	elm info
- * @index:	index number of syndrome fragment vector
- * @enable:	enable/disable flag for page mode
- *
- * Enable page mode for syndrome fragment index
- */
-static void elm_configure_page_mode(struct elm_info *info, int index,
-		bool enable)
-{
-	u32 reg_val;
-
-	reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
-	if (enable)
-		reg_val |= BIT(index);	/* enable page mode */
-	else
-		reg_val &= ~BIT(index);	/* disable page mode */
-
-	elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
-}
-
-/**
- * elm_load_syndrome - Load ELM syndrome reg
- * @info:	elm info
- * @err_vec:	elm error vectors
- * @ecc:	buffer with calculated ecc
- *
- * Load syndrome fragment registers with calculated ecc in reverse order.
- */
-static void elm_load_syndrome(struct elm_info *info,
-		struct elm_errorvec *err_vec, u8 *ecc)
-{
-	int i, offset;
-	u32 val;
-
-	for (i = 0; i < info->ecc_steps; i++) {
-
-		/* Check error reported */
-		if (err_vec[i].error_reported) {
-			elm_configure_page_mode(info, i, true);
-			offset = ELM_SYNDROME_FRAGMENT_0 +
-				SYNDROME_FRAGMENT_REG_SIZE * i;
-			switch (info->bch_type) {
-			case BCH8_ECC:
-				/* syndrome fragment 0 = ecc[9-12B] */
-				val = cpu_to_be32(*(u32 *) &ecc[9]);
-				elm_write_reg(info, offset, val);
-
-				/* syndrome fragment 1 = ecc[5-8B] */
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[5]);
-				elm_write_reg(info, offset, val);
-
-				/* syndrome fragment 2 = ecc[1-4B] */
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[1]);
-				elm_write_reg(info, offset, val);
-
-				/* syndrome fragment 3 = ecc[0B] */
-				offset += 4;
-				val = ecc[0];
-				elm_write_reg(info, offset, val);
-				break;
-			case BCH4_ECC:
-				/* syndrome fragment 0 = ecc[20-52b] bits */
-				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
-					((ecc[2] & 0xf) << 28);
-				elm_write_reg(info, offset, val);
-
-				/* syndrome fragment 1 = ecc[0-20b] bits */
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
-				elm_write_reg(info, offset, val);
-				break;
-			case BCH16_ECC:
-				val = cpu_to_be32(*(u32 *) &ecc[22]);
-				elm_write_reg(info, offset, val);
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[18]);
-				elm_write_reg(info, offset, val);
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[14]);
-				elm_write_reg(info, offset, val);
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[10]);
-				elm_write_reg(info, offset, val);
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[6]);
-				elm_write_reg(info, offset, val);
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[2]);
-				elm_write_reg(info, offset, val);
-				offset += 4;
-				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
-				elm_write_reg(info, offset, val);
-				break;
-			default:
-				pr_err("invalid config bch_type\n");
-			}
-		}
-
-		/* Update ecc pointer with ecc byte size */
-		ecc += info->ecc_syndrome_size;
-	}
-}
-
-/**
- * elm_start_processing - start elm syndrome processing
- * @info:	elm info
- * @err_vec:	elm error vectors
- *
- * Set syndrome valid bit for syndrome fragment registers for which
- * elm syndrome fragment registers are loaded. This enables elm module
- * to start processing syndrome vectors.
- */
-static void elm_start_processing(struct elm_info *info,
-		struct elm_errorvec *err_vec)
-{
-	int i, offset;
-	u32 reg_val;
-
-	/*
-	 * Set the syndrome vector valid bit so that the ELM module
-	 * processes the vectors for which an error was reported.
-	 */
-	for (i = 0; i < info->ecc_steps; i++) {
-		if (err_vec[i].error_reported) {
-			offset = ELM_SYNDROME_FRAGMENT_6 +
-				SYNDROME_FRAGMENT_REG_SIZE * i;
-			reg_val = elm_read_reg(info, offset);
-			reg_val |= ELM_SYNDROME_VALID;
-			elm_write_reg(info, offset, reg_val);
-		}
-	}
-}
-
-/**
- * elm_error_correction - locate correctable error position
- * @info:	elm info
- * @err_vec:	elm error vectors
- *
- * Once the ELM module has finished processing, the error location status
- * register indicates whether the errors are correctable or uncorrectable.
- * For correctable errors, the number of errors is read from the ELM
- * location status register and their positions are read from the ELM
- * error location registers.
- */
-static void elm_error_correction(struct elm_info *info,
-		struct elm_errorvec *err_vec)
-{
-	int i, j, errors = 0;
-	int offset;
-	u32 reg_val;
-
-	for (i = 0; i < info->ecc_steps; i++) {
-
-		/* Check error reported */
-		if (err_vec[i].error_reported) {
-			offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
-			reg_val = elm_read_reg(info, offset);
-
-			/* Check correctable error or not */
-			if (reg_val & ECC_CORRECTABLE_MASK) {
-				offset = ELM_ERROR_LOCATION_0 +
-					ERROR_LOCATION_SIZE * i;
-
-				/* Read count of correctable errors */
-				err_vec[i].error_count = reg_val &
-					ECC_NB_ERRORS_MASK;
-
-				/* Update the error locations in error vector */
-				for (j = 0; j < err_vec[i].error_count; j++) {
-
-					reg_val = elm_read_reg(info, offset);
-					err_vec[i].error_loc[j] = reg_val &
-						ECC_ERROR_LOCATION_MASK;
-
-					/* Update error location register */
-					offset += 4;
-				}
-
-				errors += err_vec[i].error_count;
-			} else {
-				err_vec[i].error_uncorrectable = true;
-			}
-
-			/* Clearing interrupts for processed error vectors */
-			elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
-
-			/* Disable page mode */
-			elm_configure_page_mode(info, i, false);
-		}
-	}
-}
-
-/**
- * elm_decode_bch_error_page - Locate error position
- * @dev:	device pointer
- * @ecc_calc:	calculated ECC bytes from GPMC
- * @err_vec:	elm error vectors
- *
- * Called with one or more vectors for which an error was reported;
- * such vectors are flagged in err_vec[].error_reported.
- */
-void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
-		struct elm_errorvec *err_vec)
-{
-	struct elm_info *info = dev_get_drvdata(dev);
-	u32 reg_val;
-
-	/* Enable page mode interrupt */
-	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
-	elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
-	elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
-
-	/* Load valid ecc byte to syndrome fragment register */
-	elm_load_syndrome(info, err_vec, ecc_calc);
-
-	/* Enable syndrome processing for which syndrome fragment is updated */
-	elm_start_processing(info, err_vec);
-
-	/* Wait for ELM module to finish locating error correction */
-	wait_for_completion(&info->elm_completion);
-
-	/* Disable page mode interrupt */
-	reg_val = elm_read_reg(info, ELM_IRQENABLE);
-	elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
-	elm_error_correction(info, err_vec);
-}
-EXPORT_SYMBOL(elm_decode_bch_error_page);
-
-static irqreturn_t elm_isr(int this_irq, void *dev_id)
-{
-	u32 reg_val;
-	struct elm_info *info = dev_id;
-
-	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
-
-	/* All error vectors processed */
-	if (reg_val & INTR_STATUS_PAGE_VALID) {
-		elm_write_reg(info, ELM_IRQSTATUS,
-				reg_val & INTR_STATUS_PAGE_VALID);
-		complete(&info->elm_completion);
-		return IRQ_HANDLED;
-	}
-
-	return IRQ_NONE;
-}
-
-static int elm_probe(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct resource *res, *irq;
-	struct elm_info *info;
-
-	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->dev = &pdev->dev;
-
-	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-	if (!irq) {
-		dev_err(&pdev->dev, "no irq resource defined\n");
-		return -ENODEV;
-	}
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	info->elm_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(info->elm_base))
-		return PTR_ERR(info->elm_base);
-
-	ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
-			pdev->name, info);
-	if (ret) {
-		dev_err(&pdev->dev, "failure requesting %pr\n", irq);
-		return ret;
-	}
-
-	pm_runtime_enable(&pdev->dev);
-	if (pm_runtime_get_sync(&pdev->dev) < 0) {
-		ret = -EINVAL;
-		pm_runtime_disable(&pdev->dev);
-		dev_err(&pdev->dev, "can't enable clock\n");
-		return ret;
-	}
-
-	init_completion(&info->elm_completion);
-	INIT_LIST_HEAD(&info->list);
-	list_add(&info->list, &elm_devices);
-	platform_set_drvdata(pdev, info);
-	return ret;
-}
-
-static int elm_remove(struct platform_device *pdev)
-{
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-/**
- * elm_context_save
- * saves the ELM configuration to preserve it across a hardware power-down
- */
-static int elm_context_save(struct elm_info *info)
-{
-	struct elm_registers *regs = &info->elm_regs;
-	enum bch_ecc bch_type = info->bch_type;
-	u32 offset = 0, i;
-
-	regs->elm_irqenable       = elm_read_reg(info, ELM_IRQENABLE);
-	regs->elm_sysconfig       = elm_read_reg(info, ELM_SYSCONFIG);
-	regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
-	regs->elm_page_ctrl       = elm_read_reg(info, ELM_PAGE_CTRL);
-	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
-		offset = i * SYNDROME_FRAGMENT_REG_SIZE;
-		switch (bch_type) {
-		case BCH16_ECC:
-			regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_6 + offset);
-			regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_5 + offset);
-			regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_4 + offset);
-		case BCH8_ECC:
-			regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_3 + offset);
-			regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_2 + offset);
-		case BCH4_ECC:
-			regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_1 + offset);
-			regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_0 + offset);
-			break;
-		default:
-			return -EINVAL;
-		}
-		/* ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
-		 * to be saved for all BCH schemes */
-		regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
-					ELM_SYNDROME_FRAGMENT_6 + offset);
-	}
-	return 0;
-}
-
-/**
- * elm_context_restore
- * writes configurations saved during power-down back into ELM registers
- */
-static int elm_context_restore(struct elm_info *info)
-{
-	struct elm_registers *regs = &info->elm_regs;
-	enum bch_ecc bch_type = info->bch_type;
-	u32 offset = 0, i;
-
-	elm_write_reg(info, ELM_IRQENABLE,	 regs->elm_irqenable);
-	elm_write_reg(info, ELM_SYSCONFIG,	 regs->elm_sysconfig);
-	elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
-	elm_write_reg(info, ELM_PAGE_CTRL,	 regs->elm_page_ctrl);
-	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
-		offset = i * SYNDROME_FRAGMENT_REG_SIZE;
-		switch (bch_type) {
-		case BCH16_ECC:
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
-					regs->elm_syndrome_fragment_6[i]);
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
-					regs->elm_syndrome_fragment_5[i]);
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
-					regs->elm_syndrome_fragment_4[i]);
-		case BCH8_ECC:
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
-					regs->elm_syndrome_fragment_3[i]);
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
-					regs->elm_syndrome_fragment_2[i]);
-		case BCH4_ECC:
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
-					regs->elm_syndrome_fragment_1[i]);
-			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
-					regs->elm_syndrome_fragment_0[i]);
-			break;
-		default:
-			return -EINVAL;
-		}
-		/* ELM_SYNDROME_VALID bit must be set last to trigger the FSM */
-		elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
-					regs->elm_syndrome_fragment_6[i] &
-							 ELM_SYNDROME_VALID);
-	}
-	return 0;
-}
-
-static int elm_suspend(struct device *dev)
-{
-	struct elm_info *info = dev_get_drvdata(dev);
-	elm_context_save(info);
-	pm_runtime_put_sync(dev);
-	return 0;
-}
-
-static int elm_resume(struct device *dev)
-{
-	struct elm_info *info = dev_get_drvdata(dev);
-	pm_runtime_get_sync(dev);
-	elm_context_restore(info);
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
-
-#ifdef CONFIG_OF
-static const struct of_device_id elm_of_match[] = {
-	{ .compatible = "ti,am3352-elm" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, elm_of_match);
-#endif
-
-static struct platform_driver elm_driver = {
-	.driver	= {
-		.name	= DRIVER_NAME,
-		.of_match_table = of_match_ptr(elm_of_match),
-		.pm	= &elm_pm_ops,
-	},
-	.probe	= elm_probe,
-	.remove	= elm_remove,
-};
-
-module_platform_driver(elm_driver);
-
-MODULE_DESCRIPTION("ELM driver for BCH error correction");
-MODULE_AUTHOR("Texas Instruments");
-MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
deleted file mode 100644
index e68c4231e8b7..000000000000
--- a/drivers/mtd/nand/orion_nand.c
+++ /dev/null
@@ -1,218 +0,0 @@ 
-/*
- * drivers/mtd/nand/orion_nand.c
- *
- * NAND support for Marvell Orion SoC platforms
- *
- * Tzachi Perelstein <tzachi@marvell.com>
- *
- * This file is licensed under  the terms of the GNU General Public
- * License version 2. This program is licensed "as is" without any
- * warranty of any kind, whether express or implied.
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/io.h>
-#include <asm/sizes.h>
-#include <linux/platform_data/mtd-orion_nand.h>
-
-static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	struct nand_chip *nc = mtd_to_nand(mtd);
-	struct orion_nand_data *board = nand_get_controller_data(nc);
-	u32 offs;
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		offs = (1 << board->cle);
-	else if (ctrl & NAND_ALE)
-		offs = (1 << board->ale);
-	else
-		return;
-
-	if (nc->options & NAND_BUSWIDTH_16)
-		offs <<= 1;
-
-	writeb(cmd, nc->IO_ADDR_W + offs);
-}
-
-static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	void __iomem *io_base = chip->IO_ADDR_R;
-	uint64_t *buf64;
-	int i = 0;
-
-	while (len && (unsigned long)buf & 7) {
-		*buf++ = readb(io_base);
-		len--;
-	}
-	buf64 = (uint64_t *)buf;
-	while (i < len/8) {
-		/*
-		 * Since GCC has no proper constraint (PR 43518)
-		 * force x variable to r2/r3 registers as ldrd instruction
-		 * requires first register to be even.
-		 */
-		register uint64_t x asm ("r2");
-
-		asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
-		buf64[i++] = x;
-	}
-	i *= 8;
-	while (i < len)
-		buf[i++] = readb(io_base);
-}
-
-static int __init orion_nand_probe(struct platform_device *pdev)
-{
-	struct mtd_info *mtd;
-	struct nand_chip *nc;
-	struct orion_nand_data *board;
-	struct resource *res;
-	struct clk *clk;
-	void __iomem *io_base;
-	int ret = 0;
-	u32 val = 0;
-
-	nc = devm_kzalloc(&pdev->dev,
-			sizeof(struct nand_chip),
-			GFP_KERNEL);
-	if (!nc)
-		return -ENOMEM;
-	mtd = nand_to_mtd(nc);
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	io_base = devm_ioremap_resource(&pdev->dev, res);
-
-	if (IS_ERR(io_base))
-		return PTR_ERR(io_base);
-
-	if (pdev->dev.of_node) {
-		board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
-					GFP_KERNEL);
-		if (!board)
-			return -ENOMEM;
-		if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
-			board->cle = (u8)val;
-		else
-			board->cle = 0;
-		if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
-			board->ale = (u8)val;
-		else
-			board->ale = 1;
-		if (!of_property_read_u32(pdev->dev.of_node,
-						"bank-width", &val))
-			board->width = (u8)val * 8;
-		else
-			board->width = 8;
-		if (!of_property_read_u32(pdev->dev.of_node,
-						"chip-delay", &val))
-			board->chip_delay = (u8)val;
-	} else {
-		board = dev_get_platdata(&pdev->dev);
-	}
-
-	mtd->dev.parent = &pdev->dev;
-
-	nand_set_controller_data(nc, board);
-	nand_set_flash_node(nc, pdev->dev.of_node);
-	nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
-	nc->cmd_ctrl = orion_nand_cmd_ctrl;
-	nc->read_buf = orion_nand_read_buf;
-	nc->ecc.mode = NAND_ECC_SOFT;
-	nc->ecc.algo = NAND_ECC_HAMMING;
-
-	if (board->chip_delay)
-		nc->chip_delay = board->chip_delay;
-
-	WARN(board->width > 16,
-		"%d bit bus width out of range",
-		board->width);
-
-	if (board->width == 16)
-		nc->options |= NAND_BUSWIDTH_16;
-
-	if (board->dev_ready)
-		nc->dev_ready = board->dev_ready;
-
-	platform_set_drvdata(pdev, mtd);
-
-	/* Not all platforms can gate the clock, so it is not
-	   an error if the clock does not exist. */
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_prepare_enable(clk);
-		clk_put(clk);
-	}
-
-	if (nand_scan(mtd, 1)) {
-		ret = -ENXIO;
-		goto no_dev;
-	}
-
-	mtd->name = "orion_nand";
-	ret = mtd_device_register(mtd, board->parts, board->nr_parts);
-	if (ret) {
-		nand_release(mtd);
-		goto no_dev;
-	}
-
-	return 0;
-
-no_dev:
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
-
-	return ret;
-}
-
-static int orion_nand_remove(struct platform_device *pdev)
-{
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-	struct clk *clk;
-
-	nand_release(mtd);
-
-	clk = clk_get(&pdev->dev, NULL);
-	if (!IS_ERR(clk)) {
-		clk_disable_unprepare(clk);
-		clk_put(clk);
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_OF
-static const struct of_device_id orion_nand_of_match_table[] = {
-	{ .compatible = "marvell,orion-nand", },
-	{},
-};
-MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
-#endif
-
-static struct platform_driver orion_nand_driver = {
-	.remove		= orion_nand_remove,
-	.driver		= {
-		.name	= "orion_nand",
-		.of_match_table = of_match_ptr(orion_nand_of_match_table),
-	},
-};
-
-module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tzachi Perelstein");
-MODULE_DESCRIPTION("NAND glue for Orion platforms");
-MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
deleted file mode 100644
index 372b9736ac02..000000000000
--- a/drivers/mtd/nand/pasemi_nand.c
+++ /dev/null
@@ -1,233 +0,0 @@ 
-/*
- * Copyright (C) 2006-2007 PA Semi, Inc
- *
- * Author: Egor Martovetsky <egor@pasemi.com>
- * Maintained by: Olof Johansson <olof@lixom.net>
- *
- * Driver for the PWRficient onchip NAND flash interface
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-
-#undef DEBUG
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
-#include <linux/pci.h>
-
-#include <asm/io.h>
-
-#define LBICTRL_LPCCTL_NR		0x00004000
-#define CLE_PIN_CTL			15
-#define ALE_PIN_CTL			14
-
-static unsigned int lpcctl;
-static struct mtd_info *pasemi_nand_mtd;
-static const char driver_name[] = "pasemi-nand";
-
-static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	while (len > 0x800) {
-		memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
-		buf += 0x800;
-		len -= 0x800;
-	}
-	memcpy_fromio(buf, chip->IO_ADDR_R, len);
-}
-
-static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	while (len > 0x800) {
-		memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
-		buf += 0x800;
-		len -= 0x800;
-	}
-	memcpy_toio(chip->IO_ADDR_R, buf, len);
-}
-
-static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
-			     unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
-	else
-		out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
-
-	/* Push out posted writes */
-	eieio();
-	inl(lpcctl);
-}
-
-int pasemi_device_ready(struct mtd_info *mtd)
-{
-	return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
-}
-
-static int pasemi_nand_probe(struct platform_device *ofdev)
-{
-	struct device *dev = &ofdev->dev;
-	struct pci_dev *pdev;
-	struct device_node *np = dev->of_node;
-	struct resource res;
-	struct nand_chip *chip;
-	int err = 0;
-
-	err = of_address_to_resource(np, 0, &res);
-
-	if (err)
-		return -EINVAL;
-
-	/* We only support one device at the moment */
-	if (pasemi_nand_mtd)
-		return -ENODEV;
-
-	dev_dbg(dev, "pasemi_nand at %pR\n", &res);
-
-	/* Allocate memory for MTD device structure and private data */
-	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-	if (!chip) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	pasemi_nand_mtd = nand_to_mtd(chip);
-
-	/* Link the private data with the MTD structure */
-	pasemi_nand_mtd->dev.parent = dev;
-
-	chip->IO_ADDR_R = of_iomap(np, 0);
-	chip->IO_ADDR_W = chip->IO_ADDR_R;
-
-	if (!chip->IO_ADDR_R) {
-		err = -EIO;
-		goto out_mtd;
-	}
-
-	pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
-	if (!pdev) {
-		err = -ENODEV;
-		goto out_ior;
-	}
-
-	lpcctl = pci_resource_start(pdev, 0);
-	pci_dev_put(pdev);
-
-	if (!request_region(lpcctl, 4, driver_name)) {
-		err = -EBUSY;
-		goto out_ior;
-	}
-
-	chip->cmd_ctrl = pasemi_hwcontrol;
-	chip->dev_ready = pasemi_device_ready;
-	chip->read_buf = pasemi_read_buf;
-	chip->write_buf = pasemi_write_buf;
-	chip->chip_delay = 0;
-	chip->ecc.mode = NAND_ECC_SOFT;
-	chip->ecc.algo = NAND_ECC_HAMMING;
-
-	/* Enable the following for a flash based bad block table */
-	chip->bbt_options = NAND_BBT_USE_FLASH;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(pasemi_nand_mtd, 1)) {
-		err = -ENXIO;
-		goto out_lpc;
-	}
-
-	if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
-		dev_err(dev, "Unable to register MTD device\n");
-		err = -ENODEV;
-		goto out_lpc;
-	}
-
-	dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
-		 lpcctl);
-
-	return 0;
-
- out_lpc:
-	release_region(lpcctl, 4);
- out_ior:
-	iounmap(chip->IO_ADDR_R);
- out_mtd:
-	kfree(chip);
- out:
-	return err;
-}
-
-static int pasemi_nand_remove(struct platform_device *ofdev)
-{
-	struct nand_chip *chip;
-
-	if (!pasemi_nand_mtd)
-		return 0;
-
-	chip = mtd_to_nand(pasemi_nand_mtd);
-
-	/* Release resources, unregister device */
-	nand_release(pasemi_nand_mtd);
-
-	release_region(lpcctl, 4);
-
-	iounmap(chip->IO_ADDR_R);
-
-	/* Free the MTD device structure */
-	kfree(chip);
-
-	pasemi_nand_mtd = NULL;
-
-	return 0;
-}
-
-static const struct of_device_id pasemi_nand_match[] =
-{
-	{
-		.compatible   = "pasemi,localbus-nand",
-	},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, pasemi_nand_match);
-
-static struct platform_driver pasemi_nand_driver =
-{
-	.driver = {
-		.name = driver_name,
-		.of_match_table = pasemi_nand_match,
-	},
-	.probe		= pasemi_nand_probe,
-	.remove		= pasemi_nand_remove,
-};
-
-module_platform_driver(pasemi_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
-MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
deleted file mode 100644
index d5c3c894c60d..000000000000
--- a/drivers/mtd/nand/plat_nand.c
+++ /dev/null
@@ -1,145 +0,0 @@ 
-/*
- * Generic NAND driver
- *
- * Author: Vitaly Wool <vitalywool@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/err.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-
-struct plat_nand_data {
-	struct nand_chip	chip;
-	void __iomem		*io_base;
-};
-
-/*
- * Probe for the NAND device.
- */
-static int plat_nand_probe(struct platform_device *pdev)
-{
-	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
-	struct plat_nand_data *data;
-	struct mtd_info *mtd;
-	struct resource *res;
-	const char **part_types;
-	int err = 0;
-
-	if (!pdata) {
-		dev_err(&pdev->dev, "platform_nand_data is missing\n");
-		return -EINVAL;
-	}
-
-	if (pdata->chip.nr_chips < 1) {
-		dev_err(&pdev->dev, "invalid number of chips specified\n");
-		return -EINVAL;
-	}
-
-	/* Allocate memory for the device structure (and zero it) */
-	data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
-			    GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	data->io_base = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(data->io_base))
-		return PTR_ERR(data->io_base);
-
-	nand_set_flash_node(&data->chip, pdev->dev.of_node);
-	mtd = nand_to_mtd(&data->chip);
-	mtd->dev.parent = &pdev->dev;
-
-	data->chip.IO_ADDR_R = data->io_base;
-	data->chip.IO_ADDR_W = data->io_base;
-	data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
-	data->chip.dev_ready = pdata->ctrl.dev_ready;
-	data->chip.select_chip = pdata->ctrl.select_chip;
-	data->chip.write_buf = pdata->ctrl.write_buf;
-	data->chip.read_buf = pdata->ctrl.read_buf;
-	data->chip.read_byte = pdata->ctrl.read_byte;
-	data->chip.chip_delay = pdata->chip.chip_delay;
-	data->chip.options |= pdata->chip.options;
-	data->chip.bbt_options |= pdata->chip.bbt_options;
-
-	data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
-	data->chip.ecc.mode = NAND_ECC_SOFT;
-	data->chip.ecc.algo = NAND_ECC_HAMMING;
-
-	platform_set_drvdata(pdev, data);
-
-	/* Handle any platform specific setup */
-	if (pdata->ctrl.probe) {
-		err = pdata->ctrl.probe(pdev);
-		if (err)
-			goto out;
-	}
-
-	/* Scan to find existence of the device */
-	if (nand_scan(mtd, pdata->chip.nr_chips)) {
-		err = -ENXIO;
-		goto out;
-	}
-
-	part_types = pdata->chip.part_probe_types;
-
-	err = mtd_device_parse_register(mtd, part_types, NULL,
-					pdata->chip.partitions,
-					pdata->chip.nr_partitions);
-
-	if (!err)
-		return err;
-
-	nand_release(mtd);
-out:
-	if (pdata->ctrl.remove)
-		pdata->ctrl.remove(pdev);
-	return err;
-}
-
-/*
- * Remove a NAND device.
- */
-static int plat_nand_remove(struct platform_device *pdev)
-{
-	struct plat_nand_data *data = platform_get_drvdata(pdev);
-	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
-
-	nand_release(nand_to_mtd(&data->chip));
-	if (pdata->ctrl.remove)
-		pdata->ctrl.remove(pdev);
-
-	return 0;
-}
-
-static const struct of_device_id plat_nand_match[] = {
-	{ .compatible = "gen_nand" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, plat_nand_match);
-
-static struct platform_driver plat_nand_driver = {
-	.probe	= plat_nand_probe,
-	.remove	= plat_nand_remove,
-	.driver	= {
-		.name		= "gen_nand",
-		.of_match_table = plat_nand_match,
-	},
-};
-
-module_platform_driver(plat_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Vitaly Wool");
-MODULE_DESCRIPTION("Simple generic NAND driver");
-MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
deleted file mode 100644
index 4feec4ea3082..000000000000
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ /dev/null
@@ -1,2067 +0,0 @@ 
-/*
- * drivers/mtd/nand/pxa3xx_nand.c
- *
- * Copyright © 2005 Intel Corporation
- * Copyright © 2006 Marvell International Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/dma/pxa-dma.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <linux/iopoll.h>
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_data/mtd-nand-pxa3xx.h>
-
-#define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
-#define NAND_STOP_DELAY		msecs_to_jiffies(40)
-#define PAGE_CHUNK_SIZE		(2048)
-
-/*
- * Define a buffer size for the initial command that detects the flash device:
- * STATUS, READID and PARAM.
- * ONFI param page is 256 bytes, and there are three redundant copies
- * to be read. JEDEC param page is 512 bytes, and there are also three
- * redundant copies to be read.
- * Hence this buffer should be at least 512 x 3. Let's pick 2048.
- */
-#define INIT_BUFFER_SIZE	2048
-
-/* registers and bit definitions */
-#define NDCR		(0x00) /* Control register */
-#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
-#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
-#define NDSR		(0x14) /* Status Register */
-#define NDPCR		(0x18) /* Page Count Register */
-#define NDBDR0		(0x1C) /* Bad Block Register 0 */
-#define NDBDR1		(0x20) /* Bad Block Register 1 */
-#define NDECCCTRL	(0x28) /* ECC control */
-#define NDDB		(0x40) /* Data Buffer */
-#define NDCB0		(0x48) /* Command Buffer0 */
-#define NDCB1		(0x4C) /* Command Buffer1 */
-#define NDCB2		(0x50) /* Command Buffer2 */
-
-#define NDCR_SPARE_EN		(0x1 << 31)
-#define NDCR_ECC_EN		(0x1 << 30)
-#define NDCR_DMA_EN		(0x1 << 29)
-#define NDCR_ND_RUN		(0x1 << 28)
-#define NDCR_DWIDTH_C		(0x1 << 27)
-#define NDCR_DWIDTH_M		(0x1 << 26)
-#define NDCR_PAGE_SZ		(0x1 << 24)
-#define NDCR_NCSX		(0x1 << 23)
-#define NDCR_ND_MODE		(0x3 << 21)
-#define NDCR_NAND_MODE   	(0x0)
-#define NDCR_CLR_PG_CNT		(0x1 << 20)
-#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
-#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
-#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
-#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
-
-#define NDCR_RA_START		(0x1 << 15)
-#define NDCR_PG_PER_BLK		(0x1 << 14)
-#define NDCR_ND_ARB_EN		(0x1 << 12)
-#define NDCR_INT_MASK           (0xFFF)
-
-#define NDSR_MASK		(0xfff)
-#define NDSR_ERR_CNT_OFF	(16)
-#define NDSR_ERR_CNT_MASK       (0x1f)
-#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
-#define NDSR_RDY                (0x1 << 12)
-#define NDSR_FLASH_RDY          (0x1 << 11)
-#define NDSR_CS0_PAGED		(0x1 << 10)
-#define NDSR_CS1_PAGED		(0x1 << 9)
-#define NDSR_CS0_CMDD		(0x1 << 8)
-#define NDSR_CS1_CMDD		(0x1 << 7)
-#define NDSR_CS0_BBD		(0x1 << 6)
-#define NDSR_CS1_BBD		(0x1 << 5)
-#define NDSR_UNCORERR		(0x1 << 4)
-#define NDSR_CORERR		(0x1 << 3)
-#define NDSR_WRDREQ		(0x1 << 2)
-#define NDSR_RDDREQ		(0x1 << 1)
-#define NDSR_WRCMDREQ		(0x1)
-
-#define NDCB0_LEN_OVRD		(0x1 << 28)
-#define NDCB0_ST_ROW_EN         (0x1 << 26)
-#define NDCB0_AUTO_RS		(0x1 << 25)
-#define NDCB0_CSEL		(0x1 << 24)
-#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
-#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
-#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
-#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
-#define NDCB0_NC		(0x1 << 20)
-#define NDCB0_DBC		(0x1 << 19)
-#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
-#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
-#define NDCB0_CMD2_MASK		(0xff << 8)
-#define NDCB0_CMD1_MASK		(0xff)
-#define NDCB0_ADDR_CYC_SHIFT	(16)
-
-#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
-#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
-#define EXT_CMD_TYPE_READ	4 /* Read */
-#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
-#define EXT_CMD_TYPE_FINAL	3 /* Final command */
-#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
-#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
-
-/*
- * This should be large enough to read 'ONFI' and 'JEDEC'.
- * Let's use 7 bytes, which is the maximum ID count supported
- * by the controller (see NDCR_RD_ID_CNT_MASK).
- */
-#define READ_ID_BYTES		7
-
-/* macros for registers read/write */
-#define nand_writel(info, off, val)					\
-	do {								\
-		dev_vdbg(&info->pdev->dev,				\
-			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
-			 __func__, __LINE__, (val), (off));		\
-		writel_relaxed((val), (info)->mmio_base + (off));	\
-	} while (0)
-
-#define nand_readl(info, off)						\
-	({								\
-		unsigned int _v;					\
-		_v = readl_relaxed((info)->mmio_base + (off));		\
-		dev_vdbg(&info->pdev->dev,				\
-			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
-			 __func__, __LINE__, (off), _v);		\
-		_v;							\
-	})
-
-/* error code and state */
-enum {
-	ERR_NONE	= 0,
-	ERR_DMABUSERR	= -1,
-	ERR_SENDCMD	= -2,
-	ERR_UNCORERR	= -3,
-	ERR_BBERR	= -4,
-	ERR_CORERR	= -5,
-};
-
-enum {
-	STATE_IDLE = 0,
-	STATE_PREPARED,
-	STATE_CMD_HANDLE,
-	STATE_DMA_READING,
-	STATE_DMA_WRITING,
-	STATE_DMA_DONE,
-	STATE_PIO_READING,
-	STATE_PIO_WRITING,
-	STATE_CMD_DONE,
-	STATE_READY,
-};
-
-enum pxa3xx_nand_variant {
-	PXA3XX_NAND_VARIANT_PXA,
-	PXA3XX_NAND_VARIANT_ARMADA370,
-};
-
-struct pxa3xx_nand_host {
-	struct nand_chip	chip;
-	void			*info_data;
-
-	/* page size of attached chip */
-	int			use_ecc;
-	int			cs;
-
-	/* calculated from pxa3xx_nand_flash data */
-	unsigned int		col_addr_cycles;
-	unsigned int		row_addr_cycles;
-};
-
-struct pxa3xx_nand_info {
-	struct nand_hw_control	controller;
-	struct platform_device	 *pdev;
-
-	struct clk		*clk;
-	void __iomem		*mmio_base;
-	unsigned long		mmio_phys;
-	struct completion	cmd_complete, dev_ready;
-
-	unsigned int 		buf_start;
-	unsigned int		buf_count;
-	unsigned int		buf_size;
-	unsigned int		data_buff_pos;
-	unsigned int		oob_buff_pos;
-
-	/* DMA information */
-	struct scatterlist	sg;
-	enum dma_data_direction	dma_dir;
-	struct dma_chan		*dma_chan;
-	dma_cookie_t		dma_cookie;
-	int			drcmr_dat;
-
-	unsigned char		*data_buff;
-	unsigned char		*oob_buff;
-	dma_addr_t 		data_buff_phys;
-	int 			data_dma_ch;
-
-	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
-	unsigned int		state;
-
-	/*
-	 * This driver supports NFCv1 (as found in PXA SoC)
-	 * and NFCv2 (as found in Armada 370/XP SoC).
-	 */
-	enum pxa3xx_nand_variant variant;
-
-	int			cs;
-	int			use_ecc;	/* use HW ECC ? */
-	int			ecc_bch;	/* using BCH ECC? */
-	int			use_dma;	/* use DMA ? */
-	int			use_spare;	/* use spare ? */
-	int			need_wait;
-
-	/* Amount of real data per full chunk */
-	unsigned int		chunk_size;
-
-	/* Amount of spare data per full chunk */
-	unsigned int		spare_size;
-
-	/* Number of full chunks (i.e chunk_size + spare_size) */
-	unsigned int            nfullchunks;
-
-	/*
-	 * Total number of chunks. If equal to nfullchunks, then there
-	 * are only full chunks. Otherwise, there is one last chunk of
-	 * size (last_chunk_size + last_spare_size)
-	 */
-	unsigned int            ntotalchunks;
-
-	/* Amount of real data in the last chunk */
-	unsigned int		last_chunk_size;
-
-	/* Amount of spare data in the last chunk */
-	unsigned int		last_spare_size;
-
-	unsigned int		ecc_size;
-	unsigned int		ecc_err_cnt;
-	unsigned int		max_bitflips;
-	int 			retcode;
-
-	/*
-	 * Variables only valid during command
-	 * execution. step_chunk_size and step_spare_size is the
-	 * amount of real data and spare data in the current
-	 * chunk. cur_chunk is the current chunk being
-	 * read/programmed.
-	 */
-	unsigned int		step_chunk_size;
-	unsigned int		step_spare_size;
-	unsigned int            cur_chunk;
-
-	/* cached register value */
-	uint32_t		reg_ndcr;
-	uint32_t		ndtr0cs0;
-	uint32_t		ndtr1cs0;
-
-	/* generated NDCBx register values */
-	uint32_t		ndcb0;
-	uint32_t		ndcb1;
-	uint32_t		ndcb2;
-	uint32_t		ndcb3;
-};
-
-static bool use_dma = 1;
-module_param(use_dma, bool, 0444);
-MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
-
-struct pxa3xx_nand_timing {
-	unsigned int	tCH;  /* Enable signal hold time */
-	unsigned int	tCS;  /* Enable signal setup time */
-	unsigned int	tWH;  /* ND_nWE high duration */
-	unsigned int	tWP;  /* ND_nWE pulse time */
-	unsigned int	tRH;  /* ND_nRE high duration */
-	unsigned int	tRP;  /* ND_nRE pulse width */
-	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
-	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
-	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
-};
-
-struct pxa3xx_nand_flash {
-	uint32_t	chip_id;
-	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
-	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
-	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
-};
-
-static struct pxa3xx_nand_timing timing[] = {
-	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
-	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
-	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
-	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
-};
-
-static struct pxa3xx_nand_flash builtin_flash_types[] = {
-	{ 0x46ec, 16, 16, &timing[1] },
-	{ 0xdaec,  8,  8, &timing[1] },
-	{ 0xd7ec,  8,  8, &timing[1] },
-	{ 0xa12c,  8,  8, &timing[2] },
-	{ 0xb12c, 16, 16, &timing[2] },
-	{ 0xdc2c,  8,  8, &timing[2] },
-	{ 0xcc2c, 16, 16, &timing[2] },
-	{ 0xba20, 16, 16, &timing[3] },
-};
-
-static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
-				struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	int nchunks = mtd->writesize / info->chunk_size;
-
-	if (section >= nchunks)
-		return -ERANGE;
-
-	oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
-			    info->spare_size;
-	oobregion->length = info->ecc_size;
-
-	return 0;
-}
-
-static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	int nchunks = mtd->writesize / info->chunk_size;
-
-	if (section >= nchunks)
-		return -ERANGE;
-
-	if (!info->spare_size)
-		return 0;
-
-	oobregion->offset = section * (info->ecc_size + info->spare_size);
-	oobregion->length = info->spare_size;
-	if (!section) {
-		/*
-		 * Bootrom looks in bytes 0 & 5 for bad blocks for the
-		 * 4KB page / 4bit BCH combination.
-		 */
-		if (mtd->writesize == 4096 && info->chunk_size == 2048) {
-			oobregion->offset += 6;
-			oobregion->length -= 6;
-		} else {
-			oobregion->offset += 2;
-			oobregion->length -= 2;
-		}
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
-	.ecc = pxa3xx_ooblayout_ecc,
-	.free = pxa3xx_ooblayout_free,
-};
-
-static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
-static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
-
-static struct nand_bbt_descr bbt_main_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	8,
-	.len = 6,
-	.veroffs = 14,
-	.maxblocks = 8,		/* Last 8 blocks in each chip */
-	.pattern = bbt_pattern
-};
-
-static struct nand_bbt_descr bbt_mirror_descr = {
-	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
-		| NAND_BBT_2BIT | NAND_BBT_VERSION,
-	.offs =	8,
-	.len = 6,
-	.veroffs = 14,
-	.maxblocks = 8,		/* Last 8 blocks in each chip */
-	.pattern = bbt_mirror_pattern
-};
-
-#define NDTR0_tCH(c)	(min((c), 7) << 19)
-#define NDTR0_tCS(c)	(min((c), 7) << 16)
-#define NDTR0_tWH(c)	(min((c), 7) << 11)
-#define NDTR0_tWP(c)	(min((c), 7) << 8)
-#define NDTR0_tRH(c)	(min((c), 7) << 3)
-#define NDTR0_tRP(c)	(min((c), 7) << 0)
-
-#define NDTR1_tR(c)	(min((c), 65535) << 16)
-#define NDTR1_tWHR(c)	(min((c), 15) << 4)
-#define NDTR1_tAR(c)	(min((c), 15) << 0)
-
-/* convert nano-seconds to nand flash controller clock cycles */
-#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
-
-static const struct of_device_id pxa3xx_nand_dt_ids[] = {
-	{
-		.compatible = "marvell,pxa3xx-nand",
-		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
-	},
-	{
-		.compatible = "marvell,armada370-nand",
-		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
-	},
-	{}
-};
-MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
-
-static enum pxa3xx_nand_variant
-pxa3xx_nand_get_variant(struct platform_device *pdev)
-{
-	const struct of_device_id *of_id =
-			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
-	if (!of_id)
-		return PXA3XX_NAND_VARIANT_PXA;
-	return (enum pxa3xx_nand_variant)of_id->data;
-}
-
-static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
-				   const struct pxa3xx_nand_timing *t)
-{
-	struct pxa3xx_nand_info *info = host->info_data;
-	unsigned long nand_clk = clk_get_rate(info->clk);
-	uint32_t ndtr0, ndtr1;
-
-	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
-		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
-		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
-		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
-		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
-		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
-
-	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
-		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
-		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
-
-	info->ndtr0cs0 = ndtr0;
-	info->ndtr1cs0 = ndtr1;
-	nand_writel(info, NDTR0CS0, ndtr0);
-	nand_writel(info, NDTR1CS0, ndtr1);
-}
-
-static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
-				       const struct nand_sdr_timings *t)
-{
-	struct pxa3xx_nand_info *info = host->info_data;
-	struct nand_chip *chip = &host->chip;
-	unsigned long nand_clk = clk_get_rate(info->clk);
-	uint32_t ndtr0, ndtr1;
-
-	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
-	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
-	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
-	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
-	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
-	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
-	u32 tR = chip->chip_delay * 1000;
-	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
-	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
-
-	/* fallback to a default value if tR = 0 */
-	if (!tR)
-		tR = 20000;
-
-	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
-		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
-		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
-		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
-		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
-		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
-
-	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
-		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
-		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
-
-	info->ndtr0cs0 = ndtr0;
-	info->ndtr1cs0 = ndtr1;
-	nand_writel(info, NDTR0CS0, ndtr0);
-	nand_writel(info, NDTR1CS0, ndtr1);
-}
-
-static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
-					   unsigned int *flash_width,
-					   unsigned int *dfc_width)
-{
-	struct nand_chip *chip = &host->chip;
-	struct pxa3xx_nand_info *info = host->info_data;
-	const struct pxa3xx_nand_flash *f = NULL;
-	struct mtd_info *mtd = nand_to_mtd(&host->chip);
-	int i, id, ntypes;
-
-	ntypes = ARRAY_SIZE(builtin_flash_types);
-
-	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
-
-	id = chip->read_byte(mtd);
-	id |= chip->read_byte(mtd) << 0x8;
-
-	for (i = 0; i < ntypes; i++) {
-		f = &builtin_flash_types[i];
-
-		if (f->chip_id == id)
-			break;
-	}
-
-	if (i == ntypes) {
-		dev_err(&info->pdev->dev, "Error: timings not found\n");
-		return -EINVAL;
-	}
-
-	pxa3xx_nand_set_timing(host, f->timing);
-
-	*flash_width = f->flash_width;
-	*dfc_width = f->dfc_width;
-
-	return 0;
-}
-
-static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
-					 int mode)
-{
-	const struct nand_sdr_timings *timings;
-
-	mode = fls(mode) - 1;
-	if (mode < 0)
-		mode = 0;
-
-	timings = onfi_async_timing_mode_to_sdr_timings(mode);
-	if (IS_ERR(timings))
-		return PTR_ERR(timings);
-
-	pxa3xx_nand_set_sdr_timing(host, timings);
-
-	return 0;
-}
-
-static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
-{
-	struct nand_chip *chip = &host->chip;
-	struct pxa3xx_nand_info *info = host->info_data;
-	unsigned int flash_width = 0, dfc_width = 0;
-	int mode, err;
-
-	mode = onfi_get_async_timing_mode(chip);
-	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
-		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
-						      &dfc_width);
-		if (err)
-			return err;
-
-		if (flash_width == 16) {
-			info->reg_ndcr |= NDCR_DWIDTH_M;
-			chip->options |= NAND_BUSWIDTH_16;
-		}
-
-		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
-	} else {
-		err = pxa3xx_nand_init_timings_onfi(host, mode);
-		if (err)
-			return err;
-	}
-
-	return 0;
-}
-
-/**
- * NOTE: ND_RUN must be set first and the command buffer written
- * afterwards, otherwise the controller does not start.
- * All interrupts are enabled at the same time, and
- * pxa3xx_nand_irq() handles all of the logic.
- */
-static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
-{
-	uint32_t ndcr;
-
-	ndcr = info->reg_ndcr;
-
-	if (info->use_ecc) {
-		ndcr |= NDCR_ECC_EN;
-		if (info->ecc_bch)
-			nand_writel(info, NDECCCTRL, 0x1);
-	} else {
-		ndcr &= ~NDCR_ECC_EN;
-		if (info->ecc_bch)
-			nand_writel(info, NDECCCTRL, 0x0);
-	}
-
-	if (info->use_dma)
-		ndcr |= NDCR_DMA_EN;
-	else
-		ndcr &= ~NDCR_DMA_EN;
-
-	if (info->use_spare)
-		ndcr |= NDCR_SPARE_EN;
-	else
-		ndcr &= ~NDCR_SPARE_EN;
-
-	ndcr |= NDCR_ND_RUN;
-
-	/* clear status bits and run */
-	nand_writel(info, NDSR, NDSR_MASK);
-	nand_writel(info, NDCR, 0);
-	nand_writel(info, NDCR, ndcr);
-}
-
-static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
-{
-	uint32_t ndcr;
-	int timeout = NAND_STOP_DELAY;
-
-	/* wait for the RUN bit in NDCR to become 0 */
-	ndcr = nand_readl(info, NDCR);
-	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
-		ndcr = nand_readl(info, NDCR);
-		udelay(1);
-	}
-
-	if (timeout <= 0) {
-		ndcr &= ~NDCR_ND_RUN;
-		nand_writel(info, NDCR, ndcr);
-	}
-	if (info->dma_chan)
-		dmaengine_terminate_all(info->dma_chan);
-
-	/* clear status bits */
-	nand_writel(info, NDSR, NDSR_MASK);
-}
-
-static void __maybe_unused
-enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
-{
-	uint32_t ndcr;
-
-	ndcr = nand_readl(info, NDCR);
-	nand_writel(info, NDCR, ndcr & ~int_mask);
-}
-
-static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
-{
-	uint32_t ndcr;
-
-	ndcr = nand_readl(info, NDCR);
-	nand_writel(info, NDCR, ndcr | int_mask);
-}
-
-static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
-{
-	if (info->ecc_bch) {
-		u32 val;
-		int ret;
-
-		/*
-		 * According to the datasheet, when reading from NDDB
-		 * with BCH enabled, after each 32-byte read, we
-		 * have to make sure that the NDSR.RDDREQ bit is set.
-		 *
-		 * Drain the FIFO eight 32-bit reads at a time, and skip
-		 * the polling on the last read.
-		 */
-		while (len > 8) {
-			ioread32_rep(info->mmio_base + NDDB, data, 8);
-
-			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
-							 val & NDSR_RDDREQ, 1000, 5000);
-			if (ret) {
-				dev_err(&info->pdev->dev,
-					"Timeout on RDDREQ while draining the FIFO\n");
-				return;
-			}
-
-			data += 32;
-			len -= 8;
-		}
-	}
-
-	ioread32_rep(info->mmio_base + NDDB, data, len);
-}
-
-static void handle_data_pio(struct pxa3xx_nand_info *info)
-{
-	switch (info->state) {
-	case STATE_PIO_WRITING:
-		if (info->step_chunk_size)
-			writesl(info->mmio_base + NDDB,
-				info->data_buff + info->data_buff_pos,
-				DIV_ROUND_UP(info->step_chunk_size, 4));
-
-		if (info->step_spare_size)
-			writesl(info->mmio_base + NDDB,
-				info->oob_buff + info->oob_buff_pos,
-				DIV_ROUND_UP(info->step_spare_size, 4));
-		break;
-	case STATE_PIO_READING:
-		if (info->step_chunk_size)
-			drain_fifo(info,
-				   info->data_buff + info->data_buff_pos,
-				   DIV_ROUND_UP(info->step_chunk_size, 4));
-
-		if (info->step_spare_size)
-			drain_fifo(info,
-				   info->oob_buff + info->oob_buff_pos,
-				   DIV_ROUND_UP(info->step_spare_size, 4));
-		break;
-	default:
-		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
-				info->state);
-		BUG();
-	}
-
-	/* Update buffer pointers for multi-page read/write */
-	info->data_buff_pos += info->step_chunk_size;
-	info->oob_buff_pos += info->step_spare_size;
-}
-
-static void pxa3xx_nand_data_dma_irq(void *data)
-{
-	struct pxa3xx_nand_info *info = data;
-	struct dma_tx_state state;
-	enum dma_status status;
-
-	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
-	if (likely(status == DMA_COMPLETE)) {
-		info->state = STATE_DMA_DONE;
-	} else {
-		dev_err(&info->pdev->dev, "DMA error on data channel\n");
-		info->retcode = ERR_DMABUSERR;
-	}
-	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
-
-	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
-	enable_int(info, NDCR_INT_MASK);
-}
-
-static void start_data_dma(struct pxa3xx_nand_info *info)
-{
-	enum dma_transfer_direction direction;
-	struct dma_async_tx_descriptor *tx;
-
-	switch (info->state) {
-	case STATE_DMA_WRITING:
-		info->dma_dir = DMA_TO_DEVICE;
-		direction = DMA_MEM_TO_DEV;
-		break;
-	case STATE_DMA_READING:
-		info->dma_dir = DMA_FROM_DEVICE;
-		direction = DMA_DEV_TO_MEM;
-		break;
-	default:
-		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
-				info->state);
-		BUG();
-	}
-	info->sg.length = info->chunk_size;
-	if (info->use_spare)
-		info->sg.length += info->spare_size + info->ecc_size;
-	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
-
-	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
-				     DMA_PREP_INTERRUPT);
-	if (!tx) {
-		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
-		return;
-	}
-	tx->callback = pxa3xx_nand_data_dma_irq;
-	tx->callback_param = info;
-	info->dma_cookie = dmaengine_submit(tx);
-	dma_async_issue_pending(info->dma_chan);
-	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
-		__func__, direction, info->dma_cookie, info->sg.length);
-}
-
-static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
-{
-	struct pxa3xx_nand_info *info = data;
-
-	handle_data_pio(info);
-
-	info->state = STATE_CMD_DONE;
-	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
-
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
-{
-	struct pxa3xx_nand_info *info = devid;
-	unsigned int status, is_completed = 0, is_ready = 0;
-	unsigned int ready, cmd_done;
-	irqreturn_t ret = IRQ_HANDLED;
-
-	if (info->cs == 0) {
-		ready           = NDSR_FLASH_RDY;
-		cmd_done        = NDSR_CS0_CMDD;
-	} else {
-		ready           = NDSR_RDY;
-		cmd_done        = NDSR_CS1_CMDD;
-	}
-
-	status = nand_readl(info, NDSR);
-
-	if (status & NDSR_UNCORERR)
-		info->retcode = ERR_UNCORERR;
-	if (status & NDSR_CORERR) {
-		info->retcode = ERR_CORERR;
-		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
-		    info->ecc_bch)
-			info->ecc_err_cnt = NDSR_ERR_CNT(status);
-		else
-			info->ecc_err_cnt = 1;
-
-		/*
-		 * Each chunk composing a page is corrected independently,
-		 * and we need to store the maximum number of corrected bitflips
-		 * to return it to the MTD layer in ecc.read_page().
-		 */
-		info->max_bitflips = max_t(unsigned int,
-					   info->max_bitflips,
-					   info->ecc_err_cnt);
-	}
-	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
-		/* whether to use DMA to transfer the data */
-		if (info->use_dma) {
-			disable_int(info, NDCR_INT_MASK);
-			info->state = (status & NDSR_RDDREQ) ?
-				      STATE_DMA_READING : STATE_DMA_WRITING;
-			start_data_dma(info);
-			goto NORMAL_IRQ_EXIT;
-		} else {
-			info->state = (status & NDSR_RDDREQ) ?
-				      STATE_PIO_READING : STATE_PIO_WRITING;
-			ret = IRQ_WAKE_THREAD;
-			goto NORMAL_IRQ_EXIT;
-		}
-	}
-	if (status & cmd_done) {
-		info->state = STATE_CMD_DONE;
-		is_completed = 1;
-	}
-	if (status & ready) {
-		info->state = STATE_READY;
-		is_ready = 1;
-	}
-
-	/*
-	 * Clear all status bits before issuing the next command, which
-	 * can and will alter the status bits and will warrant a new
-	 * interrupt of its own. This lets the controller exit the IRQ.
-	 */
-	nand_writel(info, NDSR, status);
-
-	if (status & NDSR_WRCMDREQ) {
-		status &= ~NDSR_WRCMDREQ;
-		info->state = STATE_CMD_HANDLE;
-
-		/*
-		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
-		 * must be loaded by writing either 12 or 16 bytes
-		 * directly to NDCB0, four bytes at a time.
-		 *
-		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
-		 * but each NDCBx register can be read.
-		 */
-		nand_writel(info, NDCB0, info->ndcb0);
-		nand_writel(info, NDCB0, info->ndcb1);
-		nand_writel(info, NDCB0, info->ndcb2);
-
-		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
-		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
-			nand_writel(info, NDCB0, info->ndcb3);
-	}
-
-	if (is_completed)
-		complete(&info->cmd_complete);
-	if (is_ready)
-		complete(&info->dev_ready);
-NORMAL_IRQ_EXIT:
-	return ret;
-}
-
-static inline int is_buf_blank(uint8_t *buf, size_t len)
-{
-	for (; len > 0; len--)
-		if (*buf++ != 0xff)
-			return 0;
-	return 1;
-}
-
-static void set_command_address(struct pxa3xx_nand_info *info,
-		unsigned int page_size, uint16_t column, int page_addr)
-{
-	/* small page addr setting */
-	if (page_size < PAGE_CHUNK_SIZE) {
-		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
-				| (column & 0xFF);
-
-		info->ndcb2 = 0;
-	} else {
-		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
-				| (column & 0xFFFF);
-
-		if (page_addr & 0xFF0000)
-			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
-		else
-			info->ndcb2 = 0;
-	}
-}
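
As a quick illustration of the large-page packing performed by set_command_address() above, the snippet below recomputes NDCB1/NDCB2 for made-up page and column values; it is not driver code, only the same bit arithmetic in isolation:

#include <stdio.h>

int main(void)
{
	int page_addr = 0x12345;	/* made-up page number */
	unsigned int column = 0x40;	/* made-up column */
	unsigned int ndcb1, ndcb2;

	/* large page (writesize >= 2048): 16-bit column, 24-bit page */
	ndcb1 = ((page_addr & 0xFFFF) << 16) | (column & 0xFFFF);
	ndcb2 = (page_addr & 0xFF0000) ? (page_addr & 0xFF0000) >> 16 : 0;

	/* prints NDCB1 = 0x23450040, NDCB2 = 0x00000001 */
	printf("NDCB1 = 0x%08x, NDCB2 = 0x%08x\n", ndcb1, ndcb2);
	return 0;
}
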
-
-static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
-{
-	struct pxa3xx_nand_host *host = info->host[info->cs];
-	struct mtd_info *mtd = nand_to_mtd(&host->chip);
-
-	/* reset the data and oob column pointers before handling data */
-	info->buf_start		= 0;
-	info->buf_count		= 0;
-	info->data_buff_pos	= 0;
-	info->oob_buff_pos	= 0;
-	info->step_chunk_size   = 0;
-	info->step_spare_size   = 0;
-	info->cur_chunk         = 0;
-	info->use_ecc		= 0;
-	info->use_spare		= 1;
-	info->retcode		= ERR_NONE;
-	info->ecc_err_cnt	= 0;
-	info->ndcb3		= 0;
-	info->need_wait		= 0;
-
-	switch (command) {
-	case NAND_CMD_READ0:
-	case NAND_CMD_PAGEPROG:
-		info->use_ecc = 1;
-		break;
-	case NAND_CMD_PARAM:
-		info->use_spare = 0;
-		break;
-	default:
-		info->ndcb1 = 0;
-		info->ndcb2 = 0;
-		break;
-	}
-
-	/*
-	 * If we are about to issue a read command, or about to set
-	 * the write address, then clean the data buffer.
-	 */
-	if (command == NAND_CMD_READ0 ||
-	    command == NAND_CMD_READOOB ||
-	    command == NAND_CMD_SEQIN) {
-
-		info->buf_count = mtd->writesize + mtd->oobsize;
-		memset(info->data_buff, 0xFF, info->buf_count);
-	}
-
-}
-
-static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
-		int ext_cmd_type, uint16_t column, int page_addr)
-{
-	int addr_cycle, exec_cmd;
-	struct pxa3xx_nand_host *host;
-	struct mtd_info *mtd;
-
-	host = info->host[info->cs];
-	mtd = nand_to_mtd(&host->chip);
-	addr_cycle = 0;
-	exec_cmd = 1;
-
-	if (info->cs != 0)
-		info->ndcb0 = NDCB0_CSEL;
-	else
-		info->ndcb0 = 0;
-
-	if (command == NAND_CMD_SEQIN)
-		exec_cmd = 0;
-
-	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
-				    + host->col_addr_cycles);
-
-	switch (command) {
-	case NAND_CMD_READOOB:
-	case NAND_CMD_READ0:
-		info->buf_start = column;
-		info->ndcb0 |= NDCB0_CMD_TYPE(0)
-				| addr_cycle
-				| NAND_CMD_READ0;
-
-		if (command == NAND_CMD_READOOB)
-			info->buf_start += mtd->writesize;
-
-		if (info->cur_chunk < info->nfullchunks) {
-			info->step_chunk_size = info->chunk_size;
-			info->step_spare_size = info->spare_size;
-		} else {
-			info->step_chunk_size = info->last_chunk_size;
-			info->step_spare_size = info->last_spare_size;
-		}
-
-		/*
-		 * Multiple page read needs an 'extended command type' field,
-		 * which is either naked-read or last-read according to the
-		 * state.
-		 */
-		if (mtd->writesize == PAGE_CHUNK_SIZE) {
-			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
-		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
-			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
-					| NDCB0_LEN_OVRD
-					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
-			info->ndcb3 = info->step_chunk_size +
-				info->step_spare_size;
-		}
-
-		set_command_address(info, mtd->writesize, column, page_addr);
-		break;
-
-	case NAND_CMD_SEQIN:
-
-		info->buf_start = column;
-		set_command_address(info, mtd->writesize, 0, page_addr);
-
-		/*
-		 * Multiple page programming needs to execute the initial
-		 * SEQIN command that sets the page address.
-		 */
-		if (mtd->writesize > PAGE_CHUNK_SIZE) {
-			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
-				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
-				| addr_cycle
-				| command;
-			exec_cmd = 1;
-		}
-		break;
-
-	case NAND_CMD_PAGEPROG:
-		if (is_buf_blank(info->data_buff,
-					(mtd->writesize + mtd->oobsize))) {
-			exec_cmd = 0;
-			break;
-		}
-
-		if (info->cur_chunk < info->nfullchunks) {
-			info->step_chunk_size = info->chunk_size;
-			info->step_spare_size = info->spare_size;
-		} else {
-			info->step_chunk_size = info->last_chunk_size;
-			info->step_spare_size = info->last_spare_size;
-		}
-
-		/* Second command setting for large pages */
-		if (mtd->writesize > PAGE_CHUNK_SIZE) {
-			/*
-			 * Multiple page write uses the 'extended command'
-			 * field. This can be used to issue a command dispatch
-			 * or a naked-write depending on the current stage.
-			 */
-			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
-					| NDCB0_LEN_OVRD
-					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
-			info->ndcb3 = info->step_chunk_size +
-				      info->step_spare_size;
-
-			/*
-			 * This is the command dispatch that completes a chunked
-			 * page program operation.
-			 */
-			if (info->cur_chunk == info->ntotalchunks) {
-				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
-					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
-					| command;
-				info->ndcb1 = 0;
-				info->ndcb2 = 0;
-				info->ndcb3 = 0;
-			}
-		} else {
-			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
-					| NDCB0_AUTO_RS
-					| NDCB0_ST_ROW_EN
-					| NDCB0_DBC
-					| (NAND_CMD_PAGEPROG << 8)
-					| NAND_CMD_SEQIN
-					| addr_cycle;
-		}
-		break;
-
-	case NAND_CMD_PARAM:
-		info->buf_count = INIT_BUFFER_SIZE;
-		info->ndcb0 |= NDCB0_CMD_TYPE(0)
-				| NDCB0_ADDR_CYC(1)
-				| NDCB0_LEN_OVRD
-				| command;
-		info->ndcb1 = (column & 0xFF);
-		info->ndcb3 = INIT_BUFFER_SIZE;
-		info->step_chunk_size = INIT_BUFFER_SIZE;
-		break;
-
-	case NAND_CMD_READID:
-		info->buf_count = READ_ID_BYTES;
-		info->ndcb0 |= NDCB0_CMD_TYPE(3)
-				| NDCB0_ADDR_CYC(1)
-				| command;
-		info->ndcb1 = (column & 0xFF);
-
-		info->step_chunk_size = 8;
-		break;
-	case NAND_CMD_STATUS:
-		info->buf_count = 1;
-		info->ndcb0 |= NDCB0_CMD_TYPE(4)
-				| NDCB0_ADDR_CYC(1)
-				| command;
-
-		info->step_chunk_size = 8;
-		break;
-
-	case NAND_CMD_ERASE1:
-		info->ndcb0 |= NDCB0_CMD_TYPE(2)
-				| NDCB0_AUTO_RS
-				| NDCB0_ADDR_CYC(3)
-				| NDCB0_DBC
-				| (NAND_CMD_ERASE2 << 8)
-				| NAND_CMD_ERASE1;
-		info->ndcb1 = page_addr;
-		info->ndcb2 = 0;
-
-		break;
-	case NAND_CMD_RESET:
-		info->ndcb0 |= NDCB0_CMD_TYPE(5)
-				| command;
-
-		break;
-
-	case NAND_CMD_ERASE2:
-		exec_cmd = 0;
-		break;
-
-	default:
-		exec_cmd = 0;
-		dev_err(&info->pdev->dev, "non-supported command %x\n",
-				command);
-		break;
-	}
-
-	return exec_cmd;
-}
-
-static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
-			 int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	int exec_cmd;
-
-	/*
-	 * if this is an x16 device, then convert the input
-	 * "byte" address into a "word" address appropriate
-	 * for indexing a word-oriented device
-	 */
-	if (info->reg_ndcr & NDCR_DWIDTH_M)
-		column /= 2;
-
-	/*
-	 * Different NAND chips may be hooked to different chip
-	 * selects, so check whether the chip select has changed
-	 * and, if so, reset the timings.
-	 */
-	if (info->cs != host->cs) {
-		info->cs = host->cs;
-		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
-		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
-	}
-
-	prepare_start_command(info, command);
-
-	info->state = STATE_PREPARED;
-	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
-
-	if (exec_cmd) {
-		init_completion(&info->cmd_complete);
-		init_completion(&info->dev_ready);
-		info->need_wait = 1;
-		pxa3xx_nand_start(info);
-
-		if (!wait_for_completion_timeout(&info->cmd_complete,
-		    CHIP_DELAY_TIMEOUT)) {
-			dev_err(&info->pdev->dev, "Wait time out!!!\n");
-			/* Stop State Machine for next command cycle */
-			pxa3xx_nand_stop(info);
-		}
-	}
-	info->state = STATE_IDLE;
-}
-
-static void nand_cmdfunc_extended(struct mtd_info *mtd,
-				  const unsigned command,
-				  int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	int exec_cmd, ext_cmd_type;
-
-	/*
-	 * if this is an x16 device then convert the input
-	 * "byte" address into a "word" address appropriate
-	 * for indexing a word-oriented device
-	 */
-	if (info->reg_ndcr & NDCR_DWIDTH_M)
-		column /= 2;
-
-	/*
-	 * Different NAND chips may be hooked to different chip
-	 * selects, so check whether the chip select has changed
-	 * and, if so, reset the timings.
-	 */
-	if (info->cs != host->cs) {
-		info->cs = host->cs;
-		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
-		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
-	}
-
-	/* Select the extended command for the first command */
-	switch (command) {
-	case NAND_CMD_READ0:
-	case NAND_CMD_READOOB:
-		ext_cmd_type = EXT_CMD_TYPE_MONO;
-		break;
-	case NAND_CMD_SEQIN:
-		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
-		break;
-	case NAND_CMD_PAGEPROG:
-		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
-		break;
-	default:
-		ext_cmd_type = 0;
-		break;
-	}
-
-	prepare_start_command(info, command);
-
-	/*
-	 * Prepare the "is ready" completion before starting a command
-	 * transaction sequence. If the command is not executed, the
-	 * completion will be completed; see below.
-	 *
-	 * We can do that inside the loop because the command variable
-	 * is invariant and thus so is the exec_cmd.
-	 */
-	info->need_wait = 1;
-	init_completion(&info->dev_ready);
-	do {
-		info->state = STATE_PREPARED;
-
-		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
-					       column, page_addr);
-		if (!exec_cmd) {
-			info->need_wait = 0;
-			complete(&info->dev_ready);
-			break;
-		}
-
-		init_completion(&info->cmd_complete);
-		pxa3xx_nand_start(info);
-
-		if (!wait_for_completion_timeout(&info->cmd_complete,
-		    CHIP_DELAY_TIMEOUT)) {
-			dev_err(&info->pdev->dev, "Wait time out!!!\n");
-			/* Stop State Machine for next command cycle */
-			pxa3xx_nand_stop(info);
-			break;
-		}
-
-		/* Only a few commands need several steps */
-		if (command != NAND_CMD_PAGEPROG &&
-		    command != NAND_CMD_READ0    &&
-		    command != NAND_CMD_READOOB)
-			break;
-
-		info->cur_chunk++;
-
-		/* Check if the sequence is complete */
-		if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG)
-			break;
-
-		/*
-		 * After a split program command sequence has issued
-		 * the command dispatch, the command sequence is complete.
-		 */
-		if (info->cur_chunk == (info->ntotalchunks + 1) &&
-		    command == NAND_CMD_PAGEPROG &&
-		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
-			break;
-
-		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
-			/* Last read: issue a 'last naked read' */
-			if (info->cur_chunk == info->ntotalchunks - 1)
-				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
-			else
-				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
-
-		/*
-		 * If a split program command has no more data to transfer,
-		 * the command dispatch must be issued to complete it.
-		 */
-		} else if (command == NAND_CMD_PAGEPROG &&
-			   info->cur_chunk == info->ntotalchunks) {
-				ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
-		}
-	} while (1);
-
-	info->state = STATE_IDLE;
-}
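
The extended-command loop above can be hard to follow; the sketch below only enumerates the ext_cmd_type progression for a chunked READ0 the way nand_cmdfunc_extended() selects it (MONO for the first chunk, naked reads in the middle, a last naked read at the end). The enum values are placeholders, not the real EXT_CMD_TYPE_* encodings:

#include <stdio.h>

enum ext_cmd { MONO, NAKED_RW, LAST_RW };	/* placeholder values */

static const char *name(enum ext_cmd t)
{
	return t == MONO ? "MONO" : t == NAKED_RW ? "NAKED_RW" : "LAST_RW";
}

int main(void)
{
	int ntotalchunks = 5;		/* e.g. the 8-bit/512B on 4KiB layout */
	enum ext_cmd type = MONO;	/* first step of a chunked READ0 */
	int chunk;

	for (chunk = 0; chunk < ntotalchunks; chunk++) {
		printf("chunk %d: %s\n", chunk, name(type));
		/* the last chunk gets a 'last naked read', the others a naked read */
		type = (chunk + 1 == ntotalchunks - 1) ? LAST_RW : NAKED_RW;
	}
	return 0;
}
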
-
-static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
-		struct nand_chip *chip, const uint8_t *buf, int oob_required,
-		int page)
-{
-	chip->write_buf(mtd, buf, mtd->writesize);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	return 0;
-}
-
-static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
-		struct nand_chip *chip, uint8_t *buf, int oob_required,
-		int page)
-{
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-
-	chip->read_buf(mtd, buf, mtd->writesize);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	if (info->retcode == ERR_CORERR && info->use_ecc) {
-		mtd->ecc_stats.corrected += info->ecc_err_cnt;
-
-	} else if (info->retcode == ERR_UNCORERR) {
-		/*
-		 * For a blank page (all 0xff), the HW calculates its ECC as
-		 * 0, which is different from the ECC information within the
-		 * OOB; ignore such uncorrectable errors.
-		 */
-		if (is_buf_blank(buf, mtd->writesize))
-			info->retcode = ERR_NONE;
-		else
-			mtd->ecc_stats.failed++;
-	}
-
-	return info->max_bitflips;
-}
-
-static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	char retval = 0xFF;
-
-	if (info->buf_start < info->buf_count)
-		/* Has a new command just been sent? */
-		retval = info->data_buff[info->buf_start++];
-
-	return retval;
-}
-
-static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	u16 retval = 0xFFFF;
-
-	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
-		retval = *((u16 *)(info->data_buff+info->buf_start));
-		info->buf_start += 2;
-	}
-	return retval;
-}
-
-static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
-
-	memcpy(buf, info->data_buff + info->buf_start, real_len);
-	info->buf_start += real_len;
-}
-
-static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
-		const uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
-
-	memcpy(info->data_buff + info->buf_start, buf, real_len);
-	info->buf_start += real_len;
-}
-
-static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
-{
-	return;
-}
-
-static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-
-	if (info->need_wait) {
-		info->need_wait = 0;
-		if (!wait_for_completion_timeout(&info->dev_ready,
-		    CHIP_DELAY_TIMEOUT)) {
-			dev_err(&info->pdev->dev, "Ready time out!!!\n");
-			return NAND_STATUS_FAIL;
-		}
-	}
-
-	/* pxa3xx_nand_send_command has waited for command complete */
-	if (this->state == FL_WRITING || this->state == FL_ERASING) {
-		if (info->retcode == ERR_NONE)
-			return 0;
-		else
-			return NAND_STATUS_FAIL;
-	}
-
-	return NAND_STATUS_READY;
-}
-
-static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
-{
-	struct pxa3xx_nand_host *host = info->host[info->cs];
-	struct platform_device *pdev = info->pdev;
-	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	const struct nand_sdr_timings *timings;
-
-	/* Configure default flash values */
-	info->chunk_size = PAGE_CHUNK_SIZE;
-	info->reg_ndcr = 0x0; /* enable all interrupts */
-	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
-	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
-	info->reg_ndcr |= NDCR_SPARE_EN;
-
-	/* use the common ONFI mode 0 timings as a first try */
-	timings = onfi_async_timing_mode_to_sdr_timings(0);
-	if (IS_ERR(timings))
-		return PTR_ERR(timings);
-
-	pxa3xx_nand_set_sdr_timing(host, timings);
-	return 0;
-}
-
-static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
-{
-	struct pxa3xx_nand_host *host = info->host[info->cs];
-	struct nand_chip *chip = &host->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-
-	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
-	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
-	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
-}
-
-static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
-{
-	struct platform_device *pdev = info->pdev;
-	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	uint32_t ndcr = nand_readl(info, NDCR);
-
-	/* Set an initial chunk size */
-	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
-	info->reg_ndcr = ndcr &
-		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
-	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
-	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
-	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
-}
-
-static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
-{
-	struct platform_device *pdev = info->pdev;
-	struct dma_slave_config	config;
-	dma_cap_mask_t mask;
-	struct pxad_param param;
-	int ret;
-
-	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
-	if (info->data_buff == NULL)
-		return -ENOMEM;
-	if (use_dma == 0)
-		return 0;
-
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
-	if (ret)
-		return ret;
-
-	sg_init_one(&info->sg, info->data_buff, info->buf_size);
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-	param.prio = PXAD_PRIO_LOWEST;
-	param.drcmr = info->drcmr_dat;
-	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
-							  &param, &pdev->dev,
-							  "data");
-	if (!info->dma_chan) {
-		dev_err(&pdev->dev, "unable to request data dma channel\n");
-		return -ENODEV;
-	}
-
-	memset(&config, 0, sizeof(config));
-	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-	config.src_addr = info->mmio_phys + NDDB;
-	config.dst_addr = info->mmio_phys + NDDB;
-	config.src_maxburst = 32;
-	config.dst_maxburst = 32;
-	ret = dmaengine_slave_config(info->dma_chan, &config);
-	if (ret < 0) {
-		dev_err(&info->pdev->dev,
-			"dma channel configuration failed: %d\n",
-			ret);
-		return ret;
-	}
-
-	/*
-	 * Now that DMA buffers are allocated we turn on
-	 * DMA proper for I/O operations.
-	 */
-	info->use_dma = 1;
-	return 0;
-}
-
-static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
-{
-	if (info->use_dma) {
-		dmaengine_terminate_all(info->dma_chan);
-		dma_release_channel(info->dma_chan);
-	}
-	kfree(info->data_buff);
-}
-
-static int pxa_ecc_init(struct pxa3xx_nand_info *info,
-			struct mtd_info *mtd,
-			int strength, int ecc_stepsize, int page_size)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
-		info->nfullchunks = 1;
-		info->ntotalchunks = 1;
-		info->chunk_size = 2048;
-		info->spare_size = 40;
-		info->ecc_size = 24;
-		ecc->mode = NAND_ECC_HW;
-		ecc->size = 512;
-		ecc->strength = 1;
-
-	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
-		info->nfullchunks = 1;
-		info->ntotalchunks = 1;
-		info->chunk_size = 512;
-		info->spare_size = 8;
-		info->ecc_size = 8;
-		ecc->mode = NAND_ECC_HW;
-		ecc->size = 512;
-		ecc->strength = 1;
-
-	/*
-	 * Required ECC: 4-bit correction per 512 bytes
-	 * Select: 16-bit correction per 2048 bytes
-	 */
-	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
-		info->ecc_bch = 1;
-		info->nfullchunks = 1;
-		info->ntotalchunks = 1;
-		info->chunk_size = 2048;
-		info->spare_size = 32;
-		info->ecc_size = 32;
-		ecc->mode = NAND_ECC_HW;
-		ecc->size = info->chunk_size;
-		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
-		ecc->strength = 16;
-
-	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
-		info->ecc_bch = 1;
-		info->nfullchunks = 2;
-		info->ntotalchunks = 2;
-		info->chunk_size = 2048;
-		info->spare_size = 32;
-		info->ecc_size = 32;
-		ecc->mode = NAND_ECC_HW;
-		ecc->size = info->chunk_size;
-		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
-		ecc->strength = 16;
-
-	/*
-	 * Required ECC: 8-bit correction per 512 bytes
-	 * Select: 16-bit correction per 1024 bytes
-	 */
-	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
-		info->ecc_bch = 1;
-		info->nfullchunks = 4;
-		info->ntotalchunks = 5;
-		info->chunk_size = 1024;
-		info->spare_size = 0;
-		info->last_chunk_size = 0;
-		info->last_spare_size = 64;
-		info->ecc_size = 32;
-		ecc->mode = NAND_ECC_HW;
-		ecc->size = info->chunk_size;
-		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
-		ecc->strength = 16;
-	} else {
-		dev_err(&info->pdev->dev,
-			"ECC strength %d at page size %d is not supported\n",
-			strength, page_size);
-		return -ENODEV;
-	}
-
-	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
-		 ecc->strength, ecc->size);
-	return 0;
-}
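
For the strength 8, step 512, 4 KiB page case above (nfullchunks = 4, ntotalchunks = 5), the per-page byte accounting works out as follows; the sketch simply re-derives the totals from the fields set in pxa_ecc_init() and assumes, as the driver does, that every step carries its data, spare and ECC bytes back to back:

#include <stdio.h>

int main(void)
{
	int nfullchunks = 4, chunk_size = 1024, spare_size = 0;
	int last_chunk_size = 0, last_spare_size = 64, ecc_size = 32;
	int ntotalchunks = nfullchunks + 1;

	int data  = nfullchunks * chunk_size + last_chunk_size;	/* 4096 */
	int spare = nfullchunks * spare_size + last_spare_size;	/* 64 */
	int ecc   = ntotalchunks * ecc_size;			/* 160 */

	printf("data %d + spare %d + ecc %d = %d bytes per page\n",
	       data, spare, ecc, data + spare + ecc);
	return 0;
}
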
-
-static int pxa3xx_nand_scan(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
-	struct pxa3xx_nand_info *info = host->info_data;
-	struct platform_device *pdev = info->pdev;
-	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	int ret;
-	uint16_t ecc_strength, ecc_step;
-
-	if (pdata->keep_config) {
-		pxa3xx_nand_detect_config(info);
-	} else {
-		ret = pxa3xx_nand_config_ident(info);
-		if (ret)
-			return ret;
-	}
-
-	if (info->reg_ndcr & NDCR_DWIDTH_M)
-		chip->options |= NAND_BUSWIDTH_16;
-
-	/* Device detection must be done with ECC disabled */
-	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
-		nand_writel(info, NDECCCTRL, 0x0);
-
-	if (pdata->flash_bbt)
-		chip->bbt_options |= NAND_BBT_USE_FLASH;
-
-	chip->ecc.strength = pdata->ecc_strength;
-	chip->ecc.size = pdata->ecc_step_size;
-
-	if (nand_scan_ident(mtd, 1, NULL))
-		return -ENODEV;
-
-	if (!pdata->keep_config) {
-		ret = pxa3xx_nand_init(host);
-		if (ret) {
-			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
-				ret);
-			return ret;
-		}
-	}
-
-	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
-		/*
-		 * We'll use a bad block table stored in-flash and don't
-		 * allow writing the bad block marker to the flash.
-		 */
-		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
-		chip->bbt_td = &bbt_main_descr;
-		chip->bbt_md = &bbt_mirror_descr;
-	}
-
-	/*
-	 * If the page size is bigger than the FIFO size, let's check
-	 * we are given the right variant and then switch to the extended
-	 * (aka split) command handling.
-	 */
-	if (mtd->writesize > PAGE_CHUNK_SIZE) {
-		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
-			chip->cmdfunc = nand_cmdfunc_extended;
-		} else {
-			dev_err(&info->pdev->dev,
-				"unsupported page size on this variant\n");
-			return -ENODEV;
-		}
-	}
-
-	ecc_strength = chip->ecc.strength;
-	ecc_step = chip->ecc.size;
-	if (!ecc_strength || !ecc_step) {
-		ecc_strength = chip->ecc_strength_ds;
-		ecc_step = chip->ecc_step_ds;
-	}
-
-	/* Set default ECC strength requirements on non-ONFI devices */
-	if (ecc_strength < 1 && ecc_step < 1) {
-		ecc_strength = 1;
-		ecc_step = 512;
-	}
-
-	ret = pxa_ecc_init(info, mtd, ecc_strength,
-			   ecc_step, mtd->writesize);
-	if (ret)
-		return ret;
-
-	/* calculate addressing information */
-	if (mtd->writesize >= 2048)
-		host->col_addr_cycles = 2;
-	else
-		host->col_addr_cycles = 1;
-
-	/* release the initial buffer */
-	kfree(info->data_buff);
-
-	/* allocate the real data + oob buffer */
-	info->buf_size = mtd->writesize + mtd->oobsize;
-	ret = pxa3xx_nand_init_buff(info);
-	if (ret)
-		return ret;
-	info->oob_buff = info->data_buff + mtd->writesize;
-
-	if ((mtd->size >> chip->page_shift) > 65536)
-		host->row_addr_cycles = 3;
-	else
-		host->row_addr_cycles = 2;
-
-	if (!pdata->keep_config)
-		pxa3xx_nand_config_tail(info);
-
-	return nand_scan_tail(mtd);
-}
-
-static int alloc_nand_resource(struct platform_device *pdev)
-{
-	struct device_node *np = pdev->dev.of_node;
-	struct pxa3xx_nand_platform_data *pdata;
-	struct pxa3xx_nand_info *info;
-	struct pxa3xx_nand_host *host;
-	struct nand_chip *chip = NULL;
-	struct mtd_info *mtd;
-	struct resource *r;
-	int ret, irq, cs;
-
-	pdata = dev_get_platdata(&pdev->dev);
-	if (pdata->num_cs <= 0)
-		return -ENODEV;
-	info = devm_kzalloc(&pdev->dev,
-			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
-			    GFP_KERNEL);
-	if (!info)
-		return -ENOMEM;
-
-	info->pdev = pdev;
-	info->variant = pxa3xx_nand_get_variant(pdev);
-	for (cs = 0; cs < pdata->num_cs; cs++) {
-		host = (void *)&info[1] + sizeof(*host) * cs;
-		chip = &host->chip;
-		nand_set_controller_data(chip, host);
-		mtd = nand_to_mtd(chip);
-		info->host[cs] = host;
-		host->cs = cs;
-		host->info_data = info;
-		mtd->dev.parent = &pdev->dev;
-		/* FIXME: all chips use the same device tree partitions */
-		nand_set_flash_node(chip, np);
-
-		nand_set_controller_data(chip, host);
-		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
-		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
-		chip->controller        = &info->controller;
-		chip->waitfunc		= pxa3xx_nand_waitfunc;
-		chip->select_chip	= pxa3xx_nand_select_chip;
-		chip->read_word		= pxa3xx_nand_read_word;
-		chip->read_byte		= pxa3xx_nand_read_byte;
-		chip->read_buf		= pxa3xx_nand_read_buf;
-		chip->write_buf		= pxa3xx_nand_write_buf;
-		chip->options		|= NAND_NO_SUBPAGE_WRITE;
-		chip->cmdfunc		= nand_cmdfunc;
-	}
-
-	nand_hw_control_init(chip->controller);
-	info->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(info->clk)) {
-		dev_err(&pdev->dev, "failed to get nand clock\n");
-		return PTR_ERR(info->clk);
-	}
-	ret = clk_prepare_enable(info->clk);
-	if (ret < 0)
-		return ret;
-
-	if (!np && use_dma) {
-		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
-		if (r == NULL) {
-			dev_err(&pdev->dev,
-				"no resource defined for data DMA\n");
-			ret = -ENXIO;
-			goto fail_disable_clk;
-		}
-		info->drcmr_dat = r->start;
-	}
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "no IRQ resource defined\n");
-		ret = -ENXIO;
-		goto fail_disable_clk;
-	}
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
-	if (IS_ERR(info->mmio_base)) {
-		ret = PTR_ERR(info->mmio_base);
-		goto fail_disable_clk;
-	}
-	info->mmio_phys = r->start;
-
-	/* Allocate a buffer to allow flash detection */
-	info->buf_size = INIT_BUFFER_SIZE;
-	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
-	if (info->data_buff == NULL) {
-		ret = -ENOMEM;
-		goto fail_disable_clk;
-	}
-
-	/* initialize all interrupts to be disabled */
-	disable_int(info, NDSR_MASK);
-
-	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
-				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
-				   pdev->name, info);
-	if (ret < 0) {
-		dev_err(&pdev->dev, "failed to request IRQ\n");
-		goto fail_free_buf;
-	}
-
-	platform_set_drvdata(pdev, info);
-
-	return 0;
-
-fail_free_buf:
-	free_irq(irq, info);
-	kfree(info->data_buff);
-fail_disable_clk:
-	clk_disable_unprepare(info->clk);
-	return ret;
-}
-
-static int pxa3xx_nand_remove(struct platform_device *pdev)
-{
-	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
-	struct pxa3xx_nand_platform_data *pdata;
-	int irq, cs;
-
-	if (!info)
-		return 0;
-
-	pdata = dev_get_platdata(&pdev->dev);
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq >= 0)
-		free_irq(irq, info);
-	pxa3xx_nand_free_buff(info);
-
-	/*
-	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
-	 * In order to prevent a lockup of the system bus, the DFI bus
-	 * arbitration is granted to SMC upon driver removal. This is done by
-	 * setting the x_ARB_CNTL bit, which also prevents the NAND from
-	 * accessing the bus anymore.
-	 */
-	nand_writel(info, NDCR,
-		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
-		    NFCV1_NDCR_ARB_CNTL);
-	clk_disable_unprepare(info->clk);
-
-	for (cs = 0; cs < pdata->num_cs; cs++)
-		nand_release(nand_to_mtd(&info->host[cs]->chip));
-	return 0;
-}
-
-static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
-{
-	struct pxa3xx_nand_platform_data *pdata;
-	struct device_node *np = pdev->dev.of_node;
-	const struct of_device_id *of_id =
-			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
-
-	if (!of_id)
-		return 0;
-
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
-	if (!pdata)
-		return -ENOMEM;
-
-	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
-		pdata->enable_arbiter = 1;
-	if (of_get_property(np, "marvell,nand-keep-config", NULL))
-		pdata->keep_config = 1;
-	of_property_read_u32(np, "num-cs", &pdata->num_cs);
-
-	pdev->dev.platform_data = pdata;
-
-	return 0;
-}
-
-static int pxa3xx_nand_probe(struct platform_device *pdev)
-{
-	struct pxa3xx_nand_platform_data *pdata;
-	struct pxa3xx_nand_info *info;
-	int ret, cs, probe_success, dma_available;
-
-	dma_available = IS_ENABLED(CONFIG_ARM) &&
-		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
-	if (use_dma && !dma_available) {
-		use_dma = 0;
-		dev_warn(&pdev->dev,
-			 "This platform can't do DMA on this device\n");
-	}
-
-	ret = pxa3xx_nand_probe_dt(pdev);
-	if (ret)
-		return ret;
-
-	pdata = dev_get_platdata(&pdev->dev);
-	if (!pdata) {
-		dev_err(&pdev->dev, "no platform data defined\n");
-		return -ENODEV;
-	}
-
-	ret = alloc_nand_resource(pdev);
-	if (ret) {
-		dev_err(&pdev->dev, "alloc nand resource failed\n");
-		return ret;
-	}
-
-	info = platform_get_drvdata(pdev);
-	probe_success = 0;
-	for (cs = 0; cs < pdata->num_cs; cs++) {
-		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
-
-		/*
-		 * The mtd name matches the one used in the 'mtdparts' kernel
-		 * parameter. This name cannot be changed, otherwise the
-		 * user's mtd partition configuration would get broken.
-		 */
-		mtd->name = "pxa3xx_nand-0";
-		info->cs = cs;
-		ret = pxa3xx_nand_scan(mtd);
-		if (ret) {
-			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
-				cs);
-			continue;
-		}
-
-		ret = mtd_device_register(mtd, pdata->parts[cs],
-					  pdata->nr_parts[cs]);
-		if (!ret)
-			probe_success = 1;
-	}
-
-	if (!probe_success) {
-		pxa3xx_nand_remove(pdev);
-		return -ENODEV;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int pxa3xx_nand_suspend(struct device *dev)
-{
-	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
-
-	if (info->state) {
-		dev_err(dev, "driver busy, state = %d\n", info->state);
-		return -EAGAIN;
-	}
-
-	clk_disable(info->clk);
-	return 0;
-}
-
-static int pxa3xx_nand_resume(struct device *dev)
-{
-	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
-	int ret;
-
-	ret = clk_enable(info->clk);
-	if (ret < 0)
-		return ret;
-
-	/* We don't want to handle interrupts without calling the mtd routines */
-	disable_int(info, NDCR_INT_MASK);
-
-	/*
-	 * Directly set the chip select to an invalid value,
-	 * so that the driver resets the timings according to
-	 * the current chip select at the beginning of cmdfunc.
-	 */
-	info->cs = 0xff;
-
-	/*
-	 * As the spec says, the NDSR would be updated to 0x1800 when
-	 * doing the nand_clk disable/enable.
-	 * To prevent it from damaging the state machine of the driver,
-	 * clear all status bits before resuming.
-	 */
-	nand_writel(info, NDSR, NDSR_MASK);
-
-	return 0;
-}
-#else
-#define pxa3xx_nand_suspend	NULL
-#define pxa3xx_nand_resume	NULL
-#endif
-
-static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
-	.suspend	= pxa3xx_nand_suspend,
-	.resume		= pxa3xx_nand_resume,
-};
-
-static struct platform_driver pxa3xx_nand_driver = {
-	.driver = {
-		.name	= "pxa3xx-nand",
-		.of_match_table = pxa3xx_nand_dt_ids,
-		.pm	= &pxa3xx_nand_pm_ops,
-	},
-	.probe		= pxa3xx_nand_probe,
-	.remove		= pxa3xx_nand_remove,
-};
-
-module_platform_driver(pxa3xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c
deleted file mode 100644
index a77c66f4d8bc..000000000000
--- a/drivers/mtd/nand/qcom_nandc.c
+++ /dev/null
@@ -1,2208 +0,0 @@ 
-/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/clk.h>
-#include <linux/slab.h>
-#include <linux/bitops.h>
-#include <linux/dma-mapping.h>
-#include <linux/dmaengine.h>
-#include <linux/module.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/delay.h>
-
-/* NANDc reg offsets */
-#define	NAND_FLASH_CMD			0x00
-#define	NAND_ADDR0			0x04
-#define	NAND_ADDR1			0x08
-#define	NAND_FLASH_CHIP_SELECT		0x0c
-#define	NAND_EXEC_CMD			0x10
-#define	NAND_FLASH_STATUS		0x14
-#define	NAND_BUFFER_STATUS		0x18
-#define	NAND_DEV0_CFG0			0x20
-#define	NAND_DEV0_CFG1			0x24
-#define	NAND_DEV0_ECC_CFG		0x28
-#define	NAND_DEV1_ECC_CFG		0x2c
-#define	NAND_DEV1_CFG0			0x30
-#define	NAND_DEV1_CFG1			0x34
-#define	NAND_READ_ID			0x40
-#define	NAND_READ_STATUS		0x44
-#define	NAND_DEV_CMD0			0xa0
-#define	NAND_DEV_CMD1			0xa4
-#define	NAND_DEV_CMD2			0xa8
-#define	NAND_DEV_CMD_VLD		0xac
-#define	SFLASHC_BURST_CFG		0xe0
-#define	NAND_ERASED_CW_DETECT_CFG	0xe8
-#define	NAND_ERASED_CW_DETECT_STATUS	0xec
-#define	NAND_EBI2_ECC_BUF_CFG		0xf0
-#define	FLASH_BUF_ACC			0x100
-
-#define	NAND_CTRL			0xf00
-#define	NAND_VERSION			0xf08
-#define	NAND_READ_LOCATION_0		0xf20
-#define	NAND_READ_LOCATION_1		0xf24
-
-/* dummy register offsets, used by write_reg_dma */
-#define	NAND_DEV_CMD1_RESTORE		0xdead
-#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
-
-/* NAND_FLASH_CMD bits */
-#define	PAGE_ACC			BIT(4)
-#define	LAST_PAGE			BIT(5)
-
-/* NAND_FLASH_CHIP_SELECT bits */
-#define	NAND_DEV_SEL			0
-#define	DM_EN				BIT(2)
-
-/* NAND_FLASH_STATUS bits */
-#define	FS_OP_ERR			BIT(4)
-#define	FS_READY_BSY_N			BIT(5)
-#define	FS_MPU_ERR			BIT(8)
-#define	FS_DEVICE_STS_ERR		BIT(16)
-#define	FS_DEVICE_WP			BIT(23)
-
-/* NAND_BUFFER_STATUS bits */
-#define	BS_UNCORRECTABLE_BIT		BIT(8)
-#define	BS_CORRECTABLE_ERR_MSK		0x1f
-
-/* NAND_DEVn_CFG0 bits */
-#define	DISABLE_STATUS_AFTER_WRITE	4
-#define	CW_PER_PAGE			6
-#define	UD_SIZE_BYTES			9
-#define	ECC_PARITY_SIZE_BYTES_RS	19
-#define	SPARE_SIZE_BYTES		23
-#define	NUM_ADDR_CYCLES			27
-#define	STATUS_BFR_READ			30
-#define	SET_RD_MODE_AFTER_STATUS	31
-
-/* NAND_DEVn_CFG0 bits */
-#define	DEV0_CFG1_ECC_DISABLE		0
-#define	WIDE_FLASH			1
-#define	NAND_RECOVERY_CYCLES		2
-#define	CS_ACTIVE_BSY			5
-#define	BAD_BLOCK_BYTE_NUM		6
-#define	BAD_BLOCK_IN_SPARE_AREA		16
-#define	WR_RD_BSY_GAP			17
-#define	ENABLE_BCH_ECC			27
-
-/* NAND_DEV0_ECC_CFG bits */
-#define	ECC_CFG_ECC_DISABLE		0
-#define	ECC_SW_RESET			1
-#define	ECC_MODE			4
-#define	ECC_PARITY_SIZE_BYTES_BCH	8
-#define	ECC_NUM_DATA_BYTES		16
-#define	ECC_FORCE_CLK_OPEN		30
-
-/* NAND_DEV_CMD1 bits */
-#define	READ_ADDR			0
-
-/* NAND_DEV_CMD_VLD bits */
-#define	READ_START_VLD			0
-
-/* NAND_EBI2_ECC_BUF_CFG bits */
-#define	NUM_STEPS			0
-
-/* NAND_ERASED_CW_DETECT_CFG bits */
-#define	ERASED_CW_ECC_MASK		1
-#define	AUTO_DETECT_RES			0
-#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
-#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
-#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
-#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
-#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
-
-/* NAND_ERASED_CW_DETECT_STATUS bits */
-#define	PAGE_ALL_ERASED			BIT(7)
-#define	CODEWORD_ALL_ERASED		BIT(6)
-#define	PAGE_ERASED			BIT(5)
-#define	CODEWORD_ERASED			BIT(4)
-#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
-#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
-
-/* Version Mask */
-#define	NAND_VERSION_MAJOR_MASK		0xf0000000
-#define	NAND_VERSION_MAJOR_SHIFT	28
-#define	NAND_VERSION_MINOR_MASK		0x0fff0000
-#define	NAND_VERSION_MINOR_SHIFT	16
-
-/* NAND OP_CMDs */
-#define	PAGE_READ			0x2
-#define	PAGE_READ_WITH_ECC		0x3
-#define	PAGE_READ_WITH_ECC_SPARE	0x4
-#define	PROGRAM_PAGE			0x6
-#define	PAGE_PROGRAM_WITH_ECC		0x7
-#define	PROGRAM_PAGE_SPARE		0x9
-#define	BLOCK_ERASE			0xa
-#define	FETCH_ID			0xb
-#define	RESET_DEVICE			0xd
-
-/*
- * the NAND controller performs reads/writes with ECC in 516-byte chunks;
- * the driver calls the chunks 'step' or 'codeword' interchangeably
- */
-#define	NANDC_STEP_SIZE			512
-
-/*
- * the largest page size we support is 8K; this will have 16 steps/codewords
- * of 512 bytes each
- */
-#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
-
-/* we read at most 3 registers per codeword scan */
-#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
-
-/* ECC modes supported by the controller */
-#define	ECC_NONE	BIT(0)
-#define	ECC_RS_4BIT	BIT(1)
-#define	ECC_BCH_4BIT	BIT(2)
-#define	ECC_BCH_8BIT	BIT(3)
-
-struct desc_info {
-	struct list_head node;
-
-	enum dma_data_direction dir;
-	struct scatterlist sgl;
-	struct dma_async_tx_descriptor *dma_desc;
-};
-
-/*
- * holds the current register values that we want to write; acts as a contiguous
- * chunk of memory which we use to write the controller registers through DMA.
- */
-struct nandc_regs {
-	__le32 cmd;
-	__le32 addr0;
-	__le32 addr1;
-	__le32 chip_sel;
-	__le32 exec;
-
-	__le32 cfg0;
-	__le32 cfg1;
-	__le32 ecc_bch_cfg;
-
-	__le32 clrflashstatus;
-	__le32 clrreadstatus;
-
-	__le32 cmd1;
-	__le32 vld;
-
-	__le32 orig_cmd1;
-	__le32 orig_vld;
-
-	__le32 ecc_buf_cfg;
-};
-
-/*
- * NAND controller data struct
- *
- * @controller:			base controller structure
- * @host_list:			list containing all the chips attached to the
- *				controller
- * @dev:			parent device
- * @base:			MMIO base
- * @base_dma:			physical base address of controller registers
- * @core_clk:			controller clock
- * @aon_clk:			always-on controller clock
- *
- * @chan:			dma channel
- * @cmd_crci:			ADM DMA CRCI for command flow control
- * @data_crci:			ADM DMA CRCI for data flow control
- * @desc_list:			DMA descriptor list (list of desc_infos)
- *
- * @data_buffer:		our local DMA buffer for page read/writes,
- *				used when we can't use the buffer provided
- *				by upper layers directly
- * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
- * @reg_read_buf:		local buffer for reading back registers via DMA
- * @reg_read_pos:		marker for data read in reg_read_buf
- *
- * @regs:			a contiguous chunk of memory for DMA register
- *				writes. contains the register values to be
- *				written to controller
- * @cmd1/vld:			some fixed controller register values
- * @ecc_modes:			supported ECC modes by the current controller,
- *				initialized via DT match data
- */
-struct qcom_nand_controller {
-	struct nand_hw_control controller;
-	struct list_head host_list;
-
-	struct device *dev;
-
-	void __iomem *base;
-	dma_addr_t base_dma;
-
-	struct clk *core_clk;
-	struct clk *aon_clk;
-
-	struct dma_chan *chan;
-	unsigned int cmd_crci;
-	unsigned int data_crci;
-	struct list_head desc_list;
-
-	u8		*data_buffer;
-	int		buf_size;
-	int		buf_count;
-	int		buf_start;
-
-	__le32 *reg_read_buf;
-	int reg_read_pos;
-
-	struct nandc_regs *regs;
-
-	u32 cmd1, vld;
-	u32 ecc_modes;
-};
-
-/*
- * NAND chip structure
- *
- * @chip:			base NAND chip structure
- * @node:			list node to add itself to host_list in
- *				qcom_nand_controller
- *
- * @cs:				chip select value for this chip
- * @cw_size:			the number of bytes in a single step/codeword
- *				of a page, consisting of all data, ecc, spare
- *				and reserved bytes
- * @cw_data:			the number of bytes within a codeword protected
- *				by ECC
- * @use_ecc:			request the controller to use ECC for the
- *				upcoming read/write
- * @bch_enabled:		flag to tell whether BCH ECC mode is used
- * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
- *				chip
- * @status:			value to be returned if NAND_CMD_STATUS command
- *				is executed
- * @last_command:		keeps track of last command on this chip. used
- *				for reading correct status
- *
- * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
- *				ecc/non-ecc mode for the current nand flash
- *				device
- */
-struct qcom_nand_host {
-	struct nand_chip chip;
-	struct list_head node;
-
-	int cs;
-	int cw_size;
-	int cw_data;
-	bool use_ecc;
-	bool bch_enabled;
-	int ecc_bytes_hw;
-	int spare_bytes;
-	int bbm_size;
-	u8 status;
-	int last_command;
-
-	u32 cfg0, cfg1;
-	u32 cfg0_raw, cfg1_raw;
-	u32 ecc_buf_cfg;
-	u32 ecc_bch_cfg;
-	u32 clrflashstatus;
-	u32 clrreadstatus;
-};
-
-static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
-{
-	return container_of(chip, struct qcom_nand_host, chip);
-}
-
-static inline struct qcom_nand_controller *
-get_qcom_nand_controller(struct nand_chip *chip)
-{
-	return container_of(chip->controller, struct qcom_nand_controller,
-			    controller);
-}
-
-static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
-{
-	return ioread32(nandc->base + offset);
-}
-
-static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
-			       u32 val)
-{
-	iowrite32(val, nandc->base + offset);
-}
-
-static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
-{
-	switch (offset) {
-	case NAND_FLASH_CMD:
-		return &regs->cmd;
-	case NAND_ADDR0:
-		return &regs->addr0;
-	case NAND_ADDR1:
-		return &regs->addr1;
-	case NAND_FLASH_CHIP_SELECT:
-		return &regs->chip_sel;
-	case NAND_EXEC_CMD:
-		return &regs->exec;
-	case NAND_FLASH_STATUS:
-		return &regs->clrflashstatus;
-	case NAND_DEV0_CFG0:
-		return &regs->cfg0;
-	case NAND_DEV0_CFG1:
-		return &regs->cfg1;
-	case NAND_DEV0_ECC_CFG:
-		return &regs->ecc_bch_cfg;
-	case NAND_READ_STATUS:
-		return &regs->clrreadstatus;
-	case NAND_DEV_CMD1:
-		return &regs->cmd1;
-	case NAND_DEV_CMD1_RESTORE:
-		return &regs->orig_cmd1;
-	case NAND_DEV_CMD_VLD:
-		return &regs->vld;
-	case NAND_DEV_CMD_VLD_RESTORE:
-		return &regs->orig_vld;
-	case NAND_EBI2_ECC_BUF_CFG:
-		return &regs->ecc_buf_cfg;
-	default:
-		return NULL;
-	}
-}
-
-static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
-			  u32 val)
-{
-	struct nandc_regs *regs = nandc->regs;
-	__le32 *reg;
-
-	reg = offset_to_nandc_reg(regs, offset);
-
-	if (reg)
-		*reg = cpu_to_le32(val);
-}
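
The pattern implemented by offset_to_nandc_reg()/nandc_set_reg() is a shadow-register file: values are staged in an ordinary struct and only pushed to the controller later by DMA. A stripped-down, user-space sketch of the same idea (simplified struct and offsets, not the real layout) looks like this:

#include <stdio.h>

#define R_CMD	0x00
#define R_ADDR0	0x04

struct shadow_regs {
	unsigned int cmd;
	unsigned int addr0;
};

static unsigned int *offset_to_reg(struct shadow_regs *regs, int offset)
{
	switch (offset) {
	case R_CMD:	return &regs->cmd;
	case R_ADDR0:	return &regs->addr0;
	default:	return NULL;
	}
}

static void set_reg(struct shadow_regs *regs, int offset, unsigned int val)
{
	unsigned int *reg = offset_to_reg(regs, offset);

	if (reg)
		*reg = val;	/* the real driver stores cpu_to_le32(val) */
}

int main(void)
{
	struct shadow_regs regs = { 0, 0 };

	set_reg(&regs, R_ADDR0, 0x1234);
	printf("staged addr0 = 0x%x\n", regs.addr0);
	return 0;
}
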
-
-/* helper to configure address register values */
-static void set_address(struct qcom_nand_host *host, u16 column, int page)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		column >>= 1;
-
-	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
-	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
-}
-
-/*
- * update_rw_regs:	set up read/write register values; these will be
- *			written to the NAND controller registers via DMA
- *
- * @num_cw:		number of steps for the read/write operation
- * @read:		read or write operation
- */
-static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
-
-	if (read) {
-		if (host->use_ecc)
-			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
-		else
-			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
-	} else {
-		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
-	}
-
-	if (host->use_ecc) {
-		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
-				(num_cw - 1) << CW_PER_PAGE;
-
-		cfg1 = host->cfg1;
-		ecc_bch_cfg = host->ecc_bch_cfg;
-	} else {
-		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
-				(num_cw - 1) << CW_PER_PAGE;
-
-		cfg1 = host->cfg1_raw;
-		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
-	}
-
-	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
-	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
-	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
-	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
-	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
-	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
-	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
-	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
-}
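
The CW_PER_PAGE update in update_rw_regs() is a plain read-modify-write of a 3-bit field at bit 6 of CFG0 holding (number of codewords - 1). A minimal sketch with a made-up cfg0 value:

#include <stdio.h>

#define CW_PER_PAGE	6	/* 3-bit field, holds (codewords - 1) */

int main(void)
{
	unsigned int cfg0 = 0x290408c0;	/* made-up cached host->cfg0 */
	int num_cw = 8;			/* e.g. a 4KiB page, 512B codewords */

	cfg0 = (cfg0 & ~(7U << CW_PER_PAGE)) | (num_cw - 1) << CW_PER_PAGE;
	printf("cfg0 for %d codewords: 0x%08x\n", num_cw, cfg0);
	return 0;
}
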
-
-static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
-			 int reg_off, const void *vaddr, int size,
-			 bool flow_control)
-{
-	struct desc_info *desc;
-	struct dma_async_tx_descriptor *dma_desc;
-	struct scatterlist *sgl;
-	struct dma_slave_config slave_conf;
-	enum dma_transfer_direction dir_eng;
-	int ret;
-
-	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
-	if (!desc)
-		return -ENOMEM;
-
-	sgl = &desc->sgl;
-
-	sg_init_one(sgl, vaddr, size);
-
-	if (read) {
-		dir_eng = DMA_DEV_TO_MEM;
-		desc->dir = DMA_FROM_DEVICE;
-	} else {
-		dir_eng = DMA_MEM_TO_DEV;
-		desc->dir = DMA_TO_DEVICE;
-	}
-
-	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
-	if (ret == 0) {
-		ret = -ENOMEM;
-		goto err;
-	}
-
-	memset(&slave_conf, 0x00, sizeof(slave_conf));
-
-	slave_conf.device_fc = flow_control;
-	if (read) {
-		slave_conf.src_maxburst = 16;
-		slave_conf.src_addr = nandc->base_dma + reg_off;
-		slave_conf.slave_id = nandc->data_crci;
-	} else {
-		slave_conf.dst_maxburst = 16;
-		slave_conf.dst_addr = nandc->base_dma + reg_off;
-		slave_conf.slave_id = nandc->cmd_crci;
-	}
-
-	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
-	if (ret) {
-		dev_err(nandc->dev, "failed to configure dma channel\n");
-		goto err;
-	}
-
-	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
-	if (!dma_desc) {
-		dev_err(nandc->dev, "failed to prepare desc\n");
-		ret = -EINVAL;
-		goto err;
-	}
-
-	desc->dma_desc = dma_desc;
-
-	list_add_tail(&desc->node, &nandc->desc_list);
-
-	return 0;
-err:
-	kfree(desc);
-
-	return ret;
-}
-
-/*
- * read_reg_dma:	prepares a descriptor to read a given number of
- *			contiguous registers to the reg_read_buf pointer
- *
- * @first:		offset of the first register in the contiguous block
- * @num_regs:		number of registers to read
- */
-static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
-			int num_regs)
-{
-	bool flow_control = false;
-	void *vaddr;
-	int size;
-
-	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
-		flow_control = true;
-
-	size = num_regs * sizeof(u32);
-	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
-	nandc->reg_read_pos += num_regs;
-
-	return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
-}
-
-/*
- * write_reg_dma:	prepares a descriptor to write a given number of
- *			contiguous registers
- *
- * @first:		offset of the first register in the contiguous block
- * @num_regs:		number of registers to write
- */
-static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
-			 int num_regs)
-{
-	bool flow_control = false;
-	struct nandc_regs *regs = nandc->regs;
-	void *vaddr;
-	int size;
-
-	vaddr = offset_to_nandc_reg(regs, first);
-
-	if (first == NAND_FLASH_CMD)
-		flow_control = true;
-
-	if (first == NAND_DEV_CMD1_RESTORE)
-		first = NAND_DEV_CMD1;
-
-	if (first == NAND_DEV_CMD_VLD_RESTORE)
-		first = NAND_DEV_CMD_VLD;
-
-	size = num_regs * sizeof(u32);
-
-	return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
-}
-
-/*
- * read_data_dma:	prepares a DMA descriptor to transfer data from the
- *			controller's internal buffer to the buffer 'vaddr'
- *
- * @reg_off:		offset within the controller's data buffer
- * @vaddr:		virtual address of the buffer we want to write to
- * @size:		DMA transaction size in bytes
- */
-static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-			 const u8 *vaddr, int size)
-{
-	return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
-}
-
-/*
- * write_data_dma:	prepares a DMA descriptor to transfer data from
- *			'vaddr' to the controller's internal buffer
- *
- * @reg_off:		offset within the controller's data buffer
- * @vaddr:		virtual address of the buffer we want to read from
- * @size:		DMA transaction size in bytes
- */
-static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
-			  const u8 *vaddr, int size)
-{
-	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
-}
-
-/*
- * helper to prepare dma descriptors to configure registers needed for reading a
- * codeword/step in a page
- */
-static void config_cw_read(struct qcom_nand_controller *nandc)
-{
-	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
-	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
-	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
-
-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
-
-	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
-	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
-}
-
-/*
- * helpers to prepare dma descriptors used to configure registers needed for
- * writing a codeword/step in a page
- */
-static void config_cw_write_pre(struct qcom_nand_controller *nandc)
-{
-	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
-	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
-	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
-}
-
-static void config_cw_write_post(struct qcom_nand_controller *nandc)
-{
-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
-
-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
-
-	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
-	write_reg_dma(nandc, NAND_READ_STATUS, 1);
-}
-
-/*
- * the following functions are used within chip->cmdfunc() to perform different
- * NAND_CMD_* commands
- */
-
-/* sets up descriptors for NAND_CMD_PARAM */
-static int nandc_param(struct qcom_nand_host *host)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	/*
-	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
-	 * in use. We configure the controller to perform a raw read of 512
-	 * bytes to read the ONFI params.
-	 */
-	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
-	nandc_set_reg(nandc, NAND_ADDR0, 0);
-	nandc_set_reg(nandc, NAND_ADDR1, 0);
-	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
-					| 512 << UD_SIZE_BYTES
-					| 5 << NUM_ADDR_CYCLES
-					| 0 << SPARE_SIZE_BYTES);
-	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
-					| 0 << CS_ACTIVE_BSY
-					| 17 << BAD_BLOCK_BYTE_NUM
-					| 1 << BAD_BLOCK_IN_SPARE_AREA
-					| 2 << WR_RD_BSY_GAP
-					| 0 << WIDE_FLASH
-					| 1 << DEV0_CFG1_ECC_DISABLE);
-	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
-
-	/* configure CMD1 and VLD for ONFI param probing */
-	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
-		      (nandc->vld & ~(1 << READ_START_VLD))
-		      | 0 << READ_START_VLD);
-	nandc_set_reg(nandc, NAND_DEV_CMD1,
-		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
-		      | NAND_CMD_PARAM << READ_ADDR);
-
-	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
-
-	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
-	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
-
-	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
-	write_reg_dma(nandc, NAND_DEV_CMD1, 1);
-
-	nandc->buf_count = 512;
-	memset(nandc->data_buffer, 0xff, nandc->buf_count);
-
-	config_cw_read(nandc);
-
-	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-		      nandc->buf_count);
-
-	/* restore CMD1 and VLD regs */
-	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
-	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
-
-	return 0;
-}
-
-/* sets up descriptors for NAND_CMD_ERASE1 */
-static int erase_block(struct qcom_nand_host *host, int page_addr)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	nandc_set_reg(nandc, NAND_FLASH_CMD,
-		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
-	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
-	nandc_set_reg(nandc, NAND_ADDR1, 0);
-	nandc_set_reg(nandc, NAND_DEV0_CFG0,
-		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
-	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
-	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
-	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
-	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
-
-	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
-	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
-
-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
-
-	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
-	write_reg_dma(nandc, NAND_READ_STATUS, 1);
-
-	return 0;
-}
-
-/* sets up descriptors for NAND_CMD_READID */
-static int read_id(struct qcom_nand_host *host, int column)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	if (column == -1)
-		return 0;
-
-	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
-	nandc_set_reg(nandc, NAND_ADDR0, column);
-	nandc_set_reg(nandc, NAND_ADDR1, 0);
-	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
-	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
-
-	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
-
-	read_reg_dma(nandc, NAND_READ_ID, 1);
-
-	return 0;
-}
-
-/* sets up descriptors for NAND_CMD_RESET */
-static int reset(struct qcom_nand_host *host)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
-	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
-
-	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
-	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
-
-	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
-
-	return 0;
-}
-
-/* helpers to submit/free our list of dma descriptors */
-static int submit_descs(struct qcom_nand_controller *nandc)
-{
-	struct desc_info *desc;
-	dma_cookie_t cookie = 0;
-
-	list_for_each_entry(desc, &nandc->desc_list, node)
-		cookie = dmaengine_submit(desc->dma_desc);
-
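-	/*
-	 * dmaengine cookies complete in order on a channel, so waiting for the
-	 * last submitted cookie (dma_sync_wait() also issues the pending
-	 * descriptors) implies that every descriptor queued above is done
-	 */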
-	if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
-		return -ETIMEDOUT;
-
-	return 0;
-}
-
-static void free_descs(struct qcom_nand_controller *nandc)
-{
-	struct desc_info *desc, *n;
-
-	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
-		list_del(&desc->node);
-		dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
-		kfree(desc);
-	}
-}
-
-/* reset the register read buffer for next NAND operation */
-static void clear_read_regs(struct qcom_nand_controller *nandc)
-{
-	nandc->reg_read_pos = 0;
-	memset(nandc->reg_read_buf, 0,
-	       MAX_REG_RD * sizeof(*nandc->reg_read_buf));
-}
-
-static void pre_command(struct qcom_nand_host *host, int command)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	nandc->buf_count = 0;
-	nandc->buf_start = 0;
-	host->use_ecc = false;
-	host->last_command = command;
-
-	clear_read_regs(nandc);
-}
-
-/*
- * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
- * privately maintained status byte, which can be read back once
- * NAND_CMD_STATUS is issued
- */
-static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int num_cw;
-	int i;
-
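-	/*
-	 * a page program leaves one NAND_FLASH_STATUS word per codeword in
-	 * reg_read_buf, while erase_block() reads the status only once
-	 */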
-	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
-
-	for (i = 0; i < num_cw; i++) {
-		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
-
-		if (flash_status & FS_MPU_ERR)
-			host->status &= ~NAND_STATUS_WP;
-
-		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
-						 (flash_status &
-						  FS_DEVICE_STS_ERR)))
-			host->status |= NAND_STATUS_FAIL;
-	}
-}
-
-static void post_command(struct qcom_nand_host *host, int command)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	switch (command) {
-	case NAND_CMD_READID:
-		memcpy(nandc->data_buffer, nandc->reg_read_buf,
-		       nandc->buf_count);
-		break;
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_ERASE1:
-		parse_erase_write_errors(host, command);
-		break;
-	default:
-		break;
-	}
-}
-
-/*
- * Implements chip->cmdfunc. It's only used for a limited set of commands.
- * The rest of the commands wouldn't be called by upper layers. For example,
- * NAND_CMD_READOOB would never be called because we have our own versions
- * of read_oob ops for nand_ecc_ctrl.
- */
-static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
-			       int column, int page_addr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	bool wait = false;
-	int ret = 0;
-
-	pre_command(host, command);
-
-	switch (command) {
-	case NAND_CMD_RESET:
-		ret = reset(host);
-		wait = true;
-		break;
-
-	case NAND_CMD_READID:
-		nandc->buf_count = 4;
-		ret = read_id(host, column);
-		wait = true;
-		break;
-
-	case NAND_CMD_PARAM:
-		ret = nandc_param(host);
-		wait = true;
-		break;
-
-	case NAND_CMD_ERASE1:
-		ret = erase_block(host, page_addr);
-		wait = true;
-		break;
-
-	case NAND_CMD_READ0:
-		/* we read the entire page for now */
-		WARN_ON(column != 0);
-
-		host->use_ecc = true;
-		set_address(host, 0, page_addr);
-		update_rw_regs(host, ecc->steps, true);
-		break;
-
-	case NAND_CMD_SEQIN:
-		WARN_ON(column != 0);
-		set_address(host, 0, page_addr);
-		break;
-
-	case NAND_CMD_PAGEPROG:
-	case NAND_CMD_STATUS:
-	case NAND_CMD_NONE:
-	default:
-		break;
-	}
-
-	if (ret) {
-		dev_err(nandc->dev, "failure executing command %d\n",
-			command);
-		free_descs(nandc);
-		return;
-	}
-
-	if (wait) {
-		ret = submit_descs(nandc);
-		if (ret)
-			dev_err(nandc->dev,
-				"failure submitting descs for command %d\n",
-				command);
-	}
-
-	free_descs(nandc);
-
-	post_command(host, command);
-}
-
-/*
- * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
- * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
- *
- * when using RS ECC, the HW reports the same errors when reading an erased CW,
- * but it notifies that it is an erased CW by placing special characters at
- * certain offsets in the buffer.
- *
- * verify if the page is erased or not, and fix up the page for RS ECC by
- * replacing the special characters with 0xff.
- */
-static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
-{
-	u8 empty1, empty2;
-
-	/*
-	 * an erased page flags an error in NAND_FLASH_STATUS; check whether
-	 * the page is erased by looking for 0x54 at offsets 3 and 175 from the
-	 * beginning of each codeword
-	 */
-
-	empty1 = data_buf[3];
-	empty2 = data_buf[175];
-
-	/*
-	 * if the erased codeword markers are present, override them with
-	 * 0xffs
-	 */
-	if ((empty1 == 0x54 && empty2 == 0xff) ||
-	    (empty1 == 0xff && empty2 == 0x54)) {
-		data_buf[3] = 0xff;
-		data_buf[175] = 0xff;
-	}
-
-	/*
-	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
-	 * restore the original values at the special offsets
-	 */
-	if (memchr_inv(data_buf, 0xff, data_len)) {
-		data_buf[3] = empty1;
-		data_buf[175] = empty2;
-
-		return false;
-	}
-
-	return true;
-}
-
-struct read_stats {
-	__le32 flash;
-	__le32 buffer;
-	__le32 erased_cw;
-};
-
-/*
- * reads back status registers set by the controller to notify page read
- * errors. this is equivalent to what 'ecc->correct()' would do.
- */
-static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
-			     u8 *oob_buf)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	unsigned int max_bitflips = 0;
-	struct read_stats *buf;
-	int i;
-
-	buf = (struct read_stats *)nandc->reg_read_buf;
-
-	for (i = 0; i < ecc->steps; i++, buf++) {
-		u32 flash, buffer, erased_cw;
-		int data_len, oob_len;
-
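-		/*
-		 * the last codeword carries the free oob bytes along with a
-		 * shortened data area, e.g. for a 2K page (4 steps of 512
-		 * bytes): data_len = 512 - 12 = 500 and oob_len = 16, which
-		 * together make up the usual 516 byte cw_data region
-		 */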
-		if (i == (ecc->steps - 1)) {
-			data_len = ecc->size - ((ecc->steps - 1) << 2);
-			oob_len = ecc->steps << 2;
-		} else {
-			data_len = host->cw_data;
-			oob_len = 0;
-		}
-
-		flash = le32_to_cpu(buf->flash);
-		buffer = le32_to_cpu(buf->buffer);
-		erased_cw = le32_to_cpu(buf->erased_cw);
-
-		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
-			bool erased;
-
-			/* ignore erased codeword errors */
-			if (host->bch_enabled) {
-				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
-					 true : false;
-			} else {
-				erased = erased_chunk_check_and_fixup(data_buf,
-								      data_len);
-			}
-
-			if (erased) {
-				data_buf += data_len;
-				if (oob_buf)
-					oob_buf += oob_len + ecc->bytes;
-				continue;
-			}
-
-			if (buffer & BS_UNCORRECTABLE_BIT) {
-				int ret, ecclen, extraooblen;
-				void *eccbuf;
-
-				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
-				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
-				extraooblen = oob_buf ? oob_len : 0;
-
-				/*
-				 * make sure it isn't an erased page reported
-				 * as not-erased by HW because of a few bitflips
-				 */
-				ret = nand_check_erased_ecc_chunk(data_buf,
-					data_len, eccbuf, ecclen, oob_buf,
-					extraooblen, ecc->strength);
-				if (ret < 0) {
-					mtd->ecc_stats.failed++;
-				} else {
-					mtd->ecc_stats.corrected += ret;
-					max_bitflips =
-						max_t(unsigned int, max_bitflips, ret);
-				}
-			}
-		} else {
-			unsigned int stat;
-
-			stat = buffer & BS_CORRECTABLE_ERR_MSK;
-			mtd->ecc_stats.corrected += stat;
-			max_bitflips = max(max_bitflips, stat);
-		}
-
-		data_buf += data_len;
-		if (oob_buf)
-			oob_buf += oob_len + ecc->bytes;
-	}
-
-	return max_bitflips;
-}
-
-/*
- * helper to perform the actual page read operation, used by ecc->read_page()
- * and ecc->read_oob()
- */
-static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
-			 u8 *oob_buf)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int i, ret;
-
-	/* queue cmd descs for each codeword */
-	for (i = 0; i < ecc->steps; i++) {
-		int data_size, oob_size;
-
-		if (i == (ecc->steps - 1)) {
-			data_size = ecc->size - ((ecc->steps - 1) << 2);
-			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
-				   host->spare_bytes;
-		} else {
-			data_size = host->cw_data;
-			oob_size = host->ecc_bytes_hw + host->spare_bytes;
-		}
-
-		config_cw_read(nandc);
-
-		if (data_buf)
-			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
-				      data_size);
-
-		/*
-		 * when ecc is enabled, the controller doesn't read the real
-		 * or dummy bad block markers in each chunk. To maintain a
-		 * consistent layout across RAW and ECC reads, we just
-		 * leave the real/dummy BBM offsets empty (i.e., filled with
-		 * 0xffs)
-		 */
-		if (oob_buf) {
-			int j;
-
-			for (j = 0; j < host->bbm_size; j++)
-				*oob_buf++ = 0xff;
-
-			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
-				      oob_buf, oob_size);
-		}
-
-		if (data_buf)
-			data_buf += data_size;
-		if (oob_buf)
-			oob_buf += oob_size;
-	}
-
-	ret = submit_descs(nandc);
-	if (ret)
-		dev_err(nandc->dev, "failure to read page/oob\n");
-
-	free_descs(nandc);
-
-	return ret;
-}
-
-/*
- * a helper that copies the last step/codeword of a page (containing free oob)
- * into our local buffer
- */
-static int copy_last_cw(struct qcom_nand_host *host, int page)
-{
-	struct nand_chip *chip = &host->chip;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int size;
-	int ret;
-
-	clear_read_regs(nandc);
-
-	size = host->use_ecc ? host->cw_data : host->cw_size;
-
-	/* prepare a clean read buffer */
-	memset(nandc->data_buffer, 0xff, size);
-
-	set_address(host, host->cw_size * (ecc->steps - 1), page);
-	update_rw_regs(host, 1, true);
-
-	config_cw_read(nandc);
-
-	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
-
-	ret = submit_descs(nandc);
-	if (ret)
-		dev_err(nandc->dev, "failed to copy last codeword\n");
-
-	free_descs(nandc);
-
-	return ret;
-}
-
-/* implements ecc->read_page() */
-static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	u8 *data_buf, *oob_buf = NULL;
-	int ret;
-
-	data_buf = buf;
-	oob_buf = oob_required ? chip->oob_poi : NULL;
-
-	ret = read_page_ecc(host, data_buf, oob_buf);
-	if (ret) {
-		dev_err(nandc->dev, "failure to read page\n");
-		return ret;
-	}
-
-	return parse_read_errors(host, data_buf, oob_buf);
-}
-
-/* implements ecc->read_page_raw() */
-static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
-				    struct nand_chip *chip, uint8_t *buf,
-				    int oob_required, int page)
-{
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	u8 *data_buf, *oob_buf;
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int i, ret;
-
-	data_buf = buf;
-	oob_buf = chip->oob_poi;
-
-	host->use_ecc = false;
-	update_rw_regs(host, ecc->steps, true);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_size1, data_size2, oob_size1, oob_size2;
-		int reg_off = FLASH_BUF_ACC;
-
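-		/*
-		 * in the raw layout each codeword is read in two data chunks
-		 * split around the (dummy or real) bad block marker;
-		 * data_size1 below is the BBM offset within the codeword,
-		 * e.g. 2048 - 3 * 528 = 464 for a 2K page with 4 bit ECC,
-		 * matching bbpos in qcom_nandc_block_bad()
-		 */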
-		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
-		oob_size1 = host->bbm_size;
-
-		if (i == (ecc->steps - 1)) {
-			data_size2 = ecc->size - data_size1 -
-				     ((ecc->steps - 1) << 2);
-			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
-				    host->spare_bytes;
-		} else {
-			data_size2 = host->cw_data - data_size1;
-			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
-		}
-
-		config_cw_read(nandc);
-
-		read_data_dma(nandc, reg_off, data_buf, data_size1);
-		reg_off += data_size1;
-		data_buf += data_size1;
-
-		read_data_dma(nandc, reg_off, oob_buf, oob_size1);
-		reg_off += oob_size1;
-		oob_buf += oob_size1;
-
-		read_data_dma(nandc, reg_off, data_buf, data_size2);
-		reg_off += data_size2;
-		data_buf += data_size2;
-
-		read_data_dma(nandc, reg_off, oob_buf, oob_size2);
-		oob_buf += oob_size2;
-	}
-
-	ret = submit_descs(nandc);
-	if (ret)
-		dev_err(nandc->dev, "failure to read raw page\n");
-
-	free_descs(nandc);
-
-	return 0;
-}
-
-/* implements ecc->read_oob() */
-static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			       int page)
-{
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int ret;
-
-	clear_read_regs(nandc);
-
-	host->use_ecc = true;
-	set_address(host, 0, page);
-	update_rw_regs(host, ecc->steps, true);
-
-	ret = read_page_ecc(host, NULL, chip->oob_poi);
-	if (ret)
-		dev_err(nandc->dev, "failure to read oob\n");
-
-	return ret;
-}
-
-/* implements ecc->write_page() */
-static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				 const uint8_t *buf, int oob_required, int page)
-{
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	u8 *data_buf, *oob_buf;
-	int i, ret;
-
-	clear_read_regs(nandc);
-
-	data_buf = (u8 *)buf;
-	oob_buf = chip->oob_poi;
-
-	host->use_ecc = true;
-	update_rw_regs(host, ecc->steps, false);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_size, oob_size;
-
-		if (i == (ecc->steps - 1)) {
-			data_size = ecc->size - ((ecc->steps - 1) << 2);
-			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
-				   host->spare_bytes;
-		} else {
-			data_size = host->cw_data;
-			oob_size = ecc->bytes;
-		}
-
-		config_cw_write_pre(nandc);
-
-		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
-
-		/*
-		 * when ECC is enabled, we don't really need to write anything
-		 * to oob for the first n - 1 codewords since these oob regions
-		 * just contain ECC bytes that are written by the controller
-		 * itself. For the last codeword, we skip the bbm positions and
-		 * write to the free oob area.
-		 */
-		if (i == (ecc->steps - 1)) {
-			oob_buf += host->bbm_size;
-
-			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
-				       oob_buf, oob_size);
-		}
-
-		config_cw_write_post(nandc);
-
-		data_buf += data_size;
-		oob_buf += oob_size;
-	}
-
-	ret = submit_descs(nandc);
-	if (ret)
-		dev_err(nandc->dev, "failure to write page\n");
-
-	free_descs(nandc);
-
-	return ret;
-}
-
-/* implements ecc->write_page_raw() */
-static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
-				     struct nand_chip *chip, const uint8_t *buf,
-				     int oob_required, int page)
-{
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	u8 *data_buf, *oob_buf;
-	int i, ret;
-
-	clear_read_regs(nandc);
-
-	data_buf = (u8 *)buf;
-	oob_buf = chip->oob_poi;
-
-	host->use_ecc = false;
-	update_rw_regs(host, ecc->steps, false);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_size1, data_size2, oob_size1, oob_size2;
-		int reg_off = FLASH_BUF_ACC;
-
-		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
-		oob_size1 = host->bbm_size;
-
-		if (i == (ecc->steps - 1)) {
-			data_size2 = ecc->size - data_size1 -
-				     ((ecc->steps - 1) << 2);
-			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
-				    host->spare_bytes;
-		} else {
-			data_size2 = host->cw_data - data_size1;
-			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
-		}
-
-		config_cw_write_pre(nandc);
-
-		write_data_dma(nandc, reg_off, data_buf, data_size1);
-		reg_off += data_size1;
-		data_buf += data_size1;
-
-		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
-		reg_off += oob_size1;
-		oob_buf += oob_size1;
-
-		write_data_dma(nandc, reg_off, data_buf, data_size2);
-		reg_off += data_size2;
-		data_buf += data_size2;
-
-		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
-		oob_buf += oob_size2;
-
-		config_cw_write_post(nandc);
-	}
-
-	ret = submit_descs(nandc);
-	if (ret)
-		dev_err(nandc->dev, "failure to write raw page\n");
-
-	free_descs(nandc);
-
-	return ret;
-}
-
-/*
- * implements ecc->write_oob()
- *
- * the NAND controller cannot write only data or only oob within a codeword,
- * since ecc is calculated for the combined codeword. we first copy the
- * entire contents of the last codeword (data + oob), replace the old oob
- * with the new one from chip->oob_poi, and then write the entire codeword.
- * this read-copy-write operation results in a slight performance loss.
- */
-static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
-				int page)
-{
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	u8 *oob = chip->oob_poi;
-	int data_size, oob_size;
-	int ret, status = 0;
-
-	host->use_ecc = true;
-
-	ret = copy_last_cw(host, page);
-	if (ret)
-		return ret;
-
-	clear_read_regs(nandc);
-
-	/* calculate the data and oob size for the last codeword/step */
-	data_size = ecc->size - ((ecc->steps - 1) << 2);
-	oob_size = mtd->oobavail;
-
-	/* override new oob content to last codeword */
-	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
-				    0, mtd->oobavail);
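-	/*
-	 * nandc->data_buffer still holds the last codeword read back by
-	 * copy_last_cw(); only its trailing oob_size bytes were overwritten
-	 * above, so the full user region (e.g. 500 + 16 bytes on a 2K page)
-	 * can now be written back as one codeword
-	 */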
-
-	set_address(host, host->cw_size * (ecc->steps - 1), page);
-	update_rw_regs(host, 1, false);
-
-	config_cw_write_pre(nandc);
-	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
-		       data_size + oob_size);
-	config_cw_write_post(nandc);
-
-	ret = submit_descs(nandc);
-
-	free_descs(nandc);
-
-	if (ret) {
-		dev_err(nandc->dev, "failure to write oob\n");
-		return -EIO;
-	}
-
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int page, ret, bbpos, bad = 0;
-	u32 flash_status;
-
-	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
-
-	/*
-	 * configure registers for a raw sub page read: the address is set to
-	 * the beginning of the last codeword, and we don't care about reading
-	 * the ecc portion of oob. we just want the first few bytes of this
-	 * codeword, which contain the BBM
-	 */
-	host->use_ecc = false;
-
-	ret = copy_last_cw(host, page);
-	if (ret)
-		goto err;
-
-	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
-
-	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
-		dev_warn(nandc->dev, "error when trying to read BBM\n");
-		goto err;
-	}
-
-	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
-
-	bad = nandc->data_buffer[bbpos] != 0xff;
-
-	if (chip->options & NAND_BUSWIDTH_16)
-		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
-err:
-	return bad;
-}
-
-static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int page, ret, status = 0;
-
-	clear_read_regs(nandc);
-
-	/*
-	 * to mark the block as bad, we fill the entire last codeword with 0s.
-	 * we don't care about the rest of the content in the codeword since
-	 * we aren't going to use this block again
-	 */
-	memset(nandc->data_buffer, 0x00, host->cw_size);
-
-	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
-
-	/* prepare write */
-	host->use_ecc = false;
-	set_address(host, host->cw_size * (ecc->steps - 1), page);
-	update_rw_regs(host, 1, false);
-
-	config_cw_write_pre(nandc);
-	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
-	config_cw_write_post(nandc);
-
-	ret = submit_descs(nandc);
-
-	free_descs(nandc);
-
-	if (ret) {
-		dev_err(nandc->dev, "failure to update BBM\n");
-		return -EIO;
-	}
-
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-/*
- * the three functions below implement chip->read_byte(), chip->read_buf()
- * and chip->write_buf() respectively. these aren't used for
- * reading/writing page data; they are used for smaller data like reading
- * the id, status, etc.
- */
-static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	u8 *buf = nandc->data_buffer;
-	u8 ret = 0x0;
-
-	if (host->last_command == NAND_CMD_STATUS) {
-		ret = host->status;
-
-		host->status = NAND_STATUS_READY | NAND_STATUS_WP;
-
-		return ret;
-	}
-
-	if (nandc->buf_start < nandc->buf_count)
-		ret = buf[nandc->buf_start++];
-
-	return ret;
-}
-
-static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
-	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
-	nandc->buf_start += real_len;
-}
-
-static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
-				 int len)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
-
-	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
-
-	nandc->buf_start += real_len;
-}
-
-/* we support only one external chip for now */
-static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-
-	if (chipnr <= 0)
-		return;
-
-	dev_warn(nandc->dev, "invalid chip select\n");
-}
-
-/*
- * NAND controller page layout info
- *
- * Layout with ECC enabled:
- *
- * |----------------------|  |---------------------------------|
- * |           xx.......yy|  |             *********xx.......yy|
- * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
- * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
- * |           xx.......yy|  |             *********xx.......yy|
- * |----------------------|  |---------------------------------|
- *     codeword 1,2..n-1                  codeword n
- *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
- *
- * n = Number of codewords in the page
- * . = ECC bytes
- * * = Spare/free bytes
- * x = Unused byte(s)
- * y = Reserved byte(s)
- *
- * 2K page: n = 4, spare = 16 bytes
- * 4K page: n = 8, spare = 32 bytes
- * 8K page: n = 16, spare = 64 bytes
- *
- * the qcom nand controller operates at a sub page/codeword level. each
- * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
- * the number of ECC bytes varies based on the ECC strength and the bus width.
- *
- * the first n - 1 codewords contain 516 bytes of user data; the remaining
- * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
- * both user data and spare (oobavail) bytes that sum up to 516 bytes.
- *
- * When we access a page with ECC enabled, the reserved byte(s) are not
- * accessible at all. When reading, we fill up these unreadable positions
- * with 0xffs. When writing, the controller skips writing the inaccessible
- * bytes.
- *
- * Layout with ECC disabled:
- *
- * |------------------------------|  |---------------------------------------|
- * |         yy          xx.......|  |         bb          *********xx.......|
- * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
- * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
- * |         yy          xx.......|  |         bb          *********xx.......|
- * |------------------------------|  |---------------------------------------|
- *         codeword 1,2..n-1                        codeword n
- *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
- *
- * n = Number of codewords in the page
- * . = ECC bytes
- * * = Spare/free bytes
- * x = Unused byte(s)
- * y = Dummy Bad Block byte(s)
- * b = Real Bad Block byte(s)
- * size1/size2 = function of codeword size and 'n'
- *
- * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
- * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
- * Block Markers. In the last codeword, this position contains the real BBM.
- *
- * In order to have a consistent layout between RAW and ECC modes, we assume
- * the following OOB layout arrangement:
- *
- * |-----------|  |--------------------|
- * |yyxx.......|  |bb*********xx.......|
- * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
- * |yyxx.......|  |bb*********xx.......|
- * |yyxx.......|  |bb*********xx.......|
- * |-----------|  |--------------------|
- *  first n - 1       nth OOB region
- *  OOB regions
- *
- * n = Number of codewords in the page
- * . = ECC bytes
- * * = FREE OOB bytes
- * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
- * x = Unused byte(s)
- * b = Real bad block byte(s) (inaccessible when ECC enabled)
- *
- * This layout is read as is when ECC is disabled. When ECC is enabled, the
- * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
- * and assumed to be 0xffs when we read a page/oob. The ECC, unused and
- * dummy/real bad block bytes are grouped as ecc bytes (i.e., ecc->bytes is
- * the sum of the three).
- */
-static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
-				   struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section > 1)
-		return -ERANGE;
-
-	if (!section) {
-		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
-				    host->bbm_size;
-		oobregion->offset = 0;
-	} else {
-		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
-		oobregion->offset = mtd->oobsize - oobregion->length;
-	}
-
-	return 0;
-}
-
-static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
-				     struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct qcom_nand_host *host = to_qcom_nand_host(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = ecc->steps * 4;
-	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
-	.ecc = qcom_nand_ooblayout_ecc,
-	.free = qcom_nand_ooblayout_free,
-};
-
-static int qcom_nand_host_setup(struct qcom_nand_host *host)
-{
-	struct nand_chip *chip = &host->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
-	int cwperpage, bad_block_byte;
-	bool wide_bus;
-	int ecc_mode = 1;
-
-	/*
-	 * the controller requires that each step consist of 512 bytes of data.
-	 * bail out if DT has populated a wrong step size.
-	 */
-	if (ecc->size != NANDC_STEP_SIZE) {
-		dev_err(nandc->dev, "invalid ecc size\n");
-		return -EINVAL;
-	}
-
-	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
-
-	if (ecc->strength >= 8) {
-		/* 8 bit ECC defaults to BCH ECC on all platforms */
-		host->bch_enabled = true;
-		ecc_mode = 1;
-
-		if (wide_bus) {
-			host->ecc_bytes_hw = 14;
-			host->spare_bytes = 0;
-			host->bbm_size = 2;
-		} else {
-			host->ecc_bytes_hw = 13;
-			host->spare_bytes = 2;
-			host->bbm_size = 1;
-		}
-	} else {
-		/*
-		 * if the controller supports BCH for 4 bit ECC, the controller
-		 * uses fewer bytes for ECC. If RS is used, the ECC data is
-		 * always 10 bytes
-		 */
-		if (nandc->ecc_modes & ECC_BCH_4BIT) {
-			/* BCH */
-			host->bch_enabled = true;
-			ecc_mode = 0;
-
-			if (wide_bus) {
-				host->ecc_bytes_hw = 8;
-				host->spare_bytes = 2;
-				host->bbm_size = 2;
-			} else {
-				host->ecc_bytes_hw = 7;
-				host->spare_bytes = 4;
-				host->bbm_size = 1;
-			}
-		} else {
-			/* RS */
-			host->ecc_bytes_hw = 10;
-
-			if (wide_bus) {
-				host->spare_bytes = 0;
-				host->bbm_size = 2;
-			} else {
-				host->spare_bytes = 1;
-				host->bbm_size = 1;
-			}
-		}
-	}
-
-	/*
-	 * we consider ecc->bytes as the sum of all the non-data content in a
-	 * step. It gives us a clean representation of the oob area (even if
-	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
-	 * ECC and 12 bytes for 4 bit ECC
-	 */
-	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
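-	/*
-	 * with the values picked above this works out to 13 + 2 + 1 or
-	 * 14 + 0 + 2 = 16 bytes for 8 bit ECC, and 7 + 4 + 1, 8 + 2 + 2,
-	 * 10 + 1 + 1 or 10 + 0 + 2 = 12 bytes for the 4 bit BCH/RS variants
-	 */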
-
-	ecc->read_page		= qcom_nandc_read_page;
-	ecc->read_page_raw	= qcom_nandc_read_page_raw;
-	ecc->read_oob		= qcom_nandc_read_oob;
-	ecc->write_page		= qcom_nandc_write_page;
-	ecc->write_page_raw	= qcom_nandc_write_page_raw;
-	ecc->write_oob		= qcom_nandc_write_oob;
-
-	ecc->mode = NAND_ECC_HW;
-
-	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
-
-	cwperpage = mtd->writesize / ecc->size;
-
-	/*
-	 * DATA_UD_BYTES varies based on whether the read/write command protects
-	 * spare data with ECC too. We protect spare data by default, so we set
-	 * it to main + spare data, which are 512 and 4 bytes respectively.
-	 */
-	host->cw_data = 516;
-
-	/*
-	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
-	 * for 8 bit ECC
-	 */
-	host->cw_size = host->cw_data + ecc->bytes;
-
-	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
-		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
-		return -EINVAL;
-	}
-
-	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
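-	/*
-	 * bad_block_byte points at the BBM within the last codeword, e.g.
-	 * 2048 - 3 * 528 + 1 = 465 for a 2K page with 4 bit ECC; this is
-	 * bbpos from qcom_nandc_block_bad() plus one, presumably because the
-	 * BAD_BLOCK_BYTE_NUM field counts from 1
-	 */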
-
-	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
-				| host->cw_data << UD_SIZE_BYTES
-				| 0 << DISABLE_STATUS_AFTER_WRITE
-				| 5 << NUM_ADDR_CYCLES
-				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
-				| 0 << STATUS_BFR_READ
-				| 1 << SET_RD_MODE_AFTER_STATUS
-				| host->spare_bytes << SPARE_SIZE_BYTES;
-
-	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
-				| 0 <<  CS_ACTIVE_BSY
-				| bad_block_byte << BAD_BLOCK_BYTE_NUM
-				| 0 << BAD_BLOCK_IN_SPARE_AREA
-				| 2 << WR_RD_BSY_GAP
-				| wide_bus << WIDE_FLASH
-				| host->bch_enabled << ENABLE_BCH_ECC;
-
-	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
-				| host->cw_size << UD_SIZE_BYTES
-				| 5 << NUM_ADDR_CYCLES
-				| 0 << SPARE_SIZE_BYTES;
-
-	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
-				| 0 << CS_ACTIVE_BSY
-				| 17 << BAD_BLOCK_BYTE_NUM
-				| 1 << BAD_BLOCK_IN_SPARE_AREA
-				| 2 << WR_RD_BSY_GAP
-				| wide_bus << WIDE_FLASH
-				| 1 << DEV0_CFG1_ECC_DISABLE;
-
-	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
-				| 0 << ECC_SW_RESET
-				| host->cw_data << ECC_NUM_DATA_BYTES
-				| 1 << ECC_FORCE_CLK_OPEN
-				| ecc_mode << ECC_MODE
-				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
-
-	host->ecc_buf_cfg = 0x203 << NUM_STEPS;
-
-	host->clrflashstatus = FS_READY_BSY_N;
-	host->clrreadstatus = 0xc0;
-
-	dev_dbg(nandc->dev,
-		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
-		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
-		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
-		cwperpage);
-
-	return 0;
-}
-
-static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
-{
-	int ret;
-
-	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
-	if (ret) {
-		dev_err(nandc->dev, "failed to set DMA mask\n");
-		return ret;
-	}
-
-	/*
-	 * we use the internal buffer for reading ONFI params, reading small
-	 * data like ID and status, and performing read-copy-write operations
-	 * when writing to a codeword partially. 532 is the maximum possible
-	 * size of a codeword for our nand controller
-	 */
-	nandc->buf_size = 532;
-
-	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
-					GFP_KERNEL);
-	if (!nandc->data_buffer)
-		return -ENOMEM;
-
-	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
-					GFP_KERNEL);
-	if (!nandc->regs)
-		return -ENOMEM;
-
-	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
-				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
-				GFP_KERNEL);
-	if (!nandc->reg_read_buf)
-		return -ENOMEM;
-
-	nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
-	if (!nandc->chan) {
-		dev_err(nandc->dev, "failed to request slave channel\n");
-		return -ENODEV;
-	}
-
-	INIT_LIST_HEAD(&nandc->desc_list);
-	INIT_LIST_HEAD(&nandc->host_list);
-
-	nand_hw_control_init(&nandc->controller);
-
-	return 0;
-}
-
-static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
-{
-	dma_release_channel(nandc->chan);
-}
-
-/* one time setup of a few nand controller registers */
-static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
-{
-	/* kill onenand */
-	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
-
-	/* enable ADM DMA */
-	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
-
-	/* save the original values of these registers */
-	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
-	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
-
-	return 0;
-}
-
-static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
-			       struct qcom_nand_host *host,
-			       struct device_node *dn)
-{
-	struct nand_chip *chip = &host->chip;
-	struct mtd_info *mtd = nand_to_mtd(chip);
-	struct device *dev = nandc->dev;
-	int ret;
-
-	ret = of_property_read_u32(dn, "reg", &host->cs);
-	if (ret) {
-		dev_err(dev, "can't get chip-select\n");
-		return -ENXIO;
-	}
-
-	nand_set_flash_node(chip, dn);
-	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
-	mtd->owner = THIS_MODULE;
-	mtd->dev.parent = dev;
-
-	chip->cmdfunc		= qcom_nandc_command;
-	chip->select_chip	= qcom_nandc_select_chip;
-	chip->read_byte		= qcom_nandc_read_byte;
-	chip->read_buf		= qcom_nandc_read_buf;
-	chip->write_buf		= qcom_nandc_write_buf;
-
-	/*
-	 * the bad block marker is readable only when we read the last codeword
-	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
-	 * helpers don't allow us to read BB from a nand chip with ECC
-	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
-	 * and block_markbad helpers until we permanently switch to using
-	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
-	 */
-	chip->block_bad		= qcom_nandc_block_bad;
-	chip->block_markbad	= qcom_nandc_block_markbad;
-
-	chip->controller = &nandc->controller;
-	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
-			 NAND_SKIP_BBTSCAN;
-
-	/* set up initial status value */
-	host->status = NAND_STATUS_READY | NAND_STATUS_WP;
-
-	ret = nand_scan_ident(mtd, 1, NULL);
-	if (ret)
-		return ret;
-
-	ret = qcom_nand_host_setup(host);
-	if (ret)
-		return ret;
-
-	ret = nand_scan_tail(mtd);
-	if (ret)
-		return ret;
-
-	return mtd_device_register(mtd, NULL, 0);
-}
-
-/* parse custom DT properties here */
-static int qcom_nandc_parse_dt(struct platform_device *pdev)
-{
-	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
-	struct device_node *np = nandc->dev->of_node;
-	int ret;
-
-	ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
-	if (ret) {
-		dev_err(nandc->dev, "command CRCI unspecified\n");
-		return ret;
-	}
-
-	ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
-	if (ret) {
-		dev_err(nandc->dev, "data CRCI unspecified\n");
-		return ret;
-	}
-
-	return 0;
-}
-
-static int qcom_nandc_probe(struct platform_device *pdev)
-{
-	struct qcom_nand_controller *nandc;
-	struct qcom_nand_host *host;
-	const void *dev_data;
-	struct device *dev = &pdev->dev;
-	struct device_node *dn = dev->of_node, *child;
-	struct resource *res;
-	int ret;
-
-	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
-	if (!nandc)
-		return -ENOMEM;
-
-	platform_set_drvdata(pdev, nandc);
-	nandc->dev = dev;
-
-	dev_data = of_device_get_match_data(dev);
-	if (!dev_data) {
-		dev_err(&pdev->dev, "failed to get device data\n");
-		return -ENODEV;
-	}
-
-	nandc->ecc_modes = (unsigned long)dev_data;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nandc->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(nandc->base))
-		return PTR_ERR(nandc->base);
-
-	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
-
-	nandc->core_clk = devm_clk_get(dev, "core");
-	if (IS_ERR(nandc->core_clk))
-		return PTR_ERR(nandc->core_clk);
-
-	nandc->aon_clk = devm_clk_get(dev, "aon");
-	if (IS_ERR(nandc->aon_clk))
-		return PTR_ERR(nandc->aon_clk);
-
-	ret = qcom_nandc_parse_dt(pdev);
-	if (ret)
-		return ret;
-
-	ret = qcom_nandc_alloc(nandc);
-	if (ret)
-		return ret;
-
-	ret = clk_prepare_enable(nandc->core_clk);
-	if (ret)
-		goto err_core_clk;
-
-	ret = clk_prepare_enable(nandc->aon_clk);
-	if (ret)
-		goto err_aon_clk;
-
-	ret = qcom_nandc_setup(nandc);
-	if (ret)
-		goto err_setup;
-
-	for_each_available_child_of_node(dn, child) {
-		if (of_device_is_compatible(child, "qcom,nandcs")) {
-			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
-			if (!host) {
-				of_node_put(child);
-				ret = -ENOMEM;
-				goto err_cs_init;
-			}
-
-			ret = qcom_nand_host_init(nandc, host, child);
-			if (ret) {
-				devm_kfree(dev, host);
-				continue;
-			}
-
-			list_add_tail(&host->node, &nandc->host_list);
-		}
-	}
-
-	if (list_empty(&nandc->host_list)) {
-		ret = -ENODEV;
-		goto err_cs_init;
-	}
-
-	return 0;
-
-err_cs_init:
-	list_for_each_entry(host, &nandc->host_list, node)
-		nand_release(nand_to_mtd(&host->chip));
-err_setup:
-	clk_disable_unprepare(nandc->aon_clk);
-err_aon_clk:
-	clk_disable_unprepare(nandc->core_clk);
-err_core_clk:
-	qcom_nandc_unalloc(nandc);
-
-	return ret;
-}
-
-static int qcom_nandc_remove(struct platform_device *pdev)
-{
-	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
-	struct qcom_nand_host *host;
-
-	list_for_each_entry(host, &nandc->host_list, node)
-		nand_release(nand_to_mtd(&host->chip));
-
-	qcom_nandc_unalloc(nandc);
-
-	clk_disable_unprepare(nandc->aon_clk);
-	clk_disable_unprepare(nandc->core_clk);
-
-	return 0;
-}
-
-#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)
-
-/*
- * data will hold a struct pointer containing more differences once we support
- * more controller variants
- */
-static const struct of_device_id qcom_nandc_of_match[] = {
-	{	.compatible = "qcom,ipq806x-nand",
-		.data = (void *)EBI2_NANDC_ECC_MODES,
-	},
-	{}
-};
-MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
-
-static struct platform_driver qcom_nandc_driver = {
-	.driver = {
-		.name = "qcom-nandc",
-		.of_match_table = qcom_nandc_of_match,
-	},
-	.probe   = qcom_nandc_probe,
-	.remove  = qcom_nandc_remove,
-};
-module_platform_driver(qcom_nandc_driver);
-
-MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
-MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
deleted file mode 100644
index fc9287af4614..000000000000
--- a/drivers/mtd/nand/r852.c
+++ /dev/null
@@ -1,1082 +0,0 @@ 
-/*
- * Copyright © 2009 - Maxim Levitsky
- * driver for Ricoh xD readers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <asm/byteorder.h>
-#include <linux/sched.h>
-#include "sm_common.h"
-#include "r852.h"
-
-
-static bool r852_enable_dma = 1;
-module_param(r852_enable_dma, bool, S_IRUGO);
-MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
-
-static int debug;
-module_param(debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug level (0-2)");
-
-/* read register */
-static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
-{
-	uint8_t reg = readb(dev->mmio + address);
-	return reg;
-}
-
-/* write register */
-static inline void r852_write_reg(struct r852_device *dev,
-						int address, uint8_t value)
-{
-	writeb(value, dev->mmio + address);
-	mmiowb();
-}
-
-
-/* read dword sized register */
-static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
-{
-	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
-	return reg;
-}
-
-/* write dword sized register */
-static inline void r852_write_reg_dword(struct r852_device *dev,
-							int address, uint32_t value)
-{
-	writel(cpu_to_le32(value), dev->mmio + address);
-	mmiowb();
-}
-
-/* returns pointer to our private structure */
-static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	return nand_get_controller_data(chip);
-}
-
-
-/* check if controller supports dma */
-static void r852_dma_test(struct r852_device *dev)
-{
-	dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
-		(R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
-
-	if (!dev->dma_usable)
-		message("Non dma capable device detected, dma disabled");
-
-	if (!r852_enable_dma) {
-		message("disabling dma on user request");
-		dev->dma_usable = 0;
-	}
-}
-
-/*
- * Enable dma. Enables either the first or the second stage of the DMA;
- * expects dev->dma_dir and dev->dma_state to be set
- */
-static void r852_dma_enable(struct r852_device *dev)
-{
-	uint8_t dma_reg, dma_irq_reg;
-
-	/* Set up dma settings */
-	dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
-	dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
-
-	if (dev->dma_dir)
-		dma_reg |= R852_DMA_READ;
-
-	if (dev->dma_state == DMA_INTERNAL) {
-		dma_reg |= R852_DMA_INTERNAL;
-		/*
-		 * Precaution to make sure HW doesn't write
-		 * to random kernel memory
-		 */
-		r852_write_reg_dword(dev, R852_DMA_ADDR,
-			cpu_to_le32(dev->phys_bounce_buffer));
-	} else {
-		dma_reg |= R852_DMA_MEMORY;
-		r852_write_reg_dword(dev, R852_DMA_ADDR,
-			cpu_to_le32(dev->phys_dma_addr));
-	}
-
-	/* Precaution: make sure write reached the device */
-	r852_read_reg_dword(dev, R852_DMA_ADDR);
-
-	r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
-
-	/* Set dma irq */
-	dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
-	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
-		dma_irq_reg |
-		R852_DMA_IRQ_INTERNAL |
-		R852_DMA_IRQ_ERROR |
-		R852_DMA_IRQ_MEMORY);
-}
-
-/*
- * Disable dma, called from the interrupt handler, which specifies
- * success of the operation via 'error' argument
- */
-static void r852_dma_done(struct r852_device *dev, int error)
-{
-	WARN_ON(dev->dma_stage == 0);
-
-	r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
-			r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
-
-	r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
-	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
-
-	/* Precaution to make sure HW doesn't write to random kernel memory */
-	r852_write_reg_dword(dev, R852_DMA_ADDR,
-		cpu_to_le32(dev->phys_bounce_buffer));
-	r852_read_reg_dword(dev, R852_DMA_ADDR);
-
-	dev->dma_error = error;
-	dev->dma_stage = 0;
-
-	if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
-		pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
-			dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
-}
-
-/*
- * Wait till the dma is done; this covers both phases of it
- */
-static int r852_dma_wait(struct r852_device *dev)
-{
-	long timeout = wait_for_completion_timeout(&dev->dma_done,
-				msecs_to_jiffies(1000));
-	if (!timeout) {
-		dbg("timeout waiting for DMA interrupt");
-		return -ETIMEDOUT;
-	}
-
-	return 0;
-}
-
-/*
- * Read/write one page using dma. Only whole 512 byte pages can be transferred
- */
-static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
-{
-	int bounce = 0;
-	unsigned long flags;
-	int error;
-
-	dev->dma_error = 0;
-
-	/* Set dma direction */
-	dev->dma_dir = do_read;
-	dev->dma_stage = 1;
-	reinit_completion(&dev->dma_done);
-
-	dbg_verbose("doing dma %s ", do_read ? "read" : "write");
-
-	/*
-	 * Set initial dma state: for reads, first fill the on-board buffer
-	 * from the device; for writes, first fill the buffer from memory
-	 */
-	dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
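-	/*
-	 * the transfer goes through the reader's internal buffer in two hops:
-	 * device <-> internal buffer and internal buffer <-> host memory.
-	 * r852_irq() bumps dma_stage on each completion, re-arms the second
-	 * hop and finally completes dma_done
-	 */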
-
-	/* if the incoming buffer is not page aligned, we have to bounce */
-	if ((unsigned long)buf & (R852_DMA_LEN-1))
-		bounce = 1;
-
-	if (!bounce) {
-		dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
-			R852_DMA_LEN,
-			(do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
-
-		if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
-			bounce = 1;
-	}
-
-	if (bounce) {
-		dbg_verbose("dma: using bounce buffer");
-		dev->phys_dma_addr = dev->phys_bounce_buffer;
-		if (!do_read)
-			memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
-	}
-
-	/* Enable DMA */
-	spin_lock_irqsave(&dev->irqlock, flags);
-	r852_dma_enable(dev);
-	spin_unlock_irqrestore(&dev->irqlock, flags);
-
-	/* Wait till complete */
-	error = r852_dma_wait(dev);
-
-	if (error) {
-		r852_dma_done(dev, error);
-		return;
-	}
-
-	if (do_read && bounce)
-		memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
-}
-
-/*
- * Program data lines of the nand chip to send data to it
- */
-static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-	uint32_t reg;
-
-	/* Don't allow any access to hardware if we suspect card removal */
-	if (dev->card_unstable)
-		return;
-
-	/* Special case for whole sector read */
-	if (len == R852_DMA_LEN && dev->dma_usable) {
-		r852_do_dma(dev, (uint8_t *)buf, 0);
-		return;
-	}
-
-	/* write DWORD chunks - faster */
-	while (len >= 4) {
-		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
-		r852_write_reg_dword(dev, R852_DATALINE, reg);
-		buf += 4;
-		len -= 4;
-
-	}
-
-	/* write rest */
-	while (len > 0) {
-		r852_write_reg(dev, R852_DATALINE, *buf++);
-		len--;
-	}
-}
-
-/*
- * Read data lines of the nand chip to retrieve data
- */
-static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-	uint32_t reg;
-
-	if (dev->card_unstable) {
-		/*
-		 * since we can't signal an error here, at least return a
-		 * predictable buffer
-		 */
-		memset(buf, 0, len);
-		return;
-	}
-
-	/* special case for whole sector read */
-	if (len == R852_DMA_LEN && dev->dma_usable) {
-		r852_do_dma(dev, buf, 1);
-		return;
-	}
-
-	/* read in dword sized chunks */
-	while (len >= 4) {
-
-		reg = r852_read_reg_dword(dev, R852_DATALINE);
-		*buf++ = reg & 0xFF;
-		*buf++ = (reg >> 8) & 0xFF;
-		*buf++ = (reg >> 16) & 0xFF;
-		*buf++ = (reg >> 24) & 0xFF;
-		len -= 4;
-	}
-
-	/* read the rest byte by byte */
-	while (len--)
-		*buf++ = r852_read_reg(dev, R852_DATALINE);
-}
-
-/*
- * Read one byte from nand chip
- */
-static uint8_t r852_read_byte(struct mtd_info *mtd)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-
-	/* Same problem as in r852_read_buf.... */
-	if (dev->card_unstable)
-		return 0;
-
-	return r852_read_reg(dev, R852_DATALINE);
-}
-
-/*
- * Control several chip lines & send commands
- */
-static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-
-	if (dev->card_unstable)
-		return;
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-
-		dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
-				 R852_CTL_ON | R852_CTL_CARDENABLE);
-
-		if (ctrl & NAND_ALE)
-			dev->ctlreg |= R852_CTL_DATA;
-
-		if (ctrl & NAND_CLE)
-			dev->ctlreg |= R852_CTL_COMMAND;
-
-		if (ctrl & NAND_NCE)
-			dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
-		else
-			dev->ctlreg &= ~R852_CTL_WRITE;
-
-		/* when a write is started, enable write access */
-		if (dat == NAND_CMD_ERASE1)
-			dev->ctlreg |= R852_CTL_WRITE;
-
-		r852_write_reg(dev, R852_CTL, dev->ctlreg);
-	}
-
-	/*
-	 * HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we
-	 * need to set write mode
-	 */
-	if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
-		dev->ctlreg |= R852_CTL_WRITE;
-		r852_write_reg(dev, R852_CTL, dev->ctlreg);
-	}
-
-	if (dat != NAND_CMD_NONE)
-		r852_write_reg(dev, R852_DATALINE, dat);
-}
-
-/*
- * Wait till card is ready.
- * Based on nand_wait, but also reports DMA errors via the returned status
- */
-static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
-{
-	struct r852_device *dev = nand_get_controller_data(chip);
-
-	unsigned long timeout;
-	int status;
-
-	timeout = jiffies + (chip->state == FL_ERASING ?
-		msecs_to_jiffies(400) : msecs_to_jiffies(20));
-
-	while (time_before(jiffies, timeout))
-		if (chip->dev_ready(mtd))
-			break;
-
-	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
-	status = (int)chip->read_byte(mtd);
-
-	/* Unfortunately, there is no way to send a detailed error status... */
-	if (dev->dma_error) {
-		status |= NAND_STATUS_FAIL;
-		dev->dma_error = 0;
-	}
-	return status;
-}
-
-/*
- * Check if card is ready
- */
-
-static int r852_ready(struct mtd_info *mtd)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-	return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
-}
-
-
-/*
- * Set ECC engine mode
- */
-
-static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-
-	if (dev->card_unstable)
-		return;
-
-	switch (mode) {
-	case NAND_ECC_READ:
-	case NAND_ECC_WRITE:
-		/* enable ecc generation/check*/
-		dev->ctlreg |= R852_CTL_ECC_ENABLE;
-
-		/* flush ecc buffer */
-		r852_write_reg(dev, R852_CTL,
-			dev->ctlreg | R852_CTL_ECC_ACCESS);
-
-		r852_read_reg_dword(dev, R852_DATALINE);
-		r852_write_reg(dev, R852_CTL, dev->ctlreg);
-		return;
-
-	case NAND_ECC_READSYN:
-		/* disable ecc generation */
-		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
-		r852_write_reg(dev, R852_CTL, dev->ctlreg);
-	}
-}
-
-/*
- * Calculate ECC, only used for writes
- */
-
-static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
-							uint8_t *ecc_code)
-{
-	struct r852_device *dev = r852_get_dev(mtd);
-	struct sm_oob *oob = (struct sm_oob *)ecc_code;
-	uint32_t ecc1, ecc2;
-
-	if (dev->card_unstable)
-		return 0;
-
-	dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
-	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
-
-	ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
-	ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
-
-	oob->ecc1[0] = (ecc1) & 0xFF;
-	oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
-	oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
-
-	oob->ecc2[0] = (ecc2) & 0xFF;
-	oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
-	oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
-
-	r852_write_reg(dev, R852_CTL, dev->ctlreg);
-	return 0;
-}
-
-/*
- * Correct the data using ECC, hw did almost everything for us
- */
-
-static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
-				uint8_t *read_ecc, uint8_t *calc_ecc)
-{
-	uint32_t ecc_reg;
-	uint8_t ecc_status, err_byte;
-	int i, error = 0;
-
-	struct r852_device *dev = r852_get_dev(mtd);
-
-	if (dev->card_unstable)
-		return 0;
-
-	if (dev->dma_error) {
-		dev->dma_error = 0;
-		return -EIO;
-	}
-
-	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
-	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
-	r852_write_reg(dev, R852_CTL, dev->ctlreg);
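-	/*
-	 * ecc_reg packs one 16 bit result per 256 byte half of the sector:
-	 * the low byte is the offset of the failing byte and the next byte
-	 * holds the status flags and the failing bit index; the loop shifts
-	 * the register right by 16 to move on to the second half
-	 */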
-
-	for (i = 0 ; i <= 1 ; i++) {
-
-		ecc_status = (ecc_reg >> 8) & 0xFF;
-
-		/* ecc uncorrectable error */
-		if (ecc_status & R852_ECC_FAIL) {
-			dbg("ecc: unrecoverable error, in half %d", i);
-			error = -EBADMSG;
-			goto exit;
-		}
-
-		/* correctable error */
-		if (ecc_status & R852_ECC_CORRECTABLE) {
-
-			err_byte = ecc_reg & 0xFF;
-			dbg("ecc: recoverable error, "
-				"in half %d, byte %d, bit %d", i,
-				err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
-
-			dat[err_byte] ^=
-				1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
-			error++;
-		}
-
-		dat += 256;
-		ecc_reg >>= 16;
-	}
-exit:
-	return error;
-}
-
-/*
- * This is copy of nand_read_oob_std
- * nand_read_oob_syndrome assumes we can send column address - we can't
- */
-static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
-			     int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
-	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-/*
- * Start the nand engine
- */
-
-static void r852_engine_enable(struct r852_device *dev)
-{
-	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
-		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
-		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
-	} else {
-		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
-		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
-	}
-	msleep(300);
-	r852_write_reg(dev, R852_CTL, 0);
-}
-
-
-/*
- * Stop the nand engine
- */
-
-static void r852_engine_disable(struct r852_device *dev)
-{
-	r852_write_reg_dword(dev, R852_HW, 0);
-	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
-}
-
-/*
- * Test if card is present
- */
-
-static void r852_card_update_present(struct r852_device *dev)
-{
-	unsigned long flags;
-	uint8_t reg;
-
-	spin_lock_irqsave(&dev->irqlock, flags);
-	reg = r852_read_reg(dev, R852_CARD_STA);
-	dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
-	spin_unlock_irqrestore(&dev->irqlock, flags);
-}
-
-/*
- * Update card detection IRQ state according to current card state
- * which is read in r852_card_update_present
- */
-static void r852_update_card_detect(struct r852_device *dev)
-{
-	int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
-	dev->card_unstable = 0;
-
-	card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
-	card_detect_reg |= R852_CARD_IRQ_GENABLE;
-
-	card_detect_reg |= dev->card_detected ?
-		R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
-
-	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
-}
-
-static ssize_t r852_media_type_show(struct device *sys_dev,
-			struct device_attribute *attr, char *buf)
-{
-	struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
-	struct r852_device *dev = r852_get_dev(mtd);
-	char *data = dev->sm ? "smartmedia" : "xd";
-
-	strcpy(buf, data);
-	return strlen(data);
-}
-
-static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
-
-
-/* Detect properties of card in slot */
-static void r852_update_media_status(struct r852_device *dev)
-{
-	uint8_t reg;
-	unsigned long flags;
-	int readonly;
-
-	spin_lock_irqsave(&dev->irqlock, flags);
-	if (!dev->card_detected) {
-		message("card removed");
-		spin_unlock_irqrestore(&dev->irqlock, flags);
-		return ;
-	}
-
-	readonly  = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
-	reg = r852_read_reg(dev, R852_DMA_CAP);
-	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
-
-	message("detected %s %s card in slot",
-		dev->sm ? "SmartMedia" : "xD",
-		readonly ? "readonly" : "writeable");
-
-	dev->readonly = readonly;
-	spin_unlock_irqrestore(&dev->irqlock, flags);
-}
-
-/*
- * Register the nand device
- * Called when the card is detected
- */
-static int r852_register_nand_device(struct r852_device *dev)
-{
-	struct mtd_info *mtd = nand_to_mtd(dev->chip);
-
-	WARN_ON(dev->card_registred);
-
-	mtd->dev.parent = &dev->pci_dev->dev;
-
-	if (dev->readonly)
-		dev->chip->options |= NAND_ROM;
-
-	r852_engine_enable(dev);
-
-	if (sm_register_device(mtd, dev->sm))
-		goto error1;
-
-	if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
-		message("can't create media type sysfs attribute");
-		goto error3;
-	}
-
-	dev->card_registred = 1;
-	return 0;
-error3:
-	nand_release(mtd);
-error1:
-	/* Force card redetect */
-	dev->card_detected = 0;
-	return -1;
-}
-
-/*
- * Unregister the card
- */
-
-static void r852_unregister_nand_device(struct r852_device *dev)
-{
-	struct mtd_info *mtd = nand_to_mtd(dev->chip);
-
-	if (!dev->card_registred)
-		return;
-
-	device_remove_file(&mtd->dev, &dev_attr_media_type);
-	nand_release(mtd);
-	r852_engine_disable(dev);
-	dev->card_registred = 0;
-}
-
-/* Card state updater */
-static void r852_card_detect_work(struct work_struct *work)
-{
-	struct r852_device *dev =
-		container_of(work, struct r852_device, card_detect_work.work);
-
-	r852_card_update_present(dev);
-	r852_update_card_detect(dev);
-	dev->card_unstable = 0;
-
-	/* False alarm */
-	if (dev->card_detected == dev->card_registred)
-		goto exit;
-
-	/* Read media properties */
-	r852_update_media_status(dev);
-
-	/* Register the card */
-	if (dev->card_detected)
-		r852_register_nand_device(dev);
-	else
-		r852_unregister_nand_device(dev);
-exit:
-	r852_update_card_detect(dev);
-}
-
-/* Ack + disable IRQ generation */
-static void r852_disable_irqs(struct r852_device *dev)
-{
-	uint8_t reg;
-	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
-	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
-
-	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
-	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
-					reg & ~R852_DMA_IRQ_MASK);
-
-	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
-	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
-}
-
-/* Interrupt handler */
-static irqreturn_t r852_irq(int irq, void *data)
-{
-	struct r852_device *dev = (struct r852_device *)data;
-
-	uint8_t card_status, dma_status;
-	unsigned long flags;
-	irqreturn_t ret = IRQ_NONE;
-
-	spin_lock_irqsave(&dev->irqlock, flags);
-
-	/* handle card detection interrupts first */
-	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
-	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
-
-	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
-
-		ret = IRQ_HANDLED;
-		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
-
-		/* we shouldn't receive any interrupts while we wait for the
-			card to settle */
-		WARN_ON(dev->card_unstable);
-
-		/* disable irqs while card is unstable */
-		/* this will time out DMA if active, but better than garbage */
-		r852_disable_irqs(dev);
-
-		if (dev->card_unstable)
-			goto out;
-
-		/* let the card state settle a bit, and then do the work */
-		dev->card_unstable = 1;
-		queue_delayed_work(dev->card_workqueue,
-			&dev->card_detect_work, msecs_to_jiffies(100));
-		goto out;
-	}
-
-
-	/* Handle dma interrupts */
-	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
-	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
-
-	if (dma_status & R852_DMA_IRQ_MASK) {
-
-		ret = IRQ_HANDLED;
-
-		if (dma_status & R852_DMA_IRQ_ERROR) {
-			dbg("received dma error IRQ");
-			r852_dma_done(dev, -EIO);
-			complete(&dev->dma_done);
-			goto out;
-		}
-
-		/* received DMA interrupt out of nowhere? */
-		WARN_ON_ONCE(dev->dma_stage == 0);
-
-		if (dev->dma_stage == 0)
-			goto out;
-
-		/* done device access */
-		if (dev->dma_state == DMA_INTERNAL &&
-				(dma_status & R852_DMA_IRQ_INTERNAL)) {
-
-			dev->dma_state = DMA_MEMORY;
-			dev->dma_stage++;
-		}
-
-		/* done memory DMA */
-		if (dev->dma_state == DMA_MEMORY &&
-				(dma_status & R852_DMA_IRQ_MEMORY)) {
-			dev->dma_state = DMA_INTERNAL;
-			dev->dma_stage++;
-		}
-
-		/* Enable 2nd half of dma dance */
-		if (dev->dma_stage == 2)
-			r852_dma_enable(dev);
-
-		/* Operation done */
-		if (dev->dma_stage == 3) {
-			r852_dma_done(dev, 0);
-			complete(&dev->dma_done);
-		}
-		goto out;
-	}
-
-	/* Handle unknown interrupts */
-	if (dma_status)
-		dbg("bad dma IRQ status = %x", dma_status);
-
-	if (card_status & ~R852_CARD_STA_CD)
-		dbg("strange card status = %x", card_status);
-
-out:
-	spin_unlock_irqrestore(&dev->irqlock, flags);
-	return ret;
-}
-
-static int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
-{
-	int error;
-	struct nand_chip *chip;
-	struct r852_device *dev;
-
-	/* pci initialization */
-	error = pci_enable_device(pci_dev);
-
-	if (error)
-		goto error1;
-
-	pci_set_master(pci_dev);
-
-	error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
-	if (error)
-		goto error2;
-
-	error = pci_request_regions(pci_dev, DRV_NAME);
-
-	if (error)
-		goto error3;
-
-	error = -ENOMEM;
-
-	/* init nand chip, but register it only on card insert */
-	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
-
-	if (!chip)
-		goto error4;
-
-	/* commands */
-	chip->cmd_ctrl = r852_cmdctl;
-	chip->waitfunc = r852_wait;
-	chip->dev_ready = r852_ready;
-
-	/* I/O */
-	chip->read_byte = r852_read_byte;
-	chip->read_buf = r852_read_buf;
-	chip->write_buf = r852_write_buf;
-
-	/* ecc */
-	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
-	chip->ecc.size = R852_DMA_LEN;
-	chip->ecc.bytes = SM_OOB_SIZE;
-	chip->ecc.strength = 2;
-	chip->ecc.hwctl = r852_ecc_hwctl;
-	chip->ecc.calculate = r852_ecc_calculate;
-	chip->ecc.correct = r852_ecc_correct;
-
-	/* TODO: hack */
-	chip->ecc.read_oob = r852_read_oob;
-
-	/* init our device structure */
-	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
-
-	if (!dev)
-		goto error5;
-
-	nand_set_controller_data(chip, dev);
-	dev->chip = chip;
-	dev->pci_dev = pci_dev;
-	pci_set_drvdata(pci_dev, dev);
-
-	dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
-		&dev->phys_bounce_buffer);
-
-	if (!dev->bounce_buffer)
-		goto error6;
-
-
-	error = -ENODEV;
-	dev->mmio = pci_ioremap_bar(pci_dev, 0);
-
-	if (!dev->mmio)
-		goto error7;
-
-	error = -ENOMEM;
-	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
-
-	if (!dev->tmp_buffer)
-		goto error8;
-
-	init_completion(&dev->dma_done);
-
-	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
-
-	if (!dev->card_workqueue)
-		goto error9;
-
-	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
-
-	/* shut down everything - precaution */
-	r852_engine_disable(dev);
-	r852_disable_irqs(dev);
-
-	r852_dma_test(dev);
-
-	dev->irq = pci_dev->irq;
-	spin_lock_init(&dev->irqlock);
-
-	dev->card_detected = 0;
-	r852_card_update_present(dev);
-
-	/*register irq handler*/
-	error = -ENODEV;
-	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
-			  DRV_NAME, dev))
-		goto error10;
-
-	/* kick initial present test */
-	queue_delayed_work(dev->card_workqueue,
-		&dev->card_detect_work, 0);
-
-
-	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
-	return 0;
-
-error10:
-	destroy_workqueue(dev->card_workqueue);
-error9:
-	kfree(dev->tmp_buffer);
-error8:
-	pci_iounmap(pci_dev, dev->mmio);
-error7:
-	pci_free_consistent(pci_dev, R852_DMA_LEN,
-		dev->bounce_buffer, dev->phys_bounce_buffer);
-error6:
-	kfree(dev);
-error5:
-	kfree(chip);
-error4:
-	pci_release_regions(pci_dev);
-error3:
-error2:
-	pci_disable_device(pci_dev);
-error1:
-	return error;
-}
-
-static void r852_remove(struct pci_dev *pci_dev)
-{
-	struct r852_device *dev = pci_get_drvdata(pci_dev);
-
-	/* Stop detect workqueue -
-		we are going to unregister the device anyway*/
-	cancel_delayed_work_sync(&dev->card_detect_work);
-	destroy_workqueue(dev->card_workqueue);
-
-	/* Unregister the device, this might make more IO */
-	r852_unregister_nand_device(dev);
-
-	/* Stop interrupts */
-	r852_disable_irqs(dev);
-	free_irq(dev->irq, dev);
-
-	/* Cleanup */
-	kfree(dev->tmp_buffer);
-	pci_iounmap(pci_dev, dev->mmio);
-	pci_free_consistent(pci_dev, R852_DMA_LEN,
-		dev->bounce_buffer, dev->phys_bounce_buffer);
-
-	kfree(dev->chip);
-	kfree(dev);
-
-	/* Shutdown the PCI device */
-	pci_release_regions(pci_dev);
-	pci_disable_device(pci_dev);
-}
-
-static void r852_shutdown(struct pci_dev *pci_dev)
-{
-	struct r852_device *dev = pci_get_drvdata(pci_dev);
-
-	cancel_delayed_work_sync(&dev->card_detect_work);
-	r852_disable_irqs(dev);
-	synchronize_irq(dev->irq);
-	pci_disable_device(pci_dev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int r852_suspend(struct device *device)
-{
-	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
-
-	if (dev->ctlreg & R852_CTL_CARDENABLE)
-		return -EBUSY;
-
-	/* First make sure the detect work is gone */
-	cancel_delayed_work_sync(&dev->card_detect_work);
-
-	/* Turn off the interrupts and stop the device */
-	r852_disable_irqs(dev);
-	r852_engine_disable(dev);
-
-	/* If the card was pulled out just during the suspend, which is very
-		unlikely, we will remove it on resume; it is too late now
-		anyway... */
-	dev->card_unstable = 0;
-	return 0;
-}
-
-static int r852_resume(struct device *device)
-{
-	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
-	struct mtd_info *mtd = nand_to_mtd(dev->chip);
-
-	r852_disable_irqs(dev);
-	r852_card_update_present(dev);
-	r852_engine_disable(dev);
-
-
-	/* If card status changed, just do the work */
-	if (dev->card_detected != dev->card_registred) {
-		dbg("card was %s during low power state",
-			dev->card_detected ? "added" : "removed");
-
-		queue_delayed_work(dev->card_workqueue,
-		&dev->card_detect_work, msecs_to_jiffies(1000));
-		return 0;
-	}
-
-	/* Otherwise, initialize the card */
-	if (dev->card_registred) {
-		r852_engine_enable(dev);
-		dev->chip->select_chip(mtd, 0);
-		dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
-		dev->chip->select_chip(mtd, -1);
-	}
-
-	/* Program card detection IRQ */
-	r852_update_card_detect(dev);
-	return 0;
-}
-#endif
-
-static const struct pci_device_id r852_pci_id_tbl[] = {
-
-	{ PCI_VDEVICE(RICOH, 0x0852), },
-	{ },
-};
-
-MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
-
-static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
-
-static struct pci_driver r852_pci_driver = {
-	.name		= DRV_NAME,
-	.id_table	= r852_pci_id_tbl,
-	.probe		= r852_probe,
-	.remove		= r852_remove,
-	.shutdown	= r852_shutdown,
-	.driver.pm	= &r852_pm_ops,
-};
-
-module_pci_driver(r852_pci_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
-MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/r852.h b/drivers/mtd/nand/r852.h
deleted file mode 100644
index 8713c57f6207..000000000000
--- a/drivers/mtd/nand/r852.h
+++ /dev/null
@@ -1,160 +0,0 @@ 
-/*
- * Copyright © 2009 - Maxim Levitsky
- * driver for Ricoh xD readers
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/pci.h>
-#include <linux/completion.h>
-#include <linux/workqueue.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/spinlock.h>
-
-
-/* nand interface + ecc
-   a byte write/read does one cycle on the nand data lines.
-   a dword write/read does 4 cycles.
-   if R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
-   results of ecc correction, if a DMA read was done before.
-   If a write was done, two dword reads return the generated ecc checksums.
-*/
-#define	R852_DATALINE		0x00
-
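A minimal sketch of the checksum access described above, assuming the r852_write_reg()/r852_read_reg_dword() helpers and the cached dev->ctlreg from r852.c (r852_read_ecc_checksums() itself is a hypothetical name):

static void r852_read_ecc_checksums(struct r852_device *dev, uint32_t ecc[2])
{
	/* switch reg #0 from NAND data cycles to ECC access */
	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);

	/* two checksum dwords are generated after a DMA write */
	ecc[0] = r852_read_reg_dword(dev, R852_DATALINE);
	ecc[1] = r852_read_reg_dword(dev, R852_DATALINE);

	/* back to normal data access */
	r852_write_reg(dev, R852_CTL, dev->ctlreg);
}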
-/* control register */
-#define R852_CTL		0x04
-#define R852_CTL_COMMAND 	0x01	/* send command (#CLE)*/
-#define R852_CTL_DATA		0x02	/* read/write data (#ALE)*/
-#define R852_CTL_ON		0x04	/* only seems to control the hd led, */
-					/* but has to be set on start...*/
-#define R852_CTL_RESET		0x08	/* unknown, set only on start once*/
-#define R852_CTL_CARDENABLE	0x10	/* probably (#CE) - always set*/
-#define R852_CTL_ECC_ENABLE	0x20	/* enable ecc engine */
-#define R852_CTL_ECC_ACCESS	0x40	/* read/write ecc via reg #0*/
-#define R852_CTL_WRITE		0x80	/* set when performing writes (#WP) */
-
-/* card detection status */
-#define R852_CARD_STA		0x05
-
-#define R852_CARD_STA_CD	0x01	/* state of #CD line, same as 0x04 */
-#define R852_CARD_STA_RO	0x02	/* card is readonly */
-#define R852_CARD_STA_PRESENT	0x04	/* card is present (#CD) */
-#define R852_CARD_STA_ABSENT	0x08	/* card is absent */
-#define R852_CARD_STA_BUSY	0x80	/* card is busy - (#R/B) */
-
-/* card detection irq status & enable*/
-#define R852_CARD_IRQ_STA	0x06	/* IRQ status */
-#define R852_CARD_IRQ_ENABLE	0x07	/* IRQ enable */
-
-#define R852_CARD_IRQ_CD	0x01	/* fire when #CD lights, same as 0x04*/
-#define R852_CARD_IRQ_REMOVE	0x04	/* detect card removal */
-#define R852_CARD_IRQ_INSERT	0x08	/* detect card insert */
-#define R852_CARD_IRQ_UNK1	0x10	/* unknown */
-#define R852_CARD_IRQ_GENABLE	0x80	/* general enable */
-#define R852_CARD_IRQ_MASK	0x1D
-
-
-
-/* hardware enable */
-#define R852_HW			0x08
-#define R852_HW_ENABLED		0x01	/* hw enabled */
-#define R852_HW_UNKNOWN		0x80
-
-
-/* dma capabilities */
-#define R852_DMA_CAP		0x09
-#define R852_SMBIT		0x20	/* if set with bit #6 or bit #7, then */
-					/* hw is smartmedia */
-#define R852_DMA1		0x40	/* if set w/bit #7, dma is supported */
-#define R852_DMA2		0x80	/* if set w/bit #6, dma is supported */
-
-
-/* physical DMA address - 32 bit value*/
-#define R852_DMA_ADDR		0x0C
-
-
-/* dma settings */
-#define R852_DMA_SETTINGS	0x10
-#define R852_DMA_MEMORY		0x01	/* (memory <-> internal hw buffer) */
-#define R852_DMA_READ		0x02	/* 0 = write, 1 = read */
-#define R852_DMA_INTERNAL	0x04	/* (internal hw buffer <-> card) */
-
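As a rough sketch of how these bits combine (the real, IRQ-driven sequencing is in r852_dma_enable() in r852.c; r852_dma_program_read() is a hypothetical helper), a card-to-memory read runs in two phases, each selected by one SETTINGS bit:

static void r852_dma_program_read(struct r852_device *dev, bool second_phase)
{
	if (!second_phase) {
		/* phase 1: card -> internal 512-byte buffer */
		r852_write_reg_dword(dev, R852_DMA_SETTINGS,
				     R852_DMA_INTERNAL | R852_DMA_READ);
	} else {
		/* phase 2: internal buffer -> system memory at R852_DMA_ADDR */
		r852_write_reg_dword(dev, R852_DMA_ADDR,
				     dev->phys_bounce_buffer);
		r852_write_reg_dword(dev, R852_DMA_SETTINGS,
				     R852_DMA_MEMORY | R852_DMA_READ);
	}
}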
-/* dma IRQ status */
-#define R852_DMA_IRQ_STA		0x14
-
-/* dma IRQ enable */
-#define R852_DMA_IRQ_ENABLE	0x18
-
-#define R852_DMA_IRQ_MEMORY	0x01	/* (memory <-> internal hw buffer) */
-#define R852_DMA_IRQ_ERROR	0x02	/* error did happen */
-#define R852_DMA_IRQ_INTERNAL	0x04	/* (internal hw buffer <-> card) */
-#define R852_DMA_IRQ_MASK	0x07	/* mask of all IRQ bits */
-
-
-/* ECC syndrome format - a read from reg #0 returns two copies of these, one
-   for each half of the page.
-   The first byte is the error byte location; the second is the bit location
-   + flags */
-#define R852_ECC_ERR_BIT_MSK	0x07	/* error bit location */
-#define R852_ECC_CORRECT		0x10	/* no errors - (guessed) */
-#define R852_ECC_CORRECTABLE	0x20	/* correctable error exist */
-#define R852_ECC_FAIL		0x40	/* non correctable error detected */
-
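For illustration, decoding one such two-byte syndrome with the masks above could look like this (r852_decode_syndrome() is a hypothetical helper assuming the usual kernel error codes; the driver's real handling is in r852_ecc_correct() in r852.c):

static int r852_decode_syndrome(const uint8_t syn[2], int *byte_off, int *bit_off)
{
	if (syn[1] & R852_ECC_FAIL)
		return -EBADMSG;			/* uncorrectable */

	if (!(syn[1] & R852_ECC_CORRECTABLE))
		return 0;				/* nothing to fix */

	*byte_off = syn[0];				/* error byte location */
	*bit_off = syn[1] & R852_ECC_ERR_BIT_MSK;	/* error bit location */
	return 1;					/* one correctable error */
}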
-#define R852_DMA_LEN		512
-
-#define DMA_INTERNAL	0
-#define DMA_MEMORY	1
-
-struct r852_device {
-	void __iomem *mmio;		/* mmio */
-	struct nand_chip *chip;		/* nand chip backpointer */
-	struct pci_dev *pci_dev;	/* pci backpointer */
-
-	/* dma area */
-	dma_addr_t phys_dma_addr;	/* bus address of buffer*/
-	struct completion dma_done;	/* data transfer done */
-
-	dma_addr_t phys_bounce_buffer;	/* bus address of bounce buffer */
-	uint8_t *bounce_buffer;		/* virtual address of bounce buffer */
-
-	int dma_dir;			/* 1 = read, 0 = write */
-	int dma_stage;			/* 0 - idle, 1 - first step,
-					   2 - second step */
-
-	int dma_state;			/* 0 = internal, 1 = memory */
-	int dma_error;			/* dma errors */
-	int dma_usable;			/* is it possible to use dma */
-
-	/* card status area */
-	struct delayed_work card_detect_work;
-	struct workqueue_struct *card_workqueue;
-	int card_registred;		/* card registered with mtd */
-	int card_detected;		/* card detected in slot */
-	int card_unstable;		/* whether the card is inserted
-					   is not known yet */
-	int readonly;			/* card is readonly */
-	int sm;				/* Is card smartmedia */
-
-	/* interrupt handling */
-	spinlock_t irqlock;		/* IRQ protecting lock */
-	int irq;			/* irq num */
-	/* misc */
-	void *tmp_buffer;		/* temporary buffer */
-	uint8_t ctlreg;			/* cached contents of control reg */
-};
-
-#define DRV_NAME "r852"
-
-
-#define dbg(format, ...) \
-	if (debug) \
-		printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
-
-#define dbg_verbose(format, ...) \
-	if (debug > 1) \
-		printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
-
-
-#define message(format, ...) \
-	printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/rawnand/Kconfig b/drivers/mtd/nand/rawnand/Kconfig
new file mode 100644
index 000000000000..7b7a887b4709
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/Kconfig
@@ -0,0 +1,572 @@ 
+config MTD_NAND_ECC
+	tristate
+
+config MTD_NAND_ECC_SMC
+	bool "NAND ECC Smart Media byte order"
+	depends on MTD_NAND_ECC
+	default n
+	help
+	  Software ECC according to the Smart Media Specification.
+	  The original Linux implementation had bytes 0 and 1 swapped.
+
+
+menuconfig MTD_NAND
+	tristate "NAND Device Support"
+	depends on MTD
+	select MTD_NAND_IDS
+	select MTD_NAND_ECC
+	help
+	  This enables support for accessing all types of NAND flash
+	  devices. For further information see
+	  <http://www.linux-mtd.infradead.org/doc/nand.html>.
+
+if MTD_NAND
+
+config MTD_NAND_BCH
+	tristate
+	select BCH
+	depends on MTD_NAND_ECC_BCH
+	default MTD_NAND
+
+config MTD_NAND_ECC_BCH
+	bool "Support software BCH ECC"
+	default n
+	help
+	  This enables support for software BCH error correction. Binary BCH
+	  codes are more powerful and cpu intensive than traditional Hamming
+	  ECC codes. They are used with NAND devices requiring more than 1 bit
+	  of error correction.
+
+config MTD_SM_COMMON
+	tristate
+	default n
+
+config MTD_NAND_DENALI
+	tristate
+
+config MTD_NAND_DENALI_PCI
+        tristate "Support Denali NAND controller on Intel Moorestown"
+	select MTD_NAND_DENALI
+	depends on HAS_DMA && PCI
+        help
+          Enable the driver for NAND flash on Intel Moorestown, using the
+          Denali NAND controller core.
+
+config MTD_NAND_DENALI_DT
+	tristate "Support Denali NAND controller as a DT device"
+	select MTD_NAND_DENALI
+	depends on HAS_DMA && HAVE_CLK && OF
+	help
+	  Enable the driver for NAND flash on platforms using a Denali NAND
+	  controller as a DT device.
+
+config MTD_NAND_DENALI_SCRATCH_REG_ADDR
+        hex "Denali NAND size scratch register address"
+        default "0xFF108018"
+        depends on MTD_NAND_DENALI_PCI
+        help
+          Some platforms place the NAND chip size in a scratch register
+          because (some versions of) the driver aren't able to automatically
+          determine the size of certain chips. Set the address of the
+          scratch register here to enable this feature. On Intel Moorestown
+          boards, the scratch register is at 0xFF108018.
+
+config MTD_NAND_GPIO
+	tristate "GPIO assisted NAND Flash driver"
+	depends on GPIOLIB || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  This enables a NAND flash driver where control signals are
+	  connected to GPIO pins, and commands and data are communicated
+	  via a memory mapped interface.
+
+config MTD_NAND_AMS_DELTA
+	tristate "NAND Flash device on Amstrad E3"
+	depends on MACH_AMS_DELTA
+	default y
+	help
+	  Support for NAND flash on Amstrad E3 (Delta).
+
+config MTD_NAND_OMAP2
+	tristate "NAND Flash device on OMAP2, OMAP3, OMAP4 and Keystone"
+	depends on (ARCH_OMAP2PLUS || ARCH_KEYSTONE)
+	help
+          Support for NAND flash on Texas Instruments OMAP2, OMAP3, OMAP4
+	  and Keystone platforms.
+
+config MTD_NAND_OMAP_BCH
+	depends on MTD_NAND_OMAP2
+	bool "Support hardware based BCH error correction"
+	default n
+	select BCH
+	help
+	  This config enables the ELM hardware engine, which can be used to
+	  locate and correct errors when using BCH ECC scheme. This offloads
+	  the cpu from doing ECC error searching and correction. However some
+	  legacy OMAP families like OMAP2xxx, OMAP3xxx do not have ELM engine
+	  so this is optional for them.
+
+config MTD_NAND_OMAP_BCH_BUILD
+	def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
+
+config MTD_NAND_IDS
+	tristate
+
+config MTD_NAND_RICOH
+	tristate "Ricoh xD card reader"
+	default n
+	depends on PCI
+	select MTD_SM_COMMON
+	help
+	  Enable support for the Ricoh R5C852 xD card reader.
+	  You also need to enable either the
+	  'NAND SSFDC (SmartMedia) read only translation layer' or the new,
+	  experimental, read-write
+	  'SmartMedia/xD new translation layer'.
+
+config MTD_NAND_AU1550
+	tristate "Au1550/1200 NAND support"
+	depends on MIPS_ALCHEMY
+	help
+	  This enables the driver for the NAND flash controller on the
+	  AMD/Alchemy 1550 SOC.
+
+config MTD_NAND_BF5XX
+	tristate "Blackfin on-chip NAND Flash Controller driver"
+	depends on BF54x || BF52x
+	help
+	  This enables the Blackfin on-chip NAND flash controller
+
+	  No board specific support is done by this driver, each board
+	  must advertise a platform_device for the driver to attach.
+
+	  This driver can also be built as a module. If so, the module
+	  will be called bf5xx-nand.
+
+config MTD_NAND_BF5XX_HWECC
+	bool "BF5XX NAND Hardware ECC"
+	default y
+	depends on MTD_NAND_BF5XX
+	help
+	  Enable the use of the BF5XX's internal ECC generator when
+	  using NAND.
+
+config MTD_NAND_BF5XX_BOOTROM_ECC
+	bool "Use Blackfin BootROM ECC Layout"
+	default n
+	depends on MTD_NAND_BF5XX_HWECC
+	help
+	  If you wish to modify NAND pages and allow the Blackfin on-chip
+	  BootROM to boot from them, say Y here.  This is only necessary
+	  if you are booting U-Boot out of NAND and you wish to update
+	  U-Boot from Linux' userspace.  Otherwise, you should say N here.
+
+	  If unsure, say N.
+
+config MTD_NAND_S3C2410
+	tristate "NAND Flash support for Samsung S3C SoCs"
+	depends on ARCH_S3C24XX || ARCH_S3C64XX
+	help
+	  This enables the NAND flash controller on the S3C24xx and S3C64xx
+	  SoCs
+
+	  No board specific support is done by this driver, each board
+	  must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_S3C2410_DEBUG
+	bool "Samsung S3C NAND driver debug"
+	depends on MTD_NAND_S3C2410
+	help
+	  Enable debugging of the S3C NAND driver
+
+config MTD_NAND_S3C2410_HWECC
+	bool "Samsung S3C NAND Hardware ECC"
+	depends on MTD_NAND_S3C2410
+	help
+	  Enable the use of the controller's internal ECC generator when
+	  using NAND. Early versions of the chips have had problems with
+	  incorrect ECC generation, and if using these, the default of
+	  software ECC is preferable.
+
+config MTD_NAND_NDFC
+	tristate "NDFC NAND Flash Controller"
+	depends on 4xx
+	select MTD_NAND_ECC_SMC
+	help
+	 NDFC NAND Flash Controllers are integrated in IBM/AMCC's 4xx SoCs.
+
+config MTD_NAND_S3C2410_CLKSTOP
+	bool "Samsung S3C NAND IDLE clock stop"
+	depends on MTD_NAND_S3C2410
+	default n
+	help
+	  Stop the clock to the NAND controller when there is no chip
+	  selected to save power. This will mean there is a small delay
+	  when the NAND chip is selected or released, but will save
+	  approximately 5mA of power when there is nothing happening.
+
+config MTD_NAND_DISKONCHIP
+	tristate "DiskOnChip 2000, Millennium and Millennium Plus (NAND reimplementation)"
+	depends on HAS_IOMEM
+	select REED_SOLOMON
+	select REED_SOLOMON_DEC16
+	help
+	  This is a reimplementation of M-Systems DiskOnChip 2000,
+	  Millennium and Millennium Plus as a standard NAND device driver,
+	  as opposed to the earlier self-contained MTD device drivers.
+	  This should enable, among other things, proper JFFS2 operation on
+	  these devices.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+        bool "Advanced detection options for DiskOnChip"
+        depends on MTD_NAND_DISKONCHIP
+        help
+          This option allows you to specify nonstandard address at which to
+          probe for a DiskOnChip, or to change the detection options.  You
+          are unlikely to need any of this unless you are using LinuxBIOS.
+          Say 'N'.
+
+config MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+        hex "Physical address of DiskOnChip" if MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+        depends on MTD_NAND_DISKONCHIP
+        default "0"
+        ---help---
+        By default, the probe for DiskOnChip devices will look for a
+        DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+        This option allows you to specify a single address at which to probe
+        for the device, which is useful if you have other devices in that
+        range which get upset when they are probed.
+
+        (Note that on PowerPC, the normal probe will only check at
+        0xE4000000.)
+
+        Normally, you should leave this set to zero, to allow the probe at
+        the normal addresses.
+
+config MTD_NAND_DISKONCHIP_PROBE_HIGH
+        bool "Probe high addresses"
+        depends on MTD_NAND_DISKONCHIP_PROBE_ADVANCED
+        help
+          By default, the probe for DiskOnChip devices will look for a
+          DiskOnChip at every multiple of 0x2000 between 0xC8000 and 0xEE000.
+          This option changes to make it probe between 0xFFFC8000 and
+          0xFFFEE000.  Unless you are using LinuxBIOS, this is unlikely to be
+          useful to you.  Say 'N'.
+
+config MTD_NAND_DISKONCHIP_BBTWRITE
+	bool "Allow BBT writes on DiskOnChip Millennium and 2000TSOP"
+	depends on MTD_NAND_DISKONCHIP
+	help
+	  On DiskOnChip devices shipped with the INFTL filesystem (Millennium
+	  and 2000 TSOP/Alon), Linux reserves some space at the end of the
+	  device for the Bad Block Table (BBT).  If you have existing INFTL
+	  data on your device (created by non-Linux tools such as M-Systems'
+	  DOS drivers), your data might overlap the area Linux wants to use for
+	  the BBT.  If this is a concern for you, leave this option disabled and
+	  Linux will not write BBT data into this area.
+	  The downside of leaving this option disabled is that if bad blocks
+	  are detected by Linux, they will not be recorded in the BBT, which
+	  could cause future problems.
+	  Once you enable this option, new filesystems (INFTL or others, created
+	  in Linux or other operating systems) will not use the reserved area.
+	  The only reason not to enable this option is to prevent damage to
+	  preexisting filesystems.
+	  Even if you leave this disabled, you can enable BBT writes at module
+	  load time (assuming you build diskonchip as a module) with the module
+	  parameter "inftl_bbt_write=1".
+
+config MTD_NAND_DOCG4
+	tristate "Support for DiskOnChip G4"
+	depends on HAS_IOMEM
+	select BCH
+	select BITREVERSE
+	help
+	  Support for DiskOnChip G4 NAND flash, found in various smartphones and
+	  PDAs, among them the Palm Treo680, HTC Prophet and Wizard, Toshiba
+	  Portege G900, Asus P526, and O2 XDA Zinc.
+
+	  With this driver you will be able to use UBI and create a ubifs on the
+	  device, so you may wish to consider enabling UBI and UBIFS as well.
+
+	  These devices ship with the M-Sys/Sandisk SAFTL formatting, for which
+	  there is currently no mtd parser, so you may want to use command line
+	  partitioning to segregate write-protected blocks. On the Treo680, the
+	  first five erase blocks (256KiB each) are write-protected, followed
+	  by the block containing the saftl partition table.  This is probably
+	  typical.
+
+config MTD_NAND_SHARPSL
+	tristate "Support for NAND Flash on Sharp SL Series (C7xx + others)"
+	depends on ARCH_PXA
+
+config MTD_NAND_CAFE
+	tristate "NAND support for OLPC CAFÉ chip"
+	depends on PCI
+	select REED_SOLOMON
+	select REED_SOLOMON_DEC16
+	help
+	  Use NAND flash attached to the CAFÉ chip designed for the OLPC
+	  laptop.
+
+config MTD_NAND_CS553X
+	tristate "NAND support for CS5535/CS5536 (AMD Geode companion chip)"
+	depends on X86_32
+	depends on !UML && HAS_IOMEM
+	help
+	  The CS553x companion chips for the AMD Geode processor
+	  include NAND flash controllers with built-in hardware ECC
+	  capabilities; enabling this option will allow you to use
+	  these. The driver will check the MSRs to verify that the
+	  controller is enabled for NAND, and currently requires that
+	  the controller be in MMIO mode.
+
+	  If you say "m", the module will be called cs553x_nand.
+
+config MTD_NAND_ATMEL
+	tristate "Support for NAND Flash / SmartMedia on AT91 and AVR32"
+	depends on ARCH_AT91 || AVR32
+	help
+	  Enables support for NAND Flash / Smart Media Card interface
+	  on Atmel AT91 and AVR32 processors.
+
+config MTD_NAND_PXA3xx
+	tristate "NAND support on PXA3xx and Armada 370/XP"
+	depends on PXA3xx || ARCH_MMP || PLAT_ORION
+	help
+	  This enables the driver for the NAND flash device found on
+	  PXA3xx processors (NFCv1) and also on Armada 370/XP (NFCv2).
+
+config MTD_NAND_SLC_LPC32XX
+	tristate "NXP LPC32xx SLC Controller"
+	depends on ARCH_LPC32XX
+	help
+	  Enables support for NXP's LPC32XX SLC (i.e. for Single Level Cell
+	  chips) NAND controller. This is the default for the PHYTEC 3250
+	  reference board which contains a NAND256R3A2CZA6 chip.
+
+	  Please check the actual NAND chip connected and its support
+	  by the SLC NAND controller.
+
+config MTD_NAND_MLC_LPC32XX
+	tristate "NXP LPC32xx MLC Controller"
+	depends on ARCH_LPC32XX
+	help
+	  Uses the LPC32XX MLC (i.e. for Multi Level Cell chips) NAND
+	  controller. This is the default for the WORK92105 controller
+	  board.
+
+	  Please check the actual NAND chip connected and its support
+	  by the MLC NAND controller.
+
+config MTD_NAND_CM_X270
+	tristate "Support for NAND Flash on CM-X270 modules"
+	depends on MACH_ARMCORE
+
+config MTD_NAND_PASEMI
+	tristate "NAND support for PA Semi PWRficient"
+	depends on PPC_PASEMI
+	help
+	  Enables support for NAND Flash interface on PA Semi PWRficient
+	  based boards
+
+config MTD_NAND_TMIO
+	tristate "NAND Flash device on Toshiba Mobile IO Controller"
+	depends on MFD_TMIO
+	help
+	  Support for NAND flash connected to a Toshiba Mobile IO
+	  Controller in some PDAs, including the Sharp SL6000x.
+
+config MTD_NAND_NANDSIM
+	tristate "Support for NAND Flash Simulator"
+	help
+	  The simulator may simulate various NAND flash chips for the
+	  MTD nand layer.
+
+config MTD_NAND_GPMI_NAND
+        tristate "GPMI NAND Flash Controller driver"
+        depends on MTD_NAND && MXS_DMA
+        help
+	 Enables NAND Flash support for IMX23, IMX28 or IMX6.
+	 The GPMI controller is very powerful; with the help of the BCH
+	 module, it can do hardware ECC. The GPMI supports several
+	 NAND flashes at the same time. The GPMI may conflict with other
+	 blocks, such as the SD card, so pay attention to it when you enable
+	 the GPMI.
+
+config MTD_NAND_BRCMNAND
+	tristate "Broadcom STB NAND controller"
+	depends on ARM || ARM64 || MIPS
+	help
+	  Enables the Broadcom NAND controller driver. The controller was
+	  originally designed for Set-Top Box but is used on various BCM7xxx,
+	  BCM3xxx, BCM63xxx, iProc/Cygnus and more.
+
+config MTD_NAND_BCM47XXNFLASH
+	tristate "Support for NAND flash on BCM4706 BCMA bus"
+	depends on BCMA_NFLASH
+	help
+	  The BCMA bus can have various flash memories attached; they are
+	  registered by bcma as platform devices. This enables the driver for
+	  NAND flash memories. For now, only the BCM4706 is supported.
+
+config MTD_NAND_PLATFORM
+	tristate "Support for generic platform NAND driver"
+	depends on HAS_IOMEM
+	help
+	  This implements a generic NAND driver for on-SOC platform
+	  devices. You will need to provide platform-specific functions
+	  via platform_data.
+
+config MTD_NAND_ORION
+	tristate "NAND Flash support for Marvell Orion SoC"
+	depends on PLAT_ORION
+	help
+	  This enables the NAND flash controller on Orion machines.
+
+	  No board specific support is done by this driver, each board
+	  must advertise a platform_device for the driver to attach.
+
+config MTD_NAND_FSL_ELBC
+	tristate "NAND support for Freescale eLBC controllers"
+	depends on FSL_SOC
+	select FSL_LBC
+	help
+	  Various Freescale chips, including the 8313, include a NAND Flash
+	  Controller Module with built-in hardware ECC capabilities.
+	  Enabling this option will enable you to use this to control
+	  external NAND devices.
+
+config MTD_NAND_FSL_IFC
+	tristate "NAND support for Freescale IFC controller"
+	depends on FSL_SOC || ARCH_LAYERSCAPE
+	select FSL_IFC
+	select MEMORY
+	help
+	  Various Freescale chips, e.g. the P1010, include a NAND Flash machine
+	  with built-in hardware ECC capabilities.
+	  Enabling this option will enable you to use this to control
+	  external NAND devices.
+
+config MTD_NAND_FSL_UPM
+	tristate "Support for NAND on Freescale UPM"
+	depends on PPC_83xx || PPC_85xx
+	select FSL_LBC
+	help
+	  Enables support for NAND Flash chips wired onto Freescale PowerPC
+	  processor localbus with User-Programmable Machine support.
+
+config MTD_NAND_MPC5121_NFC
+	tristate "MPC5121 built-in NAND Flash Controller support"
+	depends on PPC_MPC512x
+	help
+	  This enables the driver for the NAND flash controller on the
+	  MPC5121 SoC.
+
+config MTD_NAND_VF610_NFC
+	tristate "Support for Freescale NFC for VF610/MPC5125"
+	depends on (SOC_VF610 || COMPILE_TEST)
+	depends on HAS_IOMEM
+	help
+	  Enables support for NAND Flash Controller on some Freescale
+	  processors like the VF610, MPC5125, MCF54418 or Kinetis K70.
+	  The driver supports a maximum 2k page size. With 2k pages and
+	  64 bytes or more of OOB, hardware ECC with up to 32-bit error
+	  correction is supported. Hardware ECC is only enabled through
+	  device tree.
+
+config MTD_NAND_MXC
+	tristate "MXC NAND support"
+	depends on ARCH_MXC
+	help
+	  This enables the driver for the NAND flash controller on the
+	  MXC processors.
+
+config MTD_NAND_SH_FLCTL
+	tristate "Support for NAND on Renesas SuperH FLCTL"
+	depends on SUPERH || COMPILE_TEST
+	depends on HAS_IOMEM
+	depends on HAS_DMA
+	help
+	  Several Renesas SuperH CPUs have an FLCTL. This option enables support
+	  for NAND Flash using FLCTL.
+
+config MTD_NAND_DAVINCI
+        tristate "Support NAND on DaVinci/Keystone SoC"
+        depends on ARCH_DAVINCI || (ARCH_KEYSTONE && TI_AEMIF)
+        help
+	  Enable the driver for NAND flash chips on Texas Instruments
+	  DaVinci/Keystone processors.
+
+config MTD_NAND_TXX9NDFMC
+	tristate "NAND Flash support for TXx9 SoC"
+	depends on SOC_TX4938 || SOC_TX4939
+	help
+	  This enables the NAND flash controller on the TXx9 SoCs.
+
+config MTD_NAND_SOCRATES
+	tristate "Support for NAND on Socrates board"
+	depends on SOCRATES
+	help
+	  Enables support for NAND Flash chips wired onto Socrates board.
+
+config MTD_NAND_NUC900
+	tristate "Support for NAND on Nuvoton NUC9xx/w90p910 evaluation boards."
+	depends on ARCH_W90X900
+	help
+	  This enables the driver for the NAND Flash on evaluation boards based
+	  on w90p910 / NUC9xx.
+
+config MTD_NAND_JZ4740
+	tristate "Support for JZ4740 SoC NAND controller"
+	depends on MACH_JZ4740
+	help
+	  Enables support for NAND Flash on JZ4740 SoC based boards.
+
+config MTD_NAND_JZ4780
+	tristate "Support for NAND on JZ4780 SoC"
+	depends on MACH_JZ4780 && JZ4780_NEMC
+	help
+	  Enables support for NAND Flash connected to the NEMC on JZ4780 SoC
+	  based boards, using the BCH controller for hardware error correction.
+
+config MTD_NAND_FSMC
+	tristate "Support for NAND on ST Micro's FSMC"
+	depends on PLAT_SPEAR || ARCH_NOMADIK || ARCH_U8500 || MACH_U300
+	help
+	  Enables support for NAND Flash chips on the ST Microelectronics
+	  Flexible Static Memory Controller (FSMC)
+
+config MTD_NAND_XWAY
+	tristate "Support for NAND on Lantiq XWAY SoC"
+	depends on LANTIQ && SOC_TYPE_XWAY
+	help
+	  Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
+	  to the External Bus Unit (EBU).
+
+config MTD_NAND_SUNXI
+	tristate "Support for NAND on Allwinner SoCs"
+	depends on ARCH_SUNXI
+	help
+	  Enables support for NAND Flash chips on Allwinner SoCs.
+
+config MTD_NAND_HISI504
+	tristate "Support for NAND controller on Hisilicon SoC Hip04"
+	depends on HAS_DMA
+	help
+	  Enables support for NAND controller on Hisilicon SoC Hip04.
+
+config MTD_NAND_QCOM
+	tristate "Support for NAND on QCOM SoCs"
+	depends on ARCH_QCOM
+	help
+	  Enables support for NAND flash chips on SoCs containing the EBI2 NAND
+	  controller. This controller is found on IPQ806x SoC.
+
+config MTD_NAND_MTK
+	tristate "Support for NAND controller on MTK SoCs"
+	depends on HAS_DMA
+	help
+	  Enables support for NAND controller on MTK SoCs.
+	  This controller is found on mt27xx, mt81xx, mt65xx SoCs.
+
+endif # MTD_NAND
diff --git a/drivers/mtd/nand/rawnand/Makefile b/drivers/mtd/nand/rawnand/Makefile
new file mode 100644
index 000000000000..cafde6f3d957
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/Makefile
@@ -0,0 +1,62 @@ 
+#
+# linux/drivers/mtd/nand/rawnand/Makefile
+#
+
+obj-$(CONFIG_MTD_NAND)			+= nand.o
+obj-$(CONFIG_MTD_NAND_ECC)		+= nand_ecc.o
+obj-$(CONFIG_MTD_NAND_BCH)		+= nand_bch.o
+obj-$(CONFIG_MTD_NAND_IDS)		+= nand_ids.o
+obj-$(CONFIG_MTD_SM_COMMON) 		+= sm_common.o
+
+obj-$(CONFIG_MTD_NAND_CAFE)		+= cafe_nand.o
+obj-$(CONFIG_MTD_NAND_AMS_DELTA)	+= ams-delta.o
+obj-$(CONFIG_MTD_NAND_DENALI)		+= denali.o
+obj-$(CONFIG_MTD_NAND_DENALI_PCI)	+= denali_pci.o
+obj-$(CONFIG_MTD_NAND_DENALI_DT)	+= denali_dt.o
+obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
+obj-$(CONFIG_MTD_NAND_BF5XX)		+= bf5xx_nand.o
+obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
+obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
+obj-$(CONFIG_MTD_NAND_DISKONCHIP)	+= diskonchip.o
+obj-$(CONFIG_MTD_NAND_DOCG4)		+= docg4.o
+obj-$(CONFIG_MTD_NAND_FSMC)		+= fsmc_nand.o
+obj-$(CONFIG_MTD_NAND_SHARPSL)		+= sharpsl.o
+obj-$(CONFIG_MTD_NAND_NANDSIM)		+= nandsim.o
+obj-$(CONFIG_MTD_NAND_CS553X)		+= cs553x_nand.o
+obj-$(CONFIG_MTD_NAND_NDFC)		+= ndfc.o
+obj-$(CONFIG_MTD_NAND_ATMEL)		+= atmel_nand.o
+obj-$(CONFIG_MTD_NAND_GPIO)		+= gpio.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2) 		+= omap2_nand.o
+obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD)	+= omap_elm.o
+obj-$(CONFIG_MTD_NAND_CM_X270)		+= cmx270_nand.o
+obj-$(CONFIG_MTD_NAND_PXA3xx)		+= pxa3xx_nand.o
+obj-$(CONFIG_MTD_NAND_TMIO)		+= tmio_nand.o
+obj-$(CONFIG_MTD_NAND_PLATFORM)		+= plat_nand.o
+obj-$(CONFIG_MTD_NAND_PASEMI)		+= pasemi_nand.o
+obj-$(CONFIG_MTD_NAND_ORION)		+= orion_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_ELBC)		+= fsl_elbc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_IFC)		+= fsl_ifc_nand.o
+obj-$(CONFIG_MTD_NAND_FSL_UPM)		+= fsl_upm.o
+obj-$(CONFIG_MTD_NAND_SLC_LPC32XX)      += lpc32xx_slc.o
+obj-$(CONFIG_MTD_NAND_MLC_LPC32XX)      += lpc32xx_mlc.o
+obj-$(CONFIG_MTD_NAND_SH_FLCTL)		+= sh_flctl.o
+obj-$(CONFIG_MTD_NAND_MXC)		+= mxc_nand.o
+obj-$(CONFIG_MTD_NAND_SOCRATES)		+= socrates_nand.o
+obj-$(CONFIG_MTD_NAND_TXX9NDFMC)	+= txx9ndfmc.o
+obj-$(CONFIG_MTD_NAND_NUC900)		+= nuc900_nand.o
+obj-$(CONFIG_MTD_NAND_MPC5121_NFC)	+= mpc5121_nfc.o
+obj-$(CONFIG_MTD_NAND_VF610_NFC)	+= vf610_nfc.o
+obj-$(CONFIG_MTD_NAND_RICOH)		+= r852.o
+obj-$(CONFIG_MTD_NAND_JZ4740)		+= jz4740_nand.o
+obj-$(CONFIG_MTD_NAND_JZ4780)		+= jz4780_nand.o jz4780_bch.o
+obj-$(CONFIG_MTD_NAND_GPMI_NAND)	+= gpmi-nand/
+obj-$(CONFIG_MTD_NAND_XWAY)		+= xway_nand.o
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)	+= bcm47xxnflash/
+obj-$(CONFIG_MTD_NAND_SUNXI)		+= sunxi_nand.o
+obj-$(CONFIG_MTD_NAND_HISI504)	        += hisi504_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= brcmnand/
+obj-$(CONFIG_MTD_NAND_QCOM)		+= qcom_nandc.o
+obj-$(CONFIG_MTD_NAND_MTK)		+= mtk_nand.o mtk_ecc.o
+
+nand-objs := nand_base.o nand_bbt.o nand_timings.o
diff --git a/drivers/mtd/nand/rawnand/ams-delta.c b/drivers/mtd/nand/rawnand/ams-delta.c
new file mode 100644
index 000000000000..0972493b6cd2
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/ams-delta.c
@@ -0,0 +1,291 @@ 
+/*
+ *  drivers/mtd/nand/rawnand/ams-delta.c
+ *
+ *  Copyright (C) 2006 Jonathan McDowell <noodles@earth.li>
+ *
+ *  Derived from drivers/mtd/toto.c
+ *  Converted to platform driver by Janusz Krzysztofik <jkrzyszt@tis.icnet.pl>
+ *  Partially stolen from drivers/mtd/nand/plat_nand.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Overview:
+ *   This is a device driver for the NAND flash device found on the
+ *   Amstrad E3 (Delta).
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/gpio-omap.h>
+
+#include <asm/io.h>
+#include <asm/sizes.h>
+
+#include <mach/board-ams-delta.h>
+
+#include <mach/hardware.h>
+
+/*
+ * MTD structure for E3 (Delta)
+ */
+static struct mtd_info *ams_delta_mtd = NULL;
+
+/*
+ * Define partitions for flash devices
+ */
+
+static struct mtd_partition partition_info[] = {
+	{ .name		= "Kernel",
+	  .offset	= 0,
+	  .size		= 3 * SZ_1M + SZ_512K },
+	{ .name		= "u-boot",
+	  .offset	= 3 * SZ_1M + SZ_512K,
+	  .size		= SZ_256K },
+	{ .name		= "u-boot params",
+	  .offset	= 3 * SZ_1M + SZ_512K + SZ_256K,
+	  .size		= SZ_256K },
+	{ .name		= "Amstrad LDR",
+	  .offset	= 4 * SZ_1M,
+	  .size		= SZ_256K },
+	{ .name		= "File system",
+	  .offset	= 4 * SZ_1M + 1 * SZ_256K,
+	  .size		= 27 * SZ_1M },
+	{ .name		= "PBL reserved",
+	  .offset	= 32 * SZ_1M - 3 * SZ_256K,
+	  .size		=  3 * SZ_256K },
+};
+
+static void ams_delta_write_byte(struct mtd_info *mtd, u_char byte)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	void __iomem *io_base = (void __iomem *)nand_get_controller_data(this);
+
+	writew(0, io_base + OMAP_MPUIO_IO_CNTL);
+	writew(byte, this->IO_ADDR_W);
+	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 0);
+	ndelay(40);
+	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NWE, 1);
+}
+
+static u_char ams_delta_read_byte(struct mtd_info *mtd)
+{
+	u_char res;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	void __iomem *io_base = (void __iomem *)nand_get_controller_data(this);
+
+	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 0);
+	ndelay(40);
+	writew(~0, io_base + OMAP_MPUIO_IO_CNTL);
+	res = readw(this->IO_ADDR_R);
+	gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NRE, 1);
+
+	return res;
+}
+
+static void ams_delta_write_buf(struct mtd_info *mtd, const u_char *buf,
+				int len)
+{
+	int i;
+
+	for (i=0; i<len; i++)
+		ams_delta_write_byte(mtd, buf[i]);
+}
+
+static void ams_delta_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	int i;
+
+	for (i=0; i<len; i++)
+		buf[i] = ams_delta_read_byte(mtd);
+}
+
+/*
+ * Command control function
+ *
+ * ctrl:
+ * NAND_NCE: bit 0 -> bit 2
+ * NAND_CLE: bit 1 -> bit 7
+ * NAND_ALE: bit 2 -> bit 6
+ */
+static void ams_delta_hwcontrol(struct mtd_info *mtd, int cmd,
+				unsigned int ctrl)
+{
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_NCE,
+				(ctrl & NAND_NCE) == 0);
+		gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_CLE,
+				(ctrl & NAND_CLE) != 0);
+		gpio_set_value(AMS_DELTA_GPIO_PIN_NAND_ALE,
+				(ctrl & NAND_ALE) != 0);
+	}
+
+	if (cmd != NAND_CMD_NONE)
+		ams_delta_write_byte(mtd, cmd);
+}
+
+static int ams_delta_nand_ready(struct mtd_info *mtd)
+{
+	return gpio_get_value(AMS_DELTA_GPIO_PIN_NAND_RB);
+}
+
+static const struct gpio _mandatory_gpio[] = {
+	{
+		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NCE,
+		.flags	= GPIOF_OUT_INIT_HIGH,
+		.label	= "nand_nce",
+	},
+	{
+		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NRE,
+		.flags	= GPIOF_OUT_INIT_HIGH,
+		.label	= "nand_nre",
+	},
+	{
+		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NWP,
+		.flags	= GPIOF_OUT_INIT_HIGH,
+		.label	= "nand_nwp",
+	},
+	{
+		.gpio	= AMS_DELTA_GPIO_PIN_NAND_NWE,
+		.flags	= GPIOF_OUT_INIT_HIGH,
+		.label	= "nand_nwe",
+	},
+	{
+		.gpio	= AMS_DELTA_GPIO_PIN_NAND_ALE,
+		.flags	= GPIOF_OUT_INIT_LOW,
+		.label	= "nand_ale",
+	},
+	{
+		.gpio	= AMS_DELTA_GPIO_PIN_NAND_CLE,
+		.flags	= GPIOF_OUT_INIT_LOW,
+		.label	= "nand_cle",
+	},
+};
+
+/*
+ * Main initialization routine
+ */
+static int ams_delta_init(struct platform_device *pdev)
+{
+	struct nand_chip *this;
+	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	void __iomem *io_base;
+	int err = 0;
+
+	if (!res)
+		return -ENXIO;
+
+	/* Allocate memory for MTD device structure and private data */
+	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+	if (!this) {
+		printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	ams_delta_mtd = nand_to_mtd(this);
+	ams_delta_mtd->owner = THIS_MODULE;
+
+	/*
+	 * Don't try to request the memory region from here,
+	 * it should have been already requested from the
+	 * gpio-omap driver and requesting it again would fail.
+	 */
+
+	io_base = ioremap(res->start, resource_size(res));
+	if (io_base == NULL) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		err = -EIO;
+		goto out_free;
+	}
+
+	nand_set_controller_data(this, (void *)io_base);
+
+	/* Set address of NAND IO lines */
+	this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
+	this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
+	this->read_byte = ams_delta_read_byte;
+	this->write_buf = ams_delta_write_buf;
+	this->read_buf = ams_delta_read_buf;
+	this->cmd_ctrl = ams_delta_hwcontrol;
+	if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
+		this->dev_ready = ams_delta_nand_ready;
+	} else {
+		this->dev_ready = NULL;
+		printk(KERN_NOTICE "Couldn't request gpio for Delta NAND ready.\n");
+	}
+	/* 30 us command delay time */
+	this->chip_delay = 30;
+	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
+
+	platform_set_drvdata(pdev, io_base);
+
+	/* Request the mandatory control GPIOs, all lines initially inactive */
+	err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+	if (err)
+		goto out_gpio;
+
+	/* Scan to find existence of the device */
+	if (nand_scan(ams_delta_mtd, 1)) {
+		err = -ENXIO;
+		goto out_mtd;
+	}
+
+	/* Register the partitions */
+	mtd_device_register(ams_delta_mtd, partition_info,
+			    ARRAY_SIZE(partition_info));
+
+	goto out;
+
+ out_mtd:
+	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+out_gpio:
+	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
+	iounmap(io_base);
+out_free:
+	kfree(this);
+ out:
+	return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int ams_delta_cleanup(struct platform_device *pdev)
+{
+	void __iomem *io_base = platform_get_drvdata(pdev);
+
+	/* Release resources, unregister device */
+	nand_release(ams_delta_mtd);
+
+	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
+	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
+	iounmap(io_base);
+
+	/* Free the MTD device structure */
+	kfree(mtd_to_nand(ams_delta_mtd));
+
+	return 0;
+}
+
+static struct platform_driver ams_delta_nand_driver = {
+	.probe		= ams_delta_init,
+	.remove		= ams_delta_cleanup,
+	.driver		= {
+		.name	= "ams-delta-nand",
+	},
+};
+
+module_platform_driver(ams_delta_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
+MODULE_DESCRIPTION("Glue layer for NAND flash on Amstrad E3 (Delta)");
diff --git a/drivers/mtd/nand/rawnand/atmel_nand.c b/drivers/mtd/nand/rawnand/atmel_nand.c
new file mode 100644
index 000000000000..fbb7e5da2541
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/atmel_nand.c
@@ -0,0 +1,2481 @@ 
+/*
+ *  Copyright © 2003 Rick Bronson
+ *
+ *  Derived from drivers/mtd/nand/autcpu12.c
+ *	 Copyright © 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ *  Derived from drivers/mtd/spia.c
+ *	 Copyright © 2000 Steven J. Hill (sjhill@cotw.com)
+ *
+ *
+ *  Add Hardware ECC support for AT91SAM9260 / AT91SAM9263
+ *     Richard Genoud (richard.genoud@gmail.com), Adeneo Copyright © 2007
+ *
+ *     Derived from Das U-Boot source code
+ *     		(u-boot-1.1.5/board/atmel/at91sam9263ek/nand.c)
+ *     © Copyright 2006 ATMEL Rousset, Lacressonniere Nicolas
+ *
+ *  Add Programmable Multibit ECC support for various AT91 SoC
+ *     © Copyright 2012 ATMEL, Hong Xu
+ *
+ *  Add Nand Flash Controller support for SAMA5 SoC
+ *     © Copyright 2013 ATMEL, Josh Wu (josh.wu@atmel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_data/atmel.h>
+
+static int use_dma = 1;
+module_param(use_dma, int, 0);
+
+static int on_flash_bbt = 0;
+module_param(on_flash_bbt, int, 0);
+
+/* Register access macros */
+#define ecc_readl(add, reg)				\
+	__raw_readl(add + ATMEL_ECC_##reg)
+#define ecc_writel(add, reg, value)			\
+	__raw_writel((value), add + ATMEL_ECC_##reg)
+
+#include "atmel_nand_ecc.h"	/* Hardware ECC registers */
+#include "atmel_nand_nfc.h"	/* Nand Flash Controller definition */
+
+struct atmel_nand_caps {
+	bool pmecc_correct_erase_page;
+	uint8_t pmecc_max_correction;
+};
+
+/*
+ * OOB layout for large page size:
+ * bad block info is in bytes 0 and 1.
+ * The bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read.
+ *
+ * OOB layout for small page size:
+ * bad block info is in bytes 4 and 5.
+ * The bytes have to be consecutive to avoid
+ * several NAND_CMD_RNDOUT during read.
+ */
+static int atmel_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 4;
+	oobregion->offset = 0;
+
+	return 0;
+}
+
+static int atmel_ooblayout_free_sp(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 6;
+	oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops atmel_ooblayout_sp_ops = {
+	.ecc = atmel_ooblayout_ecc_sp,
+	.free = atmel_ooblayout_free_sp,
+};
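These callbacks only describe the small-page OOB layout; a sketch of how such an mtd_ooblayout_ops instance is typically attached, using the generic mtd_set_ooblayout() helper, would be:

	if (mtd->writesize <= 512)
		mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops);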
+
+struct atmel_nfc {
+	void __iomem		*base_cmd_regs;
+	void __iomem		*hsmc_regs;
+	void			*sram_bank0;
+	dma_addr_t		sram_bank0_phys;
+	bool			use_nfc_sram;
+	bool			write_by_sram;
+
+	struct clk		*clk;
+
+	bool			is_initialized;
+	struct completion	comp_ready;
+	struct completion	comp_cmd_done;
+	struct completion	comp_xfer_done;
+
+	/* Points to the SRAM bank which contains the data read via the NFC */
+	void			*data_in_sram;
+	bool			will_write_sram;
+};
+static struct atmel_nfc	nand_nfc;
+
+struct atmel_nand_host {
+	struct nand_chip	nand_chip;
+	void __iomem		*io_base;
+	dma_addr_t		io_phys;
+	struct atmel_nand_data	board;
+	struct device		*dev;
+	void __iomem		*ecc;
+
+	struct completion	comp;
+	struct dma_chan		*dma_chan;
+
+	struct atmel_nfc	*nfc;
+
+	const struct atmel_nand_caps	*caps;
+	bool			has_pmecc;
+	u8			pmecc_corr_cap;
+	u16			pmecc_sector_size;
+	bool			has_no_lookup_table;
+	u32			pmecc_lookup_table_offset;
+	u32			pmecc_lookup_table_offset_512;
+	u32			pmecc_lookup_table_offset_1024;
+
+	int			pmecc_degree;	/* Degree of remainders */
+	int			pmecc_cw_len;	/* Length of codeword */
+
+	void __iomem		*pmerrloc_base;
+	void __iomem		*pmerrloc_el_base;
+	void __iomem		*pmecc_rom_base;
+
+	/* lookup table for alpha_to and index_of */
+	void __iomem		*pmecc_alpha_to;
+	void __iomem		*pmecc_index_of;
+
+	/* data for pmecc computation */
+	int16_t			*pmecc_partial_syn;
+	int16_t			*pmecc_si;
+	int16_t			*pmecc_smu;	/* Sigma table */
+	int16_t			*pmecc_lmu;	/* polynomial order */
+	int			*pmecc_mu;
+	int			*pmecc_dmu;
+	int			*pmecc_delta;
+};
+
+/*
+ * Enable NAND.
+ */
+static void atmel_nand_enable(struct atmel_nand_host *host)
+{
+	if (gpio_is_valid(host->board.enable_pin))
+		gpio_set_value(host->board.enable_pin, 0);
+}
+
+/*
+ * Disable NAND.
+ */
+static void atmel_nand_disable(struct atmel_nand_host *host)
+{
+	if (gpio_is_valid(host->board.enable_pin))
+		gpio_set_value(host->board.enable_pin, 1);
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void atmel_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		if (ctrl & NAND_NCE)
+			atmel_nand_enable(host);
+		else
+			atmel_nand_disable(host);
+	}
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		writeb(cmd, host->io_base + (1 << host->board.cle));
+	else
+		writeb(cmd, host->io_base + (1 << host->board.ale));
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int atmel_nand_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	return gpio_get_value(host->board.rdy_pin) ^
+                !!host->board.rdy_pin_active_low;
+}
+
+/* Set up for hardware ready pin and enable pin. */
+static int atmel_nand_set_enable_ready_pins(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	int res = 0;
+
+	if (gpio_is_valid(host->board.rdy_pin)) {
+		res = devm_gpio_request(host->dev,
+				host->board.rdy_pin, "nand_rdy");
+		if (res < 0) {
+			dev_err(host->dev,
+				"can't request rdy gpio %d\n",
+				host->board.rdy_pin);
+			return res;
+		}
+
+		res = gpio_direction_input(host->board.rdy_pin);
+		if (res < 0) {
+			dev_err(host->dev,
+				"can't request input direction rdy gpio %d\n",
+				host->board.rdy_pin);
+			return res;
+		}
+
+		chip->dev_ready = atmel_nand_device_ready;
+	}
+
+	if (gpio_is_valid(host->board.enable_pin)) {
+		res = devm_gpio_request(host->dev,
+				host->board.enable_pin, "nand_enable");
+		if (res < 0) {
+			dev_err(host->dev,
+				"can't request enable gpio %d\n",
+				host->board.enable_pin);
+			return res;
+		}
+
+		res = gpio_direction_output(host->board.enable_pin, 1);
+		if (res < 0) {
+			dev_err(host->dev,
+				"can't request output direction enable gpio %d\n",
+				host->board.enable_pin);
+			return res;
+		}
+	}
+
+	return res;
+}
+
+/*
+ * Minimal-overhead PIO for data access.
+ */
+static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+		memcpy(buf, host->nfc->data_in_sram, len);
+		host->nfc->data_in_sram += len;
+	} else {
+		__raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+	}
+}
+
+static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (host->nfc && host->nfc->use_nfc_sram && host->nfc->data_in_sram) {
+		memcpy(buf, host->nfc->data_in_sram, len);
+		host->nfc->data_in_sram += len;
+	} else {
+		__raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+	}
+}
+
+static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
+
+	__raw_writesb(nand_chip->IO_ADDR_W, buf, len);
+}
+
+static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip	*nand_chip = mtd_to_nand(mtd);
+
+	__raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
+}
+
+static void dma_complete_func(void *completion)
+{
+	complete(completion);
+}
+
+static int nfc_set_sram_bank(struct atmel_nand_host *host, unsigned int bank)
+{
+	/* NFC only has two banks. Must be 0 or 1 */
+	if (bank > 1)
+		return -EINVAL;
+
+	if (bank) {
+		struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+
+		/* The NFC can only handle 2 banks for flashes with pages of 2k or less */
+		if (mtd->writesize > 2048)
+			return -EINVAL;
+		nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK1);
+	} else {
+		nfc_writel(host->nfc->hsmc_regs, BANK, ATMEL_HSMC_NFC_BANK0);
+	}
+
+	return 0;
+}
+
+static uint nfc_get_sram_off(struct atmel_nand_host *host)
+{
+	if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+		return NFC_SRAM_BANK1_OFFSET;
+	else
+		return 0;
+}
+
+static dma_addr_t nfc_sram_phys(struct atmel_nand_host *host)
+{
+	if (nfc_readl(host->nfc->hsmc_regs, BANK) & ATMEL_HSMC_NFC_BANK1)
+		return host->nfc->sram_bank0_phys + NFC_SRAM_BANK1_OFFSET;
+	else
+		return host->nfc->sram_bank0_phys;
+}
+
+static int atmel_nand_dma_op(struct mtd_info *mtd, void *buf, int len,
+			       int is_read)
+{
+	struct dma_device *dma_dev;
+	enum dma_ctrl_flags flags;
+	dma_addr_t dma_src_addr, dma_dst_addr, phys_addr;
+	struct dma_async_tx_descriptor *tx = NULL;
+	dma_cookie_t cookie;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	void *p = buf;
+	int err = -EIO;
+	enum dma_data_direction dir = is_read ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	struct atmel_nfc *nfc = host->nfc;
+
+	if (buf >= high_memory)
+		goto err_buf;
+
+	dma_dev = host->dma_chan->device;
+
+	flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+
+	phys_addr = dma_map_single(dma_dev->dev, p, len, dir);
+	if (dma_mapping_error(dma_dev->dev, phys_addr)) {
+		dev_err(host->dev, "Failed to dma_map_single\n");
+		goto err_buf;
+	}
+
+	if (is_read) {
+		if (nfc && nfc->data_in_sram)
+			dma_src_addr = nfc_sram_phys(host) + (nfc->data_in_sram
+				- (nfc->sram_bank0 + nfc_get_sram_off(host)));
+		else
+			dma_src_addr = host->io_phys;
+
+		dma_dst_addr = phys_addr;
+	} else {
+		dma_src_addr = phys_addr;
+
+		if (nfc && nfc->write_by_sram)
+			dma_dst_addr = nfc_sram_phys(host);
+		else
+			dma_dst_addr = host->io_phys;
+	}
+
+	tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
+					     dma_src_addr, len, flags);
+	if (!tx) {
+		dev_err(host->dev, "Failed to prepare DMA memcpy\n");
+		goto err_dma;
+	}
+
+	init_completion(&host->comp);
+	tx->callback = dma_complete_func;
+	tx->callback_param = &host->comp;
+
+	cookie = tx->tx_submit(tx);
+	if (dma_submit_error(cookie)) {
+		dev_err(host->dev, "Failed to do DMA tx_submit\n");
+		goto err_dma;
+	}
+
+	dma_async_issue_pending(host->dma_chan);
+	wait_for_completion(&host->comp);
+
+	if (is_read && nfc && nfc->data_in_sram)
+		/* After reading data from SRAM, we need to advance the position */
+		nfc->data_in_sram += len;
+
+	err = 0;
+
+err_dma:
+	dma_unmap_single(dma_dev->dev, phys_addr, len, dir);
+err_buf:
+	if (err != 0)
+		dev_dbg(host->dev, "Fall back to CPU I/O\n");
+	return err;
+}
+
+static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (use_dma && len > mtd->oobsize)
+		/* only use DMA for transfers bigger than the oob size: better performance */
+		if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
+			return;
+
+	if (chip->options & NAND_BUSWIDTH_16)
+		atmel_read_buf16(mtd, buf, len);
+	else
+		atmel_read_buf8(mtd, buf, len);
+}
+
+static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (use_dma && len > mtd->oobsize)
+		/* only use DMA for transfers bigger than the oob size: better performance */
+		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
+			return;
+
+	if (chip->options & NAND_BUSWIDTH_16)
+		atmel_write_buf16(mtd, buf, len);
+	else
+		atmel_write_buf8(mtd, buf, len);
+}
+
+/*
+ * Return number of ecc bytes per sector according to sector size and
+ * correction capability
+ *
+ * The following table shows what the at91 PMECC supports:
+ * Correction Capability	Sector_512_bytes	Sector_1024_bytes
+ * =====================	================	=================
+ *                2-bits                 4-bytes                  4-bytes
+ *                4-bits                 7-bytes                  7-bytes
+ *                8-bits                13-bytes                 14-bytes
+ *               12-bits                20-bytes                 21-bytes
+ *               24-bits                39-bytes                 42-bytes
+ *               32-bits                52-bytes                 56-bytes
+ */
+static int pmecc_get_ecc_bytes(int cap, int sector_size)
+{
+	int m = 12 + sector_size / 512;
+	return (m * cap + 7) / 8;
+}
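+/*
+ * Worked example of the formula above (values chosen for illustration):
+ * with m = 12 + sector_size / 512, the byte count is ceil(m * cap / 8),
+ * which reproduces the table entries, e.g.:
+ *
+ *	pmecc_get_ecc_bytes(8, 512)   = (13 * 8  + 7) / 8 = 13
+ *	pmecc_get_ecc_bytes(8, 1024)  = (14 * 8  + 7) / 8 = 14
+ *	pmecc_get_ecc_bytes(24, 1024) = (14 * 24 + 7) / 8 = 42
+ */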
+
+static void __iomem *pmecc_get_alpha_to(struct atmel_nand_host *host)
+{
+	int table_size;
+
+	table_size = host->pmecc_sector_size == 512 ?
+		PMECC_LOOKUP_TABLE_SIZE_512 : PMECC_LOOKUP_TABLE_SIZE_1024;
+
+	return host->pmecc_rom_base + host->pmecc_lookup_table_offset +
+			table_size * sizeof(int16_t);
+}
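+/*
+ * Layout note: the lookup table region holds two tables of table_size
+ * 16-bit entries each, index_of followed by alpha_to.  index_of starts at
+ * pmecc_lookup_table_offset (see atmel_pmecc_nand_init_params() and
+ * create_lookup_table()), so alpha_to starts one table later, which is
+ * what this helper returns.
+ */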
+
+static int pmecc_data_alloc(struct atmel_nand_host *host)
+{
+	const int cap = host->pmecc_corr_cap;
+	int size;
+
+	size = (2 * cap + 1) * sizeof(int16_t);
+	host->pmecc_partial_syn = devm_kzalloc(host->dev, size, GFP_KERNEL);
+	host->pmecc_si = devm_kzalloc(host->dev, size, GFP_KERNEL);
+	host->pmecc_lmu = devm_kzalloc(host->dev,
+			(cap + 1) * sizeof(int16_t), GFP_KERNEL);
+	host->pmecc_smu = devm_kzalloc(host->dev,
+			(cap + 2) * size, GFP_KERNEL);
+
+	size = (cap + 1) * sizeof(int);
+	host->pmecc_mu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+	host->pmecc_dmu = devm_kzalloc(host->dev, size, GFP_KERNEL);
+	host->pmecc_delta = devm_kzalloc(host->dev, size, GFP_KERNEL);
+
+	if (!host->pmecc_partial_syn ||
+		!host->pmecc_si ||
+		!host->pmecc_lmu ||
+		!host->pmecc_smu ||
+		!host->pmecc_mu ||
+		!host->pmecc_dmu ||
+		!host->pmecc_delta)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void pmecc_gen_syndrome(struct mtd_info *mtd, int sector)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	int i;
+	uint32_t value;
+
+	/* Fill odd syndromes */
+	for (i = 0; i < host->pmecc_corr_cap; i++) {
+		value = pmecc_readl_rem_relaxed(host->ecc, sector, i / 2);
+		if (i & 1)
+			value >>= 16;
+		value &= 0xffff;
+		host->pmecc_partial_syn[(2 * i) + 1] = (int16_t)value;
+	}
+}
+
+static void pmecc_substitute(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	int16_t __iomem *alpha_to = host->pmecc_alpha_to;
+	int16_t __iomem *index_of = host->pmecc_index_of;
+	int16_t *partial_syn = host->pmecc_partial_syn;
+	const int cap = host->pmecc_corr_cap;
+	int16_t *si;
+	int i, j;
+
+	/* si[] is a table that holds the current syndrome values;
+	 * each element of the table belongs to the field.
+	 */
+	si = host->pmecc_si;
+
+	memset(&si[1], 0, sizeof(int16_t) * (2 * cap - 1));
+
+	/* Compute the 2t syndromes based on S(x) */
+	/* Odd syndromes */
+	for (i = 1; i < 2 * cap; i += 2) {
+		for (j = 0; j < host->pmecc_degree; j++) {
+			if (partial_syn[i] & ((unsigned short)0x1 << j))
+				si[i] = readw_relaxed(alpha_to + i * j) ^ si[i];
+		}
+	}
+	/* Even syndrome = (Odd syndrome) ** 2 */
+	for (i = 2, j = 1; j <= cap; i = ++j << 1) {
+		if (si[j] == 0) {
+			si[i] = 0;
+		} else {
+			int16_t tmp;
+
+			tmp = readw_relaxed(index_of + si[j]);
+			tmp = (tmp * 2) % host->pmecc_cw_len;
+			si[i] = readw_relaxed(alpha_to + tmp);
+		}
+	}
+
+	return;
+}
+
+static void pmecc_get_sigma(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	int16_t *lmu = host->pmecc_lmu;
+	int16_t *si = host->pmecc_si;
+	int *mu = host->pmecc_mu;
+	int *dmu = host->pmecc_dmu;	/* Discrepancy */
+	int *delta = host->pmecc_delta; /* Delta order */
+	int cw_len = host->pmecc_cw_len;
+	const int16_t cap = host->pmecc_corr_cap;
+	const int num = 2 * cap + 1;
+	int16_t __iomem	*index_of = host->pmecc_index_of;
+	int16_t __iomem	*alpha_to = host->pmecc_alpha_to;
+	int i, j, k;
+	uint32_t dmu_0_count, tmp;
+	int16_t *smu = host->pmecc_smu;
+
+	/* index of largest delta */
+	int ro;
+	int largest;
+	int diff;
+
+	dmu_0_count = 0;
+
+	/* First Row */
+
+	/* Mu */
+	mu[0] = -1;
+
+	memset(smu, 0, sizeof(int16_t) * num);
+	smu[0] = 1;
+
+	/* discrepancy set to 1 */
+	dmu[0] = 1;
+	/* polynomial order set to 0 */
+	lmu[0] = 0;
+	delta[0] = (mu[0] * 2 - lmu[0]) >> 1;
+
+	/* Second Row */
+
+	/* Mu */
+	mu[1] = 0;
+	/* Sigma(x) set to 1 */
+	memset(&smu[num], 0, sizeof(int16_t) * num);
+	smu[num] = 1;
+
+	/* discrepancy set to S1 */
+	dmu[1] = si[1];
+
+	/* polynomial order set to 0 */
+	lmu[1] = 0;
+
+	delta[1] = (mu[1] * 2 - lmu[1]) >> 1;
+
+	/* Init the Sigma(x) last row */
+	memset(&smu[(cap + 1) * num], 0, sizeof(int16_t) * num);
+
+	for (i = 1; i <= cap; i++) {
+		mu[i + 1] = i << 1;
+		/* Begin Computing Sigma (Mu+1) and L(mu) */
+		/* check if discrepancy is set to 0 */
+		if (dmu[i] == 0) {
+			dmu_0_count++;
+
+			tmp = ((cap - (lmu[i] >> 1) - 1) / 2);
+			if ((cap - (lmu[i] >> 1) - 1) & 0x1)
+				tmp += 2;
+			else
+				tmp += 1;
+
+			if (dmu_0_count == tmp) {
+				for (j = 0; j <= (lmu[i] >> 1) + 1; j++)
+					smu[(cap + 1) * num + j] =
+							smu[i * num + j];
+
+				lmu[cap + 1] = lmu[i];
+				return;
+			}
+
+			/* copy polynomial */
+			for (j = 0; j <= lmu[i] >> 1; j++)
+				smu[(i + 1) * num + j] = smu[i * num + j];
+
+			/* copy previous polynomial order to the next */
+			lmu[i + 1] = lmu[i];
+		} else {
+			ro = 0;
+			largest = -1;
+			/* find largest delta with dmu != 0 */
+			for (j = 0; j < i; j++) {
+				if ((dmu[j]) && (delta[j] > largest)) {
+					largest = delta[j];
+					ro = j;
+				}
+			}
+
+			/* compute difference */
+			diff = (mu[i] - mu[ro]);
+
+			/* Compute degree of the new smu polynomial */
+			if ((lmu[i] >> 1) > ((lmu[ro] >> 1) + diff))
+				lmu[i + 1] = lmu[i];
+			else
+				lmu[i + 1] = ((lmu[ro] >> 1) + diff) * 2;
+
+			/* Init smu[i+1] with 0 */
+			for (k = 0; k < num; k++)
+				smu[(i + 1) * num + k] = 0;
+
+			/* Compute smu[i+1] */
+			for (k = 0; k <= lmu[ro] >> 1; k++) {
+				int16_t a, b, c;
+
+				if (!(smu[ro * num + k] && dmu[i]))
+					continue;
+				a = readw_relaxed(index_of + dmu[i]);
+				b = readw_relaxed(index_of + dmu[ro]);
+				c = readw_relaxed(index_of + smu[ro * num + k]);
+				tmp = a + (cw_len - b) + c;
+				a = readw_relaxed(alpha_to + tmp % cw_len);
+				smu[(i + 1) * num + (k + diff)] = a;
+			}
+
+			for (k = 0; k <= lmu[i] >> 1; k++)
+				smu[(i + 1) * num + k] ^= smu[i * num + k];
+		}
+
+		/* End Computing Sigma (Mu+1) and L(mu) */
+		/* In either case compute delta */
+		delta[i + 1] = (mu[i + 1] * 2 - lmu[i + 1]) >> 1;
+
+		/* Do not compute discrepancy for the last iteration */
+		if (i >= cap)
+			continue;
+
+		for (k = 0; k <= (lmu[i + 1] >> 1); k++) {
+			tmp = 2 * (i - 1);
+			if (k == 0) {
+				dmu[i + 1] = si[tmp + 3];
+			} else if (smu[(i + 1) * num + k] && si[tmp + 3 - k]) {
+				int16_t a, b, c;
+				a = readw_relaxed(index_of +
+						smu[(i + 1) * num + k]);
+				b = si[2 * (i - 1) + 3 - k];
+				c = readw_relaxed(index_of + b);
+				tmp = a + c;
+				tmp %= cw_len;
+				dmu[i + 1] = readw_relaxed(alpha_to + tmp) ^
+					dmu[i + 1];
+			}
+		}
+	}
+
+	return;
+}
+
+static int pmecc_err_location(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	unsigned long end_time;
+	const int cap = host->pmecc_corr_cap;
+	const int num = 2 * cap + 1;
+	int sector_size = host->pmecc_sector_size;
+	int err_nbr = 0;	/* number of errors */
+	int roots_nbr;		/* number of roots */
+	int i;
+	uint32_t val;
+	int16_t *smu = host->pmecc_smu;
+
+	pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
+
+	for (i = 0; i <= host->pmecc_lmu[cap + 1] >> 1; i++) {
+		pmerrloc_writel_sigma_relaxed(host->pmerrloc_base, i,
+				      smu[(cap + 1) * num + i]);
+		err_nbr++;
+	}
+
+	val = (err_nbr - 1) << 16;
+	if (sector_size == 1024)
+		val |= 1;
+
+	pmerrloc_writel(host->pmerrloc_base, ELCFG, val);
+	pmerrloc_writel(host->pmerrloc_base, ELEN,
+			sector_size * 8 + host->pmecc_degree * cap);
+
+	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+	while (!(pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+		 & PMERRLOC_CALC_DONE)) {
+		if (unlikely(time_after(jiffies, end_time))) {
+			dev_err(host->dev, "PMECC: Timed out calculating the error location.\n");
+			return -1;
+		}
+		cpu_relax();
+	}
+
+	roots_nbr = (pmerrloc_readl_relaxed(host->pmerrloc_base, ELISR)
+		& PMERRLOC_ERR_NUM_MASK) >> 8;
+	/* Number of roots == degree of smu hence <= cap */
+	if (roots_nbr == host->pmecc_lmu[cap + 1] >> 1)
+		return err_nbr - 1;
+
+	/* The number of roots does not match the degree of smu:
+	 * unable to correct the errors */
+	return -1;
+}
+
+static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
+		int sector_num, int extra_bytes, int err_nbr)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	int i = 0;
+	int byte_pos, bit_pos, sector_size, pos;
+	uint32_t tmp;
+	uint8_t err_byte;
+
+	sector_size = host->pmecc_sector_size;
+
+	while (err_nbr) {
+		tmp = pmerrloc_readl_el_relaxed(host->pmerrloc_el_base, i) - 1;
+		byte_pos = tmp / 8;
+		bit_pos  = tmp % 8;
+
+		if (byte_pos >= (sector_size + extra_bytes))
+			BUG();	/* should never happen */
+
+		if (byte_pos < sector_size) {
+			err_byte = *(buf + byte_pos);
+			*(buf + byte_pos) ^= (1 << bit_pos);
+
+			pos = sector_num * host->pmecc_sector_size + byte_pos;
+			dev_dbg(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+				pos, bit_pos, err_byte, *(buf + byte_pos));
+		} else {
+			struct mtd_oob_region oobregion;
+
+			/* Bit flip in OOB area */
+			tmp = sector_num * nand_chip->ecc.bytes
+					+ (byte_pos - sector_size);
+			err_byte = ecc[tmp];
+			ecc[tmp] ^= (1 << bit_pos);
+
+			mtd_ooblayout_ecc(mtd, 0, &oobregion);
+			pos = tmp + oobregion.offset;
+			dev_dbg(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
+				pos, bit_pos, err_byte, ecc[tmp]);
+		}
+
+		i++;
+		err_nbr--;
+	}
+
+	return;
+}
+
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
+	u8 *ecc)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	int i, err_nbr;
+	uint8_t *buf_pos;
+	int max_bitflips = 0;
+
+	for (i = 0; i < nand_chip->ecc.steps; i++) {
+		err_nbr = 0;
+		if (pmecc_stat & 0x1) {
+			buf_pos = buf + i * host->pmecc_sector_size;
+
+			pmecc_gen_syndrome(mtd, i);
+			pmecc_substitute(mtd);
+			pmecc_get_sigma(mtd);
+
+			err_nbr = pmecc_err_location(mtd);
+			if (err_nbr >= 0) {
+				pmecc_correct_data(mtd, buf_pos, ecc, i,
+						   nand_chip->ecc.bytes,
+						   err_nbr);
+			} else if (!host->caps->pmecc_correct_erase_page) {
+				u8 *ecc_pos = ecc + (i * nand_chip->ecc.bytes);
+
+				/* Try to detect erased pages */
+				err_nbr = nand_check_erased_ecc_chunk(buf_pos,
+							host->pmecc_sector_size,
+							ecc_pos,
+							nand_chip->ecc.bytes,
+							NULL, 0,
+							nand_chip->ecc.strength);
+			}
+
+			if (err_nbr < 0) {
+				dev_err(host->dev, "PMECC: Too many errors\n");
+				mtd->ecc_stats.failed++;
+				return -EIO;
+			}
+
+			mtd->ecc_stats.corrected += err_nbr;
+			max_bitflips = max_t(int, max_bitflips, err_nbr);
+		}
+		pmecc_stat >>= 1;
+	}
+
+	return max_bitflips;
+}
+
+static void pmecc_enable(struct atmel_nand_host *host, int ecc_op)
+{
+	u32 val;
+
+	if (ecc_op != NAND_ECC_READ && ecc_op != NAND_ECC_WRITE) {
+		dev_err(host->dev, "atmel_nand: wrong pmecc operation type!");
+		return;
+	}
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+	val = pmecc_readl_relaxed(host->ecc, CFG);
+
+	if (ecc_op == NAND_ECC_READ)
+		pmecc_writel(host->ecc, CFG, (val & ~PMECC_CFG_WRITE_OP)
+			| PMECC_CFG_AUTO_ENABLE);
+	else
+		pmecc_writel(host->ecc, CFG, (val | PMECC_CFG_WRITE_OP)
+			& ~PMECC_CFG_AUTO_ENABLE);
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+}
+
+static int atmel_nand_pmecc_read_page(struct mtd_info *mtd,
+	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	int eccsize = chip->ecc.size * chip->ecc.steps;
+	uint8_t *oob = chip->oob_poi;
+	uint32_t stat;
+	unsigned long end_time;
+	int bitflips = 0;
+
+	if (!host->nfc || !host->nfc->use_nfc_sram)
+		pmecc_enable(host, NAND_ECC_READ);
+
+	chip->read_buf(mtd, buf, eccsize);
+	chip->read_buf(mtd, oob, mtd->oobsize);
+
+	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+		if (unlikely(time_after(jiffies, end_time))) {
+			dev_err(host->dev, "PMECC: Timed out getting the error status.\n");
+			return -EIO;
+		}
+		cpu_relax();
+	}
+
+	stat = pmecc_readl_relaxed(host->ecc, ISR);
+	if (stat != 0) {
+		struct mtd_oob_region oobregion;
+
+		mtd_ooblayout_ecc(mtd, 0, &oobregion);
+		bitflips = pmecc_correction(mtd, stat, buf,
+					    &oob[oobregion.offset]);
+		if (bitflips < 0)
+			/* uncorrectable errors */
+			return 0;
+	}
+
+	return bitflips;
+}
+
+static int atmel_nand_pmecc_write_page(struct mtd_info *mtd,
+		struct nand_chip *chip, const uint8_t *buf, int oob_required,
+		int page)
+{
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	struct mtd_oob_region oobregion = { };
+	int i, j, section = 0;
+	unsigned long end_time;
+
+	if (!host->nfc || !host->nfc->write_by_sram) {
+		pmecc_enable(host, NAND_ECC_WRITE);
+		chip->write_buf(mtd, (u8 *)buf, mtd->writesize);
+	}
+
+	end_time = jiffies + msecs_to_jiffies(PMECC_MAX_TIMEOUT_MS);
+	while ((pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY)) {
+		if (unlikely(time_after(jiffies, end_time))) {
+			dev_err(host->dev, "PMECC: Timed out getting the ECC value.\n");
+			return -EIO;
+		}
+		cpu_relax();
+	}
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		for (j = 0; j < chip->ecc.bytes; j++) {
+			if (!oobregion.length)
+				mtd_ooblayout_ecc(mtd, section, &oobregion);
+
+			chip->oob_poi[oobregion.offset] =
+				pmecc_readb_ecc_relaxed(host->ecc, i, j);
+			oobregion.length--;
+			oobregion.offset++;
+			section++;
+		}
+	}
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static void atmel_pmecc_core_init(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
+	uint32_t val = 0;
+	struct mtd_oob_region oobregion;
+
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+
+	switch (host->pmecc_corr_cap) {
+	case 2:
+		val = PMECC_CFG_BCH_ERR2;
+		break;
+	case 4:
+		val = PMECC_CFG_BCH_ERR4;
+		break;
+	case 8:
+		val = PMECC_CFG_BCH_ERR8;
+		break;
+	case 12:
+		val = PMECC_CFG_BCH_ERR12;
+		break;
+	case 24:
+		val = PMECC_CFG_BCH_ERR24;
+		break;
+	case 32:
+		val = PMECC_CFG_BCH_ERR32;
+		break;
+	}
+
+	if (host->pmecc_sector_size == 512)
+		val |= PMECC_CFG_SECTOR512;
+	else if (host->pmecc_sector_size == 1024)
+		val |= PMECC_CFG_SECTOR1024;
+
+	switch (nand_chip->ecc.steps) {
+	case 1:
+		val |= PMECC_CFG_PAGE_1SECTOR;
+		break;
+	case 2:
+		val |= PMECC_CFG_PAGE_2SECTORS;
+		break;
+	case 4:
+		val |= PMECC_CFG_PAGE_4SECTORS;
+		break;
+	case 8:
+		val |= PMECC_CFG_PAGE_8SECTORS;
+		break;
+	}
+
+	val |= (PMECC_CFG_READ_OP | PMECC_CFG_SPARE_DISABLE
+		| PMECC_CFG_AUTO_DISABLE);
+	pmecc_writel(host->ecc, CFG, val);
+
+	pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
+	mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	pmecc_writel(host->ecc, SADDR, oobregion.offset);
+	pmecc_writel(host->ecc, EADDR,
+		     oobregion.offset + eccbytes - 1);
+	/* See datasheet about PMECC Clock Control Register */
+	pmecc_writel(host->ecc, CLK, 2);
+	pmecc_writel(host->ecc, IDR, 0xff);
+	pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
+}
+
+/*
+ * Get the minimum ECC requirements from the NAND chip.
+ * If pmecc-cap and pmecc-sector-size are not specified in the DTS, this
+ * function sets them according to the minimum ECC requirement. Otherwise,
+ * the values from the DTS file are used.
+ * Return 0 on success, otherwise an error code.
+ */
+static int pmecc_choose_ecc(struct atmel_nand_host *host,
+		int *cap, int *sector_size)
+{
+	/* Get minimum ECC requirements */
+	if (host->nand_chip.ecc_strength_ds) {
+		*cap = host->nand_chip.ecc_strength_ds;
+		*sector_size = host->nand_chip.ecc_step_ds;
+		dev_info(host->dev, "minimum ECC: %d bits in %d bytes\n",
+				*cap, *sector_size);
+	} else {
+		*cap = 2;
+		*sector_size = 512;
+		dev_info(host->dev, "can't detect min. ECC, assume 2 bits in 512 bytes\n");
+	}
+
+	/* If device tree doesn't specify, use NAND's minimum ECC parameters */
+	if (host->pmecc_corr_cap == 0) {
+		if (*cap > host->caps->pmecc_max_correction)
+			return -EINVAL;
+
+		/* use the closest supported ECC strength (round up) */
+		if (*cap <= 2)
+			host->pmecc_corr_cap = 2;
+		else if (*cap <= 4)
+			host->pmecc_corr_cap = 4;
+		else if (*cap <= 8)
+			host->pmecc_corr_cap = 8;
+		else if (*cap <= 12)
+			host->pmecc_corr_cap = 12;
+		else if (*cap <= 24)
+			host->pmecc_corr_cap = 24;
+		else if (*cap <= 32)
+			host->pmecc_corr_cap = 32;
+		else
+			return -EINVAL;
+	}
+	if (host->pmecc_sector_size == 0) {
+		/* use the closest supported sector size (round down) */
+		if (*sector_size >= 1024)
+			host->pmecc_sector_size = 1024;
+		else if (*sector_size >= 512)
+			host->pmecc_sector_size = 512;
+		else
+			return -EINVAL;
+	}
+	return 0;
+}
+
+static inline int deg(unsigned int poly)
+{
+	/* polynomial degree is the most-significant bit index */
+	return fls(poly) - 1;
+}
+
+static int build_gf_tables(int mm, unsigned int poly,
+		int16_t *index_of, int16_t *alpha_to)
+{
+	unsigned int i, x = 1;
+	const unsigned int k = 1 << deg(poly);
+	unsigned int nn = (1 << mm) - 1;
+
+	/* primitive polynomial must be of degree m */
+	if (k != (1u << mm))
+		return -EINVAL;
+
+	for (i = 0; i < nn; i++) {
+		alpha_to[i] = x;
+		index_of[x] = i;
+		if (i && (x == 1))
+			/* polynomial is not primitive (a^i=1 with 0<i<2^m-1) */
+			return -EINVAL;
+		x <<= 1;
+		if (x & k)
+			x ^= poly;
+	}
+	alpha_to[nn] = 1;
+	index_of[0] = 0;
+
+	return 0;
+}
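+/*
+ * Usage sketch (for illustration): the generated tables implement
+ * GF(2^mm) log/antilog arithmetic, i.e. alpha_to[i] = alpha^i and
+ * index_of[x] = log_alpha(x).  A field multiplication then reduces to
+ * adding logs modulo nn, as done in pmecc_get_sigma():
+ *
+ *	nn = (1 << mm) - 1;
+ *	product = (a && b) ?
+ *		alpha_to[(index_of[a] + index_of[b]) % nn] : 0;
+ */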
+
+static uint16_t *create_lookup_table(struct device *dev, int sector_size)
+{
+	int degree = (sector_size == 512) ?
+			PMECC_GF_DIMENSION_13 :
+			PMECC_GF_DIMENSION_14;
+	unsigned int poly = (sector_size == 512) ?
+			PMECC_GF_13_PRIMITIVE_POLY :
+			PMECC_GF_14_PRIMITIVE_POLY;
+	int table_size = (sector_size == 512) ?
+			PMECC_LOOKUP_TABLE_SIZE_512 :
+			PMECC_LOOKUP_TABLE_SIZE_1024;
+
+	int16_t *addr = devm_kzalloc(dev, 2 * table_size * sizeof(uint16_t),
+			GFP_KERNEL);
+	if (addr && build_gf_tables(degree, poly, addr, addr + table_size))
+		return NULL;
+
+	return addr;
+}
+
+static int atmel_pmecc_nand_init_params(struct platform_device *pdev,
+					 struct atmel_nand_host *host)
+{
+	struct nand_chip *nand_chip = &host->nand_chip;
+	struct mtd_info *mtd = nand_to_mtd(nand_chip);
+	struct resource *regs, *regs_pmerr, *regs_rom;
+	uint16_t *galois_table;
+	int cap, sector_size, err_no;
+
+	err_no = pmecc_choose_ecc(host, &cap, &sector_size);
+	if (err_no) {
+		dev_err(host->dev, "The NAND flash's ECC requirements are not supported!");
+		return err_no;
+	}
+
+	if (cap > host->pmecc_corr_cap ||
+			sector_size != host->pmecc_sector_size)
+		dev_info(host->dev, "WARNING: Using different PMECC parameters than the NAND ONFI ECC requirement.\n");
+
+	cap = host->pmecc_corr_cap;
+	sector_size = host->pmecc_sector_size;
+	host->pmecc_lookup_table_offset = (sector_size == 512) ?
+			host->pmecc_lookup_table_offset_512 :
+			host->pmecc_lookup_table_offset_1024;
+
+	dev_info(host->dev, "Initialize PMECC params, cap: %d, sector: %d\n",
+		 cap, sector_size);
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!regs) {
+		dev_warn(host->dev,
+			"Can't get I/O resource regs for PMECC controller, falling back to software ECC\n");
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
+		return 0;
+	}
+
+	host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(host->ecc)) {
+		err_no = PTR_ERR(host->ecc);
+		goto err;
+	}
+
+	regs_pmerr = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	host->pmerrloc_base = devm_ioremap_resource(&pdev->dev, regs_pmerr);
+	if (IS_ERR(host->pmerrloc_base)) {
+		err_no = PTR_ERR(host->pmerrloc_base);
+		goto err;
+	}
+	host->pmerrloc_el_base = host->pmerrloc_base + ATMEL_PMERRLOC_SIGMAx +
+		(host->caps->pmecc_max_correction + 1) * 4;
+
+	if (!host->has_no_lookup_table) {
+		regs_rom = platform_get_resource(pdev, IORESOURCE_MEM, 3);
+		host->pmecc_rom_base = devm_ioremap_resource(&pdev->dev,
+								regs_rom);
+		if (IS_ERR(host->pmecc_rom_base)) {
+			dev_err(host->dev, "Cannot get I/O resource for ROM, will build a lookup table at runtime!\n");
+			host->has_no_lookup_table = true;
+		}
+	}
+
+	if (host->has_no_lookup_table) {
+		/* Build the lookup table at runtime */
+		galois_table = create_lookup_table(host->dev, sector_size);
+		if (!galois_table) {
+			dev_err(host->dev, "Failed to build a lookup table at runtime!\n");
+			err_no = -EINVAL;
+			goto err;
+		}
+
+		host->pmecc_rom_base = (void __iomem *)galois_table;
+		host->pmecc_lookup_table_offset = 0;
+	}
+
+	nand_chip->ecc.size = sector_size;
+
+	/* set ECC page size and oob layout */
+	switch (mtd->writesize) {
+	case 512:
+	case 1024:
+	case 2048:
+	case 4096:
+	case 8192:
+		if (sector_size > mtd->writesize) {
+			dev_err(host->dev, "pmecc sector size is bigger than the page size!\n");
+			err_no = -EINVAL;
+			goto err;
+		}
+
+		host->pmecc_degree = (sector_size == 512) ?
+			PMECC_GF_DIMENSION_13 : PMECC_GF_DIMENSION_14;
+		host->pmecc_cw_len = (1 << host->pmecc_degree) - 1;
+		host->pmecc_alpha_to = pmecc_get_alpha_to(host);
+		host->pmecc_index_of = host->pmecc_rom_base +
+			host->pmecc_lookup_table_offset;
+
+		nand_chip->ecc.strength = cap;
+		nand_chip->ecc.bytes = pmecc_get_ecc_bytes(cap, sector_size);
+		nand_chip->ecc.steps = mtd->writesize / sector_size;
+		nand_chip->ecc.total = nand_chip->ecc.bytes *
+			nand_chip->ecc.steps;
+		if (nand_chip->ecc.total >
+				mtd->oobsize - PMECC_OOB_RESERVED_BYTES) {
+			dev_err(host->dev, "No room for ECC bytes\n");
+			err_no = -EINVAL;
+			goto err;
+		}
+
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+		break;
+	default:
+		dev_warn(host->dev,
+			"Unsupported page size for PMECC, falling back to software ECC\n");
+		/* page size not handled by HW ECC */
+		/* switching back to soft ECC */
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
+		return 0;
+	}
+
+	/* Allocate data for PMECC computation */
+	err_no = pmecc_data_alloc(host);
+	if (err_no) {
+		dev_err(host->dev,
+				"Cannot allocate memory for PMECC computation!\n");
+		goto err;
+	}
+
+	nand_chip->options |= NAND_NO_SUBPAGE_WRITE;
+	nand_chip->ecc.read_page = atmel_nand_pmecc_read_page;
+	nand_chip->ecc.write_page = atmel_nand_pmecc_write_page;
+
+	atmel_pmecc_core_init(mtd);
+
+	return 0;
+
+err:
+	return err_no;
+}
+
+/*
+ * Calculate HW ECC
+ *
+ * function called after a write
+ *
+ * mtd:        MTD block structure
+ * dat:        raw data (unused)
+ * ecc_code:   buffer for ECC
+ */
+static int atmel_nand_calculate(struct mtd_info *mtd,
+		const u_char *dat, unsigned char *ecc_code)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	unsigned int ecc_value;
+
+	/* get the first 2 ECC bytes */
+	ecc_value = ecc_readl(host->ecc, PR);
+
+	ecc_code[0] = ecc_value & 0xFF;
+	ecc_code[1] = (ecc_value >> 8) & 0xFF;
+
+	/* get the last 2 ECC bytes */
+	ecc_value = ecc_readl(host->ecc, NPR) & ATMEL_ECC_NPARITY;
+
+	ecc_code[2] = ecc_value & 0xFF;
+	ecc_code[3] = (ecc_value >> 8) & 0xFF;
+
+	return 0;
+}
+
+/*
+ * HW ECC read page function
+ *
+ * mtd:        mtd info structure
+ * chip:       nand chip info structure
+ * buf:        buffer to store read data
+ * oob_required:    caller expects OOB data read to chip->oob_poi
+ */
+static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	int eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	uint8_t *p = buf;
+	uint8_t *oob = chip->oob_poi;
+	uint8_t *ecc_pos;
+	int stat;
+	unsigned int max_bitflips = 0;
+	struct mtd_oob_region oobregion = {};
+
+	/*
+	 * Errata: ALE is incorrectly wired up to the ECC controller
+	 * on the AP7000, so it will include the address cycles in the
+	 * ECC calculation.
+	 *
+	 * Workaround: Reset the parity registers before reading the
+	 * actual data.
+	 */
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	if (host->board.need_reset_workaround)
+		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+
+	/* read the page */
+	chip->read_buf(mtd, p, eccsize);
+
+	/* move to ECC position if needed */
+	mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	if (oobregion.offset != 0) {
+		/*
+		 * This only works on large pages because the ECC controller
+		 * waits for NAND_CMD_RNDOUTSTART after the NAND_CMD_RNDOUT.
+		 * Anyway, for small pages, the first ECC byte is at offset
+		 * 0 in the OOB area.
+		 */
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			      mtd->writesize + oobregion.offset, -1);
+	}
+
+	/* the ECC controller needs to read the ECC just after the data */
+	ecc_pos = oob + oobregion.offset;
+	chip->read_buf(mtd, ecc_pos, eccbytes);
+
+	/* check if there's an error */
+	stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+	if (stat < 0) {
+		mtd->ecc_stats.failed++;
+	} else {
+		mtd->ecc_stats.corrected += stat;
+		max_bitflips = max_t(unsigned int, max_bitflips, stat);
+	}
+
+	/* get back to oob start (end of page) */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+
+	/* read the oob */
+	chip->read_buf(mtd, oob, mtd->oobsize);
+
+	return max_bitflips;
+}
+
+/*
+ * HW ECC Correction
+ *
+ * function called after a read
+ *
+ * mtd:        MTD block structure
+ * dat:        raw data read from the chip
+ * read_ecc:   ECC from the chip (unused)
+ * isnull:     unused
+ *
+ * Detect and correct a 1 bit error for a page
+ */
+static int atmel_nand_correct(struct mtd_info *mtd, u_char *dat,
+		u_char *read_ecc, u_char *isnull)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+	unsigned int ecc_status;
+	unsigned int ecc_word, ecc_bit;
+
+	/* get the status from the Status Register */
+	ecc_status = ecc_readl(host->ecc, SR);
+
+	/* if there's no error */
+	if (likely(!(ecc_status & ATMEL_ECC_RECERR)))
+		return 0;
+
+	/* get error bit offset (4 bits) */
+	ecc_bit = ecc_readl(host->ecc, PR) & ATMEL_ECC_BITADDR;
+	/* get word address (12 bits) */
+	ecc_word = ecc_readl(host->ecc, PR) & ATMEL_ECC_WORDADDR;
+	ecc_word >>= 4;
+
+	/* if there are multiple errors */
+	if (ecc_status & ATMEL_ECC_MULERR) {
+		/* check if it is a freshly erased block
+		 * (filled with 0xff) */
+		if ((ecc_bit == ATMEL_ECC_BITADDR)
+				&& (ecc_word == (ATMEL_ECC_WORDADDR >> 4))) {
+			/* the block has just been erased, return OK */
+			return 0;
+		}
+		/* it doesn't seem to be a freshly
+		 * erased block.
+		 * We can't correct so many errors */
+		dev_dbg(host->dev, "atmel_nand : multiple errors detected."
+				" Unable to correct.\n");
+		return -EBADMSG;
+	}
+
+	/* if there's a single bit error : we can correct it */
+	if (ecc_status & ATMEL_ECC_ECCERR) {
+		/* there's nothing much to do here.
+		 * the bit error is on the ECC itself.
+		 */
+		dev_dbg(host->dev, "atmel_nand : one bit error on ECC code."
+				" Nothing to correct\n");
+		return 0;
+	}
+
+	dev_dbg(host->dev, "atmel_nand : one bit error on data."
+			" (word offset in the page :"
+			" 0x%x bit offset : 0x%x)\n",
+			ecc_word, ecc_bit);
+	/* correct the error */
+	if (nand_chip->options & NAND_BUSWIDTH_16) {
+		/* 16 bits words */
+		((unsigned short *) dat)[ecc_word] ^= (1 << ecc_bit);
+	} else {
+		/* 8 bits words */
+		dat[ecc_word] ^= (1 << ecc_bit);
+	}
+	dev_dbg(host->dev, "atmel_nand : error corrected\n");
+	return 1;
+}
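+/*
+ * Decoding example (values chosen for illustration): if the parity
+ * register reads PR = 0x235, then ecc_bit = 0x235 & 0xf = 5 and
+ * ecc_word = (0x235 & 0xfff0) >> 4 = 0x23, so the correction above flips
+ * bit 5 of word 0x23 in the data buffer.
+ */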
+
+/*
+ * Enable HW ECC : unused on most chips
+ */
+static void atmel_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (host->board.need_reset_workaround)
+		ecc_writel(host->ecc, CR, ATMEL_ECC_RST);
+}
+
+static int atmel_of_init_ecc(struct atmel_nand_host *host,
+			     struct device_node *np)
+{
+	u32 offset[2];
+	u32 val;
+
+	host->has_pmecc = of_property_read_bool(np, "atmel,has-pmecc");
+
+	/* Not using PMECC */
+	if (!(host->nand_chip.ecc.mode == NAND_ECC_HW) || !host->has_pmecc)
+		return 0;
+
+	/* Using PMECC: get the correction capability, sector size and
+	 * lookup table offset.
+	 * If the correction bits and sector size are not specified, find
+	 * them from the NAND ONFI parameters.
+	 */
+	if (of_property_read_u32(np, "atmel,pmecc-cap", &val) == 0) {
+		if (val > host->caps->pmecc_max_correction) {
+			dev_err(host->dev,
+				"Required ECC strength too high: %u max %u\n",
+				val, host->caps->pmecc_max_correction);
+			return -EINVAL;
+		}
+		if ((val != 2)  && (val != 4)  && (val != 8) &&
+		    (val != 12) && (val != 24) && (val != 32)) {
+			dev_err(host->dev,
+				"Required ECC strength not supported: %u\n",
+				val);
+			return -EINVAL;
+		}
+		host->pmecc_corr_cap = (u8)val;
+	}
+
+	if (of_property_read_u32(np, "atmel,pmecc-sector-size", &val) == 0) {
+		if ((val != 512) && (val != 1024)) {
+			dev_err(host->dev,
+				"Required ECC sector size not supported: %u\n",
+				val);
+			return -EINVAL;
+		}
+		host->pmecc_sector_size = (u16)val;
+	}
+
+	if (of_property_read_u32_array(np, "atmel,pmecc-lookup-table-offset",
+			offset, 2) != 0) {
+		dev_err(host->dev, "Cannot get PMECC lookup table offset, will build a lookup table at runtime.\n");
+		host->has_no_lookup_table = true;
+		/* Will build a lookup table and initialize the offset later */
+		return 0;
+	}
+
+	if (!offset[0] && !offset[1]) {
+		dev_err(host->dev, "Invalid PMECC lookup table offset\n");
+		return -EINVAL;
+	}
+
+	host->pmecc_lookup_table_offset_512 = offset[0];
+	host->pmecc_lookup_table_offset_1024 = offset[1];
+
+	return 0;
+}
+
+static int atmel_of_init_port(struct atmel_nand_host *host,
+			      struct device_node *np)
+{
+	u32 val;
+	struct atmel_nand_data *board = &host->board;
+	enum of_gpio_flags flags = 0;
+
+	host->caps = (struct atmel_nand_caps *)
+		of_device_get_match_data(host->dev);
+
+	if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) {
+		if (val >= 32) {
+			dev_err(host->dev, "invalid addr-offset %u\n", val);
+			return -EINVAL;
+		}
+		board->ale = val;
+	}
+
+	if (of_property_read_u32(np, "atmel,nand-cmd-offset", &val) == 0) {
+		if (val >= 32) {
+			dev_err(host->dev, "invalid cmd-offset %u\n", val);
+			return -EINVAL;
+		}
+		board->cle = val;
+	}
+
+	board->has_dma = of_property_read_bool(np, "atmel,nand-has-dma");
+
+	board->rdy_pin = of_get_gpio_flags(np, 0, &flags);
+	board->rdy_pin_active_low = (flags == OF_GPIO_ACTIVE_LOW);
+
+	board->enable_pin = of_get_gpio(np, 1);
+	board->det_pin = of_get_gpio(np, 2);
+
+	/* load the NFC driver if there is one */
+	of_platform_populate(np, NULL, NULL, host->dev);
+
+	/*
+	 * Initialize ECC mode to NAND_ECC_SOFT so that we have a correct value
+	 * even if the nand-ecc-mode property is not defined.
+	 */
+	host->nand_chip.ecc.mode = NAND_ECC_SOFT;
+	host->nand_chip.ecc.algo = NAND_ECC_HAMMING;
+
+	return 0;
+}
+
+static int atmel_hw_nand_init_params(struct platform_device *pdev,
+					 struct atmel_nand_host *host)
+{
+	struct nand_chip *nand_chip = &host->nand_chip;
+	struct mtd_info *mtd = nand_to_mtd(nand_chip);
+	struct resource		*regs;
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!regs) {
+		dev_err(host->dev,
+			"Can't get I/O resource regs, use software ECC\n");
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
+		return 0;
+	}
+
+	host->ecc = devm_ioremap_resource(&pdev->dev, regs);
+	if (IS_ERR(host->ecc))
+		return PTR_ERR(host->ecc);
+
+	/* ECC is calculated for the whole page (1 step) */
+	nand_chip->ecc.size = mtd->writesize;
+
+	/* set ECC page size and oob layout */
+	switch (mtd->writesize) {
+	case 512:
+		mtd_set_ooblayout(mtd, &atmel_ooblayout_sp_ops);
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
+		break;
+	case 1024:
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
+		break;
+	case 2048:
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
+		break;
+	case 4096:
+		mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+		ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
+		break;
+	default:
+		/* page size not handled by HW ECC */
+		/* switching back to soft ECC */
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
+		return 0;
+	}
+
+	/* set up for HW ECC */
+	nand_chip->ecc.calculate = atmel_nand_calculate;
+	nand_chip->ecc.correct = atmel_nand_correct;
+	nand_chip->ecc.hwctl = atmel_nand_hwctl;
+	nand_chip->ecc.read_page = atmel_nand_read_page;
+	nand_chip->ecc.bytes = 4;
+	nand_chip->ecc.strength = 1;
+
+	return 0;
+}
+
+static inline u32 nfc_read_status(struct atmel_nand_host *host)
+{
+	u32 err_flags = NFC_SR_DTOE | NFC_SR_UNDEF | NFC_SR_AWB | NFC_SR_ASE;
+	u32 nfc_status = nfc_readl(host->nfc->hsmc_regs, SR);
+
+	if (unlikely(nfc_status & err_flags)) {
+		if (nfc_status & NFC_SR_DTOE)
+			dev_err(host->dev, "NFC: Waiting Nand R/B Timeout Error\n");
+		else if (nfc_status & NFC_SR_UNDEF)
+			dev_err(host->dev, "NFC: Access Undefined Area Error\n");
+		else if (nfc_status & NFC_SR_AWB)
+			dev_err(host->dev, "NFC: Access memory While NFC is busy\n");
+		else if (nfc_status & NFC_SR_ASE)
+			dev_err(host->dev, "NFC: Access memory Size Error\n");
+	}
+
+	return nfc_status;
+}
+
+/* SMC interrupt service routine */
+static irqreturn_t hsmc_interrupt(int irq, void *dev_id)
+{
+	struct atmel_nand_host *host = dev_id;
+	u32 status, mask, pending;
+	irqreturn_t ret = IRQ_NONE;
+
+	status = nfc_read_status(host);
+	mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+	pending = status & mask;
+
+	if (pending & NFC_SR_XFR_DONE) {
+		complete(&host->nfc->comp_xfer_done);
+		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_XFR_DONE);
+		ret = IRQ_HANDLED;
+	}
+	if (pending & NFC_SR_RB_EDGE) {
+		complete(&host->nfc->comp_ready);
+		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_RB_EDGE);
+		ret = IRQ_HANDLED;
+	}
+	if (pending & NFC_SR_CMD_DONE) {
+		complete(&host->nfc->comp_cmd_done);
+		nfc_writel(host->nfc->hsmc_regs, IDR, NFC_SR_CMD_DONE);
+		ret = IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/* NFC(Nand Flash Controller) related functions */
+static void nfc_prepare_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+	if (flag & NFC_SR_XFR_DONE)
+		init_completion(&host->nfc->comp_xfer_done);
+
+	if (flag & NFC_SR_RB_EDGE)
+		init_completion(&host->nfc->comp_ready);
+
+	if (flag & NFC_SR_CMD_DONE)
+		init_completion(&host->nfc->comp_cmd_done);
+
+	/* Enable the interrupts we need to wait for */
+	nfc_writel(host->nfc->hsmc_regs, IER, flag);
+}
+
+static int nfc_wait_interrupt(struct atmel_nand_host *host, u32 flag)
+{
+	int i, index = 0;
+	struct completion *comp[3];	/* Supports up to 3 interrupt completions */
+
+	if (flag & NFC_SR_XFR_DONE)
+		comp[index++] = &host->nfc->comp_xfer_done;
+
+	if (flag & NFC_SR_RB_EDGE)
+		comp[index++] = &host->nfc->comp_ready;
+
+	if (flag & NFC_SR_CMD_DONE)
+		comp[index++] = &host->nfc->comp_cmd_done;
+
+	if (index == 0) {
+		dev_err(host->dev, "Unknown interrupt flag: 0x%08x\n", flag);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < index; i++) {
+		if (wait_for_completion_timeout(comp[i],
+				msecs_to_jiffies(NFC_TIME_OUT_MS)))
+			continue;	/* wait for next completion */
+		else
+			goto err_timeout;
+	}
+
+	return 0;
+
+err_timeout:
+	dev_err(host->dev, "Timed out waiting for interrupt: 0x%08x\n", flag);
+	/* Disable the interrupt as it is not handled by interrupt handler */
+	nfc_writel(host->nfc->hsmc_regs, IDR, flag);
+	return -ETIMEDOUT;
+}
+
+static int nfc_send_command(struct atmel_nand_host *host,
+	unsigned int cmd, unsigned int addr, unsigned char cycle0)
+{
+	unsigned long timeout;
+	u32 flag = NFC_SR_CMD_DONE;
+	flag |= cmd & NFCADDR_CMD_DATAEN ? NFC_SR_XFR_DONE : 0;
+
+	dev_dbg(host->dev,
+		"nfc_cmd: 0x%08x, addr1234: 0x%08x, cycle0: 0x%02x\n",
+		cmd, addr, cycle0);
+
+	timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+	while (nfc_readl(host->nfc->hsmc_regs, SR) & NFC_SR_BUSY) {
+		if (time_after(jiffies, timeout)) {
+			dev_err(host->dev,
+				"Timed out waiting for NFC ready!\n");
+			return -ETIMEDOUT;
+		}
+	}
+
+	nfc_prepare_interrupt(host, flag);
+	nfc_writel(host->nfc->hsmc_regs, CYCLE0, cycle0);
+	nfc_cmd_addr1234_writel(cmd, addr, host->nfc->base_cmd_regs);
+	return nfc_wait_interrupt(host, flag);
+}
+
+static int nfc_device_ready(struct mtd_info *mtd)
+{
+	u32 status, mask;
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	status = nfc_read_status(host);
+	mask = nfc_readl(host->nfc->hsmc_regs, IMR);
+
+	/* The mask should be 0. If not, we may have lost interrupts */
+	if (unlikely(mask & status))
+		dev_err(host->dev, "Lost the interrupt flags: 0x%08x\n",
+				mask & status);
+
+	return status & NFC_SR_RB_EDGE;
+}
+
+static void nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (chip == -1)
+		nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_DISABLE);
+	else
+		nfc_writel(host->nfc->hsmc_regs, CTRL, NFC_CTRL_ENABLE);
+}
+
+static int nfc_make_addr(struct mtd_info *mtd, int command, int column,
+		int page_addr, unsigned int *addr1234, unsigned int *cycle0)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	int acycle = 0;
+	unsigned char addr_bytes[8];
+	int index = 0, bit_shift;
+
+	BUG_ON(addr1234 == NULL || cycle0 == NULL);
+
+	*cycle0 = 0;
+	*addr1234 = 0;
+
+	if (column != -1) {
+		if (chip->options & NAND_BUSWIDTH_16 &&
+				!nand_opcode_8bits(command))
+			column >>= 1;
+		addr_bytes[acycle++] = column & 0xff;
+		if (mtd->writesize > 512)
+			addr_bytes[acycle++] = (column >> 8) & 0xff;
+	}
+
+	if (page_addr != -1) {
+		addr_bytes[acycle++] = page_addr & 0xff;
+		addr_bytes[acycle++] = (page_addr >> 8) & 0xff;
+		if (chip->chipsize > (128 << 20))
+			addr_bytes[acycle++] = (page_addr >> 16) & 0xff;
+	}
+
+	if (acycle > 4)
+		*cycle0 = addr_bytes[index++];
+
+	for (bit_shift = 0; index < acycle; bit_shift += 8)
+		*addr1234 += addr_bytes[index++] << bit_shift;
+
+	/* return acycle in cmd register */
+	return acycle << NFCADDR_CMD_ACYCLE_BIT_POS;
+}
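+/*
+ * Packing example (values chosen for illustration): on an 8-bit bus,
+ * large-page device (writesize > 512, chipsize > 128MiB) with
+ * column = 0x40 and page_addr = 0x123, the address bytes are
+ * {0x40, 0x00, 0x23, 0x01, 0x00}, i.e. 5 address cycles.  Since
+ * acycle > 4, cycle0 = 0x40, addr1234 = 0x00012300 and the function
+ * returns 5 << NFCADDR_CMD_ACYCLE_BIT_POS.
+ */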
+
+static void nfc_nand_command(struct mtd_info *mtd, unsigned int command,
+				int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	unsigned long timeout;
+	unsigned int nfc_addr_cmd = 0;
+
+	unsigned int cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+
+	/* Set default settings: no cmd2, no addr cycles, read from NAND */
+	unsigned int cmd2 = 0;
+	unsigned int vcmd2 = 0;
+	int acycle = NFCADDR_CMD_ACYCLE_NONE;
+	int csid = NFCADDR_CMD_CSID_3;
+	int dataen = NFCADDR_CMD_DATADIS;
+	int nfcwr = NFCADDR_CMD_NFCRD;
+	unsigned int addr1234 = 0;
+	unsigned int cycle0 = 0;
+	bool do_addr = true;
+	host->nfc->data_in_sram = NULL;
+
+	dev_dbg(host->dev, "%s: cmd = 0x%02x, col = 0x%08x, page = 0x%08x\n",
+	     __func__, command, column, page_addr);
+
+	switch (command) {
+	case NAND_CMD_RESET:
+		nfc_addr_cmd = cmd1 | acycle | csid | dataen | nfcwr;
+		nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+		udelay(chip->chip_delay);
+
+		nfc_nand_command(mtd, NAND_CMD_STATUS, -1, -1);
+		timeout = jiffies + msecs_to_jiffies(NFC_TIME_OUT_MS);
+		while (!(chip->read_byte(mtd) & NAND_STATUS_READY)) {
+			if (time_after(jiffies, timeout)) {
+				dev_err(host->dev,
+					"Timed out waiting for status ready!\n");
+				break;
+			}
+		}
+		return;
+	case NAND_CMD_STATUS:
+		do_addr = false;
+		break;
+	case NAND_CMD_PARAM:
+	case NAND_CMD_READID:
+		do_addr = false;
+		acycle = NFCADDR_CMD_ACYCLE_1;
+		if (column != -1)
+			addr1234 = column;
+		break;
+	case NAND_CMD_RNDOUT:
+		cmd2 = NAND_CMD_RNDOUTSTART << NFCADDR_CMD_CMD2_BIT_POS;
+		vcmd2 = NFCADDR_CMD_VCMD2;
+		break;
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		if (command == NAND_CMD_READOOB) {
+			column += mtd->writesize;
+			command = NAND_CMD_READ0; /* only READ0 is valid */
+			cmd1 = command << NFCADDR_CMD_CMD1_BIT_POS;
+		}
+		if (host->nfc->use_nfc_sram) {
+			/* Enable Data transfer to sram */
+			dataen = NFCADDR_CMD_DATAEN;
+
+			/* Need to enable PMECC now, since the NFC will
+			 * transfer data on the bus right after sending the
+			 * NFC read command.
+			 */
+			if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+				pmecc_enable(host, NAND_ECC_READ);
+		}
+
+		cmd2 = NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS;
+		vcmd2 = NFCADDR_CMD_VCMD2;
+		break;
+	/* For programming commands, the cmd needs to be set to write enable */
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_RNDIN:
+		nfcwr = NFCADDR_CMD_NFCWR;
+		if (host->nfc->will_write_sram && command == NAND_CMD_SEQIN)
+			dataen = NFCADDR_CMD_DATAEN;
+		break;
+	default:
+		break;
+	}
+
+	if (do_addr)
+		acycle = nfc_make_addr(mtd, command, column, page_addr,
+				&addr1234, &cycle0);
+
+	nfc_addr_cmd = cmd1 | cmd2 | vcmd2 | acycle | csid | dataen | nfcwr;
+	nfc_send_command(host, nfc_addr_cmd, addr1234, cycle0);
+
+	/*
+	 * Program and erase have their own busy handlers; status, sequential
+	 * in, and deplete1 need no delay.
+	 */
+	switch (command) {
+	case NAND_CMD_CACHEDPROG:
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_RNDIN:
+	case NAND_CMD_STATUS:
+	case NAND_CMD_RNDOUT:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_READID:
+		return;
+
+	case NAND_CMD_READ0:
+		if (dataen == NFCADDR_CMD_DATAEN) {
+			host->nfc->data_in_sram = host->nfc->sram_bank0 +
+				nfc_get_sram_off(host);
+			return;
+		}
+		/* fall through */
+	default:
+		nfc_prepare_interrupt(host, NFC_SR_RB_EDGE);
+		nfc_wait_interrupt(host, NFC_SR_RB_EDGE);
+	}
+}
+
+static int nfc_sram_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			uint32_t offset, int data_len, const uint8_t *buf,
+			int oob_required, int page, int cached, int raw)
+{
+	int cfg, len;
+	int status = 0;
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	void *sram = host->nfc->sram_bank0 + nfc_get_sram_off(host);
+
+	/* Subpage write is not supported */
+	if (offset || (data_len < mtd->writesize))
+		return -EINVAL;
+
+	len = mtd->writesize;
+	/* Copy page data to the SRAM; it will be written to NAND via the NFC */
+	if (use_dma) {
+		if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) != 0)
+			/* Fall back to use cpu copy */
+			memcpy(sram, buf, len);
+	} else {
+		memcpy(sram, buf, len);
+	}
+
+	cfg = nfc_readl(host->nfc->hsmc_regs, CFG);
+	if (unlikely(raw) && oob_required) {
+		memcpy(sram + len, chip->oob_poi, mtd->oobsize);
+		len += mtd->oobsize;
+		nfc_writel(host->nfc->hsmc_regs, CFG, cfg | NFC_CFG_WSPARE);
+	} else {
+		nfc_writel(host->nfc->hsmc_regs, CFG, cfg & ~NFC_CFG_WSPARE);
+	}
+
+	if (chip->ecc.mode == NAND_ECC_HW && host->has_pmecc)
+		/*
+		 * When using the NFC SRAM, PMECC needs to be set up before
+		 * sending the NAND_CMD_SEQIN command, since the NFC starts
+		 * transferring data between the SRAM and the NAND as soon
+		 * as the command is sent.
+		 */
+		pmecc_enable(host, NAND_ECC_WRITE);
+
+	host->nfc->will_write_sram = true;
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+	host->nfc->will_write_sram = false;
+
+	if (likely(!raw))
+		/* Need to write ecc into oob */
+		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
+					      page);
+
+	if (status < 0)
+		return status;
+
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	status = chip->waitfunc(mtd, chip);
+
+	if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+		status = chip->errstat(mtd, chip, FL_WRITING, status, page);
+
+	if (status & NAND_STATUS_FAIL)
+		return -EIO;
+
+	return 0;
+}
+
+static int nfc_sram_init(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct atmel_nand_host *host = nand_get_controller_data(chip);
+	int res = 0;
+
+	/* Initialize the NFC CFG register */
+	unsigned int cfg_nfc = 0;
+
+	/* set page size and oob layout */
+	switch (mtd->writesize) {
+	case 512:
+		cfg_nfc = NFC_CFG_PAGESIZE_512;
+		break;
+	case 1024:
+		cfg_nfc = NFC_CFG_PAGESIZE_1024;
+		break;
+	case 2048:
+		cfg_nfc = NFC_CFG_PAGESIZE_2048;
+		break;
+	case 4096:
+		cfg_nfc = NFC_CFG_PAGESIZE_4096;
+		break;
+	case 8192:
+		cfg_nfc = NFC_CFG_PAGESIZE_8192;
+		break;
+	default:
+		dev_err(host->dev, "Unsupported page size for NFC.\n");
+		res = -ENXIO;
+		return res;
+	}
+
+	/* OOB size in bytes = (NFCSPARESIZE + 1) * 4.
+	 * The maximum supported spare size is 512 bytes. */
+	cfg_nfc |= (((mtd->oobsize / 4) - 1) << NFC_CFG_NFC_SPARESIZE_BIT_POS
+		& NFC_CFG_NFC_SPARESIZE);
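+	/* For example, with mtd->oobsize = 64 the field value is
+	 * (64 / 4) - 1 = 15, and the controller computes the spare size
+	 * back as (15 + 1) * 4 = 64 bytes.
+	 */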
+	/* default set a max timeout */
+	cfg_nfc |= NFC_CFG_RSPARE |
+			NFC_CFG_NFC_DTOCYC | NFC_CFG_NFC_DTOMUL;
+
+	nfc_writel(host->nfc->hsmc_regs, CFG, cfg_nfc);
+
+	host->nfc->will_write_sram = false;
+	nfc_set_sram_bank(host, 0);
+
+	/* Use Write page with NFC SRAM only for PMECC or ECC NONE. */
+	if (host->nfc->write_by_sram) {
+		if ((chip->ecc.mode == NAND_ECC_HW && host->has_pmecc) ||
+				chip->ecc.mode == NAND_ECC_NONE)
+			chip->write_page = nfc_sram_write_page;
+		else
+			host->nfc->write_by_sram = false;
+	}
+
+	dev_info(host->dev, "Using NFC Sram read %s\n",
+			host->nfc->write_by_sram ? "and write" : "");
+	return 0;
+}
+
+static struct platform_driver atmel_nand_nfc_driver;
+/*
+ * Probe for the NAND device.
+ */
+static int atmel_nand_probe(struct platform_device *pdev)
+{
+	struct atmel_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *nand_chip;
+	struct resource *mem;
+	int res, irq;
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	res = platform_driver_register(&atmel_nand_nfc_driver);
+	if (res)
+		dev_err(&pdev->dev, "atmel_nand: can't register NFC driver\n");
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	host->io_base = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(host->io_base)) {
+		res = PTR_ERR(host->io_base);
+		goto err_nand_ioremap;
+	}
+	host->io_phys = (dma_addr_t)mem->start;
+
+	nand_chip = &host->nand_chip;
+	mtd = nand_to_mtd(nand_chip);
+	host->dev = &pdev->dev;
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		nand_set_flash_node(nand_chip, pdev->dev.of_node);
+		/* of_node can only be parsed when CONFIG_OF is enabled */
+		res = atmel_of_init_port(host, pdev->dev.of_node);
+		if (res)
+			goto err_nand_ioremap;
+	} else {
+		memcpy(&host->board, dev_get_platdata(&pdev->dev),
+		       sizeof(struct atmel_nand_data));
+		nand_chip->ecc.mode = host->board.ecc_mode;
+
+		/*
+		 * When using software ECC every supported avr32 board means
+		 * Hamming algorithm. If that ever changes we'll need to add
+		 * ecc_algo field to the struct atmel_nand_data.
+		 */
+		if (nand_chip->ecc.mode == NAND_ECC_SOFT)
+			nand_chip->ecc.algo = NAND_ECC_HAMMING;
+
+		/* 16-bit bus width */
+		if (host->board.bus_width_16)
+			nand_chip->options |= NAND_BUSWIDTH_16;
+	}
+
+	 /* link the private data structures */
+	nand_set_controller_data(nand_chip, host);
+	mtd->dev.parent = &pdev->dev;
+
+	/* Set address of NAND IO lines */
+	nand_chip->IO_ADDR_R = host->io_base;
+	nand_chip->IO_ADDR_W = host->io_base;
+
+	if (nand_nfc.is_initialized) {
+		/* NFC driver is probed and initialized */
+		host->nfc = &nand_nfc;
+
+		nand_chip->select_chip = nfc_select_chip;
+		nand_chip->dev_ready = nfc_device_ready;
+		nand_chip->cmdfunc = nfc_nand_command;
+
+		/* Initialize the interrupt for NFC */
+		irq = platform_get_irq(pdev, 0);
+		if (irq < 0) {
+			dev_err(host->dev, "Cannot get HSMC irq!\n");
+			res = irq;
+			goto err_nand_ioremap;
+		}
+
+		res = devm_request_irq(&pdev->dev, irq, hsmc_interrupt,
+				0, "hsmc", host);
+		if (res) {
+			dev_err(&pdev->dev, "Unable to request HSMC irq %d\n",
+				irq);
+			goto err_nand_ioremap;
+		}
+	} else {
+		res = atmel_nand_set_enable_ready_pins(mtd);
+		if (res)
+			goto err_nand_ioremap;
+
+		nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
+	}
+
+	nand_chip->chip_delay = 40;		/* 40us command delay time */
+
+
+	nand_chip->read_buf = atmel_read_buf;
+	nand_chip->write_buf = atmel_write_buf;
+
+	platform_set_drvdata(pdev, host);
+	atmel_nand_enable(host);
+
+	if (gpio_is_valid(host->board.det_pin)) {
+		res = devm_gpio_request(&pdev->dev,
+				host->board.det_pin, "nand_det");
+		if (res < 0) {
+			dev_err(&pdev->dev,
+				"can't request det gpio %d\n",
+				host->board.det_pin);
+			goto err_no_card;
+		}
+
+		res = gpio_direction_input(host->board.det_pin);
+		if (res < 0) {
+			dev_err(&pdev->dev,
+				"can't request input direction det gpio %d\n",
+				host->board.det_pin);
+			goto err_no_card;
+		}
+
+		if (gpio_get_value(host->board.det_pin)) {
+			dev_info(&pdev->dev, "No SmartMedia card inserted.\n");
+			res = -ENXIO;
+			goto err_no_card;
+		}
+	}
+
+	if (!host->board.has_dma)
+		use_dma = 0;
+
+	if (use_dma) {
+		dma_cap_mask_t mask;
+
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_MEMCPY, mask);
+		host->dma_chan = dma_request_channel(mask, NULL, NULL);
+		if (!host->dma_chan) {
+			dev_err(host->dev, "Failed to request DMA channel\n");
+			use_dma = 0;
+		}
+	}
+	if (use_dma)
+		dev_info(host->dev, "Using %s for DMA transfers.\n",
+					dma_chan_name(host->dma_chan));
+	else
+		dev_info(host->dev, "No DMA support for NAND access.\n");
+
+	/* first scan to find the device and get the page size */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		res = -ENXIO;
+		goto err_scan_ident;
+	}
+
+	if (host->board.on_flash_bbt || on_flash_bbt)
+		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
+		dev_info(&pdev->dev, "Use On Flash BBT\n");
+
+	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
+		res = atmel_of_init_ecc(host, pdev->dev.of_node);
+		if (res)
+			goto err_hw_ecc;
+	}
+
+	if (nand_chip->ecc.mode == NAND_ECC_HW) {
+		if (host->has_pmecc)
+			res = atmel_pmecc_nand_init_params(pdev, host);
+		else
+			res = atmel_hw_nand_init_params(pdev, host);
+
+		if (res != 0)
+			goto err_hw_ecc;
+	}
+
+	/* initialize the nfc configuration register */
+	if (host->nfc && host->nfc->use_nfc_sram) {
+		res = nfc_sram_init(mtd);
+		if (res) {
+			host->nfc->use_nfc_sram = false;
+			dev_err(host->dev, "Disabling NFC SRAM for data transfer.\n");
+		}
+	}
+
+	/* second phase scan */
+	if (nand_scan_tail(mtd)) {
+		res = -ENXIO;
+		goto err_scan_tail;
+	}
+
+	mtd->name = "atmel_nand";
+	res = mtd_device_register(mtd, host->board.parts,
+				  host->board.num_parts);
+	if (!res)
+		return res;
+
+err_scan_tail:
+	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW)
+		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+err_hw_ecc:
+err_scan_ident:
+err_no_card:
+	atmel_nand_disable(host);
+	if (host->dma_chan)
+		dma_release_channel(host->dma_chan);
+err_nand_ioremap:
+	return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int atmel_nand_remove(struct platform_device *pdev)
+{
+	struct atmel_nand_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+
+	nand_release(mtd);
+
+	atmel_nand_disable(host);
+
+	if (host->has_pmecc && host->nand_chip.ecc.mode == NAND_ECC_HW) {
+		pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
+		pmerrloc_writel(host->pmerrloc_base, ELDIS,
+				PMERRLOC_DISABLE);
+	}
+
+	if (host->dma_chan)
+		dma_release_channel(host->dma_chan);
+
+	platform_driver_unregister(&atmel_nand_nfc_driver);
+
+	return 0;
+}
+
+/*
+ * AT91RM9200 does not have PMECC or PMECC Errloc peripherals for
+ * BCH ECC. Combined with the "atmel,has-pmecc" property, it is used to
+ * describe devices from the SAM9 family that do have them.
+ */
+static const struct atmel_nand_caps at91rm9200_caps = {
+	.pmecc_correct_erase_page = false,
+	.pmecc_max_correction = 24,
+};
+
+static const struct atmel_nand_caps sama5d4_caps = {
+	.pmecc_correct_erase_page = true,
+	.pmecc_max_correction = 24,
+};
+
+/*
+ * The PMECC Errloc controller starting in SAMA5D2 is not compatible,
+ * as the increased correction strength requires more registers.
+ */
+static const struct atmel_nand_caps sama5d2_caps = {
+	.pmecc_correct_erase_page = true,
+	.pmecc_max_correction = 32,
+};
+
+static const struct of_device_id atmel_nand_dt_ids[] = {
+	{ .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps },
+	{ .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps },
+	{ .compatible = "atmel,sama5d2-nand", .data = &sama5d2_caps },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_nand_dt_ids);
+
+static int atmel_nand_nfc_probe(struct platform_device *pdev)
+{
+	struct atmel_nfc *nfc = &nand_nfc;
+	struct resource *nfc_cmd_regs, *nfc_hsmc_regs, *nfc_sram;
+	int ret;
+
+	nfc_cmd_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->base_cmd_regs = devm_ioremap_resource(&pdev->dev, nfc_cmd_regs);
+	if (IS_ERR(nfc->base_cmd_regs))
+		return PTR_ERR(nfc->base_cmd_regs);
+
+	nfc_hsmc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	nfc->hsmc_regs = devm_ioremap_resource(&pdev->dev, nfc_hsmc_regs);
+	if (IS_ERR(nfc->hsmc_regs))
+		return PTR_ERR(nfc->hsmc_regs);
+
+	nfc_sram = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (nfc_sram) {
+		nfc->sram_bank0 = (void * __force)
+				devm_ioremap_resource(&pdev->dev, nfc_sram);
+		if (IS_ERR(nfc->sram_bank0)) {
+			dev_warn(&pdev->dev, "Failed to ioremap the NFC SRAM (error %ld), disabling NFC SRAM.\n",
+					PTR_ERR(nfc->sram_bank0));
+		} else {
+			nfc->use_nfc_sram = true;
+			nfc->sram_bank0_phys = (dma_addr_t)nfc_sram->start;
+
+			if (pdev->dev.of_node)
+				nfc->write_by_sram = of_property_read_bool(
+						pdev->dev.of_node,
+						"atmel,write-by-sram");
+		}
+	}
+
+	nfc_writel(nfc->hsmc_regs, IDR, 0xffffffff);
+	nfc_readl(nfc->hsmc_regs, SR);	/* clear the NFC_SR */
+
+	nfc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(nfc->clk)) {
+		ret = clk_prepare_enable(nfc->clk);
+		if (ret)
+			return ret;
+	} else {
+		dev_warn(&pdev->dev, "NFC clock missing, update your Device Tree");
+	}
+
+	nfc->is_initialized = true;
+	dev_info(&pdev->dev, "NFC is probed.\n");
+
+	return 0;
+}
+
+static int atmel_nand_nfc_remove(struct platform_device *pdev)
+{
+	struct atmel_nfc *nfc = &nand_nfc;
+
+	if (!IS_ERR(nfc->clk))
+		clk_disable_unprepare(nfc->clk);
+
+	return 0;
+}
+
+static const struct of_device_id atmel_nand_nfc_match[] = {
+	{ .compatible = "atmel,sama5d3-nfc" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atmel_nand_nfc_match);
+
+static struct platform_driver atmel_nand_nfc_driver = {
+	.driver = {
+		.name = "atmel_nand_nfc",
+		.of_match_table = of_match_ptr(atmel_nand_nfc_match),
+	},
+	.probe = atmel_nand_nfc_probe,
+	.remove = atmel_nand_nfc_remove,
+};
+
+static struct platform_driver atmel_nand_driver = {
+	.probe		= atmel_nand_probe,
+	.remove		= atmel_nand_remove,
+	.driver		= {
+		.name	= "atmel_nand",
+		.of_match_table	= of_match_ptr(atmel_nand_dt_ids),
+	},
+};
+
+module_platform_driver(atmel_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("NAND/SmartMedia driver for AT91 / AVR32");
+MODULE_ALIAS("platform:atmel_nand");
diff --git a/drivers/mtd/nand/rawnand/atmel_nand_ecc.h b/drivers/mtd/nand/rawnand/atmel_nand_ecc.h
new file mode 100644
index 000000000000..834d694487bd
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/atmel_nand_ecc.h
@@ -0,0 +1,163 @@ 
+/*
+ * Error Corrected Code Controller (ECC) - System peripherals registers.
+ * Based on AT91SAM9260 datasheet revision B.
+ *
+ * Copyright (C) 2007 Andrew Victor
+ * Copyright (C) 2007 - 2012 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_ECC_H
+#define ATMEL_NAND_ECC_H
+
+#define ATMEL_ECC_CR		0x00			/* Control register */
+#define		ATMEL_ECC_RST		(1 << 0)		/* Reset parity */
+
+#define ATMEL_ECC_MR		0x04			/* Mode register */
+#define		ATMEL_ECC_PAGESIZE	(3 << 0)		/* Page Size */
+#define			ATMEL_ECC_PAGESIZE_528		(0)
+#define			ATMEL_ECC_PAGESIZE_1056		(1)
+#define			ATMEL_ECC_PAGESIZE_2112		(2)
+#define			ATMEL_ECC_PAGESIZE_4224		(3)
+
+#define ATMEL_ECC_SR		0x08			/* Status register */
+#define		ATMEL_ECC_RECERR		(1 << 0)		/* Recoverable Error */
+#define		ATMEL_ECC_ECCERR		(1 << 1)		/* ECC Single Bit Error */
+#define		ATMEL_ECC_MULERR		(1 << 2)		/* Multiple Errors */
+
+#define ATMEL_ECC_PR		0x0c			/* Parity register */
+#define		ATMEL_ECC_BITADDR	(0xf << 0)		/* Bit Error Address */
+#define		ATMEL_ECC_WORDADDR	(0xfff << 4)		/* Word Error Address */
+
+#define ATMEL_ECC_NPR		0x10			/* NParity register */
+#define		ATMEL_ECC_NPARITY	(0xffff << 0)		/* NParity */
+
+/* PMECC Register Definitions */
+#define ATMEL_PMECC_CFG			0x000	/* Configuration Register */
+#define		PMECC_CFG_BCH_ERR2		(0 << 0)
+#define		PMECC_CFG_BCH_ERR4		(1 << 0)
+#define		PMECC_CFG_BCH_ERR8		(2 << 0)
+#define		PMECC_CFG_BCH_ERR12		(3 << 0)
+#define		PMECC_CFG_BCH_ERR24		(4 << 0)
+#define		PMECC_CFG_BCH_ERR32		(5 << 0)
+
+#define		PMECC_CFG_SECTOR512		(0 << 4)
+#define		PMECC_CFG_SECTOR1024		(1 << 4)
+
+#define		PMECC_CFG_PAGE_1SECTOR		(0 << 8)
+#define		PMECC_CFG_PAGE_2SECTORS		(1 << 8)
+#define		PMECC_CFG_PAGE_4SECTORS		(2 << 8)
+#define		PMECC_CFG_PAGE_8SECTORS		(3 << 8)
+
+#define		PMECC_CFG_READ_OP		(0 << 12)
+#define		PMECC_CFG_WRITE_OP		(1 << 12)
+
+#define		PMECC_CFG_SPARE_ENABLE		(1 << 16)
+#define		PMECC_CFG_SPARE_DISABLE		(0 << 16)
+
+#define		PMECC_CFG_AUTO_ENABLE		(1 << 20)
+#define		PMECC_CFG_AUTO_DISABLE		(0 << 20)
+
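
As an illustration of how these CFG fields are meant to be combined (the particular configuration below is made up for the example and is not taken from this patch):

	/* Example only: 4-bit BCH over 512-byte sectors, four sectors per
	 * page, read direction, spare area and auto mode disabled. */
	u32 cfg = PMECC_CFG_BCH_ERR4 | PMECC_CFG_SECTOR512 |
		  PMECC_CFG_PAGE_4SECTORS | PMECC_CFG_READ_OP |
		  PMECC_CFG_SPARE_DISABLE | PMECC_CFG_AUTO_DISABLE;

Such a value would then be programmed through the pmecc_writel(..., CFG, ...) accessor defined further down.
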
+#define ATMEL_PMECC_SAREA		0x004	/* Spare area size */
+#define ATMEL_PMECC_SADDR		0x008	/* PMECC starting address */
+#define ATMEL_PMECC_EADDR		0x00c	/* PMECC ending address */
+#define ATMEL_PMECC_CLK			0x010	/* PMECC clock control */
+#define		PMECC_CLK_133MHZ		(2 << 0)
+
+#define ATMEL_PMECC_CTRL		0x014	/* PMECC control register */
+#define		PMECC_CTRL_RST			(1 << 0)
+#define		PMECC_CTRL_DATA			(1 << 1)
+#define		PMECC_CTRL_USER			(1 << 2)
+#define		PMECC_CTRL_ENABLE		(1 << 4)
+#define		PMECC_CTRL_DISABLE		(1 << 5)
+
+#define ATMEL_PMECC_SR			0x018	/* PMECC status register */
+#define		PMECC_SR_BUSY			(1 << 0)
+#define		PMECC_SR_ENABLE			(1 << 4)
+
+#define ATMEL_PMECC_IER			0x01c	/* PMECC interrupt enable */
+#define		PMECC_IER_ENABLE		(1 << 0)
+#define ATMEL_PMECC_IDR			0x020	/* PMECC interrupt disable */
+#define		PMECC_IER_DISABLE		(1 << 0)
+#define ATMEL_PMECC_IMR			0x024	/* PMECC interrupt mask */
+#define		PMECC_IER_MASK			(1 << 0)
+#define ATMEL_PMECC_ISR			0x028	/* PMECC interrupt status */
+#define ATMEL_PMECC_ECCx		0x040	/* PMECC ECC x */
+#define ATMEL_PMECC_REMx		0x240	/* PMECC REM x */
+
+/* PMERRLOC Register Definitions */
+#define ATMEL_PMERRLOC_ELCFG		0x000	/* Error location config */
+#define		PMERRLOC_ELCFG_SECTOR_512	(0 << 0)
+#define		PMERRLOC_ELCFG_SECTOR_1024	(1 << 0)
+#define		PMERRLOC_ELCFG_NUM_ERRORS(n)	((n) << 16)
+
+#define ATMEL_PMERRLOC_ELPRIM		0x004	/* Error location primitive */
+#define ATMEL_PMERRLOC_ELEN		0x008	/* Error location enable */
+#define ATMEL_PMERRLOC_ELDIS		0x00c	/* Error location disable */
+#define		PMERRLOC_DISABLE		(1 << 0)
+
+#define ATMEL_PMERRLOC_ELSR		0x010	/* Error location status */
+#define		PMERRLOC_ELSR_BUSY		(1 << 0)
+#define ATMEL_PMERRLOC_ELIER		0x014	/* Error location int enable */
+#define ATMEL_PMERRLOC_ELIDR		0x018	/* Error location int disable */
+#define ATMEL_PMERRLOC_ELIMR		0x01c	/* Error location int mask */
+#define ATMEL_PMERRLOC_ELISR		0x020	/* Error location int status */
+#define		PMERRLOC_ERR_NUM_MASK		(0x1f << 8)
+#define		PMERRLOC_CALC_DONE		(1 << 0)
+#define ATMEL_PMERRLOC_SIGMAx		0x028	/* Error location SIGMA x */
+
+/*
+ * The ATMEL_PMERRLOC_ELx register location depends on the number of
+ * bits corrected by the PMECC controller, so no fixed offset is defined here.
+ */
+
+/* Register access macros for PMECC */
+#define pmecc_readl_relaxed(addr, reg) \
+	readl_relaxed((addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_writel(addr, reg, value) \
+	writel((value), (addr) + ATMEL_PMECC_##reg)
+
+#define pmecc_readb_ecc_relaxed(addr, sector, n) \
+	readb_relaxed((addr) + ATMEL_PMECC_ECCx + ((sector) * 0x40) + (n))
+
+#define pmecc_readl_rem_relaxed(addr, sector, n) \
+	readl_relaxed((addr) + ATMEL_PMECC_REMx + ((sector) * 0x40) + ((n) * 4))
+
+#define pmerrloc_readl_relaxed(addr, reg) \
+	readl_relaxed((addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel(addr, reg, value) \
+	writel((value), (addr) + ATMEL_PMERRLOC_##reg)
+
+#define pmerrloc_writel_sigma_relaxed(addr, n, value) \
+	writel_relaxed((value), (addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_sigma_relaxed(addr, n) \
+	readl_relaxed((addr) + ATMEL_PMERRLOC_SIGMAx + ((n) * 4))
+
+#define pmerrloc_readl_el_relaxed(addr, n) \
+	readl_relaxed((addr) + ((n) * 4))
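
These accessors paste the short register name onto the ATMEL_PMECC_/ATMEL_PMERRLOC_ prefix to form the register offset. For instance, the call used in atmel_nand_remove() above:

	pmerrloc_writel(host->pmerrloc_base, ELDIS, PMERRLOC_DISABLE);
	/* expands to: */
	writel(PMERRLOC_DISABLE, host->pmerrloc_base + ATMEL_PMERRLOC_ELDIS);
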
+
+/* Galois field dimension */
+#define PMECC_GF_DIMENSION_13			13
+#define PMECC_GF_DIMENSION_14			14
+
+/* Primitive Polynomial used by PMECC */
+#define PMECC_GF_13_PRIMITIVE_POLY		0x201b
+#define PMECC_GF_14_PRIMITIVE_POLY		0x4443
+
+#define PMECC_LOOKUP_TABLE_SIZE_512		0x2000
+#define PMECC_LOOKUP_TABLE_SIZE_1024		0x4000
+
+/* Time out value for reading PMECC status register */
+#define PMECC_MAX_TIMEOUT_MS			100
+
+/* Reserved bytes in oob area */
+#define PMECC_OOB_RESERVED_BYTES		2
+
+#endif
diff --git a/drivers/mtd/nand/rawnand/atmel_nand_nfc.h b/drivers/mtd/nand/rawnand/atmel_nand_nfc.h
new file mode 100644
index 000000000000..4d5d26221a7e
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/atmel_nand_nfc.h
@@ -0,0 +1,103 @@ 
+/*
+ * Atmel Nand Flash Controller (NFC) - System peripherals registers.
+ * Based on SAMA5D3 datasheet.
+ *
+ * © Copyright 2013 Atmel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef ATMEL_NAND_NFC_H
+#define ATMEL_NAND_NFC_H
+
+/*
+ * HSMC NFC registers
+ */
+#define ATMEL_HSMC_NFC_CFG	0x00		/* NFC Configuration Register */
+#define		NFC_CFG_PAGESIZE	(7 << 0)
+#define			NFC_CFG_PAGESIZE_512	(0 << 0)
+#define			NFC_CFG_PAGESIZE_1024	(1 << 0)
+#define			NFC_CFG_PAGESIZE_2048	(2 << 0)
+#define			NFC_CFG_PAGESIZE_4096	(3 << 0)
+#define			NFC_CFG_PAGESIZE_8192	(4 << 0)
+#define		NFC_CFG_WSPARE		(1 << 8)
+#define		NFC_CFG_RSPARE		(1 << 9)
+#define		NFC_CFG_NFC_DTOCYC	(0xf << 16)
+#define		NFC_CFG_NFC_DTOMUL	(0x7 << 20)
+#define		NFC_CFG_NFC_SPARESIZE	(0x7f << 24)
+#define		NFC_CFG_NFC_SPARESIZE_BIT_POS	24
+
+#define ATMEL_HSMC_NFC_CTRL	0x04		/* NFC Control Register */
+#define		NFC_CTRL_ENABLE		(1 << 0)
+#define		NFC_CTRL_DISABLE	(1 << 1)
+
+#define ATMEL_HSMC_NFC_SR	0x08		/* NFC Status Register */
+#define		NFC_SR_BUSY		(1 << 8)
+#define		NFC_SR_XFR_DONE		(1 << 16)
+#define		NFC_SR_CMD_DONE		(1 << 17)
+#define		NFC_SR_DTOE		(1 << 20)
+#define		NFC_SR_UNDEF		(1 << 21)
+#define		NFC_SR_AWB		(1 << 22)
+#define		NFC_SR_ASE		(1 << 23)
+#define		NFC_SR_RB_EDGE		(1 << 24)
+
+#define ATMEL_HSMC_NFC_IER	0x0c
+#define ATMEL_HSMC_NFC_IDR	0x10
+#define ATMEL_HSMC_NFC_IMR	0x14
+#define ATMEL_HSMC_NFC_CYCLE0	0x18		/* NFC Address Cycle Zero */
+#define		ATMEL_HSMC_NFC_ADDR_CYCLE0	(0xff)
+
+#define ATMEL_HSMC_NFC_BANK	0x1c		/* NFC Bank Register */
+#define		ATMEL_HSMC_NFC_BANK0		(0 << 0)
+#define		ATMEL_HSMC_NFC_BANK1		(1 << 0)
+
+#define nfc_writel(addr, reg, value) \
+	writel((value), (addr) + ATMEL_HSMC_NFC_##reg)
+
+#define nfc_readl(addr, reg) \
+	readl_relaxed((addr) + ATMEL_HSMC_NFC_##reg)
+
+/*
+ * NFC Address Command definitions
+ */
+#define NFCADDR_CMD_CMD1	(0xff << 2)	/* Command for Cycle 1 */
+#define NFCADDR_CMD_CMD1_BIT_POS	2
+#define NFCADDR_CMD_CMD2	(0xff << 10)	/* Command for Cycle 2 */
+#define NFCADDR_CMD_CMD2_BIT_POS	10
+#define NFCADDR_CMD_VCMD2	(0x1 << 18)	/* Valid Cycle 2 Command */
+#define NFCADDR_CMD_ACYCLE	(0x7 << 19)	/* Number of Address required */
+#define		NFCADDR_CMD_ACYCLE_NONE		(0x0 << 19)
+#define		NFCADDR_CMD_ACYCLE_1		(0x1 << 19)
+#define		NFCADDR_CMD_ACYCLE_2		(0x2 << 19)
+#define		NFCADDR_CMD_ACYCLE_3		(0x3 << 19)
+#define		NFCADDR_CMD_ACYCLE_4		(0x4 << 19)
+#define		NFCADDR_CMD_ACYCLE_5		(0x5 << 19)
+#define NFCADDR_CMD_ACYCLE_BIT_POS	19
+#define NFCADDR_CMD_CSID	(0x7 << 22)	/* Chip Select Identifier */
+#define		NFCADDR_CMD_CSID_0		(0x0 << 22)
+#define		NFCADDR_CMD_CSID_1		(0x1 << 22)
+#define		NFCADDR_CMD_CSID_2		(0x2 << 22)
+#define		NFCADDR_CMD_CSID_3		(0x3 << 22)
+#define		NFCADDR_CMD_CSID_4		(0x4 << 22)
+#define		NFCADDR_CMD_CSID_5		(0x5 << 22)
+#define		NFCADDR_CMD_CSID_6		(0x6 << 22)
+#define		NFCADDR_CMD_CSID_7		(0x7 << 22)
+#define NFCADDR_CMD_DATAEN	(0x1 << 25)	/* Data Transfer Enable */
+#define NFCADDR_CMD_DATADIS	(0x0 << 25)	/* Data Transfer Disable */
+#define NFCADDR_CMD_NFCRD	(0x0 << 26)	/* NFC Read Enable */
+#define NFCADDR_CMD_NFCWR	(0x1 << 26)	/* NFC Write Enable */
+#define NFCADDR_CMD_NFCBUSY	(0x1 << 27)	/* NFC Busy */
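
To show how these fields combine into a single command/address word (an illustrative composition only, not code from this patch; the NAND_CMD_* opcodes come from the core NAND header):

	/* Illustration: page read on chip select 3, five address cycles,
	 * two command cycles (READ0 then READSTART), data transfer enabled. */
	u32 cmd = NFCADDR_CMD_NFCRD | NFCADDR_CMD_DATAEN |
		  NFCADDR_CMD_CSID_3 | NFCADDR_CMD_ACYCLE_5 |
		  NFCADDR_CMD_VCMD2 |
		  (NAND_CMD_READSTART << NFCADDR_CMD_CMD2_BIT_POS) |
		  (NAND_CMD_READ0 << NFCADDR_CMD_CMD1_BIT_POS);
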
+
+#define nfc_cmd_addr1234_writel(cmd, addr1234, nfc_base) \
+	writel((addr1234), (cmd) + nfc_base)
+
+#define nfc_cmd_readl(bitstatus, nfc_base) \
+	readl_relaxed((bitstatus) + nfc_base)
+
+#define NFC_TIME_OUT_MS		100
+#define	NFC_SRAM_BANK1_OFFSET	0x1200
+
+#endif
diff --git a/drivers/mtd/nand/rawnand/au1550nd.c b/drivers/mtd/nand/rawnand/au1550nd.c
new file mode 100644
index 000000000000..9d4a28fa6b73
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/au1550nd.c
@@ -0,0 +1,518 @@ 
+/*
+ *  drivers/mtd/nand/rawnand/au1550nd.c
+ *
+ *  Copyright (C) 2004 Embedded Edge, LLC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
+
+
+struct au1550nd_ctx {
+	struct nand_chip chip;
+
+	int cs;
+	void __iomem *base;
+	void (*write_byte)(struct mtd_info *, u_char);
+};
+
+/**
+ * au_read_byte -  read one byte from the chip
+ * @mtd:	MTD device structure
+ *
+ * read function for 8bit buswidth
+ */
+static u_char au_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	u_char ret = readb(this->IO_ADDR_R);
+	wmb(); /* drain writebuffer */
+	return ret;
+}
+
+/**
+ * au_write_byte -  write one byte to the chip
+ * @mtd:	MTD device structure
+ * @byte:	data byte to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_byte(struct mtd_info *mtd, u_char byte)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	writeb(byte, this->IO_ADDR_W);
+	wmb(); /* drain writebuffer */
+}
+
+/**
+ * au_read_byte16 -  read one byte endianness aware from the chip
+ * @mtd:	MTD device structure
+ *
+ * read function for 16bit buswidth with endianness conversion
+ */
+static u_char au_read_byte16(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	u_char ret = (u_char) cpu_to_le16(readw(this->IO_ADDR_R));
+	wmb(); /* drain writebuffer */
+	return ret;
+}
+
+/**
+ * au_write_byte16 -  write one byte endianness aware to the chip
+ * @mtd:	MTD device structure
+ * @byte:	data byte to write
+ *
+ * write function for 16bit buswidth with endianness conversion
+ */
+static void au_write_byte16(struct mtd_info *mtd, u_char byte)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	writew(le16_to_cpu((u16) byte), this->IO_ADDR_W);
+	wmb(); /* drain writebuffer */
+}
+
+/**
+ * au_read_word -  read one word from the chip
+ * @mtd:	MTD device structure
+ *
+ * read function for 16bit buswidth without endianness conversion
+ */
+static u16 au_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	u16 ret = readw(this->IO_ADDR_R);
+	wmb(); /* drain writebuffer */
+	return ret;
+}
+
+/**
+ * au_write_buf -  write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ *
+ * write function for 8bit buswidth
+ */
+static void au_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	for (i = 0; i < len; i++) {
+		writeb(buf[i], this->IO_ADDR_W);
+		wmb(); /* drain writebuffer */
+	}
+}
+
+/**
+ * au_read_buf -  read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ *
+ * read function for 8bit buswidth
+ */
+static void au_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	for (i = 0; i < len; i++) {
+		buf[i] = readb(this->IO_ADDR_R);
+		wmb(); /* drain writebuffer */
+	}
+}
+
+/**
+ * au_write_buf16 -  write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ *
+ * write function for 16bit buswidth
+ */
+static void au_write_buf16(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++) {
+		writew(p[i], this->IO_ADDR_W);
+		wmb(); /* drain writebuffer */
+	}
+
+}
+
+/**
+ * au_read_buf16 -  read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ *
+ * read function for 16bit buswidth
+ */
+static void au_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++) {
+		p[i] = readw(this->IO_ADDR_R);
+		wmb(); /* drain writebuffer */
+	}
+}
+
+/* Select the chip by setting nCE to low */
+#define NAND_CTL_SETNCE		1
+/* Deselect the chip by setting nCE to high */
+#define NAND_CTL_CLRNCE		2
+/* Select the command latch by setting CLE to high */
+#define NAND_CTL_SETCLE		3
+/* Deselect the command latch by setting CLE to low */
+#define NAND_CTL_CLRCLE		4
+/* Select the address latch by setting ALE to high */
+#define NAND_CTL_SETALE		5
+/* Deselect the address latch by setting ALE to low */
+#define NAND_CTL_CLRALE		6
+
+static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
+						chip);
+
+	switch (cmd) {
+
+	case NAND_CTL_SETCLE:
+		this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
+		break;
+
+	case NAND_CTL_CLRCLE:
+		this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+		break;
+
+	case NAND_CTL_SETALE:
+		this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
+		break;
+
+	case NAND_CTL_CLRALE:
+		this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
+		/* FIXME: Nobody knows why this is necessary,
+		 * but it works only that way */
+		udelay(1);
+		break;
+
+	case NAND_CTL_SETNCE:
+		/* assert (force assert) chip enable */
+		alchemy_wrsmem((1 << (4 + ctx->cs)), AU1000_MEM_STNDCTL);
+		break;
+
+	case NAND_CTL_CLRNCE:
+		/* deassert chip enable */
+		alchemy_wrsmem(0, AU1000_MEM_STNDCTL);
+		break;
+	}
+
+	this->IO_ADDR_R = this->IO_ADDR_W;
+
+	wmb(); /* Drain the writebuffer */
+}
+
+static int au1550_device_ready(struct mtd_info *mtd)
+{
+	return (alchemy_rdsmem(AU1000_MEM_STSTAT) & 0x1) ? 1 : 0;
+}
+
+/**
+ * au1550_select_chip - control -CE line
+ *	Forbid driving -CE manually, permitting the NAND controller to do this.
+ *	Keeping -CE asserted during the whole sector reads interferes with the
+ *	NOR flash and PCMCIA drivers as it causes contention on the static bus.
+ *	We only have to hold -CE low for the NAND read commands since the flash
+ *	chip needs it to be asserted during chip not ready time but the NAND
+ *	controller keeps it released.
+ *
+ * @mtd:	MTD device structure
+ * @chip:	chipnumber to select, -1 for deselect
+ */
+static void au1550_select_chip(struct mtd_info *mtd, int chip)
+{
+}
+
+/**
+ * au1550_command - Send command to NAND device
+ * @mtd:	MTD device structure
+ * @command:	the command to be sent
+ * @column:	the column address for this command, -1 if none
+ * @page_addr:	the page address for this command, -1 if none
+ */
+static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct au1550nd_ctx *ctx = container_of(this, struct au1550nd_ctx,
+						chip);
+	int ce_override = 0, i;
+	unsigned long flags = 0;
+
+	/* Begin command latch cycle */
+	au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
+	/*
+	 * Write out the command to the device.
+	 */
+	if (command == NAND_CMD_SEQIN) {
+		int readcmd;
+
+		if (column >= mtd->writesize) {
+			/* OOB area */
+			column -= mtd->writesize;
+			readcmd = NAND_CMD_READOOB;
+		} else if (column < 256) {
+			/* First 256 bytes --> READ0 */
+			readcmd = NAND_CMD_READ0;
+		} else {
+			column -= 256;
+			readcmd = NAND_CMD_READ1;
+		}
+		ctx->write_byte(mtd, readcmd);
+	}
+	ctx->write_byte(mtd, command);
+
+	/* Set ALE and clear CLE to start address cycle */
+	au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
+
+	if (column != -1 || page_addr != -1) {
+		au1550_hwcontrol(mtd, NAND_CTL_SETALE);
+
+		/* Serially input address */
+		if (column != -1) {
+			/* Adjust columns for 16 bit buswidth */
+			if (this->options & NAND_BUSWIDTH_16 &&
+					!nand_opcode_8bits(command))
+				column >>= 1;
+			ctx->write_byte(mtd, column);
+		}
+		if (page_addr != -1) {
+			ctx->write_byte(mtd, (u8)(page_addr & 0xff));
+
+			if (command == NAND_CMD_READ0 ||
+			    command == NAND_CMD_READ1 ||
+			    command == NAND_CMD_READOOB) {
+				/*
+				 * NAND controller will release -CE after
+				 * the last address byte is written, so we'll
+				 * have to forcibly assert it. No interrupts
+				 * are allowed while we do this as we don't
+				 * want the NOR flash or PCMCIA drivers to
+				 * steal our precious bytes of data...
+				 */
+				ce_override = 1;
+				local_irq_save(flags);
+				au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
+			}
+
+			ctx->write_byte(mtd, (u8)(page_addr >> 8));
+
+			/* One more address cycle for devices > 32MiB */
+			if (this->chipsize > (32 << 20))
+				ctx->write_byte(mtd,
+						((page_addr >> 16) & 0x0f));
+		}
+		/* Latch in address */
+		au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
+	}
+
+	/*
+	 * Program and erase have their own busy handlers.
+	 * Status and sequential in need no delay.
+	 */
+	switch (command) {
+
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_STATUS:
+		return;
+
+	case NAND_CMD_RESET:
+		break;
+
+	case NAND_CMD_READ0:
+	case NAND_CMD_READ1:
+	case NAND_CMD_READOOB:
+		/* Check if we're really driving -CE low (just in case) */
+		if (unlikely(!ce_override))
+			break;
+
+		/* Apply a short delay always to ensure that we do wait tWB. */
+		ndelay(100);
+		/* Wait for a chip to become ready... */
+		for (i = this->chip_delay; !this->dev_ready(mtd) && i > 0; --i)
+			udelay(1);
+
+		/* Release -CE and re-enable interrupts. */
+		au1550_hwcontrol(mtd, NAND_CTL_CLRNCE);
+		local_irq_restore(flags);
+		return;
+	}
+	/* Apply this short delay always to ensure that we do wait tWB. */
+	ndelay(100);
+
+	while (!this->dev_ready(mtd))
+		;
+}
+
+static int find_nand_cs(unsigned long nand_base)
+{
+	void __iomem *base =
+			(void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+	unsigned long addr, staddr, start, mask, end;
+	int i;
+
+	for (i = 0; i < 4; i++) {
+		addr = 0x1000 + (i * 0x10);			/* CSx */
+		staddr = __raw_readl(base + addr + 0x08);	/* STADDRx */
+		/* figure out the decoded range of this CS */
+		start = (staddr << 4) & 0xfffc0000;
+		mask = (staddr << 18) & 0xfffc0000;
+		end = (start | (start - 1)) & ~(start ^ mask);
+		if ((nand_base >= start) && (nand_base < end))
+			return i;
+	}
+
+	return -ENODEV;
+}
+
+static int au1550nd_probe(struct platform_device *pdev)
+{
+	struct au1550nd_platdata *pd;
+	struct au1550nd_ctx *ctx;
+	struct nand_chip *this;
+	struct mtd_info *mtd;
+	struct resource *r;
+	int ret, cs;
+
+	pd = dev_get_platdata(&pdev->dev);
+	if (!pd) {
+		dev_err(&pdev->dev, "missing platform data\n");
+		return -ENODEV;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "no NAND memory resource\n");
+		ret = -ENODEV;
+		goto out1;
+	}
+	if (!request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+		dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	ctx->base = ioremap_nocache(r->start, 0x1000);
+	if (!ctx->base) {
+		dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+		ret = -ENODEV;
+		goto out2;
+	}
+
+	this = &ctx->chip;
+	mtd = nand_to_mtd(this);
+	mtd->dev.parent = &pdev->dev;
+
+	/* figure out which CS# r->start belongs to */
+	cs = find_nand_cs(r->start);
+	if (cs < 0) {
+		dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+		ret = -ENODEV;
+		goto out3;
+	}
+	ctx->cs = cs;
+
+	this->dev_ready = au1550_device_ready;
+	this->select_chip = au1550_select_chip;
+	this->cmdfunc = au1550_command;
+
+	/* 30 us command delay time */
+	this->chip_delay = 30;
+	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
+
+	if (pd->devwidth)
+		this->options |= NAND_BUSWIDTH_16;
+
+	this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+	ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
+	this->read_word = au_read_word;
+	this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+	this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+
+	ret = nand_scan(mtd, 1);
+	if (ret) {
+		dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+		goto out3;
+	}
+
+	mtd_device_register(mtd, pd->parts, pd->num_parts);
+
+	platform_set_drvdata(pdev, ctx);
+
+	return 0;
+
+out3:
+	iounmap(ctx->base);
+out2:
+	release_mem_region(r->start, resource_size(r));
+out1:
+	kfree(ctx);
+	return ret;
+}
+
+static int au1550nd_remove(struct platform_device *pdev)
+{
+	struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+	struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	nand_release(nand_to_mtd(&ctx->chip));
+	iounmap(ctx->base);
+	release_mem_region(r->start, resource_size(r));
+	kfree(ctx);
+	return 0;
+}
+
+static struct platform_driver au1550nd_driver = {
+	.driver = {
+		.name	= "au1550-nand",
+	},
+	.probe		= au1550nd_probe,
+	.remove		= au1550nd_remove,
+};
+
+module_platform_driver(au1550nd_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Embedded Edge, LLC");
+MODULE_DESCRIPTION("Board-specific glue layer for NAND flash on Pb1550 board");
diff --git a/drivers/mtd/nand/rawnand/bcm47xxnflash/Makefile b/drivers/mtd/nand/rawnand/bcm47xxnflash/Makefile
new file mode 100644
index 000000000000..f05b119e134b
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/bcm47xxnflash/Makefile
@@ -0,0 +1,4 @@ 
+bcm47xxnflash-y				+= main.o
+bcm47xxnflash-y				+= ops_bcm4706.o
+
+obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH)	+= bcm47xxnflash.o
diff --git a/drivers/mtd/nand/rawnand/bcm47xxnflash/bcm47xxnflash.h b/drivers/mtd/nand/rawnand/bcm47xxnflash/bcm47xxnflash.h
new file mode 100644
index 000000000000..c8834767ab6d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/bcm47xxnflash/bcm47xxnflash.h
@@ -0,0 +1,25 @@ 
+#ifndef __BCM47XXNFLASH_H
+#define __BCM47XXNFLASH_H
+
+#ifndef pr_fmt
+#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt
+#endif
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+
+struct bcm47xxnflash {
+	struct bcma_drv_cc *cc;
+
+	struct nand_chip nand_chip;
+
+	unsigned curr_command;
+	int curr_page_addr;
+	int curr_column;
+
+	u8 id_data[8];
+};
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n);
+
+#endif /* __BCM47XXNFLASH_H */
diff --git a/drivers/mtd/nand/rawnand/bcm47xxnflash/main.c b/drivers/mtd/nand/rawnand/bcm47xxnflash/main.c
new file mode 100644
index 000000000000..fb31429b70a9
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/bcm47xxnflash/main.c
@@ -0,0 +1,81 @@ 
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/bcma/bcma.h>
+
+MODULE_DESCRIPTION("NAND flash driver for BCMA bus");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rafał Miłecki");
+
+static const char *probes[] = { "bcm47xxpart", NULL };
+
+static int bcm47xxnflash_probe(struct platform_device *pdev)
+{
+	struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
+	struct bcm47xxnflash *b47n;
+	struct mtd_info *mtd;
+	int err = 0;
+
+	b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
+	if (!b47n)
+		return -ENOMEM;
+
+	nand_set_controller_data(&b47n->nand_chip, b47n);
+	mtd = nand_to_mtd(&b47n->nand_chip);
+	mtd->dev.parent = &pdev->dev;
+	b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);
+
+	if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
+		err = bcm47xxnflash_ops_bcm4706_init(b47n);
+	} else {
+		pr_err("Device not supported\n");
+		err = -ENOTSUPP;
+	}
+	if (err) {
+		pr_err("Initialization failed: %d\n", err);
+		return err;
+	}
+
+	platform_set_drvdata(pdev, b47n);
+
+	err = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
+	if (err) {
+		pr_err("Failed to register MTD device: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int bcm47xxnflash_remove(struct platform_device *pdev)
+{
+	struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
+
+	nand_release(nand_to_mtd(&nflash->nand_chip));
+
+	return 0;
+}
+
+static struct platform_driver bcm47xxnflash_driver = {
+	.probe	= bcm47xxnflash_probe,
+	.remove = bcm47xxnflash_remove,
+	.driver = {
+		.name = "bcma_nflash",
+	},
+};
+
+module_platform_driver(bcm47xxnflash_driver);
diff --git a/drivers/mtd/nand/rawnand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/rawnand/bcm47xxnflash/ops_bcm4706.c
new file mode 100644
index 000000000000..f1da4ea88f2c
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/bcm47xxnflash/ops_bcm4706.c
@@ -0,0 +1,454 @@ 
+/*
+ * BCM47XX NAND flash driver
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include "bcm47xxnflash.h"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bcma/bcma.h>
+
+/* Broadcom uses 1'000'000 but that seems to be too many. Tests on a WNDR4500
+ * have shown ~1000 retries as the maximum. */
+#define NFLASH_READY_RETRIES		10000
+
+#define NFLASH_SECTOR_SIZE		512
+
+#define NCTL_CMD0			0x00010000
+#define NCTL_COL			0x00020000	/* Update column with value from BCMA_CC_NFLASH_COL_ADDR */
+#define NCTL_ROW			0x00040000	/* Update row (page) with value from BCMA_CC_NFLASH_ROW_ADDR */
+#define NCTL_CMD1W			0x00080000
+#define NCTL_READ			0x00100000
+#define NCTL_WRITE			0x00200000
+#define NCTL_SPECADDR			0x01000000
+#define NCTL_READY			0x04000000
+#define NCTL_ERR			0x08000000
+#define NCTL_CSA			0x40000000
+#define NCTL_START			0x80000000
+
+/**************************************************
+ * Various helpers
+ **************************************************/
+
+static inline u8 bcm47xxnflash_ops_bcm4706_ns_to_cycle(u16 ns, u16 clock)
+{
+	return ((ns * 1000 * clock) / 1000000) + 1;
+}
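
Worked example for the conversion above, using the fixed 400 MHz package option handled in the init code below: freq = 400000000 / 4 and clock = freq / 1000000 = 100, so the 15 ns timing becomes (15 * 1000 * 100) / 1000000 + 1 = 2 wait cycles (integer division).
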
+
+static int bcm47xxnflash_ops_bcm4706_ctl_cmd(struct bcma_drv_cc *cc, u32 code)
+{
+	int i = 0;
+
+	bcma_cc_write32(cc, BCMA_CC_NFLASH_CTL, NCTL_START | code);
+	for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+		if (!(bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_START)) {
+			i = 0;
+			break;
+		}
+	}
+	if (i) {
+		pr_err("NFLASH control command not ready!\n");
+		return -EBUSY;
+	}
+	return 0;
+}
+
+static int bcm47xxnflash_ops_bcm4706_poll(struct bcma_drv_cc *cc)
+{
+	int i;
+
+	for (i = 0; i < NFLASH_READY_RETRIES; i++) {
+		if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) & NCTL_READY) {
+			if (bcma_cc_read32(cc, BCMA_CC_NFLASH_CTL) &
+			    BCMA_CC_NFLASH_CTL_ERR) {
+				pr_err("Error on polling\n");
+				return -EBUSY;
+			} else {
+				return 0;
+			}
+		}
+	}
+
+	pr_err("Polling timeout!\n");
+	return -EBUSY;
+}
+
+/**************************************************
+ * R/W
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_read(struct mtd_info *mtd, uint8_t *buf,
+					   int len)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+	u32 ctlcode;
+	u32 *dest = (u32 *)buf;
+	int i;
+	int toread;
+
+	BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+	/* Don't validate column using nand_chip->page_shift, it may be bigger
+	 * when accessing OOB */
+
+	while (len) {
+		/* We can read maximum of 0x200 bytes at once */
+		toread = min(len, 0x200);
+
+		/* Set page and column */
+		bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_COL_ADDR,
+				b47n->curr_column);
+		bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_ROW_ADDR,
+				b47n->curr_page_addr);
+
+		/* Prepare to read */
+		ctlcode = NCTL_CSA | NCTL_CMD1W | NCTL_ROW | NCTL_COL |
+			  NCTL_CMD0;
+		ctlcode |= NAND_CMD_READSTART << 8;
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode))
+			return;
+		if (bcm47xxnflash_ops_bcm4706_poll(b47n->cc))
+			return;
+
+		/* Eventually read some data :) */
+		for (i = 0; i < toread; i += 4, dest++) {
+			ctlcode = NCTL_CSA | 0x30000000 | NCTL_READ;
+			if (i == toread - 4) /* Last read goes without that */
+				ctlcode &= ~NCTL_CSA;
+			if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+							      ctlcode))
+				return;
+			*dest = bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA);
+		}
+
+		b47n->curr_column += toread;
+		len -= toread;
+	}
+}
+
+static void bcm47xxnflash_ops_bcm4706_write(struct mtd_info *mtd,
+					    const uint8_t *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+	struct bcma_drv_cc *cc = b47n->cc;
+
+	u32 ctlcode;
+	const u32 *data = (u32 *)buf;
+	int i;
+
+	BUG_ON(b47n->curr_page_addr & ~nand_chip->pagemask);
+	/* Don't validate column using nand_chip->page_shift, it may be bigger
+	 * when accessing OOB */
+
+	for (i = 0; i < len; i += 4, data++) {
+		bcma_cc_write32(cc, BCMA_CC_NFLASH_DATA, *data);
+
+		ctlcode = NCTL_CSA | 0x30000000 | NCTL_WRITE;
+		if (i == len - 4) /* Last write goes without that */
+			ctlcode &= ~NCTL_CSA;
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode)) {
+			pr_err("%s ctl_cmd didn't work!\n", __func__);
+			return;
+		}
+	}
+
+	b47n->curr_column += len;
+}
+
+/**************************************************
+ * NAND chip ops
+ **************************************************/
+
+static void bcm47xxnflash_ops_bcm4706_cmd_ctrl(struct mtd_info *mtd, int cmd,
+					       unsigned int ctrl)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+	u32 code = 0;
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (cmd & NAND_CTRL_CLE)
+		code = cmd | NCTL_CMD0;
+
+	/* nCS is not needed for reset command */
+	if (cmd != NAND_CMD_RESET)
+		code |= NCTL_CSA;
+
+	bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, code);
+}
+
+/* Default nand_select_chip calls cmd_ctrl, which is not used in BCM4706 */
+static void bcm47xxnflash_ops_bcm4706_select_chip(struct mtd_info *mtd,
+						  int chip)
+{
+	return;
+}
+
+static int bcm47xxnflash_ops_bcm4706_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+	return !!(bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_CTL) & NCTL_READY);
+}
+
+/*
+ * Default nand_command and nand_command_lp don't match the BCM4706 hardware
+ * layout. For example, reading the chip ID is performed in a non-standard
+ * way. Setting the column and page is also handled differently, using
+ * dedicated registers of the ChipCommon core. Hacking cmd_ctrl to understand
+ * and convert standard commands would be much more complicated.
+ */
+static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct mtd_info *mtd,
+					      unsigned command, int column,
+					      int page_addr)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+	struct bcma_drv_cc *cc = b47n->cc;
+	u32 ctlcode;
+	int i;
+
+	if (column != -1)
+		b47n->curr_column = column;
+	if (page_addr != -1)
+		b47n->curr_page_addr = page_addr;
+
+	switch (command) {
+	case NAND_CMD_RESET:
+		nand_chip->cmd_ctrl(mtd, command, NAND_CTRL_CLE);
+
+		ndelay(100);
+		nand_wait_ready(mtd);
+		break;
+	case NAND_CMD_READID:
+		ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
+		ctlcode |= NAND_CMD_READID;
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
+			pr_err("READID error\n");
+			break;
+		}
+
+		/*
+		 * Reading is specific, last one has to go without NCTL_CSA
+		 * bit. We don't know how many reads NAND subsystem is going
+		 * to perform, so cache everything.
+		 */
+		for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
+			ctlcode = NCTL_CSA | NCTL_READ;
+			if (i == ARRAY_SIZE(b47n->id_data) - 1)
+				ctlcode &= ~NCTL_CSA;
+			if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
+							      ctlcode)) {
+				pr_err("READID error\n");
+				break;
+			}
+			b47n->id_data[i] =
+				bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
+				& 0xFF;
+		}
+
+		break;
+	case NAND_CMD_STATUS:
+		ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+			pr_err("STATUS command error\n");
+		break;
+	case NAND_CMD_READ0:
+		break;
+	case NAND_CMD_READOOB:
+		if (page_addr != -1)
+			b47n->curr_column += mtd->writesize;
+		break;
+	case NAND_CMD_ERASE1:
+		bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+				b47n->curr_page_addr);
+		ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
+			  NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+			pr_err("ERASE1 failed\n");
+		break;
+	case NAND_CMD_ERASE2:
+		break;
+	case NAND_CMD_SEQIN:
+		/* Set page and column */
+		bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
+				b47n->curr_column);
+		bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
+				b47n->curr_page_addr);
+
+		/* Prepare to write */
+		ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
+		ctlcode |= NAND_CMD_SEQIN;
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
+			pr_err("SEQIN failed\n");
+		break;
+	case NAND_CMD_PAGEPROG:
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
+							  NAND_CMD_PAGEPROG))
+			pr_err("PAGEPROG failed\n");
+		if (bcm47xxnflash_ops_bcm4706_poll(cc))
+			pr_err("PAGEPROG not ready\n");
+		break;
+	default:
+		pr_err("Command 0x%X unsupported\n", command);
+		break;
+	}
+	b47n->curr_command = command;
+}
+
+static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+	struct bcma_drv_cc *cc = b47n->cc;
+	u32 tmp = 0;
+
+	switch (b47n->curr_command) {
+	case NAND_CMD_READID:
+		if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) {
+			pr_err("Requested invalid id_data: %d\n",
+			       b47n->curr_column);
+			return 0;
+		}
+		return b47n->id_data[b47n->curr_column++];
+	case NAND_CMD_STATUS:
+		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ))
+			return 0;
+		return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff;
+	case NAND_CMD_READOOB:
+		bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4);
+		return tmp & 0xFF;
+	}
+
+	pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command);
+	return 0;
+}
+
+static void bcm47xxnflash_ops_bcm4706_read_buf(struct mtd_info *mtd,
+					       uint8_t *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+	switch (b47n->curr_command) {
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		bcm47xxnflash_ops_bcm4706_read(mtd, buf, len);
+		return;
+	}
+
+	pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
+}
+
+static void bcm47xxnflash_ops_bcm4706_write_buf(struct mtd_info *mtd,
+						const uint8_t *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
+
+	switch (b47n->curr_command) {
+	case NAND_CMD_SEQIN:
+		bcm47xxnflash_ops_bcm4706_write(mtd, buf, len);
+		return;
+	}
+
+	pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
+}
+
+/**************************************************
+ * Init
+ **************************************************/
+
+int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
+{
+	struct nand_chip *nand_chip = (struct nand_chip *)&b47n->nand_chip;
+	int err;
+	u32 freq;
+	u16 clock;
+	u8 w0, w1, w2, w3, w4;
+
+	unsigned long chipsize; /* MiB */
+	u8 tbits, col_bits, col_size, row_bits, row_bsize;
+	u32 val;
+
+	b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
+	nand_chip->cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
+	nand_chip->dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
+	b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
+	b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
+	b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
+	b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
+
+	nand_chip->chip_delay = 50;
+	b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
+	b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */
+
+	/* Enable NAND flash access */
+	bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+		      BCMA_CC_4706_FLASHSCFG_NF1);
+
+	/* Configure wait counters */
+	if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
+		/* 400 MHz */
+		freq = 400000000 / 4;
+	} else {
+		freq = bcma_chipco_pll_read(b47n->cc, 4);
+		freq = (freq & 0xFFF) >> 3;
+		/* Fixed reference clock 25 MHz and m = 2 */
+		freq = (freq * 25000000 / 2) / 4;
+	}
+	clock = freq / 1000000;
+	w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
+	w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
+	w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+	w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
+	w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
+	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
+			(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));
+
+	/* Scan NAND */
+	err = nand_scan(nand_to_mtd(&b47n->nand_chip), 1);
+	if (err) {
+		pr_err("Could not scan NAND flash: %d\n", err);
+		goto exit;
+	}
+
+	/* Configure FLASH */
+	chipsize = b47n->nand_chip.chipsize >> 20;
+	tbits = ffs(chipsize); /* find first bit set */
+	if (!tbits || tbits != fls(chipsize)) {
+		pr_err("Invalid flash size: 0x%lX\n", chipsize);
+		err = -ENOTSUPP;
+		goto exit;
+	}
+	tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */
+
+	col_bits = b47n->nand_chip.page_shift + 1;
+	col_size = (col_bits + 7) / 8;
+
+	row_bits = tbits - col_bits + 1;
+	row_bsize = (row_bits + 7) / 8;
+
+	val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
+	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);
+
+exit:
+	if (err)
+		bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
+			       ~BCMA_CC_4706_FLASHSCFG_NF1);
+	return err;
+}
diff --git a/drivers/mtd/nand/rawnand/bf5xx_nand.c b/drivers/mtd/nand/rawnand/bf5xx_nand.c
new file mode 100644
index 000000000000..5655dca6ce43
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/bf5xx_nand.c
@@ -0,0 +1,860 @@ 
+/* linux/drivers/mtd/nand/rawnand/bf5xx_nand.c
+ *
+ * Copyright 2006-2008 Analog Devices Inc.
+ *	http://blackfin.uclinux.org/
+ *	Bryan Wu <bryan.wu@analog.com>
+ *
+ * Blackfin BF5xx on-chip NAND flash controller driver
+ *
+ * Derived from drivers/mtd/nand/s3c2410.c
+ * Copyright (c) 2007 Ben Dooks <ben@simtec.co.uk>
+ *
+ * Derived from drivers/mtd/nand/cafe.c
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ *
+ * Changelog:
+ *	12-Jun-2007  Bryan Wu:  Initial version
+ *	18-Jul-2007  Bryan Wu:
+ *		- ECC_HW and ECC_SW supported
+ *		- DMA supported in ECC_HW
+ *		- YAFFS tested as rootfs in both ECC_HW and ECC_SW
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/blackfin.h>
+#include <asm/dma.h>
+#include <asm/cacheflush.h>
+#include <asm/nand.h>
+#include <asm/portmux.h>
+
+#define DRV_NAME	"bf5xx-nand"
+#define DRV_VERSION	"1.2"
+#define DRV_AUTHOR	"Bryan Wu <bryan.wu@analog.com>"
+#define DRV_DESC	"BF5xx on-chip NAND Flash Controller Driver"
+
+/* NFC_STAT Masks */
+#define NBUSY       0x01  /* Not Busy */
+#define WB_FULL     0x02  /* Write Buffer Full */
+#define PG_WR_STAT  0x04  /* Page Write Pending */
+#define PG_RD_STAT  0x08  /* Page Read Pending */
+#define WB_EMPTY    0x10  /* Write Buffer Empty */
+
+/* NFC_IRQSTAT Masks */
+#define NBUSYIRQ    0x01  /* Not Busy IRQ */
+#define WB_OVF      0x02  /* Write Buffer Overflow */
+#define WB_EDGE     0x04  /* Write Buffer Edge Detect */
+#define RD_RDY      0x08  /* Read Data Ready */
+#define WR_DONE     0x10  /* Page Write Done */
+
+/* NFC_RST Masks */
+#define ECC_RST     0x01  /* ECC (and NFC counters) Reset */
+
+/* NFC_PGCTL Masks */
+#define PG_RD_START 0x01  /* Page Read Start */
+#define PG_WR_START 0x02  /* Page Write Start */
+
+#ifdef CONFIG_MTD_NAND_BF5XX_HWECC
+static int hardware_ecc = 1;
+#else
+static int hardware_ecc;
+#endif
+
+static const unsigned short bfin_nfc_pin_req[] =
+	{P_NAND_CE,
+	 P_NAND_RB,
+	 P_NAND_D0,
+	 P_NAND_D1,
+	 P_NAND_D2,
+	 P_NAND_D3,
+	 P_NAND_D4,
+	 P_NAND_D5,
+	 P_NAND_D6,
+	 P_NAND_D7,
+	 P_NAND_WE,
+	 P_NAND_RE,
+	 P_NAND_CLE,
+	 P_NAND_ALE,
+	 0};
+
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+static int bootrom_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = section * 8;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static int bootrom_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 7)
+		return -ERANGE;
+
+	oobregion->offset = (section * 8) + 3;
+	oobregion->length = 5;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops bootrom_ooblayout_ops = {
+	.ecc = bootrom_ooblayout_ecc,
+	.free = bootrom_ooblayout_free,
+};
+#endif
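
Spelled out, the layout described by the two callbacks above is eight 8-byte OOB sections, each holding 3 ECC bytes followed by 5 free bytes:

	section 0: ECC bytes  0-2,  free bytes  3-7
	section 1: ECC bytes  8-10, free bytes 11-15
	...
	section 7: ECC bytes 56-58, free bytes 59-63
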
+
+/*
+ * Data structures for bf5xx nand flash controller driver
+ */
+
+/* bf5xx nand info */
+struct bf5xx_nand_info {
+	/* mtd info */
+	struct nand_hw_control		controller;
+	struct nand_chip		chip;
+
+	/* platform info */
+	struct bf5xx_nand_platform	*platform;
+
+	/* device info */
+	struct device			*device;
+
+	/* DMA stuff */
+	struct completion		dma_completion;
+};
+
+/*
+ * Conversion functions
+ */
+static struct bf5xx_nand_info *mtd_to_nand_info(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct bf5xx_nand_info,
+			    chip);
+}
+
+static struct bf5xx_nand_info *to_nand_info(struct platform_device *pdev)
+{
+	return platform_get_drvdata(pdev);
+}
+
+static struct bf5xx_nand_platform *to_nand_plat(struct platform_device *pdev)
+{
+	return dev_get_platdata(&pdev->dev);
+}
+
+/*
+ * struct nand_chip interface function pointers
+ */
+
+/*
+ * bf5xx_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+ */
+static void bf5xx_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+				   unsigned int ctrl)
+{
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	while (bfin_read_NFC_STAT() & WB_FULL)
+		cpu_relax();
+
+	if (ctrl & NAND_CLE)
+		bfin_write_NFC_CMD(cmd);
+	else if (ctrl & NAND_ALE)
+		bfin_write_NFC_ADDR(cmd);
+	SSYNC();
+}
+
+/*
+ * bf5xx_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+ */
+static int bf5xx_nand_devready(struct mtd_info *mtd)
+{
+	unsigned short val = bfin_read_NFC_STAT();
+
+	if ((val & NBUSY) == NBUSY)
+		return 1;
+	else
+		return 0;
+}
+
+/*
+ * ECC functions
+ * These allow the bf5xx to use the controller's ECC
+ * generator block to ECC the data as it passes through
+ */
+
+/*
+ * ECC error correction function
+ */
+static int bf5xx_nand_correct_data_256(struct mtd_info *mtd, u_char *dat,
+					u_char *read_ecc, u_char *calc_ecc)
+{
+	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+	u32 syndrome[5];
+	u32 calced, stored;
+	int i;
+	unsigned short failing_bit, failing_byte;
+	u_char data;
+
+	calced = calc_ecc[0] | (calc_ecc[1] << 8) | (calc_ecc[2] << 16);
+	stored = read_ecc[0] | (read_ecc[1] << 8) | (read_ecc[2] << 16);
+
+	syndrome[0] = (calced ^ stored);
+
+	/*
+	 * syndrome 0: all zero
+	 * No error in data
+	 * No action
+	 */
+	if (!syndrome[0] || !calced || !stored)
+		return 0;
+
+	/*
+	 * syndrome 0: only one bit is one
+	 * ECC data was incorrect
+	 * No action
+	 */
+	if (hweight32(syndrome[0]) == 1) {
+		dev_err(info->device, "ECC data was incorrect!\n");
+		return -EBADMSG;
+	}
+
+	syndrome[1] = (calced & 0x7FF) ^ (stored & 0x7FF);
+	syndrome[2] = (calced & 0x7FF) ^ ((calced >> 11) & 0x7FF);
+	syndrome[3] = (stored & 0x7FF) ^ ((stored >> 11) & 0x7FF);
+	syndrome[4] = syndrome[2] ^ syndrome[3];
+
+	for (i = 0; i < 5; i++)
+		dev_info(info->device, "syndrome[%d] 0x%08x\n", i, syndrome[i]);
+
+	dev_info(info->device,
+		"calced[0x%08x], stored[0x%08x]\n",
+		calced, stored);
+
+	/*
+	 * syndrome 0: exactly 11 bits are one, each parity
+	 * and parity' pair is 1 & 0 or 0 & 1.
+	 * 1-bit correctable error
+	 * Correct the error
+	 */
+	if (hweight32(syndrome[0]) == 11 && syndrome[4] == 0x7FF) {
+		dev_info(info->device,
+			"1-bit correctable error, correct it.\n");
+		dev_info(info->device,
+			"syndrome[1] 0x%08x\n", syndrome[1]);
+
+		failing_bit = syndrome[1] & 0x7;
+		failing_byte = syndrome[1] >> 0x3;
+		data = *(dat + failing_byte);
+		data = data ^ (0x1 << failing_bit);
+		*(dat + failing_byte) = data;
+
+		return 1;
+	}
+
+	/*
+	 * syndrome 0: random data
+	 * More than 1-bit error, non-correctable error
+	 * Discard data, mark bad block
+	 */
+	dev_err(info->device,
+		"More than 1-bit error, non-correctable error.\n");
+	dev_err(info->device,
+		"Please discard data, mark bad block\n");
+
+	return -EBADMSG;
+}
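
To make the single-bit correction arithmetic concrete with a made-up value: if syndrome[1] were 0xb (binary 1011), then failing_bit = 0xb & 0x7 = 3 and failing_byte = 0xb >> 3 = 1, so the code flips bit 3 of byte 1 in the 256-byte chunk.
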
+
+static int bf5xx_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+					u_char *read_ecc, u_char *calc_ecc)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret, bitflips = 0;
+
+	ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+	if (ret < 0)
+		return ret;
+
+	bitflips = ret;
+
+	/* If ecc size is 512, correct second 256 bytes */
+	if (chip->ecc.size == 512) {
+		dat += 256;
+		read_ecc += 3;
+		calc_ecc += 3;
+		ret = bf5xx_nand_correct_data_256(mtd, dat, read_ecc, calc_ecc);
+		if (ret < 0)
+			return ret;
+
+		bitflips += ret;
+	}
+
+	return bitflips;
+}
+
+static void bf5xx_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	return;
+}
+
+static int bf5xx_nand_calculate_ecc(struct mtd_info *mtd,
+		const u_char *dat, u_char *ecc_code)
+{
+	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	u16 ecc0, ecc1;
+	u32 code[2];
+	u8 *p;
+
+	/* first 3 bytes ECC code for 256 page size */
+	ecc0 = bfin_read_NFC_ECC0();
+	ecc1 = bfin_read_NFC_ECC1();
+
+	code[0] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
+
+	dev_dbg(info->device, "returning ecc 0x%08x\n", code[0]);
+
+	p = (u8 *) code;
+	memcpy(ecc_code, p, 3);
+
+	/* second 3 bytes ECC code for 512 ecc size */
+	if (chip->ecc.size == 512) {
+		ecc0 = bfin_read_NFC_ECC2();
+		ecc1 = bfin_read_NFC_ECC3();
+		code[1] = (ecc0 & 0x7ff) | ((ecc1 & 0x7ff) << 11);
+
+		/* second 3 bytes in ecc_code for second 256
+		 * bytes of 512 page size
+		 */
+		p = (u8 *) (code + 1);
+		memcpy((ecc_code + 3), p, 3);
+		dev_dbg(info->device, "returning ecc 0x%08x\n", code[1]);
+	}
+
+	return 0;
+}
+
+/*
+ * PIO mode for buffer writing and reading
+ */
+static void bf5xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	unsigned short val;
+
+	/*
+	 * Data reads are requested by first writing to NFC_DATA_RD
+	 * and then reading back from NFC_READ.
+	 */
+	for (i = 0; i < len; i++) {
+		while (bfin_read_NFC_STAT() & WB_FULL)
+			cpu_relax();
+
+		/* Contents do not matter */
+		bfin_write_NFC_DATA_RD(0x0000);
+		SSYNC();
+
+		while ((bfin_read_NFC_IRQSTAT() & RD_RDY) != RD_RDY)
+			cpu_relax();
+
+		buf[i] = bfin_read_NFC_READ();
+
+		val = bfin_read_NFC_IRQSTAT();
+		val |= RD_RDY;
+		bfin_write_NFC_IRQSTAT(val);
+		SSYNC();
+	}
+}
+
+static uint8_t bf5xx_nand_read_byte(struct mtd_info *mtd)
+{
+	uint8_t val;
+
+	bf5xx_nand_read_buf(mtd, &val, 1);
+
+	return val;
+}
+
+static void bf5xx_nand_write_buf(struct mtd_info *mtd,
+				const uint8_t *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		while (bfin_read_NFC_STAT() & WB_FULL)
+			cpu_relax();
+
+		bfin_write_NFC_DATA_WR(buf[i]);
+		SSYNC();
+	}
+}
+
+static void bf5xx_nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+	/*
+	 * Data reads are requested by first writing to NFC_DATA_RD
+	 * and then reading back from NFC_READ.
+	 */
+	bfin_write_NFC_DATA_RD(0x5555);
+
+	SSYNC();
+
+	for (i = 0; i < len; i++)
+		p[i] = bfin_read_NFC_READ();
+}
+
+static void bf5xx_nand_write_buf16(struct mtd_info *mtd,
+				const uint8_t *buf, int len)
+{
+	int i;
+	u16 *p = (u16 *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		bfin_write_NFC_DATA_WR(p[i]);
+
+	SSYNC();
+}
+
+/*
+ * DMA functions for buffer writing and reading
+ */
+static irqreturn_t bf5xx_nand_dma_irq(int irq, void *dev_id)
+{
+	struct bf5xx_nand_info *info = dev_id;
+
+	clear_dma_irqstat(CH_NFC);
+	disable_dma(CH_NFC);
+	complete(&info->dma_completion);
+
+	return IRQ_HANDLED;
+}
+
+static void bf5xx_nand_dma_rw(struct mtd_info *mtd,
+				uint8_t *buf, int is_read)
+{
+	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	unsigned short val;
+
+	dev_dbg(info->device, " mtd->%p, buf->%p, is_read %d\n",
+			mtd, buf, is_read);
+
+	/*
+	 * Before starting a dma transfer, be sure to invalidate/flush
+	 * the cache over the address range of your DMA buffer to
+	 * prevent cache coherency problems. Otherwise very subtle bugs
+	 * can be introduced to your driver.
+	 */
+	if (is_read)
+		invalidate_dcache_range((unsigned int)buf,
+				(unsigned int)(buf + chip->ecc.size));
+	else
+		flush_dcache_range((unsigned int)buf,
+				(unsigned int)(buf + chip->ecc.size));
+
+	/*
+	 * This register must be written before each page is
+	 * transferred to generate the correct ECC register
+	 * values.
+	 */
+	bfin_write_NFC_RST(ECC_RST);
+	SSYNC();
+	while (bfin_read_NFC_RST() & ECC_RST)
+		cpu_relax();
+
+	disable_dma(CH_NFC);
+	clear_dma_irqstat(CH_NFC);
+
+	/* setup DMA register with Blackfin DMA API */
+	set_dma_config(CH_NFC, 0x0);
+	set_dma_start_addr(CH_NFC, (unsigned long) buf);
+
+	/* The DMAs have different size on BF52x and BF54x */
+#ifdef CONFIG_BF52x
+	set_dma_x_count(CH_NFC, (chip->ecc.size >> 1));
+	set_dma_x_modify(CH_NFC, 2);
+	val = DI_EN | WDSIZE_16;
+#endif
+
+#ifdef CONFIG_BF54x
+	set_dma_x_count(CH_NFC, (chip->ecc.size >> 2));
+	set_dma_x_modify(CH_NFC, 4);
+	val = DI_EN | WDSIZE_32;
+#endif
+	/* setup write or read operation */
+	if (is_read)
+		val |= WNR;
+	set_dma_config(CH_NFC, val);
+	enable_dma(CH_NFC);
+
+	/* Start PAGE read/write operation */
+	if (is_read)
+		bfin_write_NFC_PGCTL(PG_RD_START);
+	else
+		bfin_write_NFC_PGCTL(PG_WR_START);
+	wait_for_completion(&info->dma_completion);
+}
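
For the ecc.size == 512 case this DMA path handles, the two configurations above amount to 256 16-bit transfers (512 >> 1, x_modify = 2) on BF52x versus 128 32-bit transfers (512 >> 2, x_modify = 4) on BF54x.
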
+
+static void bf5xx_nand_dma_read_buf(struct mtd_info *mtd,
+					uint8_t *buf, int len)
+{
+	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
+
+	if (len == chip->ecc.size)
+		bf5xx_nand_dma_rw(mtd, buf, 1);
+	else
+		bf5xx_nand_read_buf(mtd, buf, len);
+}
+
+static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
+				const uint8_t *buf, int len)
+{
+	struct bf5xx_nand_info *info = mtd_to_nand_info(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	dev_dbg(info->device, "mtd->%p, buf->%p, len %d\n", mtd, buf, len);
+
+	if (len == chip->ecc.size)
+		bf5xx_nand_dma_rw(mtd, (uint8_t *)buf, 0);
+	else
+		bf5xx_nand_write_buf(mtd, buf, len);
+}
+
+static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+		uint8_t *buf, int oob_required, int page)
+{
+	bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
+	bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static int bf5xx_nand_write_page_raw(struct mtd_info *mtd,
+		struct nand_chip *chip,	const uint8_t *buf, int oob_required,
+		int page)
+{
+	bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
+	bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/*
+ * System initialization functions
+ */
+static int bf5xx_nand_dma_init(struct bf5xx_nand_info *info)
+{
+	int ret;
+
+	/* Do not use dma */
+	if (!hardware_ecc)
+		return 0;
+
+	init_completion(&info->dma_completion);
+
+	/* Request NFC DMA channel */
+	ret = request_dma(CH_NFC, "BF5XX NFC driver");
+	if (ret < 0) {
+		dev_err(info->device, " unable to get DMA channel\n");
+		return ret;
+	}
+
+#ifdef CONFIG_BF54x
+	/* Set up the DMAC1 channel mux for the NFC, which is shared with the SDH */
+	bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() & ~1);
+	SSYNC();
+#endif
+
+	set_dma_callback(CH_NFC, bf5xx_nand_dma_irq, info);
+
+	/* Turn off the DMA channel first */
+	disable_dma(CH_NFC);
+	return 0;
+}
+
+static void bf5xx_nand_dma_remove(struct bf5xx_nand_info *info)
+{
+	/* Free NFC DMA channel */
+	if (hardware_ecc)
+		free_dma(CH_NFC);
+}
+
+/*
+ * BF5XX NFC hardware initialization
+ *  - pin mux setup
+ *  - clear interrupt status
+ */
+static int bf5xx_nand_hw_init(struct bf5xx_nand_info *info)
+{
+	int err = 0;
+	unsigned short val;
+	struct bf5xx_nand_platform *plat = info->platform;
+
+	/* setup NFC_CTL register */
+	dev_info(info->device,
+		"data_width=%d, wr_dly=%d, rd_dly=%d\n",
+		(plat->data_width ? 16 : 8),
+		plat->wr_dly, plat->rd_dly);
+
+	val = (1 << NFC_PG_SIZE_OFFSET) |
+		(plat->data_width << NFC_NWIDTH_OFFSET) |
+		(plat->rd_dly << NFC_RDDLY_OFFSET) |
+		(plat->wr_dly << NFC_WRDLY_OFFSET);
+	dev_dbg(info->device, "NFC_CTL is 0x%04x\n", val);
+
+	bfin_write_NFC_CTL(val);
+	SSYNC();
+
+	/* clear interrupt status */
+	bfin_write_NFC_IRQMASK(0x0);
+	SSYNC();
+	val = bfin_read_NFC_IRQSTAT();
+	bfin_write_NFC_IRQSTAT(val);
+	SSYNC();
+
+	/* DMA initialization */
+	if (bf5xx_nand_dma_init(info))
+		err = -ENXIO;
+
+	return err;
+}
+
+/*
+ * Device management interface
+ */
+static int bf5xx_nand_add_partition(struct bf5xx_nand_info *info)
+{
+	struct mtd_info *mtd = nand_to_mtd(&info->chip);
+	struct mtd_partition *parts = info->platform->partitions;
+	int nr = info->platform->nr_partitions;
+
+	return mtd_device_register(mtd, parts, nr);
+}
+
+static int bf5xx_nand_remove(struct platform_device *pdev)
+{
+	struct bf5xx_nand_info *info = to_nand_info(pdev);
+
+	/* First release the MTD device and its partitions,
+	 * then go through freeing the resources used.
+	 */
+	nand_release(nand_to_mtd(&info->chip));
+
+	peripheral_free_list(bfin_nfc_pin_req);
+	bf5xx_nand_dma_remove(info);
+
+	return 0;
+}
+
+static int bf5xx_nand_scan(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
+
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		return ret;
+
+	if (hardware_ecc) {
+		/*
+		 * For NAND with a page size > 512B, treat the page as several 512B sections.
+		 */
+		if (likely(mtd->writesize >= 512)) {
+			chip->ecc.size = 512;
+			chip->ecc.bytes = 6;
+			chip->ecc.strength = 2;
+		} else {
+			chip->ecc.size = 256;
+			chip->ecc.bytes = 3;
+			chip->ecc.strength = 1;
+			bfin_write_NFC_CTL(bfin_read_NFC_CTL() & ~(1 << NFC_PG_SIZE_OFFSET));
+			SSYNC();
+		}
+	}
+
+	return	nand_scan_tail(mtd);
+}
+
+/*
+ * bf5xx_nand_probe
+ *
+ * Called by the device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if it
+ * can allocate all necessary resources and then calls the
+ * NAND layer to look for devices.
+ */
+static int bf5xx_nand_probe(struct platform_device *pdev)
+{
+	struct bf5xx_nand_platform *plat = to_nand_plat(pdev);
+	struct bf5xx_nand_info *info = NULL;
+	struct nand_chip *chip = NULL;
+	struct mtd_info *mtd = NULL;
+	int err = 0;
+
+	dev_dbg(&pdev->dev, "(%p)\n", pdev);
+
+	if (!plat) {
+		dev_err(&pdev->dev, "no platform specific information\n");
+		return -EINVAL;
+	}
+
+	if (peripheral_request_list(bfin_nfc_pin_req, DRV_NAME)) {
+		dev_err(&pdev->dev, "requesting Peripherals failed\n");
+		return -EFAULT;
+	}
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (info == NULL) {
+		err = -ENOMEM;
+		goto out_err;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	nand_hw_control_init(&info->controller);
+
+	info->device     = &pdev->dev;
+	info->platform   = plat;
+
+	/* initialise chip data struct */
+	chip = &info->chip;
+	mtd = nand_to_mtd(&info->chip);
+
+	if (plat->data_width)
+		chip->options |= NAND_BUSWIDTH_16;
+
+	chip->options |= NAND_CACHEPRG | NAND_SKIP_BBTSCAN;
+
+	chip->read_buf = (plat->data_width) ?
+		bf5xx_nand_read_buf16 : bf5xx_nand_read_buf;
+	chip->write_buf = (plat->data_width) ?
+		bf5xx_nand_write_buf16 : bf5xx_nand_write_buf;
+
+	chip->read_byte    = bf5xx_nand_read_byte;
+
+	chip->cmd_ctrl     = bf5xx_nand_hwcontrol;
+	chip->dev_ready    = bf5xx_nand_devready;
+
+	nand_set_controller_data(chip, mtd);
+	chip->controller   = &info->controller;
+
+	chip->IO_ADDR_R    = (void __iomem *) NFC_READ;
+	chip->IO_ADDR_W    = (void __iomem *) NFC_DATA_WR;
+
+	chip->chip_delay   = 0;
+
+	/* initialise mtd info data struct */
+	mtd->dev.parent = &pdev->dev;
+
+	/* initialise the hardware */
+	err = bf5xx_nand_hw_init(info);
+	if (err)
+		goto out_err;
+
+	/* setup hardware ECC data struct */
+	if (hardware_ecc) {
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+		mtd_set_ooblayout(mtd, &bootrom_ooblayout_ops);
+#endif
+		chip->read_buf      = bf5xx_nand_dma_read_buf;
+		chip->write_buf     = bf5xx_nand_dma_write_buf;
+		chip->ecc.calculate = bf5xx_nand_calculate_ecc;
+		chip->ecc.correct   = bf5xx_nand_correct_data;
+		chip->ecc.mode	    = NAND_ECC_HW;
+		chip->ecc.hwctl	    = bf5xx_nand_enable_hwecc;
+		chip->ecc.read_page_raw = bf5xx_nand_read_page_raw;
+		chip->ecc.write_page_raw = bf5xx_nand_write_page_raw;
+	} else {
+		chip->ecc.mode	    = NAND_ECC_SOFT;
+		chip->ecc.algo	= NAND_ECC_HAMMING;
+	}
+
+	/* scan hardware nand chip and setup mtd info data struct */
+	if (bf5xx_nand_scan(mtd)) {
+		err = -ENXIO;
+		goto out_err_nand_scan;
+	}
+
+#ifdef CONFIG_MTD_NAND_BF5XX_BOOTROM_ECC
+	chip->badblockpos = 63;
+#endif
+
+	/* add NAND partition */
+	bf5xx_nand_add_partition(info);
+
+	dev_dbg(&pdev->dev, "initialised ok\n");
+	return 0;
+
+out_err_nand_scan:
+	bf5xx_nand_dma_remove(info);
+out_err:
+	peripheral_free_list(bfin_nfc_pin_req);
+
+	return err;
+}
+
+/* driver device registration */
+static struct platform_driver bf5xx_nand_driver = {
+	.probe		= bf5xx_nand_probe,
+	.remove		= bf5xx_nand_remove,
+	.driver		= {
+		.name	= DRV_NAME,
+	},
+};
+
+module_platform_driver(bf5xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/mtd/nand/rawnand/brcmnand/Makefile b/drivers/mtd/nand/rawnand/brcmnand/Makefile
new file mode 100644
index 000000000000..b28ffb59eb43
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/Makefile
@@ -0,0 +1,7 @@ 
+# link order matters; don't link the more generic brcmstb_nand.o before the
+# more specific iproc_nand.o, for instance
+obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= iproc_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= bcm63138_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= bcm6368_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= brcmstb_nand.o
+obj-$(CONFIG_MTD_NAND_BRCMNAND)		+= brcmnand.o
diff --git a/drivers/mtd/nand/rawnand/brcmnand/bcm63138_nand.c b/drivers/mtd/nand/rawnand/brcmnand/bcm63138_nand.c
new file mode 100644
index 000000000000..59444b3a697d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/bcm63138_nand.c
@@ -0,0 +1,109 @@ 
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm63138_nand_soc {
+	struct brcmnand_soc soc;
+	void __iomem *base;
+};
+
+#define BCM63138_NAND_INT_STATUS		0x00
+#define BCM63138_NAND_INT_EN			0x04
+
+enum {
+	BCM63138_CTLRDY		= BIT(4),
+};
+
+static bool bcm63138_nand_intc_ack(struct brcmnand_soc *soc)
+{
+	struct bcm63138_nand_soc *priv =
+			container_of(soc, struct bcm63138_nand_soc, soc);
+	void __iomem *mmio = priv->base + BCM63138_NAND_INT_STATUS;
+	u32 val = brcmnand_readl(mmio);
+
+	if (val & BCM63138_CTLRDY) {
+		brcmnand_writel(val & ~BCM63138_CTLRDY, mmio);
+		return true;
+	}
+
+	return false;
+}
+
+static void bcm63138_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+	struct bcm63138_nand_soc *priv =
+			container_of(soc, struct bcm63138_nand_soc, soc);
+	void __iomem *mmio = priv->base + BCM63138_NAND_INT_EN;
+	u32 val = brcmnand_readl(mmio);
+
+	if (en)
+		val |= BCM63138_CTLRDY;
+	else
+		val &= ~BCM63138_CTLRDY;
+
+	brcmnand_writel(val, mmio);
+}
+
+static int bcm63138_nand_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct bcm63138_nand_soc *priv;
+	struct brcmnand_soc *soc;
+	struct resource *res;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	soc = &priv->soc;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-int-base");
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	soc->ctlrdy_ack = bcm63138_nand_intc_ack;
+	soc->ctlrdy_set_enabled = bcm63138_nand_intc_set;
+
+	return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm63138_nand_of_match[] = {
+	{ .compatible = "brcm,nand-bcm63138" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm63138_nand_of_match);
+
+static struct platform_driver bcm63138_nand_driver = {
+	.probe			= bcm63138_nand_probe,
+	.remove			= brcmnand_remove,
+	.driver = {
+		.name		= "bcm63138_nand",
+		.pm		= &brcmnand_pm_ops,
+		.of_match_table	= bcm63138_nand_of_match,
+	}
+};
+module_platform_driver(bcm63138_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for BCM63138");
diff --git a/drivers/mtd/nand/rawnand/brcmnand/bcm6368_nand.c b/drivers/mtd/nand/rawnand/brcmnand/bcm6368_nand.c
new file mode 100644
index 000000000000..34c91b0e1e69
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/bcm6368_nand.c
@@ -0,0 +1,142 @@ 
+/*
+ * Copyright 2015 Simon Arlott
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Derived from bcm63138_nand.c:
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/include/bcm963xx/63268_map_part.h:
+ * Copyright 2000-2010 Broadcom Corporation
+ *
+ * Derived from bcm963xx_4.12L.06B_consumer/shared/opensource/flash/nandflash.c:
+ * Copyright 2000-2010 Broadcom Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct bcm6368_nand_soc {
+	struct brcmnand_soc soc;
+	void __iomem *base;
+};
+
+#define BCM6368_NAND_INT		0x00
+#define  BCM6368_NAND_STATUS_SHIFT	0
+#define  BCM6368_NAND_STATUS_MASK	(0xfff << BCM6368_NAND_STATUS_SHIFT)
+#define  BCM6368_NAND_ENABLE_SHIFT	16
+#define  BCM6368_NAND_ENABLE_MASK	(0xffff << BCM6368_NAND_ENABLE_SHIFT)
+#define BCM6368_NAND_BASE_ADDR0	0x04
+#define BCM6368_NAND_BASE_ADDR1	0x0c
+
+enum {
+	BCM6368_NP_READ		= BIT(0),
+	BCM6368_BLOCK_ERASE	= BIT(1),
+	BCM6368_COPY_BACK	= BIT(2),
+	BCM6368_PAGE_PGM	= BIT(3),
+	BCM6368_CTRL_READY	= BIT(4),
+	BCM6368_DEV_RBPIN	= BIT(5),
+	BCM6368_ECC_ERR_UNC	= BIT(6),
+	BCM6368_ECC_ERR_CORR	= BIT(7),
+};
+
+static bool bcm6368_nand_intc_ack(struct brcmnand_soc *soc)
+{
+	struct bcm6368_nand_soc *priv =
+			container_of(soc, struct bcm6368_nand_soc, soc);
+	void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+	u32 val = brcmnand_readl(mmio);
+
+	if (val & (BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT)) {
+		/* Ack interrupt */
+		val &= ~BCM6368_NAND_STATUS_MASK;
+		val |= BCM6368_CTRL_READY << BCM6368_NAND_STATUS_SHIFT;
+		brcmnand_writel(val, mmio);
+		return true;
+	}
+
+	return false;
+}
+
+static void bcm6368_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+	struct bcm6368_nand_soc *priv =
+			container_of(soc, struct bcm6368_nand_soc, soc);
+	void __iomem *mmio = priv->base + BCM6368_NAND_INT;
+	u32 val = brcmnand_readl(mmio);
+
+	/* Don't ack any interrupts */
+	val &= ~BCM6368_NAND_STATUS_MASK;
+
+	if (en)
+		val |= BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT;
+	else
+		val &= ~(BCM6368_CTRL_READY << BCM6368_NAND_ENABLE_SHIFT);
+
+	brcmnand_writel(val, mmio);
+}
+
+static int bcm6368_nand_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct bcm6368_nand_soc *priv;
+	struct brcmnand_soc *soc;
+	struct resource *res;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	soc = &priv->soc;
+
+	res = platform_get_resource_byname(pdev,
+		IORESOURCE_MEM, "nand-int-base");
+	priv->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->base))
+		return PTR_ERR(priv->base);
+
+	soc->ctlrdy_ack = bcm6368_nand_intc_ack;
+	soc->ctlrdy_set_enabled = bcm6368_nand_intc_set;
+
+	/* Disable and ack all interrupts  */
+	brcmnand_writel(0, priv->base + BCM6368_NAND_INT);
+	brcmnand_writel(BCM6368_NAND_STATUS_MASK,
+			priv->base + BCM6368_NAND_INT);
+
+	return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id bcm6368_nand_of_match[] = {
+	{ .compatible = "brcm,nand-bcm6368" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm6368_nand_of_match);
+
+static struct platform_driver bcm6368_nand_driver = {
+	.probe			= bcm6368_nand_probe,
+	.remove			= brcmnand_remove,
+	.driver = {
+		.name		= "bcm6368_nand",
+		.pm		= &brcmnand_pm_ops,
+		.of_match_table	= bcm6368_nand_of_match,
+	}
+};
+module_platform_driver(bcm6368_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Simon Arlott");
+MODULE_DESCRIPTION("NAND driver for BCM6368");
diff --git a/drivers/mtd/nand/rawnand/brcmnand/brcmnand.c b/drivers/mtd/nand/rawnand/brcmnand/brcmnand.c
new file mode 100644
index 000000000000..98453816a0a2
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/brcmnand.c
@@ -0,0 +1,2561 @@ 
+/*
+ * Copyright © 2010-2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/ioport.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/log2.h>
+
+#include "brcmnand.h"
+
+/*
+ * This flag controls if WP stays on between erase/write commands to mitigate
+ * flash corruption due to power glitches. Values:
+ * 0: NAND_WP is not used or not available
+ * 1: NAND_WP is set by default, cleared for erase/write operations
+ * 2: NAND_WP is always cleared
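+ *
+ * The parameter is read-only at runtime (0444), so it can only be changed at
+ * boot, e.g. with brcmnand.wp_on=2 on the kernel command line (module name
+ * assumed here).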
+ */
+static int wp_on = 1;
+module_param(wp_on, int, 0444);
+
+/***********************************************************************
+ * Definitions
+ ***********************************************************************/
+
+#define DRV_NAME			"brcmnand"
+
+#define CMD_NULL			0x00
+#define CMD_PAGE_READ			0x01
+#define CMD_SPARE_AREA_READ		0x02
+#define CMD_STATUS_READ			0x03
+#define CMD_PROGRAM_PAGE		0x04
+#define CMD_PROGRAM_SPARE_AREA		0x05
+#define CMD_COPY_BACK			0x06
+#define CMD_DEVICE_ID_READ		0x07
+#define CMD_BLOCK_ERASE			0x08
+#define CMD_FLASH_RESET			0x09
+#define CMD_BLOCKS_LOCK			0x0a
+#define CMD_BLOCKS_LOCK_DOWN		0x0b
+#define CMD_BLOCKS_UNLOCK		0x0c
+#define CMD_READ_BLOCKS_LOCK_STATUS	0x0d
+#define CMD_PARAMETER_READ		0x0e
+#define CMD_PARAMETER_CHANGE_COL	0x0f
+#define CMD_LOW_LEVEL_OP		0x10
+
+struct brcm_nand_dma_desc {
+	u32 next_desc;
+	u32 next_desc_ext;
+	u32 cmd_irq;
+	u32 dram_addr;
+	u32 dram_addr_ext;
+	u32 tfr_len;
+	u32 total_len;
+	u32 flash_addr;
+	u32 flash_addr_ext;
+	u32 cs;
+	u32 pad2[5];
+	u32 status_valid;
+} __packed;
+
+/* Bitfields for brcm_nand_dma_desc::status_valid */
+#define FLASH_DMA_ECC_ERROR	(1 << 8)
+#define FLASH_DMA_CORR_ERROR	(1 << 9)
+
+/* 512B flash cache in the NAND controller HW */
+#define FC_SHIFT		9U
+#define FC_BYTES		512U
+#define FC_WORDS		(FC_BYTES >> 2)
+
+#define BRCMNAND_MIN_PAGESIZE	512
+#define BRCMNAND_MIN_BLOCKSIZE	(8 * 1024)
+#define BRCMNAND_MIN_DEVSIZE	(4ULL * 1024 * 1024)
+
+/* Controller feature flags */
+enum {
+	BRCMNAND_HAS_1K_SECTORS			= BIT(0),
+	BRCMNAND_HAS_PREFETCH			= BIT(1),
+	BRCMNAND_HAS_CACHE_MODE			= BIT(2),
+	BRCMNAND_HAS_WP				= BIT(3),
+};
+
+struct brcmnand_controller {
+	struct device		*dev;
+	struct nand_hw_control	controller;
+	void __iomem		*nand_base;
+	void __iomem		*nand_fc; /* flash cache */
+	void __iomem		*flash_dma_base;
+	unsigned int		irq;
+	unsigned int		dma_irq;
+	int			nand_version;
+
+	/* Some SoCs provide custom interrupt status register(s) */
+	struct brcmnand_soc	*soc;
+
+	/* Some SoCs have a gateable clock for the controller */
+	struct clk		*clk;
+
+	int			cmd_pending;
+	bool			dma_pending;
+	struct completion	done;
+	struct completion	dma_done;
+
+	/* List of NAND hosts (one for each chip-select) */
+	struct list_head host_list;
+
+	struct brcm_nand_dma_desc *dma_desc;
+	dma_addr_t		dma_pa;
+
+	/* in-memory cache of the FLASH_CACHE, used only for some commands */
+	u8			flash_cache[FC_BYTES];
+
+	/* Controller revision details */
+	const u16		*reg_offsets;
+	unsigned int		reg_spacing; /* between CS1, CS2, ... regs */
+	const u8		*cs_offsets; /* within each chip-select */
+	const u8		*cs0_offsets; /* within CS0, if different */
+	unsigned int		max_block_size;
+	const unsigned int	*block_sizes;
+	unsigned int		max_page_size;
+	const unsigned int	*page_sizes;
+	unsigned int		max_oob;
+	u32			features;
+
+	/* for low-power standby/resume only */
+	u32			nand_cs_nand_select;
+	u32			nand_cs_nand_xor;
+	u32			corr_stat_threshold;
+	u32			flash_dma_mode;
+};
+
+struct brcmnand_cfg {
+	u64			device_size;
+	unsigned int		block_size;
+	unsigned int		page_size;
+	unsigned int		spare_area_size;
+	unsigned int		device_width;
+	unsigned int		col_adr_bytes;
+	unsigned int		blk_adr_bytes;
+	unsigned int		ful_adr_bytes;
+	unsigned int		sector_size_1k;
+	unsigned int		ecc_level;
+	/* used for low-power standby/resume only */
+	u32			acc_control;
+	u32			config;
+	u32			config_ext;
+	u32			timing_1;
+	u32			timing_2;
+};
+
+struct brcmnand_host {
+	struct list_head	node;
+
+	struct nand_chip	chip;
+	struct platform_device	*pdev;
+	int			cs;
+
+	unsigned int		last_cmd;
+	unsigned int		last_byte;
+	u64			last_addr;
+	struct brcmnand_cfg	hwcfg;
+	struct brcmnand_controller *ctrl;
+};
+
+enum brcmnand_reg {
+	BRCMNAND_CMD_START = 0,
+	BRCMNAND_CMD_EXT_ADDRESS,
+	BRCMNAND_CMD_ADDRESS,
+	BRCMNAND_INTFC_STATUS,
+	BRCMNAND_CS_SELECT,
+	BRCMNAND_CS_XOR,
+	BRCMNAND_LL_OP,
+	BRCMNAND_CS0_BASE,
+	BRCMNAND_CS1_BASE,		/* CS1 regs, if non-contiguous */
+	BRCMNAND_CORR_THRESHOLD,
+	BRCMNAND_CORR_THRESHOLD_EXT,
+	BRCMNAND_UNCORR_COUNT,
+	BRCMNAND_CORR_COUNT,
+	BRCMNAND_CORR_EXT_ADDR,
+	BRCMNAND_CORR_ADDR,
+	BRCMNAND_UNCORR_EXT_ADDR,
+	BRCMNAND_UNCORR_ADDR,
+	BRCMNAND_SEMAPHORE,
+	BRCMNAND_ID,
+	BRCMNAND_ID_EXT,
+	BRCMNAND_LL_RDATA,
+	BRCMNAND_OOB_READ_BASE,
+	BRCMNAND_OOB_READ_10_BASE,	/* offset 0x10, if non-contiguous */
+	BRCMNAND_OOB_WRITE_BASE,
+	BRCMNAND_OOB_WRITE_10_BASE,	/* offset 0x10, if non-contiguous */
+	BRCMNAND_FC_BASE,
+};
+
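+/*
+ * Per-revision register offset tables. A zero entry means the register does
+ * not exist on that revision: brcmnand_read_reg() then returns 0 and
+ * brcmnand_write_reg() is a no-op.
+ */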
+/* BRCMNAND v4.0 */
+static const u16 brcmnand_regs_v40[] = {
+	[BRCMNAND_CMD_START]		=  0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
+	[BRCMNAND_INTFC_STATUS]		=  0x6c,
+	[BRCMNAND_CS_SELECT]		=  0x14,
+	[BRCMNAND_CS_XOR]		=  0x18,
+	[BRCMNAND_LL_OP]		= 0x178,
+	[BRCMNAND_CS0_BASE]		=  0x40,
+	[BRCMNAND_CS1_BASE]		=  0xd0,
+	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
+	[BRCMNAND_UNCORR_COUNT]		=     0,
+	[BRCMNAND_CORR_COUNT]		=     0,
+	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
+	[BRCMNAND_CORR_ADDR]		=  0x74,
+	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
+	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
+	[BRCMNAND_SEMAPHORE]		=  0x58,
+	[BRCMNAND_ID]			=  0x60,
+	[BRCMNAND_ID_EXT]		=  0x64,
+	[BRCMNAND_LL_RDATA]		= 0x17c,
+	[BRCMNAND_OOB_READ_BASE]	=  0x20,
+	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
+	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
+	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
+	[BRCMNAND_FC_BASE]		= 0x200,
+};
+
+/* BRCMNAND v5.0 */
+static const u16 brcmnand_regs_v50[] = {
+	[BRCMNAND_CMD_START]		=  0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
+	[BRCMNAND_INTFC_STATUS]		=  0x6c,
+	[BRCMNAND_CS_SELECT]		=  0x14,
+	[BRCMNAND_CS_XOR]		=  0x18,
+	[BRCMNAND_LL_OP]		= 0x178,
+	[BRCMNAND_CS0_BASE]		=  0x40,
+	[BRCMNAND_CS1_BASE]		=  0xd0,
+	[BRCMNAND_CORR_THRESHOLD]	=  0x84,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	=     0,
+	[BRCMNAND_UNCORR_COUNT]		=     0,
+	[BRCMNAND_CORR_COUNT]		=     0,
+	[BRCMNAND_CORR_EXT_ADDR]	=  0x70,
+	[BRCMNAND_CORR_ADDR]		=  0x74,
+	[BRCMNAND_UNCORR_EXT_ADDR]	=  0x78,
+	[BRCMNAND_UNCORR_ADDR]		=  0x7c,
+	[BRCMNAND_SEMAPHORE]		=  0x58,
+	[BRCMNAND_ID]			=  0x60,
+	[BRCMNAND_ID_EXT]		=  0x64,
+	[BRCMNAND_LL_RDATA]		= 0x17c,
+	[BRCMNAND_OOB_READ_BASE]	=  0x20,
+	[BRCMNAND_OOB_READ_10_BASE]	= 0x130,
+	[BRCMNAND_OOB_WRITE_BASE]	=  0x30,
+	[BRCMNAND_OOB_WRITE_10_BASE]	= 0x140,
+	[BRCMNAND_FC_BASE]		= 0x200,
+};
+
+/* BRCMNAND v6.0 - v7.1 */
+static const u16 brcmnand_regs_v60[] = {
+	[BRCMNAND_CMD_START]		=  0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
+	[BRCMNAND_INTFC_STATUS]		=  0x14,
+	[BRCMNAND_CS_SELECT]		=  0x18,
+	[BRCMNAND_CS_XOR]		=  0x1c,
+	[BRCMNAND_LL_OP]		=  0x20,
+	[BRCMNAND_CS0_BASE]		=  0x50,
+	[BRCMNAND_CS1_BASE]		=     0,
+	[BRCMNAND_CORR_THRESHOLD]	=  0xc0,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xc4,
+	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
+	[BRCMNAND_CORR_COUNT]		= 0x100,
+	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
+	[BRCMNAND_CORR_ADDR]		= 0x110,
+	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
+	[BRCMNAND_UNCORR_ADDR]		= 0x118,
+	[BRCMNAND_SEMAPHORE]		= 0x150,
+	[BRCMNAND_ID]			= 0x194,
+	[BRCMNAND_ID_EXT]		= 0x198,
+	[BRCMNAND_LL_RDATA]		= 0x19c,
+	[BRCMNAND_OOB_READ_BASE]	= 0x200,
+	[BRCMNAND_OOB_READ_10_BASE]	=     0,
+	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
+	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
+	[BRCMNAND_FC_BASE]		= 0x400,
+};
+
+/* BRCMNAND v7.1 */
+static const u16 brcmnand_regs_v71[] = {
+	[BRCMNAND_CMD_START]		=  0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
+	[BRCMNAND_INTFC_STATUS]		=  0x14,
+	[BRCMNAND_CS_SELECT]		=  0x18,
+	[BRCMNAND_CS_XOR]		=  0x1c,
+	[BRCMNAND_LL_OP]		=  0x20,
+	[BRCMNAND_CS0_BASE]		=  0x50,
+	[BRCMNAND_CS1_BASE]		=     0,
+	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
+	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
+	[BRCMNAND_CORR_COUNT]		= 0x100,
+	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
+	[BRCMNAND_CORR_ADDR]		= 0x110,
+	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
+	[BRCMNAND_UNCORR_ADDR]		= 0x118,
+	[BRCMNAND_SEMAPHORE]		= 0x150,
+	[BRCMNAND_ID]			= 0x194,
+	[BRCMNAND_ID_EXT]		= 0x198,
+	[BRCMNAND_LL_RDATA]		= 0x19c,
+	[BRCMNAND_OOB_READ_BASE]	= 0x200,
+	[BRCMNAND_OOB_READ_10_BASE]	=     0,
+	[BRCMNAND_OOB_WRITE_BASE]	= 0x280,
+	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
+	[BRCMNAND_FC_BASE]		= 0x400,
+};
+
+/* BRCMNAND v7.2 */
+static const u16 brcmnand_regs_v72[] = {
+	[BRCMNAND_CMD_START]		=  0x04,
+	[BRCMNAND_CMD_EXT_ADDRESS]	=  0x08,
+	[BRCMNAND_CMD_ADDRESS]		=  0x0c,
+	[BRCMNAND_INTFC_STATUS]		=  0x14,
+	[BRCMNAND_CS_SELECT]		=  0x18,
+	[BRCMNAND_CS_XOR]		=  0x1c,
+	[BRCMNAND_LL_OP]		=  0x20,
+	[BRCMNAND_CS0_BASE]		=  0x50,
+	[BRCMNAND_CS1_BASE]		=     0,
+	[BRCMNAND_CORR_THRESHOLD]	=  0xdc,
+	[BRCMNAND_CORR_THRESHOLD_EXT]	=  0xe0,
+	[BRCMNAND_UNCORR_COUNT]		=  0xfc,
+	[BRCMNAND_CORR_COUNT]		= 0x100,
+	[BRCMNAND_CORR_EXT_ADDR]	= 0x10c,
+	[BRCMNAND_CORR_ADDR]		= 0x110,
+	[BRCMNAND_UNCORR_EXT_ADDR]	= 0x114,
+	[BRCMNAND_UNCORR_ADDR]		= 0x118,
+	[BRCMNAND_SEMAPHORE]		= 0x150,
+	[BRCMNAND_ID]			= 0x194,
+	[BRCMNAND_ID_EXT]		= 0x198,
+	[BRCMNAND_LL_RDATA]		= 0x19c,
+	[BRCMNAND_OOB_READ_BASE]	= 0x200,
+	[BRCMNAND_OOB_READ_10_BASE]	=     0,
+	[BRCMNAND_OOB_WRITE_BASE]	= 0x400,
+	[BRCMNAND_OOB_WRITE_10_BASE]	=     0,
+	[BRCMNAND_FC_BASE]		= 0x600,
+};
+
+enum brcmnand_cs_reg {
+	BRCMNAND_CS_CFG_EXT = 0,
+	BRCMNAND_CS_CFG,
+	BRCMNAND_CS_ACC_CONTROL,
+	BRCMNAND_CS_TIMING1,
+	BRCMNAND_CS_TIMING2,
+};
+
+/* Per chip-select offsets for v7.1 */
+static const u8 brcmnand_cs_offsets_v71[] = {
+	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
+	[BRCMNAND_CS_CFG_EXT]		= 0x04,
+	[BRCMNAND_CS_CFG]		= 0x08,
+	[BRCMNAND_CS_TIMING1]		= 0x0c,
+	[BRCMNAND_CS_TIMING2]		= 0x10,
+};
+
+/* Per chip-select offsets for pre v7.1, except CS0 on <= v5.0 */
+static const u8 brcmnand_cs_offsets[] = {
+	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
+	[BRCMNAND_CS_CFG_EXT]		= 0x04,
+	[BRCMNAND_CS_CFG]		= 0x04,
+	[BRCMNAND_CS_TIMING1]		= 0x08,
+	[BRCMNAND_CS_TIMING2]		= 0x0c,
+};
+
+/* Per chip-select offsets for <= v5.0, CS0 only */
+static const u8 brcmnand_cs_offsets_cs0[] = {
+	[BRCMNAND_CS_ACC_CONTROL]	= 0x00,
+	[BRCMNAND_CS_CFG_EXT]		= 0x08,
+	[BRCMNAND_CS_CFG]		= 0x08,
+	[BRCMNAND_CS_TIMING1]		= 0x10,
+	[BRCMNAND_CS_TIMING2]		= 0x14,
+};
+
+/*
+ * Bitfields for the CFG and CFG_EXT registers. Pre-v7.1 controllers only had
+ * one config register, but once the bitfields overflowed, controllers from
+ * v7.1 onward added a CFG_EXT register and shuffled a few fields around.
+ */
+enum {
+	CFG_BLK_ADR_BYTES_SHIFT		= 8,
+	CFG_COL_ADR_BYTES_SHIFT		= 12,
+	CFG_FUL_ADR_BYTES_SHIFT		= 16,
+	CFG_BUS_WIDTH_SHIFT		= 23,
+	CFG_BUS_WIDTH			= BIT(CFG_BUS_WIDTH_SHIFT),
+	CFG_DEVICE_SIZE_SHIFT		= 24,
+
+	/* Only for pre-v7.1 (with no CFG_EXT register) */
+	CFG_PAGE_SIZE_SHIFT		= 20,
+	CFG_BLK_SIZE_SHIFT		= 28,
+
+	/* Only for v7.1+ (with CFG_EXT register) */
+	CFG_EXT_PAGE_SIZE_SHIFT		= 0,
+	CFG_EXT_BLK_SIZE_SHIFT		= 4,
+};
+
+/* BRCMNAND_INTFC_STATUS */
+enum {
+	INTFC_FLASH_STATUS		= GENMASK(7, 0),
+
+	INTFC_ERASED			= BIT(27),
+	INTFC_OOB_VALID			= BIT(28),
+	INTFC_CACHE_VALID		= BIT(29),
+	INTFC_FLASH_READY		= BIT(30),
+	INTFC_CTLR_READY		= BIT(31),
+};
+
+static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs)
+{
+	return brcmnand_readl(ctrl->nand_base + offs);
+}
+
+static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs,
+				 u32 val)
+{
+	brcmnand_writel(val, ctrl->nand_base + offs);
+}
+
+static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
+{
+	static const unsigned int block_sizes_v6[] = { 8, 16, 128, 256, 512, 1024, 2048, 0 };
+	static const unsigned int block_sizes_v4[] = { 16, 128, 8, 512, 256, 1024, 2048, 0 };
+	static const unsigned int page_sizes[] = { 512, 2048, 4096, 8192, 0 };
+
+	ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff;
+
+	/* Only support v4.0+? */
+	if (ctrl->nand_version < 0x0400) {
+		dev_err(ctrl->dev, "version %#x not supported\n",
+			ctrl->nand_version);
+		return -ENODEV;
+	}
+
+	/* Register offsets */
+	if (ctrl->nand_version >= 0x0702)
+		ctrl->reg_offsets = brcmnand_regs_v72;
+	else if (ctrl->nand_version >= 0x0701)
+		ctrl->reg_offsets = brcmnand_regs_v71;
+	else if (ctrl->nand_version >= 0x0600)
+		ctrl->reg_offsets = brcmnand_regs_v60;
+	else if (ctrl->nand_version >= 0x0500)
+		ctrl->reg_offsets = brcmnand_regs_v50;
+	else if (ctrl->nand_version >= 0x0400)
+		ctrl->reg_offsets = brcmnand_regs_v40;
+
+	/* Chip-select stride */
+	if (ctrl->nand_version >= 0x0701)
+		ctrl->reg_spacing = 0x14;
+	else
+		ctrl->reg_spacing = 0x10;
+
+	/* Per chip-select registers */
+	if (ctrl->nand_version >= 0x0701) {
+		ctrl->cs_offsets = brcmnand_cs_offsets_v71;
+	} else {
+		ctrl->cs_offsets = brcmnand_cs_offsets;
+
+		/* v5.0 and earlier have a different CS0 offset layout */
+		if (ctrl->nand_version <= 0x0500)
+			ctrl->cs0_offsets = brcmnand_cs_offsets_cs0;
+	}
+
+	/* Page / block sizes */
+	if (ctrl->nand_version >= 0x0701) {
+		/* >= v7.1 use nice power-of-2 values! */
+		ctrl->max_page_size = 16 * 1024;
+		ctrl->max_block_size = 2 * 1024 * 1024;
+	} else {
+		ctrl->page_sizes = page_sizes;
+		if (ctrl->nand_version >= 0x0600)
+			ctrl->block_sizes = block_sizes_v6;
+		else
+			ctrl->block_sizes = block_sizes_v4;
+
+		if (ctrl->nand_version < 0x0400) {
+			ctrl->max_page_size = 4096;
+			ctrl->max_block_size = 512 * 1024;
+		}
+	}
+
+	/* Maximum spare area sector size (per 512B) */
+	if (ctrl->nand_version >= 0x0702)
+		ctrl->max_oob = 128;
+	else if (ctrl->nand_version >= 0x0600)
+		ctrl->max_oob = 64;
+	else if (ctrl->nand_version >= 0x0500)
+		ctrl->max_oob = 32;
+	else
+		ctrl->max_oob = 16;
+
+	/* v6.0 and newer (except v6.1) have prefetch support */
+	if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601)
+		ctrl->features |= BRCMNAND_HAS_PREFETCH;
+
+	/*
+	 * v6.x has cache mode, but it's implemented differently. Ignore it for
+	 * now.
+	 */
+	if (ctrl->nand_version >= 0x0700)
+		ctrl->features |= BRCMNAND_HAS_CACHE_MODE;
+
+	if (ctrl->nand_version >= 0x0500)
+		ctrl->features |= BRCMNAND_HAS_1K_SECTORS;
+
+	if (ctrl->nand_version >= 0x0700)
+		ctrl->features |= BRCMNAND_HAS_WP;
+	else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp"))
+		ctrl->features |= BRCMNAND_HAS_WP;
+
+	return 0;
+}
+
+static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl,
+		enum brcmnand_reg reg)
+{
+	u16 offs = ctrl->reg_offsets[reg];
+
+	if (offs)
+		return nand_readreg(ctrl, offs);
+	else
+		return 0;
+}
+
+static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl,
+				      enum brcmnand_reg reg, u32 val)
+{
+	u16 offs = ctrl->reg_offsets[reg];
+
+	if (offs)
+		nand_writereg(ctrl, offs, val);
+}
+
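+/*
+ * Read-modify-write helper: clear @mask in @reg, then OR in @val shifted
+ * left by @shift.
+ */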
+static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl,
+				    enum brcmnand_reg reg, u32 mask, unsigned
+				    int shift, u32 val)
+{
+	u32 tmp = brcmnand_read_reg(ctrl, reg);
+
+	tmp &= ~mask;
+	tmp |= val << shift;
+	brcmnand_write_reg(ctrl, reg, tmp);
+}
+
+static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word)
+{
+	return __raw_readl(ctrl->nand_fc + word * 4);
+}
+
+static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl,
+				     int word, u32 val)
+{
+	__raw_writel(val, ctrl->nand_fc + word * 4);
+}
+
+static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs,
+				     enum brcmnand_cs_reg reg)
+{
+	u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE];
+	u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE];
+	u8 cs_offs;
+
+	if (cs == 0 && ctrl->cs0_offsets)
+		cs_offs = ctrl->cs0_offsets[reg];
+	else
+		cs_offs = ctrl->cs_offsets[reg];
+
+	if (cs && offs_cs1)
+		return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs;
+
+	return offs_cs0 + cs * ctrl->reg_spacing + cs_offs;
+}
+
+static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl)
+{
+	if (ctrl->nand_version < 0x0600)
+		return 1;
+	return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT);
+}
+
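+/*
+ * Program the correctable-error reporting threshold. On v6.0+ each chip-select
+ * owns a small bitfield in CORR_THRESHOLD, with higher chip-selects spilling
+ * over into CORR_THRESHOLD_EXT; older controllers use a single shared field.
+ */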
+static void brcmnand_wr_corr_thresh(struct brcmnand_host *host, u8 val)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	unsigned int shift = 0, bits;
+	enum brcmnand_reg reg = BRCMNAND_CORR_THRESHOLD;
+	int cs = host->cs;
+
+	if (ctrl->nand_version >= 0x0702)
+		bits = 7;
+	else if (ctrl->nand_version >= 0x0600)
+		bits = 6;
+	else if (ctrl->nand_version >= 0x0500)
+		bits = 5;
+	else
+		bits = 4;
+
+	if (ctrl->nand_version >= 0x0702) {
+		if (cs >= 4)
+			reg = BRCMNAND_CORR_THRESHOLD_EXT;
+		shift = (cs % 4) * bits;
+	} else if (ctrl->nand_version >= 0x0600) {
+		if (cs >= 5)
+			reg = BRCMNAND_CORR_THRESHOLD_EXT;
+		shift = (cs % 5) * bits;
+	}
+	brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val);
+}
+
+static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl)
+{
+	if (ctrl->nand_version < 0x0602)
+		return 24;
+	return 0;
+}
+
+/***********************************************************************
+ * NAND ACC CONTROL bitfield
+ *
+ * Some bits have remained constant across hardware revisions, while
+ * others have shifted around.
+ ***********************************************************************/
+
+/* Constant for all versions (where supported) */
+enum {
+	/* See BRCMNAND_HAS_CACHE_MODE */
+	ACC_CONTROL_CACHE_MODE				= BIT(22),
+
+	/* See BRCMNAND_HAS_PREFETCH */
+	ACC_CONTROL_PREFETCH				= BIT(23),
+
+	ACC_CONTROL_PAGE_HIT				= BIT(24),
+	ACC_CONTROL_WR_PREEMPT				= BIT(25),
+	ACC_CONTROL_PARTIAL_PAGE			= BIT(26),
+	ACC_CONTROL_RD_ERASED				= BIT(27),
+	ACC_CONTROL_FAST_PGM_RDIN			= BIT(28),
+	ACC_CONTROL_WR_ECC				= BIT(30),
+	ACC_CONTROL_RD_ECC				= BIT(31),
+};
+
+static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl)
+{
+	if (ctrl->nand_version >= 0x0702)
+		return GENMASK(7, 0);
+	else if (ctrl->nand_version >= 0x0600)
+		return GENMASK(6, 0);
+	else
+		return GENMASK(5, 0);
+}
+
+#define NAND_ACC_CONTROL_ECC_SHIFT	16
+#define NAND_ACC_CONTROL_ECC_EXT_SHIFT	13
+
+static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl)
+{
+	u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f;
+
+	mask <<= NAND_ACC_CONTROL_ECC_SHIFT;
+
+	/* v7.2 includes additional ECC levels */
+	if (ctrl->nand_version >= 0x0702)
+		mask |= 0x7 << NAND_ACC_CONTROL_ECC_EXT_SHIFT;
+
+	return mask;
+}
+
+static void brcmnand_set_ecc_enabled(struct brcmnand_host *host, int en)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+	u32 acc_control = nand_readreg(ctrl, offs);
+	u32 ecc_flags = ACC_CONTROL_WR_ECC | ACC_CONTROL_RD_ECC;
+
+	if (en) {
+		acc_control |= ecc_flags; /* enable RD/WR ECC */
+		acc_control |= host->hwcfg.ecc_level
+			       << NAND_ACC_CONTROL_ECC_SHIFT;
+	} else {
+		acc_control &= ~ecc_flags; /* disable RD/WR ECC */
+		acc_control &= ~brcmnand_ecc_level_mask(ctrl);
+	}
+
+	nand_writereg(ctrl, offs, acc_control);
+}
+
+static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl)
+{
+	if (ctrl->nand_version >= 0x0702)
+		return 9;
+	else if (ctrl->nand_version >= 0x0600)
+		return 7;
+	else if (ctrl->nand_version >= 0x0500)
+		return 6;
+	else
+		return -1;
+}
+
+static int brcmnand_get_sector_size_1k(struct brcmnand_host *host)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	int shift = brcmnand_sector_1k_shift(ctrl);
+	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+						  BRCMNAND_CS_ACC_CONTROL);
+
+	if (shift < 0)
+		return 0;
+
+	return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1;
+}
+
+static void brcmnand_set_sector_size_1k(struct brcmnand_host *host, int val)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	int shift = brcmnand_sector_1k_shift(ctrl);
+	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+						  BRCMNAND_CS_ACC_CONTROL);
+	u32 tmp;
+
+	if (shift < 0)
+		return;
+
+	tmp = nand_readreg(ctrl, acc_control_offs);
+	tmp &= ~(1 << shift);
+	tmp |= (!!val) << shift;
+	nand_writereg(ctrl, acc_control_offs, tmp);
+}
+
+/***********************************************************************
+ * CS_NAND_SELECT
+ ***********************************************************************/
+
+enum {
+	CS_SELECT_NAND_WP			= BIT(29),
+	CS_SELECT_AUTO_DEVICE_ID_CFG		= BIT(30),
+};
+
+static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
+{
+	u32 val = en ? CS_SELECT_NAND_WP : 0;
+
+	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val);
+}
+
+/***********************************************************************
+ * Flash DMA
+ ***********************************************************************/
+
+enum flash_dma_reg {
+	FLASH_DMA_REVISION		= 0x00,
+	FLASH_DMA_FIRST_DESC		= 0x04,
+	FLASH_DMA_FIRST_DESC_EXT	= 0x08,
+	FLASH_DMA_CTRL			= 0x0c,
+	FLASH_DMA_MODE			= 0x10,
+	FLASH_DMA_STATUS		= 0x14,
+	FLASH_DMA_INTERRUPT_DESC	= 0x18,
+	FLASH_DMA_INTERRUPT_DESC_EXT	= 0x1c,
+	FLASH_DMA_ERROR_STATUS		= 0x20,
+	FLASH_DMA_CURRENT_DESC		= 0x24,
+	FLASH_DMA_CURRENT_DESC_EXT	= 0x28,
+};
+
+static inline bool has_flash_dma(struct brcmnand_controller *ctrl)
+{
+	return ctrl->flash_dma_base;
+}
+
+static inline bool flash_dma_buf_ok(const void *buf)
+{
+	return buf && !is_vmalloc_addr(buf) &&
+		likely(IS_ALIGNED((uintptr_t)buf, 4));
+}
+
+static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs,
+				    u32 val)
+{
+	brcmnand_writel(val, ctrl->flash_dma_base + offs);
+}
+
+static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs)
+{
+	return brcmnand_readl(ctrl->flash_dma_base + offs);
+}
+
+/* Low-level operation types: command, address, write, or read */
+enum brcmnand_llop_type {
+	LL_OP_CMD,
+	LL_OP_ADDR,
+	LL_OP_WR,
+	LL_OP_RD,
+};
+
+/***********************************************************************
+ * Internal support functions
+ ***********************************************************************/
+
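+/*
+ * In this driver, Hamming ECC is identified purely by the configuration:
+ * 512B sectors with a 16B spare area at ECC level 15 (v7.2+ also treats a
+ * 28B spare area at level 16 as Hamming).
+ */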
+static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl,
+				  struct brcmnand_cfg *cfg)
+{
+	if (ctrl->nand_version <= 0x0701)
+		return cfg->sector_size_1k == 0 && cfg->spare_area_size == 16 &&
+			cfg->ecc_level == 15;
+	else
+		return cfg->sector_size_1k == 0 && ((cfg->spare_area_size == 16 &&
+			cfg->ecc_level == 15) ||
+			(cfg->spare_area_size == 28 && cfg->ecc_level == 16));
+}
+
+/*
+ * Set mtd->ooblayout to the appropriate mtd_ooblayout_ops given
+ * the layout/configuration.
+ * Returns -ERRCODE on failure.
+ */
+static int brcmnand_hamming_ooblayout_ecc(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	oobregion->offset = (section * sas) + 6;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static int brcmnand_hamming_ooblayout_free(struct mtd_info *mtd, int section,
+					   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors * 2)
+		return -ERANGE;
+
+	oobregion->offset = (section / 2) * sas;
+
+	if (section & 1) {
+		oobregion->offset += 9;
+		oobregion->length = 7;
+	} else {
+		oobregion->length = 6;
+
+		/* First sector of each page may have BBI */
+		if (!section) {
+			/*
+			 * Small-page NAND uses byte 6 for BBI while large-page
+			 * NAND uses byte 0.
+			 */
+			if (cfg->page_size > 512)
+				oobregion->offset++;
+			oobregion->length--;
+		}
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_hamming_ooblayout_ops = {
+	.ecc = brcmnand_hamming_ooblayout_ecc,
+	.free = brcmnand_hamming_ooblayout_free,
+};
+
+static int brcmnand_bch_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	oobregion->offset = (section * (sas + 1)) - chip->ecc.bytes;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_lp(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+	int sectors = cfg->page_size / (512 << cfg->sector_size_1k);
+
+	if (section >= sectors)
+		return -ERANGE;
+
+	if (sas <= chip->ecc.bytes)
+		return 0;
+
+	oobregion->offset = section * sas;
+	oobregion->length = sas - chip->ecc.bytes;
+
+	if (!section) {
+		oobregion->offset++;
+		oobregion->length--;
+	}
+
+	return 0;
+}
+
+static int brcmnand_bch_ooblayout_free_sp(struct mtd_info *mtd, int section,
+					  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	int sas = cfg->spare_area_size << cfg->sector_size_1k;
+
+	if (section > 1 || sas - chip->ecc.bytes < 6 ||
+	    (section && sas - chip->ecc.bytes == 6))
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 5;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = sas - chip->ecc.bytes - 6;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops brcmnand_bch_lp_ooblayout_ops = {
+	.ecc = brcmnand_bch_ooblayout_ecc,
+	.free = brcmnand_bch_ooblayout_free_lp,
+};
+
+static const struct mtd_ooblayout_ops brcmnand_bch_sp_ooblayout_ops = {
+	.ecc = brcmnand_bch_ooblayout_ecc,
+	.free = brcmnand_bch_ooblayout_free_sp,
+};
+
+static int brcmstb_choose_ecc_layout(struct brcmnand_host *host)
+{
+	struct brcmnand_cfg *p = &host->hwcfg;
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+	struct nand_ecc_ctrl *ecc = &host->chip.ecc;
+	unsigned int ecc_level = p->ecc_level;
+	int sas = p->spare_area_size << p->sector_size_1k;
+	int sectors = p->page_size / (512 << p->sector_size_1k);
+
+	if (p->sector_size_1k)
+		ecc_level <<= 1;
+
+	if (is_hamming_ecc(host->ctrl, p)) {
+		ecc->bytes = 3 * sectors;
+		mtd_set_ooblayout(mtd, &brcmnand_hamming_ooblayout_ops);
+		return 0;
+	}
+
+	/*
+	 * CONTROLLER_VERSION:
+	 *   < v5.0: ECC_REQ = ceil(BCH_T * 13/8)
+	 *  >= v5.0: ECC_REQ = ceil(BCH_T * 14/8)
+	 * But we will just be conservative.
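+	 * e.g. BCH-8 over a 512B sector needs ceil(8 * 14 / 8) = 14 ECC bytes.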
+	 */
+	ecc->bytes = DIV_ROUND_UP(ecc_level * 14, 8);
+	if (p->page_size == 512)
+		mtd_set_ooblayout(mtd, &brcmnand_bch_sp_ooblayout_ops);
+	else
+		mtd_set_ooblayout(mtd, &brcmnand_bch_lp_ooblayout_ops);
+
+	if (ecc->bytes >= sas) {
+		dev_err(&host->pdev->dev,
+			"error: ECC too large for OOB (ECC bytes %d, spare sector %d)\n",
+			ecc->bytes, sas);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void brcmnand_wp(struct mtd_info *mtd, int wp)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+
+	if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
+		static int old_wp = -1;
+
+		if (old_wp != wp) {
+			dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
+			old_wp = wp;
+		}
+		brcmnand_set_wp(ctrl, wp);
+	}
+}
+
+/* Helper functions for reading and writing OOB registers */
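+/*
+ * Each 32-bit OOB register packs four spare-area bytes, most significant
+ * byte first.
+ */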
+static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs)
+{
+	u16 offset0, offset10, reg_offs;
+
+	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE];
+	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE];
+
+	if (offs >= ctrl->max_oob)
+		return 0x77;
+
+	if (offs >= 16 && offset10)
+		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+	else
+		reg_offs = offset0 + (offs & ~0x03);
+
+	return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3));
+}
+
+static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs,
+				 u32 data)
+{
+	u16 offset0, offset10, reg_offs;
+
+	offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE];
+	offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE];
+
+	if (offs >= ctrl->max_oob)
+		return;
+
+	if (offs >= 16 && offset10)
+		reg_offs = offset10 + ((offs - 0x10) & ~0x03);
+	else
+		reg_offs = offset0 + (offs & ~0x03);
+
+	nand_writereg(ctrl, reg_offs, data);
+}
+
+/*
+ * read_oob_from_regs - read data from OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to read to
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob,
+			      int sas, int sector_1k)
+{
+	int tbytes = sas << sector_1k;
+	int j;
+
+	/* Adjust OOB values for 1K sector size */
+	if (sector_1k && (i & 0x01))
+		tbytes = max(0, tbytes - (int)ctrl->max_oob);
+	tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+	for (j = 0; j < tbytes; j++)
+		oob[j] = oob_reg_read(ctrl, j);
+	return tbytes;
+}
+
+/*
+ * write_oob_to_regs - write data to OOB registers
+ * @ctrl: NAND controller
+ * @i: sub-page sector index
+ * @oob: buffer to write from
+ * @sas: spare area sector size (i.e., OOB size per FLASH_CACHE)
+ * @sector_1k: 1 for 1KiB sectors, 0 for 512B, other values are illegal
+ */
+static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i,
+			     const u8 *oob, int sas, int sector_1k)
+{
+	int tbytes = sas << sector_1k;
+	int j;
+
+	/* Adjust OOB values for 1K sector size */
+	if (sector_1k && (i & 0x01))
+		tbytes = max(0, tbytes - (int)ctrl->max_oob);
+	tbytes = min_t(int, tbytes, ctrl->max_oob);
+
+	for (j = 0; j < tbytes; j += 4)
+		oob_reg_write(ctrl, j,
+				(oob[j + 0] << 24) |
+				(oob[j + 1] << 16) |
+				(oob[j + 2] <<  8) |
+				(oob[j + 3] <<  0));
+	return tbytes;
+}
+
+static irqreturn_t brcmnand_ctlrdy_irq(int irq, void *data)
+{
+	struct brcmnand_controller *ctrl = data;
+
+	/* Discard all NAND_CTLRDY interrupts during DMA */
+	if (ctrl->dma_pending)
+		return IRQ_HANDLED;
+
+	complete(&ctrl->done);
+	return IRQ_HANDLED;
+}
+
+/* Handle SoC-specific interrupt hardware */
+static irqreturn_t brcmnand_irq(int irq, void *data)
+{
+	struct brcmnand_controller *ctrl = data;
+
+	if (ctrl->soc->ctlrdy_ack(ctrl->soc))
+		return brcmnand_ctlrdy_irq(irq, data);
+
+	return IRQ_NONE;
+}
+
+static irqreturn_t brcmnand_dma_irq(int irq, void *data)
+{
+	struct brcmnand_controller *ctrl = data;
+
+	complete(&ctrl->dma_done);
+
+	return IRQ_HANDLED;
+}
+
+static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	u32 intfc;
+
+	dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
+		brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
+	BUG_ON(ctrl->cmd_pending != 0);
+	ctrl->cmd_pending = cmd;
+
+	intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
+	WARN_ON(!(intfc & INTFC_CTLR_READY));
+
+	mb(); /* flush previous writes */
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
+			   cmd << brcmnand_cmd_shift(ctrl));
+}
+
+/***********************************************************************
+ * NAND MTD API: read/program/erase
+ ***********************************************************************/
+
+static void brcmnand_cmd_ctrl(struct mtd_info *mtd, int dat,
+	unsigned int ctrl)
+{
+	/* intentionally left blank */
+}
+
+static int brcmnand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	unsigned long timeo = msecs_to_jiffies(100);
+
+	dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending);
+	if (ctrl->cmd_pending &&
+			wait_for_completion_timeout(&ctrl->done, timeo) <= 0) {
+		u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START)
+					>> brcmnand_cmd_shift(ctrl);
+
+		dev_err_ratelimited(ctrl->dev,
+			"timeout waiting for command %#02x\n", cmd);
+		dev_err_ratelimited(ctrl->dev, "intfc status %08x\n",
+			brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS));
+	}
+	ctrl->cmd_pending = 0;
+	return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+				 INTFC_FLASH_STATUS;
+}
+
+enum {
+	LLOP_RE				= BIT(16),
+	LLOP_WE				= BIT(17),
+	LLOP_ALE			= BIT(18),
+	LLOP_CLE			= BIT(19),
+	LLOP_RETURN_IDLE		= BIT(31),
+
+	LLOP_DATA_MASK			= GENMASK(15, 0),
+};
+
+static int brcmnand_low_level_op(struct brcmnand_host *host,
+				 enum brcmnand_llop_type type, u32 data,
+				 bool last_op)
+{
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+	struct nand_chip *chip = &host->chip;
+	struct brcmnand_controller *ctrl = host->ctrl;
+	u32 tmp;
+
+	tmp = data & LLOP_DATA_MASK;
+	switch (type) {
+	case LL_OP_CMD:
+		tmp |= LLOP_WE | LLOP_CLE;
+		break;
+	case LL_OP_ADDR:
+		/* WE | ALE */
+		tmp |= LLOP_WE | LLOP_ALE;
+		break;
+	case LL_OP_WR:
+		/* WE */
+		tmp |= LLOP_WE;
+		break;
+	case LL_OP_RD:
+		/* RE */
+		tmp |= LLOP_RE;
+		break;
+	}
+	if (last_op)
+		/* RETURN_IDLE */
+		tmp |= LLOP_RETURN_IDLE;
+
+	dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp);
+
+	brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp);
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP);
+
+	brcmnand_send_cmd(host, CMD_LOW_LEVEL_OP);
+	return brcmnand_waitfunc(mtd, chip);
+}
+
+static void brcmnand_cmdfunc(struct mtd_info *mtd, unsigned command,
+			     int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	u64 addr = (u64)page_addr << chip->page_shift;
+	int native_cmd = 0;
+
+	if (command == NAND_CMD_READID || command == NAND_CMD_PARAM ||
+			command == NAND_CMD_RNDOUT)
+		addr = (u64)column;
+	/* Avoid propagating a negative, don't-care address */
+	else if (page_addr < 0)
+		addr = 0;
+
+	dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command,
+		(unsigned long long)addr);
+
+	host->last_cmd = command;
+	host->last_byte = 0;
+	host->last_addr = addr;
+
+	switch (command) {
+	case NAND_CMD_RESET:
+		native_cmd = CMD_FLASH_RESET;
+		break;
+	case NAND_CMD_STATUS:
+		native_cmd = CMD_STATUS_READ;
+		break;
+	case NAND_CMD_READID:
+		native_cmd = CMD_DEVICE_ID_READ;
+		break;
+	case NAND_CMD_READOOB:
+		native_cmd = CMD_SPARE_AREA_READ;
+		break;
+	case NAND_CMD_ERASE1:
+		native_cmd = CMD_BLOCK_ERASE;
+		brcmnand_wp(mtd, 0);
+		break;
+	case NAND_CMD_PARAM:
+		native_cmd = CMD_PARAMETER_READ;
+		break;
+	case NAND_CMD_SET_FEATURES:
+	case NAND_CMD_GET_FEATURES:
+		brcmnand_low_level_op(host, LL_OP_CMD, command, false);
+		brcmnand_low_level_op(host, LL_OP_ADDR, column, false);
+		break;
+	case NAND_CMD_RNDOUT:
+		native_cmd = CMD_PARAMETER_CHANGE_COL;
+		addr &= ~((u64)(FC_BYTES - 1));
+		/*
+		 * HW quirk: PARAMETER_CHANGE_COL requires SECTOR_SIZE_1K=0
+		 * NB: hwcfg.sector_size_1k may not be initialized yet
+		 */
+		if (brcmnand_get_sector_size_1k(host)) {
+			host->hwcfg.sector_size_1k =
+				brcmnand_get_sector_size_1k(host);
+			brcmnand_set_sector_size_1k(host, 0);
+		}
+		break;
+	}
+
+	if (!native_cmd)
+		return;
+
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+		(host->cs << 16) | ((addr >> 32) & 0xffff));
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr));
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+	brcmnand_send_cmd(host, native_cmd);
+	brcmnand_waitfunc(mtd, chip);
+
+	if (native_cmd == CMD_PARAMETER_READ ||
+			native_cmd == CMD_PARAMETER_CHANGE_COL) {
+		/* Copy flash cache word-wise */
+		u32 *flash_cache = (u32 *)ctrl->flash_cache;
+		int i;
+
+		brcmnand_soc_data_bus_prepare(ctrl->soc, true);
+
+		/*
+		 * Must cache the FLASH_CACHE now, since changes in
+		 * SECTOR_SIZE_1K may invalidate it
+		 */
+		for (i = 0; i < FC_WORDS; i++)
+			/*
+			 * Flash cache is big endian for parameter pages, at
+			 * least on STB SoCs
+			 */
+			flash_cache[i] = be32_to_cpu(brcmnand_read_fc(ctrl, i));
+
+		brcmnand_soc_data_bus_unprepare(ctrl->soc, true);
+
+		/* Cleanup from HW quirk: restore SECTOR_SIZE_1K */
+		if (host->hwcfg.sector_size_1k)
+			brcmnand_set_sector_size_1k(host,
+						    host->hwcfg.sector_size_1k);
+	}
+
+	/* Re-enabling protection is necessary only after erase */
+	if (command == NAND_CMD_ERASE1)
+		brcmnand_wp(mtd, 1);
+}
+
+static uint8_t brcmnand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	uint8_t ret = 0;
+	int addr, offs;
+
+	switch (host->last_cmd) {
+	case NAND_CMD_READID:
+		if (host->last_byte < 4)
+			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >>
+				(24 - (host->last_byte << 3));
+		else if (host->last_byte < 8)
+			ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >>
+				(56 - (host->last_byte << 3));
+		break;
+
+	case NAND_CMD_READOOB:
+		ret = oob_reg_read(ctrl, host->last_byte);
+		break;
+
+	case NAND_CMD_STATUS:
+		ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) &
+					INTFC_FLASH_STATUS;
+		if (wp_on) /* hide WP status */
+			ret |= NAND_STATUS_WP;
+		break;
+
+	case NAND_CMD_PARAM:
+	case NAND_CMD_RNDOUT:
+		addr = host->last_addr + host->last_byte;
+		offs = addr & (FC_BYTES - 1);
+
+		/* At FC_BYTES boundary, switch to next column */
+		if (host->last_byte > 0 && offs == 0)
+			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1);
+
+		ret = ctrl->flash_cache[offs];
+		break;
+	case NAND_CMD_GET_FEATURES:
+		if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) {
+			ret = 0;
+		} else {
+			bool last = host->last_byte ==
+				ONFI_SUBFEATURE_PARAM_LEN - 1;
+			brcmnand_low_level_op(host, LL_OP_RD, 0, last);
+			ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff;
+		}
+	}
+
+	dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret);
+	host->last_byte++;
+
+	return ret;
+}
+
+static void brcmnand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++, buf++)
+		*buf = brcmnand_read_byte(mtd);
+}
+
+static void brcmnand_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+				   int len)
+{
+	int i;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+
+	switch (host->last_cmd) {
+	case NAND_CMD_SET_FEATURES:
+		for (i = 0; i < len; i++)
+			brcmnand_low_level_op(host, LL_OP_WR, buf[i],
+						  (i + 1) == len);
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+/**
+ * Construct a FLASH_DMA descriptor as part of a linked list. You must know the
+ * following ahead of time:
+ *  - Is this descriptor the beginning or end of a linked list?
+ *  - What is the (DMA) address of the next descriptor in the linked list?
+ */
+static int brcmnand_fill_dma_desc(struct brcmnand_host *host,
+				  struct brcm_nand_dma_desc *desc, u64 addr,
+				  dma_addr_t buf, u32 len, u8 dma_cmd,
+				  bool begin, bool end,
+				  dma_addr_t next_desc)
+{
+	memset(desc, 0, sizeof(*desc));
+	/* Descriptors are written in native byte order (wordwise) */
+	desc->next_desc = lower_32_bits(next_desc);
+	desc->next_desc_ext = upper_32_bits(next_desc);
+	desc->cmd_irq = (dma_cmd << 24) |
+		(end ? (0x03 << 8) : 0) | /* IRQ | STOP */
+		(!!begin) | ((!!end) << 1); /* head, tail */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	desc->cmd_irq |= 0x01 << 12;
+#endif
+	desc->dram_addr = lower_32_bits(buf);
+	desc->dram_addr_ext = upper_32_bits(buf);
+	desc->tfr_len = len;
+	desc->total_len = len;
+	desc->flash_addr = lower_32_bits(addr);
+	desc->flash_addr_ext = upper_32_bits(addr);
+	desc->cs = host->cs;
+	desc->status_valid = 0x01;
+	return 0;
+}
+
+/**
+ * Kick the FLASH_DMA engine with a given DMA descriptor.
+ */
+static void brcmnand_dma_run(struct brcmnand_host *host, dma_addr_t desc)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	unsigned long timeo = msecs_to_jiffies(100);
+
+	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc));
+	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC);
+	flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc));
+	(void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT);
+
+	/* Start FLASH_DMA engine */
+	ctrl->dma_pending = true;
+	mb(); /* flush previous writes */
+	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */
+
+	if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) {
+		dev_err(ctrl->dev,
+				"timeout waiting for DMA; status %#x, error status %#x\n",
+				flash_dma_readl(ctrl, FLASH_DMA_STATUS),
+				flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS));
+	}
+	ctrl->dma_pending = false;
+	flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */
+}
+
+static int brcmnand_dma_trans(struct brcmnand_host *host, u64 addr, u32 *buf,
+			      u32 len, u8 dma_cmd)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	dma_addr_t buf_pa;
+	int dir = dma_cmd == CMD_PAGE_READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	buf_pa = dma_map_single(ctrl->dev, buf, len, dir);
+	if (dma_mapping_error(ctrl->dev, buf_pa)) {
+		dev_err(ctrl->dev, "unable to map buffer for DMA\n");
+		return -ENOMEM;
+	}
+
+	brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len,
+				   dma_cmd, true, true, 0);
+
+	brcmnand_dma_run(host, ctrl->dma_pa);
+
+	dma_unmap_single(ctrl->dev, buf_pa, len, dir);
+
+	if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR)
+		return -EBADMSG;
+	else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR)
+		return -EUCLEAN;
+
+	return 0;
+}
+
+/*
+ * Assumes proper CS is already set
+ */
+static int brcmnand_read_by_pio(struct mtd_info *mtd, struct nand_chip *chip,
+				u64 addr, unsigned int trans, u32 *buf,
+				u8 *oob, u64 *err_addr)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	int i, j, ret = 0;
+
+	/* Clear error addresses */
+	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0);
+	brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0);
+	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0);
+	brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0);
+
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+			(host->cs << 16) | ((addr >> 32) & 0xffff));
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+
+	for (i = 0; i < trans; i++, addr += FC_BYTES) {
+		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+				   lower_32_bits(addr));
+		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+		/* SPARE_AREA_READ does not use ECC, so just use PAGE_READ */
+		brcmnand_send_cmd(host, CMD_PAGE_READ);
+		brcmnand_waitfunc(mtd, chip);
+
+		if (likely(buf)) {
+			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+			for (j = 0; j < FC_WORDS; j++, buf++)
+				*buf = brcmnand_read_fc(ctrl, j);
+
+			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+		}
+
+		if (oob)
+			oob += read_oob_from_regs(ctrl, i, oob,
+					mtd->oobsize / trans,
+					host->hwcfg.sector_size_1k);
+
+		if (!ret) {
+			*err_addr = brcmnand_read_reg(ctrl,
+					BRCMNAND_UNCORR_ADDR) |
+				((u64)(brcmnand_read_reg(ctrl,
+						BRCMNAND_UNCORR_EXT_ADDR)
+					& 0xffff) << 32);
+			if (*err_addr)
+				ret = -EBADMSG;
+		}
+
+		if (!ret) {
+			*err_addr = brcmnand_read_reg(ctrl,
+					BRCMNAND_CORR_ADDR) |
+				((u64)(brcmnand_read_reg(ctrl,
+						BRCMNAND_CORR_EXT_ADDR)
+					& 0xffff) << 32);
+			if (*err_addr)
+				ret = -EUCLEAN;
+		}
+	}
+
+	return ret;
+}
+
+/*
+ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC
+ * error
+ *
+ * Because the HW ECC signals an ECC error if an erased page has even a single
+ * bitflip, we must check each ECC error to see if it is actually an erased
+ * page with bitflips, not a truly corrupted page.
+ *
+ * On a real error, return a negative error code (-EBADMSG for ECC error), and
+ * buf will contain raw data.
+ * Otherwise, buf gets filled with 0xffs and the maximum number of
+ * bitflips-per-ECC-sector is returned to the caller.
+ */
+static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd,
+		  struct nand_chip *chip, void *buf, u64 addr)
+{
+	int i, sas;
+	void *oob = chip->oob_poi;
+	int bitflips = 0;
+	int page = addr >> chip->page_shift;
+	int ret;
+
+	if (!buf) {
+		buf = chip->buffers->databuf;
+		/* Invalidate page cache */
+		chip->pagebuf = -1;
+	}
+
+	sas = mtd->oobsize / chip->ecc.steps;
+
+	/* read without ecc for verification */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+	ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < chip->ecc.steps; i++, oob += sas) {
+		ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size,
+						  oob, sas, NULL, 0,
+						  chip->ecc.strength);
+		if (ret < 0)
+			return ret;
+
+		bitflips = max(bitflips, ret);
+	}
+
+	return bitflips;
+}
+
+static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip,
+			 u64 addr, unsigned int trans, u32 *buf, u8 *oob)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	u64 err_addr = 0;
+	int err;
+	bool retry = true;
+
+	dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf);
+
+try_dmaread:
+	brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0);
+
+	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+		err = brcmnand_dma_trans(host, addr, buf, trans * FC_BYTES,
+					     CMD_PAGE_READ);
+		if (err) {
+			if (mtd_is_bitflip_or_eccerr(err))
+				err_addr = addr;
+			else
+				return -EIO;
+		}
+	} else {
+		if (oob)
+			memset(oob, 0x99, mtd->oobsize);
+
+		err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf,
+					       oob, &err_addr);
+	}
+
+	if (mtd_is_eccerr(err)) {
+		/*
+		 * On controller versions 7.0 and 7.1, a DMA read issued after
+		 * a prior PIO read that reported an uncorrectable error makes
+		 * the DMA engine capture that error; it is cleared only on a
+		 * subsequent DMA read, so just retry once to clear a possible
+		 * false error reported for the current DMA read.
+		 */
+		if ((ctrl->nand_version == 0x0700) ||
+		    (ctrl->nand_version == 0x0701)) {
+			if (retry) {
+				retry = false;
+				goto try_dmaread;
+			}
+		}
+
+		/*
+		 * Controller version 7.2 has a HW encoder to detect erased-page
+		 * bitflips; apply SW verification for older controllers only.
+		 */
+		if (ctrl->nand_version < 0x0702) {
+			err = brcmstb_nand_verify_erased_page(mtd, chip, buf,
+							      addr);
+			/* erased page bitflips corrected */
+			if (err > 0)
+				return err;
+		}
+
+		dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n",
+			(unsigned long long)err_addr);
+		mtd->ecc_stats.failed++;
+		/* NAND layer expects zero on ECC errors */
+		return 0;
+	}
+
+	if (mtd_is_bitflip(err)) {
+		unsigned int corrected = brcmnand_count_corrected(ctrl);
+
+		dev_dbg(ctrl->dev, "corrected error at 0x%llx\n",
+			(unsigned long long)err_addr);
+		mtd->ecc_stats.corrected += corrected;
+		/* Always exceed the software-imposed threshold */
+		return max(mtd->bitflip_threshold, corrected);
+	}
+
+	return 0;
+}
+
+static int brcmnand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			      uint8_t *buf, int oob_required, int page)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+
+	return brcmnand_read(mtd, chip, host->last_addr,
+			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+}
+
+static int brcmnand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				  uint8_t *buf, int oob_required, int page)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	u8 *oob = oob_required ? (u8 *)chip->oob_poi : NULL;
+	int ret;
+
+	brcmnand_set_ecc_enabled(host, 0);
+	ret = brcmnand_read(mtd, chip, host->last_addr,
+			mtd->writesize >> FC_SHIFT, (u32 *)buf, oob);
+	brcmnand_set_ecc_enabled(host, 1);
+	return ret;
+}
+
+static int brcmnand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			     int page)
+{
+	return brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+			mtd->writesize >> FC_SHIFT,
+			NULL, (u8 *)chip->oob_poi);
+}
+
+static int brcmnand_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				 int page)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+
+	brcmnand_set_ecc_enabled(host, 0);
+	brcmnand_read(mtd, chip, (u64)page << chip->page_shift,
+		mtd->writesize >> FC_SHIFT,
+		NULL, (u8 *)chip->oob_poi);
+	brcmnand_set_ecc_enabled(host, 1);
+	return 0;
+}
+
+static int brcmnand_write(struct mtd_info *mtd, struct nand_chip *chip,
+			  u64 addr, const u32 *buf, u8 *oob)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	struct brcmnand_controller *ctrl = host->ctrl;
+	unsigned int i, j, trans = mtd->writesize >> FC_SHIFT;
+	int status, ret = 0;
+
+	dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf);
+
+	if (unlikely((unsigned long)buf & 0x03)) {
+		dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf);
+		buf = (u32 *)((unsigned long)buf & ~0x03);
+	}
+
+	brcmnand_wp(mtd, 0);
+
+	for (i = 0; i < ctrl->max_oob; i += 4)
+		oob_reg_write(ctrl, i, 0xffffffff);
+
+	if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) {
+		if (brcmnand_dma_trans(host, addr, (u32 *)buf,
+					mtd->writesize, CMD_PROGRAM_PAGE))
+			ret = -EIO;
+		goto out;
+	}
+
+	brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS,
+			(host->cs << 16) | ((addr >> 32) & 0xffff));
+	(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS);
+
+	for (i = 0; i < trans; i++, addr += FC_BYTES) {
+		/* full address MUST be set before populating FC */
+		brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS,
+				   lower_32_bits(addr));
+		(void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS);
+
+		if (buf) {
+			brcmnand_soc_data_bus_prepare(ctrl->soc, false);
+
+			for (j = 0; j < FC_WORDS; j++, buf++)
+				brcmnand_write_fc(ctrl, j, *buf);
+
+			brcmnand_soc_data_bus_unprepare(ctrl->soc, false);
+		} else if (oob) {
+			for (j = 0; j < FC_WORDS; j++)
+				brcmnand_write_fc(ctrl, j, 0xffffffff);
+		}
+
+		if (oob) {
+			oob += write_oob_to_regs(ctrl, i, oob,
+					mtd->oobsize / trans,
+					host->hwcfg.sector_size_1k);
+		}
+
+		/* we cannot use SPARE_AREA_PROGRAM when PARTIAL_PAGE_EN=0 */
+		brcmnand_send_cmd(host, CMD_PROGRAM_PAGE);
+		status = brcmnand_waitfunc(mtd, chip);
+
+		if (status & NAND_STATUS_FAIL) {
+			dev_info(ctrl->dev, "program failed at %llx\n",
+				(unsigned long long)addr);
+			ret = -EIO;
+			goto out;
+		}
+	}
+out:
+	brcmnand_wp(mtd, 1);
+	return ret;
+}
+
+static int brcmnand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			       const uint8_t *buf, int oob_required, int page)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	void *oob = oob_required ? chip->oob_poi : NULL;
+
+	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+	return 0;
+}
+
+static int brcmnand_write_page_raw(struct mtd_info *mtd,
+				   struct nand_chip *chip, const uint8_t *buf,
+				   int oob_required, int page)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	void *oob = oob_required ? chip->oob_poi : NULL;
+
+	brcmnand_set_ecc_enabled(host, 0);
+	brcmnand_write(mtd, chip, host->last_addr, (const u32 *)buf, oob);
+	brcmnand_set_ecc_enabled(host, 1);
+	return 0;
+}
+
+static int brcmnand_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+				  int page)
+{
+	return brcmnand_write(mtd, chip, (u64)page << chip->page_shift,
+				  NULL, chip->oob_poi);
+}
+
+static int brcmnand_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				  int page)
+{
+	struct brcmnand_host *host = nand_get_controller_data(chip);
+	int ret;
+
+	brcmnand_set_ecc_enabled(host, 0);
+	ret = brcmnand_write(mtd, chip, (u64)page << chip->page_shift, NULL,
+				 (u8 *)chip->oob_poi);
+	brcmnand_set_ecc_enabled(host, 1);
+
+	return ret;
+}
+
+/***********************************************************************
+ * Per-CS setup (1 NAND device)
+ ***********************************************************************/
+
+static int brcmnand_set_cfg(struct brcmnand_host *host,
+			    struct brcmnand_cfg *cfg)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	struct nand_chip *chip = &host->chip;
+	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+			BRCMNAND_CS_CFG_EXT);
+	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+			BRCMNAND_CS_ACC_CONTROL);
+	u8 block_size = 0, page_size = 0, device_size = 0;
+	u32 tmp;
+
+	if (ctrl->block_sizes) {
+		int i, found;
+
+		for (i = 0, found = 0; ctrl->block_sizes[i]; i++)
+			if (ctrl->block_sizes[i] * 1024 == cfg->block_size) {
+				block_size = i;
+				found = 1;
+			}
+		if (!found) {
+			dev_warn(ctrl->dev, "invalid block size %u\n",
+					cfg->block_size);
+			return -EINVAL;
+		}
+	} else {
+		block_size = ffs(cfg->block_size) - ffs(BRCMNAND_MIN_BLOCKSIZE);
+	}
+
+	if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size &&
+				cfg->block_size > ctrl->max_block_size)) {
+		dev_warn(ctrl->dev, "invalid block size %u\n",
+				cfg->block_size);
+		block_size = 0;
+	}
+
+	if (ctrl->page_sizes) {
+		int i, found;
+
+		for (i = 0, found = 0; ctrl->page_sizes[i]; i++)
+			if (ctrl->page_sizes[i] == cfg->page_size) {
+				page_size = i;
+				found = 1;
+			}
+		if (!found) {
+			dev_warn(ctrl->dev, "invalid page size %u\n",
+					cfg->page_size);
+			return -EINVAL;
+		}
+	} else {
+		page_size = ffs(cfg->page_size) - ffs(BRCMNAND_MIN_PAGESIZE);
+	}
+
+	if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size &&
+				cfg->page_size > ctrl->max_page_size)) {
+		dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size);
+		return -EINVAL;
+	}
+
+	if (fls64(cfg->device_size) < fls64(BRCMNAND_MIN_DEVSIZE)) {
+		dev_warn(ctrl->dev, "invalid device size 0x%llx\n",
+			(unsigned long long)cfg->device_size);
+		return -EINVAL;
+	}
+	device_size = fls64(cfg->device_size) - fls64(BRCMNAND_MIN_DEVSIZE);
+
+	tmp = (cfg->blk_adr_bytes << CFG_BLK_ADR_BYTES_SHIFT) |
+		(cfg->col_adr_bytes << CFG_COL_ADR_BYTES_SHIFT) |
+		(cfg->ful_adr_bytes << CFG_FUL_ADR_BYTES_SHIFT) |
+		(!!(cfg->device_width == 16) << CFG_BUS_WIDTH_SHIFT) |
+		(device_size << CFG_DEVICE_SIZE_SHIFT);
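+	/*
+	 * Controllers where CFG and CFG_EXT share the same offset pack the
+	 * page and block size into the single CONFIG register; otherwise the
+	 * sizes go into the separate CONFIG_EXT register below.
+	 */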
+	if (cfg_offs == cfg_ext_offs) {
+		tmp |= (page_size << CFG_PAGE_SIZE_SHIFT) |
+		       (block_size << CFG_BLK_SIZE_SHIFT);
+		nand_writereg(ctrl, cfg_offs, tmp);
+	} else {
+		nand_writereg(ctrl, cfg_offs, tmp);
+		tmp = (page_size << CFG_EXT_PAGE_SIZE_SHIFT) |
+		      (block_size << CFG_EXT_BLK_SIZE_SHIFT);
+		nand_writereg(ctrl, cfg_ext_offs, tmp);
+	}
+
+	tmp = nand_readreg(ctrl, acc_control_offs);
+	tmp &= ~brcmnand_ecc_level_mask(ctrl);
+	tmp |= cfg->ecc_level << NAND_ACC_CONTROL_ECC_SHIFT;
+	tmp &= ~brcmnand_spare_area_mask(ctrl);
+	tmp |= cfg->spare_area_size;
+	nand_writereg(ctrl, acc_control_offs, tmp);
+
+	brcmnand_set_sector_size_1k(host, cfg->sector_size_1k);
+
+	/* threshold = ceil(BCH-level * 0.75) */
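+	/* e.g. for BCH-8 (ecc.strength == 8): DIV_ROUND_UP(8 * 3, 4) == 6 */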
+	brcmnand_wr_corr_thresh(host, DIV_ROUND_UP(chip->ecc.strength * 3, 4));
+
+	return 0;
+}
+
+static void brcmnand_print_cfg(struct brcmnand_host *host,
+			       char *buf, struct brcmnand_cfg *cfg)
+{
+	buf += sprintf(buf,
+		"%lluMiB total, %uKiB blocks, %u%s pages, %uB OOB, %u-bit",
+		(unsigned long long)cfg->device_size >> 20,
+		cfg->block_size >> 10,
+		cfg->page_size >= 1024 ? cfg->page_size >> 10 : cfg->page_size,
+		cfg->page_size >= 1024 ? "KiB" : "B",
+		cfg->spare_area_size, cfg->device_width);
+
+	/* Account for Hamming ECC and for BCH 512B vs 1KiB sectors */
+	if (is_hamming_ecc(host->ctrl, cfg))
+		sprintf(buf, ", Hamming ECC");
+	else if (cfg->sector_size_1k)
+		sprintf(buf, ", BCH-%u (1KiB sector)", cfg->ecc_level << 1);
+	else
+		sprintf(buf, ", BCH-%u", cfg->ecc_level);
+}
+
+/*
+ * Minimum number of bytes to address a page. Calculated as:
+ *     roundup(log2(size / page-size) / 8)
+ *
+ * NB: the following does not "round up" for non-power-of-2 'size'; but this is
+ *     OK because many other things will break if 'size' is irregular...
+ */
+static inline int get_blk_adr_bytes(u64 size, u32 writesize)
+{
+	return ALIGN(ilog2(size) - ilog2(writesize), 8) >> 3;
+}
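+/*
+ * Illustrative example (hypothetical device): 1GiB of flash with 2KiB pages
+ * gives ilog2(size) - ilog2(writesize) = 30 - 11 = 19 block-address bits,
+ * and ALIGN(19, 8) >> 3 = 3 address bytes.
+ */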
+
+static int brcmnand_setup_dev(struct brcmnand_host *host)
+{
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+	struct nand_chip *chip = &host->chip;
+	struct brcmnand_controller *ctrl = host->ctrl;
+	struct brcmnand_cfg *cfg = &host->hwcfg;
+	char msg[128];
+	u32 offs, tmp, oob_sector;
+	int ret;
+
+	memset(cfg, 0, sizeof(*cfg));
+
+	ret = of_property_read_u32(nand_get_flash_node(chip),
+				   "brcm,nand-oob-sector-size",
+				   &oob_sector);
+	if (ret) {
+		/* Use detected size */
+		cfg->spare_area_size = mtd->oobsize /
+					(mtd->writesize >> FC_SHIFT);
+	} else {
+		cfg->spare_area_size = oob_sector;
+	}
+	if (cfg->spare_area_size > ctrl->max_oob)
+		cfg->spare_area_size = ctrl->max_oob;
+	/*
+	 * Set oobsize to be consistent with controller's spare_area_size, as
+	 * the rest is inaccessible.
+	 */
+	mtd->oobsize = cfg->spare_area_size * (mtd->writesize >> FC_SHIFT);
+
+	cfg->device_size = mtd->size;
+	cfg->block_size = mtd->erasesize;
+	cfg->page_size = mtd->writesize;
+	cfg->device_width = (chip->options & NAND_BUSWIDTH_16) ? 16 : 8;
+	cfg->col_adr_bytes = 2;
+	cfg->blk_adr_bytes = get_blk_adr_bytes(mtd->size, mtd->writesize);
+
+	if (chip->ecc.mode != NAND_ECC_HW) {
+		dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n",
+			chip->ecc.mode);
+		return -EINVAL;
+	}
+
+	if (chip->ecc.algo == NAND_ECC_UNKNOWN) {
+		if (chip->ecc.strength == 1 && chip->ecc.size == 512)
+			/* Default to Hamming for 1-bit ECC, if unspecified */
+			chip->ecc.algo = NAND_ECC_HAMMING;
+		else
+			/* Otherwise, BCH */
+			chip->ecc.algo = NAND_ECC_BCH;
+	}
+
+	if (chip->ecc.algo == NAND_ECC_HAMMING && (chip->ecc.strength != 1 ||
+						   chip->ecc.size != 512)) {
+		dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n",
+			chip->ecc.strength, chip->ecc.size);
+		return -EINVAL;
+	}
+
+	switch (chip->ecc.size) {
+	case 512:
+		if (chip->ecc.algo == NAND_ECC_HAMMING)
+			cfg->ecc_level = 15;
+		else
+			cfg->ecc_level = chip->ecc.strength;
+		cfg->sector_size_1k = 0;
+		break;
+	case 1024:
+		if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) {
+			dev_err(ctrl->dev, "1KB sectors not supported\n");
+			return -EINVAL;
+		}
+		if (chip->ecc.strength & 0x1) {
+			dev_err(ctrl->dev,
+				"odd ECC not supported with 1KB sectors\n");
+			return -EINVAL;
+		}
+
+		cfg->ecc_level = chip->ecc.strength >> 1;
+		cfg->sector_size_1k = 1;
+		break;
+	default:
+		dev_err(ctrl->dev, "unsupported ECC size: %d\n",
+			chip->ecc.size);
+		return -EINVAL;
+	}
+
+	cfg->ful_adr_bytes = cfg->blk_adr_bytes;
+	if (mtd->writesize > 512)
+		cfg->ful_adr_bytes += cfg->col_adr_bytes;
+	else
+		cfg->ful_adr_bytes += 1;
+
+	ret = brcmnand_set_cfg(host, cfg);
+	if (ret)
+		return ret;
+
+	brcmnand_set_ecc_enabled(host, 1);
+
+	brcmnand_print_cfg(host, msg, cfg);
+	dev_info(ctrl->dev, "detected %s\n", msg);
+
+	/* Configure ACC_CONTROL */
+	offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL);
+	tmp = nand_readreg(ctrl, offs);
+	tmp &= ~ACC_CONTROL_PARTIAL_PAGE;
+	tmp &= ~ACC_CONTROL_RD_ERASED;
+
+	/* We need to turn on Read from erased pages protected by ECC */
+	if (ctrl->nand_version >= 0x0702)
+		tmp |= ACC_CONTROL_RD_ERASED;
+	tmp &= ~ACC_CONTROL_FAST_PGM_RDIN;
+	if (ctrl->features & BRCMNAND_HAS_PREFETCH) {
+		/*
+		 * FIXME: Flash DMA + prefetch may see spurious erased-page ECC
+		 * errors
+		 */
+		if (has_flash_dma(ctrl))
+			tmp &= ~ACC_CONTROL_PREFETCH;
+		else
+			tmp |= ACC_CONTROL_PREFETCH;
+	}
+	nand_writereg(ctrl, offs, tmp);
+
+	return 0;
+}
+
+static int brcmnand_init_cs(struct brcmnand_host *host, struct device_node *dn)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	struct platform_device *pdev = host->pdev;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	int ret;
+	u16 cfg_offs;
+
+	ret = of_property_read_u32(dn, "reg", &host->cs);
+	if (ret) {
+		dev_err(&pdev->dev, "can't get chip-select\n");
+		return -ENXIO;
+	}
+
+	mtd = nand_to_mtd(&host->chip);
+	chip = &host->chip;
+
+	nand_set_flash_node(chip, dn);
+	nand_set_controller_data(chip, host);
+	mtd->name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "brcmnand.%d",
+				   host->cs);
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+
+	chip->IO_ADDR_R = (void __iomem *)0xdeadbeef;
+	chip->IO_ADDR_W = (void __iomem *)0xdeadbeef;
+
+	chip->cmd_ctrl = brcmnand_cmd_ctrl;
+	chip->cmdfunc = brcmnand_cmdfunc;
+	chip->waitfunc = brcmnand_waitfunc;
+	chip->read_byte = brcmnand_read_byte;
+	chip->read_buf = brcmnand_read_buf;
+	chip->write_buf = brcmnand_write_buf;
+
+	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.read_page = brcmnand_read_page;
+	chip->ecc.write_page = brcmnand_write_page;
+	chip->ecc.read_page_raw = brcmnand_read_page_raw;
+	chip->ecc.write_page_raw = brcmnand_write_page_raw;
+	chip->ecc.write_oob_raw = brcmnand_write_oob_raw;
+	chip->ecc.read_oob_raw = brcmnand_read_oob_raw;
+	chip->ecc.read_oob = brcmnand_read_oob;
+	chip->ecc.write_oob = brcmnand_write_oob;
+
+	chip->controller = &ctrl->controller;
+
+	/*
+	 * The bootloader might have configured 16-bit mode, but the
+	 * NAND READID command only works in 8-bit mode. We force
+	 * 8-bit mode here to ensure that NAND READID commands work.
+	 */
+	cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+	nand_writereg(ctrl, cfg_offs,
+		      nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH);
+
+	if (nand_scan_ident(mtd, 1, NULL))
+		return -ENXIO;
+
+	chip->options |= NAND_NO_SUBPAGE_WRITE;
+	/*
+	 * Avoid (for instance) kmap()'d buffers from JFFS2, which we can't DMA
+	 * to/from, and have nand_base pass us a bounce buffer instead, as
+	 * needed.
+	 */
+	chip->options |= NAND_USE_BOUNCE_BUFFER;
+
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
+		chip->bbt_options |= NAND_BBT_NO_OOB;
+
+	if (brcmnand_setup_dev(host))
+		return -ENXIO;
+
+	chip->ecc.size = host->hwcfg.sector_size_1k ? 1024 : 512;
+	/* only use our internal HW threshold */
+	mtd->bitflip_threshold = 1;
+
+	ret = brcmstb_choose_ecc_layout(host);
+	if (ret)
+		return ret;
+
+	if (nand_scan_tail(mtd))
+		return -ENXIO;
+
+	return mtd_device_register(mtd, NULL, 0);
+}
+
+static void brcmnand_save_restore_cs_config(struct brcmnand_host *host,
+					    int restore)
+{
+	struct brcmnand_controller *ctrl = host->ctrl;
+	u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG);
+	u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs,
+			BRCMNAND_CS_CFG_EXT);
+	u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs,
+			BRCMNAND_CS_ACC_CONTROL);
+	u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1);
+	u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2);
+
+	if (restore) {
+		nand_writereg(ctrl, cfg_offs, host->hwcfg.config);
+		if (cfg_offs != cfg_ext_offs)
+			nand_writereg(ctrl, cfg_ext_offs,
+				      host->hwcfg.config_ext);
+		nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control);
+		nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1);
+		nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2);
+	} else {
+		host->hwcfg.config = nand_readreg(ctrl, cfg_offs);
+		if (cfg_offs != cfg_ext_offs)
+			host->hwcfg.config_ext =
+				nand_readreg(ctrl, cfg_ext_offs);
+		host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs);
+		host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs);
+		host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs);
+	}
+}
+
+static int brcmnand_suspend(struct device *dev)
+{
+	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+	struct brcmnand_host *host;
+
+	list_for_each_entry(host, &ctrl->host_list, node)
+		brcmnand_save_restore_cs_config(host, 0);
+
+	ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT);
+	ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR);
+	ctrl->corr_stat_threshold =
+		brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD);
+
+	if (has_flash_dma(ctrl))
+		ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE);
+
+	return 0;
+}
+
+static int brcmnand_resume(struct device *dev)
+{
+	struct brcmnand_controller *ctrl = dev_get_drvdata(dev);
+	struct brcmnand_host *host;
+
+	if (has_flash_dma(ctrl)) {
+		flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode);
+		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+	}
+
+	brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select);
+	brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor);
+	brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD,
+			ctrl->corr_stat_threshold);
+	if (ctrl->soc) {
+		/* Clear/re-enable interrupt */
+		ctrl->soc->ctlrdy_ack(ctrl->soc);
+		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+	}
+
+	list_for_each_entry(host, &ctrl->host_list, node) {
+		struct nand_chip *chip = &host->chip;
+		struct mtd_info *mtd = nand_to_mtd(chip);
+
+		brcmnand_save_restore_cs_config(host, 1);
+
+		/* Reset the chip, required by some chips after power-up */
+		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+	}
+
+	return 0;
+}
+
+const struct dev_pm_ops brcmnand_pm_ops = {
+	.suspend		= brcmnand_suspend,
+	.resume			= brcmnand_resume,
+};
+EXPORT_SYMBOL_GPL(brcmnand_pm_ops);
+
+static const struct of_device_id brcmnand_of_match[] = {
+	{ .compatible = "brcm,brcmnand-v4.0" },
+	{ .compatible = "brcm,brcmnand-v5.0" },
+	{ .compatible = "brcm,brcmnand-v6.0" },
+	{ .compatible = "brcm,brcmnand-v6.1" },
+	{ .compatible = "brcm,brcmnand-v6.2" },
+	{ .compatible = "brcm,brcmnand-v7.0" },
+	{ .compatible = "brcm,brcmnand-v7.1" },
+	{ .compatible = "brcm,brcmnand-v7.2" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, brcmnand_of_match);
+
+/***********************************************************************
+ * Platform driver setup (per controller)
+ ***********************************************************************/
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *dn = dev->of_node, *child;
+	struct brcmnand_controller *ctrl;
+	struct resource *res;
+	int ret;
+
+	/* We only support device-tree instantiation */
+	if (!dn)
+		return -ENODEV;
+
+	if (!of_match_node(brcmnand_of_match, dn))
+		return -ENODEV;
+
+	ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+
+	dev_set_drvdata(dev, ctrl);
+	ctrl->dev = dev;
+
+	init_completion(&ctrl->done);
+	init_completion(&ctrl->dma_done);
+	nand_hw_control_init(&ctrl->controller);
+	INIT_LIST_HEAD(&ctrl->host_list);
+
+	/* NAND register range */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ctrl->nand_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ctrl->nand_base))
+		return PTR_ERR(ctrl->nand_base);
+
+	/* Enable clock before using NAND registers */
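+	/*
+	 * The clock is treated as optional: -EPROBE_DEFER is propagated,
+	 * while any other error simply leaves ctrl->clk set to NULL.
+	 */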
+	ctrl->clk = devm_clk_get(dev, "nand");
+	if (!IS_ERR(ctrl->clk)) {
+		ret = clk_prepare_enable(ctrl->clk);
+		if (ret)
+			return ret;
+	} else {
+		ret = PTR_ERR(ctrl->clk);
+		if (ret == -EPROBE_DEFER)
+			return ret;
+
+		ctrl->clk = NULL;
+	}
+
+	/* Initialize NAND revision */
+	ret = brcmnand_revision_init(ctrl);
+	if (ret)
+		goto err;
+
+	/*
+	 * Most chips have this cache at a fixed offset within 'nand' block.
+	 * Some must specify this region separately.
+	 */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand-cache");
+	if (res) {
+		ctrl->nand_fc = devm_ioremap_resource(dev, res);
+		if (IS_ERR(ctrl->nand_fc)) {
+			ret = PTR_ERR(ctrl->nand_fc);
+			goto err;
+		}
+	} else {
+		ctrl->nand_fc = ctrl->nand_base +
+				ctrl->reg_offsets[BRCMNAND_FC_BASE];
+	}
+
+	/* FLASH_DMA */
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "flash-dma");
+	if (res) {
+		ctrl->flash_dma_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(ctrl->flash_dma_base)) {
+			ret = PTR_ERR(ctrl->flash_dma_base);
+			goto err;
+		}
+
+		flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */
+		flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0);
+
+		/* Allocate descriptor(s) */
+		ctrl->dma_desc = dmam_alloc_coherent(dev,
+						     sizeof(*ctrl->dma_desc),
+						     &ctrl->dma_pa, GFP_KERNEL);
+		if (!ctrl->dma_desc) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ctrl->dma_irq = platform_get_irq(pdev, 1);
+		if ((int)ctrl->dma_irq < 0) {
+			dev_err(dev, "missing FLASH_DMA IRQ\n");
+			ret = -ENODEV;
+			goto err;
+		}
+
+		ret = devm_request_irq(dev, ctrl->dma_irq,
+				brcmnand_dma_irq, 0, DRV_NAME,
+				ctrl);
+		if (ret < 0) {
+			dev_err(dev, "can't allocate IRQ %d: error %d\n",
+					ctrl->dma_irq, ret);
+			goto err;
+		}
+
+		dev_info(dev, "enabling FLASH_DMA\n");
+	}
+
+	/* Disable automatic device ID config, direct addressing */
+	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT,
+			 CS_SELECT_AUTO_DEVICE_ID_CFG | 0xff, 0, 0);
+	/* Disable XOR addressing */
+	brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0);
+
+	if (ctrl->features & BRCMNAND_HAS_WP) {
+		/* Permanently disable write protection */
+		if (wp_on == 2)
+			brcmnand_set_wp(ctrl, false);
+	} else {
+		wp_on = 0;
+	}
+
+	/* IRQ */
+	ctrl->irq = platform_get_irq(pdev, 0);
+	if ((int)ctrl->irq < 0) {
+		dev_err(dev, "no IRQ defined\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/*
+	 * Some SoCs integrate this controller (e.g., its interrupt bits) in
+	 * interesting ways
+	 */
+	if (soc) {
+		ctrl->soc = soc;
+
+		ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0,
+				       DRV_NAME, ctrl);
+
+		/* Enable interrupt */
+		ctrl->soc->ctlrdy_ack(ctrl->soc);
+		ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true);
+	} else {
+		/* Use standard interrupt infrastructure */
+		ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0,
+				       DRV_NAME, ctrl);
+	}
+	if (ret < 0) {
+		dev_err(dev, "can't allocate IRQ %d: error %d\n",
+			ctrl->irq, ret);
+		goto err;
+	}
+
+	for_each_available_child_of_node(dn, child) {
+		if (of_device_is_compatible(child, "brcm,nandcs")) {
+			struct brcmnand_host *host;
+
+			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+			if (!host) {
+				of_node_put(child);
+				ret = -ENOMEM;
+				goto err;
+			}
+			host->pdev = pdev;
+			host->ctrl = ctrl;
+
+			ret = brcmnand_init_cs(host, child);
+			if (ret) {
+				devm_kfree(dev, host);
+				continue; /* Try all chip-selects */
+			}
+
+			list_add_tail(&host->node, &ctrl->host_list);
+		}
+	}
+
+	/* No chip-selects could initialize properly */
+	if (list_empty(&ctrl->host_list)) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	return 0;
+
+err:
+	clk_disable_unprepare(ctrl->clk);
+	return ret;
+
+}
+EXPORT_SYMBOL_GPL(brcmnand_probe);
+
+int brcmnand_remove(struct platform_device *pdev)
+{
+	struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev);
+	struct brcmnand_host *host;
+
+	list_for_each_entry(host, &ctrl->host_list, node)
+		nand_release(nand_to_mtd(&host->chip));
+
+	clk_disable_unprepare(ctrl->clk);
+
+	dev_set_drvdata(&pdev->dev, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(brcmnand_remove);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Kevin Cernekee");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom chips");
+MODULE_ALIAS("platform:brcmnand");
diff --git a/drivers/mtd/nand/rawnand/brcmnand/brcmnand.h b/drivers/mtd/nand/rawnand/brcmnand/brcmnand.h
new file mode 100644
index 000000000000..5c44cd4aba87
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/brcmnand.h
@@ -0,0 +1,74 @@ 
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __BRCMNAND_H__
+#define __BRCMNAND_H__
+
+#include <linux/types.h>
+#include <linux/io.h>
+
+struct platform_device;
+struct dev_pm_ops;
+
+struct brcmnand_soc {
+	bool (*ctlrdy_ack)(struct brcmnand_soc *soc);
+	void (*ctlrdy_set_enabled)(struct brcmnand_soc *soc, bool en);
+	void (*prepare_data_bus)(struct brcmnand_soc *soc, bool prepare,
+				 bool is_param);
+};
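+/*
+ * SoC shims fill in these hooks as needed; see iproc_nand.c for an example
+ * that populates all three callbacks.
+ */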
+
+static inline void brcmnand_soc_data_bus_prepare(struct brcmnand_soc *soc,
+						 bool is_param)
+{
+	if (soc && soc->prepare_data_bus)
+		soc->prepare_data_bus(soc, true, is_param);
+}
+
+static inline void brcmnand_soc_data_bus_unprepare(struct brcmnand_soc *soc,
+						   bool is_param)
+{
+	if (soc && soc->prepare_data_bus)
+		soc->prepare_data_bus(soc, false, is_param);
+}
+
+static inline u32 brcmnand_readl(void __iomem *addr)
+{
+	/*
+	 * MIPS endianness is configured by boot strap, which also reverses all
+	 * bus endianness (i.e., big-endian CPU + big endian bus ==> native
+	 * endian I/O).
+	 *
+	 * Other architectures (e.g., ARM) either do not support big endian, or
+	 * else leave I/O in little endian mode.
+	 */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		return __raw_readl(addr);
+	else
+		return readl_relaxed(addr);
+}
+
+static inline void brcmnand_writel(u32 val, void __iomem *addr)
+{
+	/* See brcmnand_readl() comments */
+	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+		__raw_writel(val, addr);
+	else
+		writel_relaxed(val, addr);
+}
+
+int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc);
+int brcmnand_remove(struct platform_device *pdev);
+
+extern const struct dev_pm_ops brcmnand_pm_ops;
+
+#endif /* __BRCMNAND_H__ */
diff --git a/drivers/mtd/nand/rawnand/brcmnand/brcmstb_nand.c b/drivers/mtd/nand/rawnand/brcmnand/brcmstb_nand.c
new file mode 100644
index 000000000000..5c271077ac87
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/brcmstb_nand.c
@@ -0,0 +1,44 @@ 
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "brcmnand.h"
+
+static const struct of_device_id brcmstb_nand_of_match[] = {
+	{ .compatible = "brcm,brcmnand" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, brcmstb_nand_of_match);
+
+static int brcmstb_nand_probe(struct platform_device *pdev)
+{
+	return brcmnand_probe(pdev, NULL);
+}
+
+static struct platform_driver brcmstb_nand_driver = {
+	.probe			= brcmstb_nand_probe,
+	.remove			= brcmnand_remove,
+	.driver = {
+		.name		= "brcmstb_nand",
+		.pm		= &brcmnand_pm_ops,
+		.of_match_table = brcmstb_nand_of_match,
+	}
+};
+module_platform_driver(brcmstb_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_DESCRIPTION("NAND driver for Broadcom STB chips");
diff --git a/drivers/mtd/nand/rawnand/brcmnand/iproc_nand.c b/drivers/mtd/nand/rawnand/brcmnand/iproc_nand.c
new file mode 100644
index 000000000000..4c6ae113664d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/brcmnand/iproc_nand.c
@@ -0,0 +1,160 @@ 
+/*
+ * Copyright © 2015 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "brcmnand.h"
+
+struct iproc_nand_soc {
+	struct brcmnand_soc soc;
+
+	void __iomem *idm_base;
+	void __iomem *ext_base;
+	spinlock_t idm_lock;
+};
+
+#define IPROC_NAND_CTLR_READY_OFFSET       0x10
+#define IPROC_NAND_CTLR_READY              BIT(0)
+
+#define IPROC_NAND_IO_CTRL_OFFSET          0x00
+#define IPROC_NAND_APB_LE_MODE             BIT(24)
+#define IPROC_NAND_INT_CTRL_READ_ENABLE    BIT(6)
+
+static bool iproc_nand_intc_ack(struct brcmnand_soc *soc)
+{
+	struct iproc_nand_soc *priv =
+			container_of(soc, struct iproc_nand_soc, soc);
+	void __iomem *mmio = priv->ext_base + IPROC_NAND_CTLR_READY_OFFSET;
+	u32 val = brcmnand_readl(mmio);
+
+	if (val & IPROC_NAND_CTLR_READY) {
+		brcmnand_writel(IPROC_NAND_CTLR_READY, mmio);
+		return true;
+	}
+
+	return false;
+}
+
+static void iproc_nand_intc_set(struct brcmnand_soc *soc, bool en)
+{
+	struct iproc_nand_soc *priv =
+			container_of(soc, struct iproc_nand_soc, soc);
+	void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->idm_lock, flags);
+
+	val = brcmnand_readl(mmio);
+
+	if (en)
+		val |= IPROC_NAND_INT_CTRL_READ_ENABLE;
+	else
+		val &= ~IPROC_NAND_INT_CTRL_READ_ENABLE;
+
+	brcmnand_writel(val, mmio);
+
+	spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static void iproc_nand_apb_access(struct brcmnand_soc *soc, bool prepare,
+				  bool is_param)
+{
+	struct iproc_nand_soc *priv =
+			container_of(soc, struct iproc_nand_soc, soc);
+	void __iomem *mmio = priv->idm_base + IPROC_NAND_IO_CTRL_OFFSET;
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->idm_lock, flags);
+
+	val = brcmnand_readl(mmio);
+
+	/*
+	 * In the case of BE, or when dealing with NAND data, always configure
+	 * the APB bus to LE mode before accessing the FIFO, and switch back to
+	 * BE mode after the access is done.
+	 */
+	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) || !is_param) {
+		if (prepare)
+			val |= IPROC_NAND_APB_LE_MODE;
+		else
+			val &= ~IPROC_NAND_APB_LE_MODE;
+	} else { /* when in LE accessing the parameter page, keep APB in BE */
+		val &= ~IPROC_NAND_APB_LE_MODE;
+	}
+
+	brcmnand_writel(val, mmio);
+
+	spin_unlock_irqrestore(&priv->idm_lock, flags);
+}
+
+static int iproc_nand_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct iproc_nand_soc *priv;
+	struct brcmnand_soc *soc;
+	struct resource *res;
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	soc = &priv->soc;
+
+	spin_lock_init(&priv->idm_lock);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-idm");
+	priv->idm_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->idm_base))
+		return PTR_ERR(priv->idm_base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iproc-ext");
+	priv->ext_base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(priv->ext_base))
+		return PTR_ERR(priv->ext_base);
+
+	soc->ctlrdy_ack = iproc_nand_intc_ack;
+	soc->ctlrdy_set_enabled = iproc_nand_intc_set;
+	soc->prepare_data_bus = iproc_nand_apb_access;
+
+	return brcmnand_probe(pdev, soc);
+}
+
+static const struct of_device_id iproc_nand_of_match[] = {
+	{ .compatible = "brcm,nand-iproc" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, iproc_nand_of_match);
+
+static struct platform_driver iproc_nand_driver = {
+	.probe			= iproc_nand_probe,
+	.remove			= brcmnand_remove,
+	.driver = {
+		.name		= "iproc_nand",
+		.pm		= &brcmnand_pm_ops,
+		.of_match_table	= iproc_nand_of_match,
+	}
+};
+module_platform_driver(iproc_nand_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Brian Norris");
+MODULE_AUTHOR("Ray Jui");
+MODULE_DESCRIPTION("NAND driver for Broadcom IPROC-based SoCs");
diff --git a/drivers/mtd/nand/rawnand/cafe_nand.c b/drivers/mtd/nand/rawnand/cafe_nand.c
new file mode 100644
index 000000000000..93880171740a
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/cafe_nand.c
@@ -0,0 +1,898 @@ 
+/*
+ * Driver for One Laptop Per Child ‘CAFÉ’ controller, aka Marvell 88ALP01
+ *
+ * The data sheet for this device can be found at:
+ *    http://wiki.laptop.org/go/Datasheets 
+ *
+ * Copyright © 2006 Red Hat, Inc.
+ * Copyright © 2006 David Woodhouse <dwmw2@infradead.org>
+ */
+
+#define DEBUG
+
+#include <linux/device.h>
+#undef DEBUG
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/rslib.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+#define CAFE_NAND_CTRL1		0x00
+#define CAFE_NAND_CTRL2		0x04
+#define CAFE_NAND_CTRL3		0x08
+#define CAFE_NAND_STATUS	0x0c
+#define CAFE_NAND_IRQ		0x10
+#define CAFE_NAND_IRQ_MASK	0x14
+#define CAFE_NAND_DATA_LEN	0x18
+#define CAFE_NAND_ADDR1		0x1c
+#define CAFE_NAND_ADDR2		0x20
+#define CAFE_NAND_TIMING1	0x24
+#define CAFE_NAND_TIMING2	0x28
+#define CAFE_NAND_TIMING3	0x2c
+#define CAFE_NAND_NONMEM	0x30
+#define CAFE_NAND_ECC_RESULT	0x3C
+#define CAFE_NAND_DMA_CTRL	0x40
+#define CAFE_NAND_DMA_ADDR0	0x44
+#define CAFE_NAND_DMA_ADDR1	0x48
+#define CAFE_NAND_ECC_SYN01	0x50
+#define CAFE_NAND_ECC_SYN23	0x54
+#define CAFE_NAND_ECC_SYN45	0x58
+#define CAFE_NAND_ECC_SYN67	0x5c
+#define CAFE_NAND_READ_DATA	0x1000
+#define CAFE_NAND_WRITE_DATA	0x2000
+
+#define CAFE_GLOBAL_CTRL	0x3004
+#define CAFE_GLOBAL_IRQ		0x3008
+#define CAFE_GLOBAL_IRQ_MASK	0x300c
+#define CAFE_NAND_RESET		0x3034
+
+/* Missing from the datasheet: bit 19 of CTRL1 sets CE0 vs. CE1 */
+#define CTRL1_CHIPSELECT	(1<<19)
+
+struct cafe_priv {
+	struct nand_chip nand;
+	struct pci_dev *pdev;
+	void __iomem *mmio;
+	struct rs_control *rs;
+	uint32_t ctl1;
+	uint32_t ctl2;
+	int datalen;
+	int nr_data;
+	int data_pos;
+	int page_addr;
+	dma_addr_t dmaaddr;
+	unsigned char *dmabuf;
+};
+
+static int usedma = 1;
+module_param(usedma, int, 0644);
+
+static int skipbbt = 0;
+module_param(skipbbt, int, 0644);
+
+static int debug = 0;
+module_param(debug, int, 0644);
+
+static int regdebug = 0;
+module_param(regdebug, int, 0644);
+
+static int checkecc = 1;
+module_param(checkecc, int, 0644);
+
+static unsigned int numtimings;
+static int timing[3];
+module_param_array(timing, int, &numtimings, 0644);
+
+static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL };
+
+/* Hrm. Why isn't this already conditional on something in the struct device? */
+#define cafe_dev_dbg(dev, args...) do { if (debug) dev_dbg(dev, ##args); } while(0)
+
+/* Make it easier to switch to PIO if we need to */
+#define cafe_readl(cafe, addr)			readl((cafe)->mmio + CAFE_##addr)
+#define cafe_writel(cafe, datum, addr)		writel(datum, (cafe)->mmio + CAFE_##addr)
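+/*
+ * For example, cafe_readl(cafe, NAND_IRQ) expands to
+ * readl((cafe)->mmio + CAFE_NAND_IRQ) via the CAFE_##addr token pasting.
+ */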
+
+static int cafe_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+	int result = !!(cafe_readl(cafe, NAND_STATUS) & 0x40000000);
+	uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+
+	cafe_writel(cafe, irqs, NAND_IRQ);
+
+	cafe_dev_dbg(&cafe->pdev->dev, "NAND device is%s ready, IRQ %x (%x) (%x,%x)\n",
+		result?"":" not", irqs, cafe_readl(cafe, NAND_IRQ),
+		cafe_readl(cafe, GLOBAL_IRQ), cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+	return result;
+}
+
+
+static void cafe_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+	if (usedma)
+		memcpy(cafe->dmabuf + cafe->datalen, buf, len);
+	else
+		memcpy_toio(cafe->mmio + CAFE_NAND_WRITE_DATA + cafe->datalen, buf, len);
+
+	cafe->datalen += len;
+
+	cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes to write buffer. datalen 0x%x\n",
+		len, cafe->datalen);
+}
+
+static void cafe_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+	if (usedma)
+		memcpy(buf, cafe->dmabuf + cafe->datalen, len);
+	else
+		memcpy_fromio(buf, cafe->mmio + CAFE_NAND_READ_DATA + cafe->datalen, len);
+
+	cafe_dev_dbg(&cafe->pdev->dev, "Copy 0x%x bytes from position 0x%x in read buffer.\n",
+		  len, cafe->datalen);
+	cafe->datalen += len;
+}
+
+static uint8_t cafe_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+	uint8_t d;
+
+	cafe_read_buf(mtd, &d, 1);
+	cafe_dev_dbg(&cafe->pdev->dev, "Read %02x\n", d);
+
+	return d;
+}
+
+static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+			      int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+	int adrbytes = 0;
+	uint32_t ctl1;
+	uint32_t doneint = 0x80000000;
+
+	cafe_dev_dbg(&cafe->pdev->dev, "cmdfunc %02x, 0x%x, 0x%x\n",
+		command, column, page_addr);
+
+	if (command == NAND_CMD_ERASE2 || command == NAND_CMD_PAGEPROG) {
+		/* Second half of a command we already calculated */
+		cafe_writel(cafe, cafe->ctl2 | 0x100 | command, NAND_CTRL2);
+		ctl1 = cafe->ctl1;
+		cafe->ctl2 &= ~(1<<30);
+		cafe_dev_dbg(&cafe->pdev->dev, "Continue command, ctl1 %08x, #data %d\n",
+			  cafe->ctl1, cafe->nr_data);
+		goto do_command;
+	}
+	/* Reset ECC engine */
+	cafe_writel(cafe, 0, NAND_CTRL2);
+
+	/* Emulate NAND_CMD_READOOB on large-page chips */
+	if (mtd->writesize > 512 &&
+	    command == NAND_CMD_READOOB) {
+		column += mtd->writesize;
+		command = NAND_CMD_READ0;
+	}
+
+	/* FIXME: Do we need to send read command before sending data
+	   for small-page chips, to position the buffer correctly? */
+
+	if (column != -1) {
+		cafe_writel(cafe, column, NAND_ADDR1);
+		adrbytes = 2;
+		if (page_addr != -1)
+			goto write_adr2;
+	} else if (page_addr != -1) {
+		cafe_writel(cafe, page_addr & 0xffff, NAND_ADDR1);
+		page_addr >>= 16;
+	write_adr2:
+		cafe_writel(cafe, page_addr, NAND_ADDR2);
+		adrbytes += 2;
+		if (mtd->size > mtd->writesize << 16)
+			adrbytes++;
+	}
+
+	cafe->data_pos = cafe->datalen = 0;
+
+	/* Set command valid bit, mask in the chip select bit  */
+	ctl1 = 0x80000000 | command | (cafe->ctl1 & CTRL1_CHIPSELECT);
+
+	/* Set RD or WR bits as appropriate */
+	if (command == NAND_CMD_READID || command == NAND_CMD_STATUS) {
+		ctl1 |= (1<<26); /* rd */
+		/* Always 5 bytes, for now */
+		cafe->datalen = 4;
+		/* And one address cycle -- even for STATUS, since the controller doesn't work without */
+		adrbytes = 1;
+	} else if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+		   command == NAND_CMD_READOOB || command == NAND_CMD_RNDOUT) {
+		ctl1 |= 1<<26; /* rd */
+		/* For now, assume just read to end of page */
+		cafe->datalen = mtd->writesize + mtd->oobsize - column;
+	} else if (command == NAND_CMD_SEQIN)
+		ctl1 |= 1<<25; /* wr */
+
+	/* Set number of address bytes */
+	if (adrbytes)
+		ctl1 |= ((adrbytes-1)|8) << 27;
+
+	if (command == NAND_CMD_SEQIN || command == NAND_CMD_ERASE1) {
+		/* Ignore the first command of a pair; the hardware
+		   deals with them both at once, later */
+		cafe->ctl1 = ctl1;
+		cafe_dev_dbg(&cafe->pdev->dev, "Setup for delayed command, ctl1 %08x, dlen %x\n",
+			  cafe->ctl1, cafe->datalen);
+		return;
+	}
+	/* RNDOUT and READ0 commands need a following byte */
+	if (command == NAND_CMD_RNDOUT)
+		cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_RNDOUTSTART, NAND_CTRL2);
+	else if (command == NAND_CMD_READ0 && mtd->writesize > 512)
+		cafe_writel(cafe, cafe->ctl2 | 0x100 | NAND_CMD_READSTART, NAND_CTRL2);
+
+ do_command:
+	cafe_dev_dbg(&cafe->pdev->dev, "dlen %x, ctl1 %x, ctl2 %x\n",
+		cafe->datalen, ctl1, cafe_readl(cafe, NAND_CTRL2));
+
+	/* NB: The datasheet lies -- we really should be subtracting 1 here */
+	cafe_writel(cafe, cafe->datalen, NAND_DATA_LEN);
+	cafe_writel(cafe, 0x90000000, NAND_IRQ);
+	if (usedma && (ctl1 & (3<<25))) {
+		uint32_t dmactl = 0xc0000000 + cafe->datalen;
+		/* If WR or RD bits set, set up DMA */
+		if (ctl1 & (1<<26)) {
+			/* It's a read */
+			dmactl |= (1<<29);
+			/* ... so it's done when the DMA is done, not just
+			   the command. */
+			doneint = 0x10000000;
+		}
+		cafe_writel(cafe, dmactl, NAND_DMA_CTRL);
+	}
+	cafe->datalen = 0;
+
+	if (unlikely(regdebug)) {
+		int i;
+		printk("About to write command %08x to register 0\n", ctl1);
+		for (i=4; i< 0x5c; i+=4)
+			printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+	}
+
+	cafe_writel(cafe, ctl1, NAND_CTRL1);
+	/* Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine. */
+	ndelay(100);
+
+	if (1) {
+		int c;
+		uint32_t irqs;
+
+		for (c = 500000; c != 0; c--) {
+			irqs = cafe_readl(cafe, NAND_IRQ);
+			if (irqs & doneint)
+				break;
+			udelay(1);
+			if (!(c % 100000))
+				cafe_dev_dbg(&cafe->pdev->dev, "Wait for ready, IRQ %x\n", irqs);
+			cpu_relax();
+		}
+		cafe_writel(cafe, doneint, NAND_IRQ);
+		cafe_dev_dbg(&cafe->pdev->dev, "Command %x completed after %d usec, irqs %x (%x)\n",
+			     command, 500000-c, irqs, cafe_readl(cafe, NAND_IRQ));
+	}
+
+	WARN_ON(cafe->ctl2 & (1<<30));
+
+	switch (command) {
+
+	case NAND_CMD_CACHEDPROG:
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_RNDIN:
+	case NAND_CMD_STATUS:
+	case NAND_CMD_RNDOUT:
+		cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+		return;
+	}
+	nand_wait_ready(mtd);
+	cafe_writel(cafe, cafe->ctl2, NAND_CTRL2);
+}
+
+static void cafe_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+	cafe_dev_dbg(&cafe->pdev->dev, "select_chip %d\n", chipnr);
+
+	/* Mask the appropriate bit into the stored value of ctl1
+	   which will be used by cafe_nand_cmdfunc() */
+	if (chipnr)
+		cafe->ctl1 |= CTRL1_CHIPSELECT;
+	else
+		cafe->ctl1 &= ~CTRL1_CHIPSELECT;
+}
+
+static irqreturn_t cafe_nand_interrupt(int irq, void *id)
+{
+	struct mtd_info *mtd = id;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+	uint32_t irqs = cafe_readl(cafe, NAND_IRQ);
+	cafe_writel(cafe, irqs & ~0x90000000, NAND_IRQ);
+	if (!irqs)
+		return IRQ_NONE;
+
+	cafe_dev_dbg(&cafe->pdev->dev, "irq, bits %x (%x)\n", irqs, cafe_readl(cafe, NAND_IRQ));
+	return IRQ_HANDLED;
+}
+
+static void cafe_nand_bug(struct mtd_info *mtd)
+{
+	BUG();
+}
+
+static int cafe_nand_write_oob(struct mtd_info *mtd,
+			       struct nand_chip *chip, int page)
+{
+	int status = 0;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/* Don't use -- use nand_read_oob_std for now */
+static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			      int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+/**
+ * cafe_nand_read_page - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @buf:	buffer to store read data
+ * @oob_required:	caller expects OOB data read to chip->oob_poi
+ * @page:	page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore
+ * we need a special oob layout and handling.
+ */
+static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			       uint8_t *buf, int oob_required, int page)
+{
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+	unsigned int max_bitflips = 0;
+
+	cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
+		     cafe_readl(cafe, NAND_ECC_RESULT),
+		     cafe_readl(cafe, NAND_ECC_SYN01));
+
+	chip->read_buf(mtd, buf, mtd->writesize);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	if (checkecc && cafe_readl(cafe, NAND_ECC_RESULT) & (1<<18)) {
+		unsigned short syn[8], pat[4];
+		int pos[4];
+		u8 *oob = chip->oob_poi;
+		int i, n;
+
+		for (i=0; i<8; i+=2) {
+			uint32_t tmp = cafe_readl(cafe, NAND_ECC_SYN01 + (i*2));
+			syn[i] = cafe->rs->index_of[tmp & 0xfff];
+			syn[i+1] = cafe->rs->index_of[(tmp >> 16) & 0xfff];
+		}
+
+		n = decode_rs16(cafe->rs, NULL, NULL, 1367, syn, 0, pos, 0,
+		                pat);
+
+		for (i = 0; i < n; i++) {
+			int p = pos[i];
+
+			/* The 12-bit symbols are mapped to bytes here */
+
+			if (p > 1374) {
+				/* out of range */
+				n = -1374;
+			} else if (p == 0) {
+				/* high four bits do not correspond to data */
+				if (pat[i] > 0xff)
+					n = -2048;
+				else
+					buf[0] ^= pat[i];
+			} else if (p == 1365) {
+				buf[2047] ^= pat[i] >> 4;
+				oob[0] ^= pat[i] << 4;
+			} else if (p > 1365) {
+				if ((p & 1) == 1) {
+					oob[3*p/2 - 2048] ^= pat[i] >> 4;
+					oob[3*p/2 - 2047] ^= pat[i] << 4;
+				} else {
+					oob[3*p/2 - 2049] ^= pat[i] >> 8;
+					oob[3*p/2 - 2048] ^= pat[i];
+				}
+			} else if ((p & 1) == 1) {
+				buf[3*p/2] ^= pat[i] >> 4;
+				buf[3*p/2 + 1] ^= pat[i] << 4;
+			} else {
+				buf[3*p/2 - 1] ^= pat[i] >> 8;
+				buf[3*p/2] ^= pat[i];
+			}
+		}
+
+		if (n < 0) {
+			dev_dbg(&cafe->pdev->dev, "Failed to correct ECC at %08x\n",
+				cafe_readl(cafe, NAND_ADDR2) * 2048);
+			for (i = 0; i < 0x5c; i += 4)
+				printk("Register %x: %08x\n", i, readl(cafe->mmio + i));
+			mtd->ecc_stats.failed++;
+		} else {
+			dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
+			mtd->ecc_stats.corrected += n;
+			max_bitflips = max_t(unsigned int, max_bitflips, n);
+		}
+	}
+
+	return max_bitflips;
+}
+
+static int cafe_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int cafe_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = chip->ecc.total;
+	oobregion->length = mtd->oobsize - chip->ecc.total;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops cafe_ooblayout_ops = {
+	.ecc = cafe_ooblayout_ecc,
+	.free = cafe_ooblayout_free,
+};
+
+/* Ick. The BBT code really ought to be able to work this bit out
+   for itself from the above, at least for the 2KiB case */
+static uint8_t cafe_bbt_pattern_2048[] = { 'B', 'b', 't', '0' };
+static uint8_t cafe_mirror_pattern_2048[] = { '1', 't', 'b', 'B' };
+
+static uint8_t cafe_bbt_pattern_512[] = { 0xBB };
+static uint8_t cafe_mirror_pattern_512[] = { 0xBC };
+
+
+static struct nand_bbt_descr cafe_bbt_main_descr_2048 = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	14,
+	.len = 4,
+	.veroffs = 18,
+	.maxblocks = 4,
+	.pattern = cafe_bbt_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_2048 = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	14,
+	.len = 4,
+	.veroffs = 18,
+	.maxblocks = 4,
+	.pattern = cafe_mirror_pattern_2048
+};
+
+static struct nand_bbt_descr cafe_bbt_main_descr_512 = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	14,
+	.len = 1,
+	.veroffs = 15,
+	.maxblocks = 4,
+	.pattern = cafe_bbt_pattern_512
+};
+
+static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	14,
+	.len = 1,
+	.veroffs = 15,
+	.maxblocks = 4,
+	.pattern = cafe_mirror_pattern_512
+};
+
+
+static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
+					  struct nand_chip *chip,
+					  const uint8_t *buf, int oob_required,
+					  int page)
+{
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+	chip->write_buf(mtd, buf, mtd->writesize);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* Set up ECC autogeneration */
+	cafe->ctl2 |= (1<<30);
+
+	return 0;
+}
+
+static int cafe_nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+	return 0;
+}
+
+/* F_2[X]/(X**6+X+1)  */
+static unsigned short gf64_mul(u8 a, u8 b)
+{
+	u8 c;
+	unsigned int i;
+
+	c = 0;
+	for (i = 0; i < 6; i++) {
+		if (a & 1)
+			c ^= b;
+		a >>= 1;
+		b <<= 1;
+		if ((b & 0x40) != 0)
+			b ^= 0x43;
+	}
+
+	return c;
+}
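+/*
+ * Sanity check: gf64_mul(1, x) == x for any field element x, since the loop
+ * only accumulates b on the set bits of a and 1 is the multiplicative
+ * identity of GF(64).
+ */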
+
+/* F_64[X]/(X**2+X+A**-1) with A the generator of F_64[X]  */
+static u16 gf4096_mul(u16 a, u16 b)
+{
+	u8 ah, al, bh, bl, ch, cl;
+
+	ah = a >> 6;
+	al = a & 0x3f;
+	bh = b >> 6;
+	bl = b & 0x3f;
+
+	ch = gf64_mul(ah ^ al, bh ^ bl) ^ gf64_mul(al, bl);
+	cl = gf64_mul(gf64_mul(ah, bh), 0x21) ^ gf64_mul(al, bl);
+
+	return (ch << 6) ^ cl;
+}
+
+static int cafe_mul(int x)
+{
+	if (x == 0)
+		return 1;
+	return gf4096_mul(x, 0xe01);
+}
+
+static int cafe_nand_probe(struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
+{
+	struct mtd_info *mtd;
+	struct cafe_priv *cafe;
+	uint32_t ctrl;
+	int err = 0;
+	int old_dma;
+	struct nand_buffers *nbuf;
+
+	/* Very old versions shared the same PCI ident for all three
+	   functions on the chip. Verify the class too... */
+	if ((pdev->class >> 8) != PCI_CLASS_MEMORY_FLASH)
+		return -ENODEV;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	pci_set_master(pdev);
+
+	cafe = kzalloc(sizeof(*cafe), GFP_KERNEL);
+	if (!cafe)
+		return  -ENOMEM;
+
+	mtd = nand_to_mtd(&cafe->nand);
+	mtd->dev.parent = &pdev->dev;
+	nand_set_controller_data(&cafe->nand, cafe);
+
+	cafe->pdev = pdev;
+	cafe->mmio = pci_iomap(pdev, 0, 0);
+	if (!cafe->mmio) {
+		dev_warn(&pdev->dev, "failed to iomap\n");
+		err = -ENOMEM;
+		goto out_free_mtd;
+	}
+
+	cafe->rs = init_rs_non_canonical(12, &cafe_mul, 0, 1, 8);
+	if (!cafe->rs) {
+		err = -ENOMEM;
+		goto out_ior;
+	}
+
+	cafe->nand.cmdfunc = cafe_nand_cmdfunc;
+	cafe->nand.dev_ready = cafe_device_ready;
+	cafe->nand.read_byte = cafe_read_byte;
+	cafe->nand.read_buf = cafe_read_buf;
+	cafe->nand.write_buf = cafe_write_buf;
+	cafe->nand.select_chip = cafe_select_chip;
+
+	cafe->nand.chip_delay = 0;
+
+	/* Enable the following for a flash based bad block table */
+	cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
+	cafe->nand.options = NAND_OWN_BUFFERS;
+
+	if (skipbbt) {
+		cafe->nand.options |= NAND_SKIP_BBTSCAN;
+		cafe->nand.block_bad = cafe_nand_block_bad;
+	}
+
+	if (numtimings && numtimings != 3) {
+		dev_warn(&cafe->pdev->dev, "%d timing register values ignored; precisely three are required\n", numtimings);
+	}
+
+	if (numtimings == 3) {
+		cafe_dev_dbg(&cafe->pdev->dev, "Using provided timings (%08x %08x %08x)\n",
+			     timing[0], timing[1], timing[2]);
+	} else {
+		timing[0] = cafe_readl(cafe, NAND_TIMING1);
+		timing[1] = cafe_readl(cafe, NAND_TIMING2);
+		timing[2] = cafe_readl(cafe, NAND_TIMING3);
+
+		if (timing[0] | timing[1] | timing[2]) {
+			cafe_dev_dbg(&cafe->pdev->dev, "Timing registers already set (%08x %08x %08x)\n",
+				     timing[0], timing[1], timing[2]);
+		} else {
+			dev_warn(&cafe->pdev->dev, "Timing registers unset; using most conservative defaults\n");
+			timing[0] = timing[1] = timing[2] = 0xffffffff;
+		}
+	}
+
+	/* Start off by resetting the NAND controller completely */
+	cafe_writel(cafe, 1, NAND_RESET);
+	cafe_writel(cafe, 0, NAND_RESET);
+
+	cafe_writel(cafe, timing[0], NAND_TIMING1);
+	cafe_writel(cafe, timing[1], NAND_TIMING2);
+	cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+	cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+	err = request_irq(pdev->irq, &cafe_nand_interrupt, IRQF_SHARED,
+			  "CAFE NAND", mtd);
+	if (err) {
+		dev_warn(&pdev->dev, "Could not register IRQ %d\n", pdev->irq);
+		goto out_ior;
+	}
+
+	/* Disable master reset, enable NAND clock */
+	ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+	ctrl &= 0xffffeff0;
+	ctrl |= 0x00007000;
+	cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+	cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+	cafe_writel(cafe, 0, NAND_DMA_CTRL);
+
+	cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+	cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+	/* Enable NAND IRQ in global IRQ mask register */
+	cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+	cafe_dev_dbg(&cafe->pdev->dev, "Control %x, IRQ mask %x\n",
+		cafe_readl(cafe, GLOBAL_CTRL),
+		cafe_readl(cafe, GLOBAL_IRQ_MASK));
+
+	/* Do not use the DMA for the nand_scan_ident() */
+	old_dma = usedma;
+	usedma = 0;
+
+	/* Scan to find existence of the device */
+	if (nand_scan_ident(mtd, 2, NULL)) {
+		err = -ENXIO;
+		goto out_irq;
+	}
+
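+	/*
+	 * Layout of the coherent DMA area: a 2112-byte (2048 + 64) bounce
+	 * buffer used by the controller, followed by the nand_buffers
+	 * struct and its data buffer, which are set up just below.
+	 */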
+	cafe->dmabuf = dma_alloc_coherent(&cafe->pdev->dev,
+				2112 + sizeof(struct nand_buffers) +
+				mtd->writesize + mtd->oobsize,
+				&cafe->dmaaddr, GFP_KERNEL);
+	if (!cafe->dmabuf) {
+		err = -ENOMEM;
+		goto out_irq;
+	}
+	cafe->nand.buffers = nbuf = (void *)cafe->dmabuf + 2112;
+
+	/* Set up DMA address */
+	cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+	if (sizeof(cafe->dmaaddr) > 4)
+		/* Shift in two parts to shut the compiler up */
+		cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+	else
+		cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+	cafe_dev_dbg(&cafe->pdev->dev, "Set DMA address to %x (virt %p)\n",
+		cafe_readl(cafe, NAND_DMA_ADDR0), cafe->dmabuf);
+
+	/* this driver does not need the @ecccalc and @ecccode */
+	nbuf->ecccalc = NULL;
+	nbuf->ecccode = NULL;
+	nbuf->databuf = (uint8_t *)(nbuf + 1);
+
+	/* Restore the DMA flag */
+	usedma = old_dma;
+
+	cafe->ctl2 = 1<<27; /* Reed-Solomon ECC */
+	if (mtd->writesize == 2048)
+		cafe->ctl2 |= 1<<29; /* 2KiB page size */
+
+	/* Set up ECC according to the type of chip we found */
+	mtd_set_ooblayout(mtd, &cafe_ooblayout_ops);
+	if (mtd->writesize == 2048) {
+		cafe->nand.bbt_td = &cafe_bbt_main_descr_2048;
+		cafe->nand.bbt_md = &cafe_bbt_mirror_descr_2048;
+	} else if (mtd->writesize == 512) {
+		cafe->nand.bbt_td = &cafe_bbt_main_descr_512;
+		cafe->nand.bbt_md = &cafe_bbt_mirror_descr_512;
+	} else {
+		printk(KERN_WARNING "Unexpected NAND flash writesize %d. Aborting\n",
+		       mtd->writesize);
+		goto out_free_dma;
+	}
+	cafe->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+	cafe->nand.ecc.size = mtd->writesize;
+	cafe->nand.ecc.bytes = 14;
+	cafe->nand.ecc.strength = 4;
+	cafe->nand.ecc.hwctl  = (void *)cafe_nand_bug;
+	cafe->nand.ecc.calculate = (void *)cafe_nand_bug;
+	cafe->nand.ecc.correct  = (void *)cafe_nand_bug;
+	cafe->nand.ecc.write_page = cafe_nand_write_page_lowlevel;
+	cafe->nand.ecc.write_oob = cafe_nand_write_oob;
+	cafe->nand.ecc.read_page = cafe_nand_read_page;
+	cafe->nand.ecc.read_oob = cafe_nand_read_oob;
+
+	err = nand_scan_tail(mtd);
+	if (err)
+		goto out_free_dma;
+
+	pci_set_drvdata(pdev, mtd);
+
+	mtd->name = "cafe_nand";
+	mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+
+	goto out;
+
+ out_free_dma:
+	dma_free_coherent(&cafe->pdev->dev,
+			2112 + sizeof(struct nand_buffers) +
+			mtd->writesize + mtd->oobsize,
+			cafe->dmabuf, cafe->dmaaddr);
+ out_irq:
+	/* Disable NAND IRQ in global IRQ mask register */
+	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+	free_irq(pdev->irq, mtd);
+ out_ior:
+	pci_iounmap(pdev, cafe->mmio);
+ out_free_mtd:
+	kfree(cafe);
+ out:
+	return err;
+}
+
+static void cafe_nand_remove(struct pci_dev *pdev)
+{
+	struct mtd_info *mtd = pci_get_drvdata(pdev);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+	/* Disable NAND IRQ in global IRQ mask register */
+	cafe_writel(cafe, ~1 & cafe_readl(cafe, GLOBAL_IRQ_MASK), GLOBAL_IRQ_MASK);
+	free_irq(pdev->irq, mtd);
+	nand_release(mtd);
+	free_rs(cafe->rs);
+	pci_iounmap(pdev, cafe->mmio);
+	dma_free_coherent(&cafe->pdev->dev,
+			2112 + sizeof(struct nand_buffers) +
+			mtd->writesize + mtd->oobsize,
+			cafe->dmabuf, cafe->dmaaddr);
+	kfree(cafe);
+}
+
+static const struct pci_device_id cafe_nand_tbl[] = {
+	{ PCI_VENDOR_ID_MARVELL, PCI_DEVICE_ID_MARVELL_88ALP01_NAND,
+	  PCI_ANY_ID, PCI_ANY_ID },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, cafe_nand_tbl);
+
+static int cafe_nand_resume(struct pci_dev *pdev)
+{
+	uint32_t ctrl;
+	struct mtd_info *mtd = pci_get_drvdata(pdev);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct cafe_priv *cafe = nand_get_controller_data(chip);
+
+	/* Start off by resetting the NAND controller completely */
+	cafe_writel(cafe, 1, NAND_RESET);
+	cafe_writel(cafe, 0, NAND_RESET);
+	cafe_writel(cafe, 0xffffffff, NAND_IRQ_MASK);
+
+	/* Restore timing configuration */
+	cafe_writel(cafe, timing[0], NAND_TIMING1);
+	cafe_writel(cafe, timing[1], NAND_TIMING2);
+	cafe_writel(cafe, timing[2], NAND_TIMING3);
+
+	/* Disable master reset, enable NAND clock */
+	ctrl = cafe_readl(cafe, GLOBAL_CTRL);
+	ctrl &= 0xffffeff0;
+	ctrl |= 0x00007000;
+	cafe_writel(cafe, ctrl | 0x05, GLOBAL_CTRL);
+	cafe_writel(cafe, ctrl | 0x0a, GLOBAL_CTRL);
+	cafe_writel(cafe, 0, NAND_DMA_CTRL);
+	cafe_writel(cafe, 0x7006, GLOBAL_CTRL);
+	cafe_writel(cafe, 0x700a, GLOBAL_CTRL);
+
+	/* Set up DMA address */
+	cafe_writel(cafe, cafe->dmaaddr & 0xffffffff, NAND_DMA_ADDR0);
+	if (sizeof(cafe->dmaaddr) > 4)
+		/* Shift in two parts to shut the compiler up */
+		cafe_writel(cafe, (cafe->dmaaddr >> 16) >> 16, NAND_DMA_ADDR1);
+	else
+		cafe_writel(cafe, 0, NAND_DMA_ADDR1);
+
+	/* Enable NAND IRQ in global IRQ mask register */
+	cafe_writel(cafe, 0x80000007, GLOBAL_IRQ_MASK);
+	return 0;
+}
+
+static struct pci_driver cafe_nand_pci_driver = {
+	.name = "CAFÉ NAND",
+	.id_table = cafe_nand_tbl,
+	.probe = cafe_nand_probe,
+	.remove = cafe_nand_remove,
+	.resume = cafe_nand_resume,
+};
+
+module_pci_driver(cafe_nand_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND flash driver for OLPC CAFÉ chip");
diff --git a/drivers/mtd/nand/rawnand/cmx270_nand.c b/drivers/mtd/nand/rawnand/cmx270_nand.c
new file mode 100644
index 000000000000..2efe6a56557f
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/cmx270_nand.c
@@ -0,0 +1,246 @@ 
+/*
+ *  linux/drivers/mtd/nand/cmx270-nand.c
+ *
+ *  Copyright (C) 2006 Compulab, Ltd.
+ *  Mike Rapoport <mike@compulab.co.il>
+ *
+ *  Derived from drivers/mtd/nand/h1910.c
+ *       Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
+ *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Overview:
+ *   This is a device driver for the NAND flash device found on the
+ *   CM-X270 board.
+ */
+
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/mach-types.h>
+
+#include <mach/pxa2xx-regs.h>
+
+#define GPIO_NAND_CS	(11)
+#define GPIO_NAND_RB	(89)
+
+/* MTD structure for CM-X270 board */
+static struct mtd_info *cmx270_nand_mtd;
+
+/* remapped IO address of the device */
+static void __iomem *cmx270_nand_io;
+
+/*
+ * Define static partitions for flash device
+ */
+static struct mtd_partition partition_info[] = {
+	[0] = {
+		.name	= "cmx270-0",
+		.offset	= 0,
+		.size	= MTDPART_SIZ_FULL
+	}
+};
+#define NUM_PARTITIONS (ARRAY_SIZE(partition_info))
+
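+/*
+ * The NAND data lines sit on bits 23:16 of the 32-bit bus, hence the
+ * << 16 on writes and >> 16 on reads in the accessors below.
+ */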
+static u_char cmx270_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	return (readl(this->IO_ADDR_R) >> 16);
+}
+
+static void cmx270_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	for (i=0; i<len; i++)
+		writel((*buf++ << 16), this->IO_ADDR_W);
+}
+
+static void cmx270_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	for (i=0; i<len; i++)
+		*buf++ = readl(this->IO_ADDR_R) >> 16;
+}
+
+static inline void nand_cs_on(void)
+{
+	gpio_set_value(GPIO_NAND_CS, 0);
+}
+
+static void nand_cs_off(void)
+{
+	dsb();
+
+	gpio_set_value(GPIO_NAND_CS, 1);
+}
+
+/*
+ *	hardware specific access to control-lines
+ */
+static void cmx270_hwcontrol(struct mtd_info *mtd, int dat,
+			     unsigned int ctrl)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	unsigned int nandaddr = (unsigned int)this->IO_ADDR_W;
+
+	dsb();
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		if ( ctrl & NAND_ALE )
+			nandaddr |=  (1 << 3);
+		else
+			nandaddr &= ~(1 << 3);
+		if ( ctrl & NAND_CLE )
+			nandaddr |=  (1 << 2);
+		else
+			nandaddr &= ~(1 << 2);
+		if ( ctrl & NAND_NCE )
+			nand_cs_on();
+		else
+			nand_cs_off();
+	}
+
+	dsb();
+	this->IO_ADDR_W = (void __iomem*)nandaddr;
+	if (dat != NAND_CMD_NONE)
+		writel((dat << 16), this->IO_ADDR_W);
+
+	dsb();
+}
+
+/*
+ *	read device ready pin
+ */
+static int cmx270_device_ready(struct mtd_info *mtd)
+{
+	dsb();
+
+	return (gpio_get_value(GPIO_NAND_RB));
+}
+
+/*
+ * Main initialization routine
+ */
+static int __init cmx270_init(void)
+{
+	struct nand_chip *this;
+	int ret;
+
+	if (!(machine_is_armcore() && cpu_is_pxa27x()))
+		return -ENODEV;
+
+	ret = gpio_request(GPIO_NAND_CS, "NAND CS");
+	if (ret) {
+		pr_warning("CM-X270: failed to request NAND CS gpio\n");
+		return ret;
+	}
+
+	gpio_direction_output(GPIO_NAND_CS, 1);
+
+	ret = gpio_request(GPIO_NAND_RB, "NAND R/B");
+	if (ret) {
+		pr_warning("CM-X270: failed to request NAND R/B gpio\n");
+		goto err_gpio_request;
+	}
+
+	gpio_direction_input(GPIO_NAND_RB);
+
+	/* Allocate memory for MTD device structure and private data */
+	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+	if (!this) {
+		ret = -ENOMEM;
+		goto err_kzalloc;
+	}
+
+	cmx270_nand_io = ioremap(PXA_CS1_PHYS, 12);
+	if (!cmx270_nand_io) {
+		pr_debug("Unable to ioremap NAND device\n");
+		ret = -EINVAL;
+		goto err_ioremap;
+	}
+
+	cmx270_nand_mtd = nand_to_mtd(this);
+
+	/* Link the private data with the MTD structure */
+	cmx270_nand_mtd->owner = THIS_MODULE;
+
+	/* insert callbacks */
+	this->IO_ADDR_R = cmx270_nand_io;
+	this->IO_ADDR_W = cmx270_nand_io;
+	this->cmd_ctrl = cmx270_hwcontrol;
+	this->dev_ready = cmx270_device_ready;
+
+	/* 20 us command delay time */
+	this->chip_delay = 20;
+	this->ecc.mode = NAND_ECC_SOFT;
+	this->ecc.algo = NAND_ECC_HAMMING;
+
+	/* read/write functions */
+	this->read_byte = cmx270_read_byte;
+	this->read_buf = cmx270_read_buf;
+	this->write_buf = cmx270_write_buf;
+
+	/* Scan to find existence of the device */
+	if (nand_scan (cmx270_nand_mtd, 1)) {
+		pr_notice("No NAND device\n");
+		ret = -ENXIO;
+		goto err_scan;
+	}
+
+	/* Register the partitions */
+	ret = mtd_device_parse_register(cmx270_nand_mtd, NULL, NULL,
+					partition_info, NUM_PARTITIONS);
+	if (ret)
+		goto err_scan;
+
+	/* Return happy */
+	return 0;
+
+err_scan:
+	iounmap(cmx270_nand_io);
+err_ioremap:
+	kfree(this);
+err_kzalloc:
+	gpio_free(GPIO_NAND_RB);
+err_gpio_request:
+	gpio_free(GPIO_NAND_CS);
+
+	return ret;
+
+}
+module_init(cmx270_init);
+
+/*
+ * Clean up routine
+ */
+static void __exit cmx270_cleanup(void)
+{
+	/* Release resources, unregister device */
+	nand_release(cmx270_nand_mtd);
+
+	gpio_free(GPIO_NAND_RB);
+	gpio_free(GPIO_NAND_CS);
+
+	iounmap(cmx270_nand_io);
+
+	kfree(mtd_to_nand(cmx270_nand_mtd));
+}
+module_exit(cmx270_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
+MODULE_DESCRIPTION("NAND flash driver for Compulab CM-X270 Module");
diff --git a/drivers/mtd/nand/rawnand/cs553x_nand.c b/drivers/mtd/nand/rawnand/cs553x_nand.c
new file mode 100644
index 000000000000..8fafb4b4488d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/cs553x_nand.c
@@ -0,0 +1,358 @@ 
+/*
+ * drivers/mtd/nand/cs553x_nand.c
+ *
+ * (C) 2005, 2006 Red Hat Inc.
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ *	   Tom Sylla <tom.sylla@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  Overview:
+ *   This is a device driver for the NAND flash controller found on
+ *   the AMD CS5535/CS5536 companion chipsets for the Geode processor.
+ *   mtd-id for command line partitioning is cs553x_nand_cs[0-3]
+ *   where 0-3 reflects the chip select for NAND.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/msr.h>
+#include <asm/io.h>
+
+#define NR_CS553X_CONTROLLERS	4
+
+#define MSR_DIVIL_GLD_CAP	0x51400000	/* DIVIL capabilities */
+#define CAP_CS5535		0x2df000ULL
+#define CAP_CS5536		0x5df500ULL
+
+/* NAND Timing MSRs */
+#define MSR_NANDF_DATA		0x5140001b	/* NAND Flash Data Timing MSR */
+#define MSR_NANDF_CTL		0x5140001c	/* NAND Flash Control Timing */
+#define MSR_NANDF_RSVD		0x5140001d	/* Reserved */
+
+/* NAND BAR MSRs */
+#define MSR_DIVIL_LBAR_FLSH0	0x51400010	/* Flash Chip Select 0 */
+#define MSR_DIVIL_LBAR_FLSH1	0x51400011	/* Flash Chip Select 1 */
+#define MSR_DIVIL_LBAR_FLSH2	0x51400012	/* Flash Chip Select 2 */
+#define MSR_DIVIL_LBAR_FLSH3	0x51400013	/* Flash Chip Select 3 */
+	/* Each made up of... */
+#define FLSH_LBAR_EN		(1ULL<<32)
+#define FLSH_NOR_NAND		(1ULL<<33)	/* 1 for NAND */
+#define FLSH_MEM_IO		(1ULL<<34)	/* 1 for MMIO */
+	/* I/O BARs have BASE_ADDR in bits 15:4, IO_MASK in 47:36 */
+	/* MMIO BARs have BASE_ADDR in bits 31:12, MEM_MASK in 63:44 */
+
+/* Pin function selection MSR (IDE vs. flash on the IDE pins) */
+#define MSR_DIVIL_BALL_OPTS	0x51400015
+#define PIN_OPT_IDE		(1<<0)	/* 0 for flash, 1 for IDE */
+
+/* Registers within the NAND flash controller BAR -- memory mapped */
+#define MM_NAND_DATA		0x00	/* 0 to 0x7ff, in fact */
+#define MM_NAND_CTL		0x800	/* Any even address 0x800-0x80e */
+#define MM_NAND_IO		0x801	/* Any odd address 0x801-0x80f */
+#define MM_NAND_STS		0x810
+#define MM_NAND_ECC_LSB		0x811
+#define MM_NAND_ECC_MSB		0x812
+#define MM_NAND_ECC_COL		0x813
+#define MM_NAND_LAC		0x814
+#define MM_NAND_ECC_CTL		0x815
+
+/* Registers within the NAND flash controller BAR -- I/O mapped */
+#define IO_NAND_DATA		0x00	/* 0 to 3, in fact */
+#define IO_NAND_CTL		0x04
+#define IO_NAND_IO		0x05
+#define IO_NAND_STS		0x06
+#define IO_NAND_ECC_CTL		0x08
+#define IO_NAND_ECC_LSB		0x09
+#define IO_NAND_ECC_MSB		0x0a
+#define IO_NAND_ECC_COL		0x0b
+#define IO_NAND_LAC		0x0c
+
+#define CS_NAND_CTL_DIST_EN	(1<<4)	/* Enable NAND Distract interrupt */
+#define CS_NAND_CTL_RDY_INT_MASK	(1<<3)	/* Enable RDY/BUSY# interrupt */
+#define CS_NAND_CTL_ALE		(1<<2)
+#define CS_NAND_CTL_CLE		(1<<1)
+#define CS_NAND_CTL_CE		(1<<0)	/* Keep low; 1 to reset */
+
+#define CS_NAND_STS_FLASH_RDY	(1<<3)
+#define CS_NAND_CTLR_BUSY	(1<<2)
+#define CS_NAND_CMD_COMP	(1<<1)
+#define CS_NAND_DIST_ST		(1<<0)
+
+#define CS_NAND_ECC_PARITY	(1<<2)
+#define CS_NAND_ECC_CLRECC	(1<<1)
+#define CS_NAND_ECC_ENECC	(1<<0)
+
+static void cs553x_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	while (unlikely(len > 0x800)) {
+		memcpy_fromio(buf, this->IO_ADDR_R, 0x800);
+		buf += 0x800;
+		len -= 0x800;
+	}
+	memcpy_fromio(buf, this->IO_ADDR_R, len);
+}
+
+static void cs553x_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	while (unlikely(len > 0x800)) {
+		memcpy_toio(this->IO_ADDR_R, buf, 0x800);
+		buf += 0x800;
+		len -= 0x800;
+	}
+	memcpy_toio(this->IO_ADDR_R, buf, len);
+}
+
+static unsigned char cs553x_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	return readb(this->IO_ADDR_R);
+}
+
+static void cs553x_write_byte(struct mtd_info *mtd, u_char byte)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int i = 100000;
+
+	while (i && readb(this->IO_ADDR_R + MM_NAND_STS) & CS_NAND_CTLR_BUSY) {
+		udelay(1);
+		i--;
+	}
+	writeb(byte, this->IO_ADDR_W + 0x801);
+}
+
+static void cs553x_hwcontrol(struct mtd_info *mtd, int cmd,
+			     unsigned int ctrl)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	void __iomem *mmio_base = this->IO_ADDR_R;
+	if (ctrl & NAND_CTRL_CHANGE) {
+		unsigned char ctl = (ctrl & ~NAND_CTRL_CHANGE ) ^ 0x01;
+		writeb(ctl, mmio_base + MM_NAND_CTL);
+	}
+	if (cmd != NAND_CMD_NONE)
+		cs553x_write_byte(mtd, cmd);
+}
+
+static int cs553x_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	void __iomem *mmio_base = this->IO_ADDR_R;
+	unsigned char foo = readb(mmio_base + MM_NAND_STS);
+
+	return (foo & CS_NAND_STS_FLASH_RDY) && !(foo & CS_NAND_CTLR_BUSY);
+}
+
+static void cs_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	void __iomem *mmio_base = this->IO_ADDR_R;
+
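+	/*
+	 * 0x07 = CS_NAND_ECC_PARITY | CS_NAND_ECC_CLRECC | CS_NAND_ECC_ENECC:
+	 * clear the previous ECC result and enable ECC generation.
+	 */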
+	writeb(0x07, mmio_base + MM_NAND_ECC_CTL);
+}
+
+static int cs_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code)
+{
+	uint32_t ecc;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	void __iomem *mmio_base = this->IO_ADDR_R;
+
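+	/*
+	 * A 32-bit read at MM_NAND_STS (0x810) also returns the ECC_LSB,
+	 * ECC_MSB and ECC_COL registers (0x811-0x813) in its upper bytes.
+	 */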
+	ecc = readl(mmio_base + MM_NAND_STS);
+
+	ecc_code[1] = ecc >> 8;
+	ecc_code[0] = ecc >> 16;
+	ecc_code[2] = ecc >> 24;
+	return 0;
+}
+
+static struct mtd_info *cs553x_mtd[4];
+
+static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
+{
+	int err = 0;
+	struct nand_chip *this;
+	struct mtd_info *new_mtd;
+
+	printk(KERN_NOTICE "Probing CS553x NAND controller CS#%d at %sIO 0x%08lx\n", cs, mmio?"MM":"P", adr);
+
+	if (!mmio) {
+		printk(KERN_NOTICE "PIO mode not yet implemented for CS553X NAND controller\n");
+		return -ENXIO;
+	}
+
+	/* Allocate memory for MTD device structure and private data */
+	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+	if (!this) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	new_mtd = nand_to_mtd(this);
+
+	/* Link the private data with the MTD structure */
+	new_mtd->owner = THIS_MODULE;
+
+	/* map physical address */
+	this->IO_ADDR_R = this->IO_ADDR_W = ioremap(adr, 4096);
+	if (!this->IO_ADDR_R) {
+		printk(KERN_WARNING "ioremap cs553x NAND @0x%08lx failed\n", adr);
+		err = -EIO;
+		goto out_mtd;
+	}
+
+	this->cmd_ctrl = cs553x_hwcontrol;
+	this->dev_ready = cs553x_device_ready;
+	this->read_byte = cs553x_read_byte;
+	this->read_buf = cs553x_read_buf;
+	this->write_buf = cs553x_write_buf;
+
+	this->chip_delay = 0;
+
+	this->ecc.mode = NAND_ECC_HW;
+	this->ecc.size = 256;
+	this->ecc.bytes = 3;
+	this->ecc.hwctl  = cs_enable_hwecc;
+	this->ecc.calculate = cs_calculate_ecc;
+	this->ecc.correct  = nand_correct_data;
+	this->ecc.strength = 1;
+
+	/* Enable the following for a flash based bad block table */
+	this->bbt_options = NAND_BBT_USE_FLASH;
+
+	new_mtd->name = kasprintf(GFP_KERNEL, "cs553x_nand_cs%d", cs);
+	if (!new_mtd->name) {
+		err = -ENOMEM;
+		goto out_ior;
+	}
+
+	/* Scan to find existence of the device */
+	if (nand_scan(new_mtd, 1)) {
+		err = -ENXIO;
+		goto out_free;
+	}
+
+	cs553x_mtd[cs] = new_mtd;
+	goto out;
+
+out_free:
+	kfree(new_mtd->name);
+out_ior:
+	iounmap(this->IO_ADDR_R);
+out_mtd:
+	kfree(this);
+out:
+	return err;
+}
+
+static int is_geode(void)
+{
+	/* These are the CPUs which will have a CS553[56] companion chip */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+	    boot_cpu_data.x86 == 5 &&
+	    boot_cpu_data.x86_model == 10)
+		return 1; /* Geode LX */
+
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_NSC ||
+	     boot_cpu_data.x86_vendor == X86_VENDOR_CYRIX) &&
+	    boot_cpu_data.x86 == 5 &&
+	    boot_cpu_data.x86_model == 5)
+		return 1; /* Geode GX (née GX2) */
+
+	return 0;
+}
+
+static int __init cs553x_init(void)
+{
+	int err = -ENXIO;
+	int i;
+	uint64_t val;
+
+	/* If the CPU isn't a Geode GX or LX, abort */
+	if (!is_geode())
+		return -ENXIO;
+
+	/* If it doesn't have the CS553[56], abort */
+	rdmsrl(MSR_DIVIL_GLD_CAP, val);
+	val &= ~0xFFULL;
+	if (val != CAP_CS5535 && val != CAP_CS5536)
+		return -ENXIO;
+
+	/* If it doesn't have the NAND controller enabled, abort */
+	rdmsrl(MSR_DIVIL_BALL_OPTS, val);
+	if (val & PIN_OPT_IDE) {
+		printk(KERN_INFO "CS553x NAND controller: Flash I/O not enabled in MSR_DIVIL_BALL_OPTS.\n");
+		return -ENXIO;
+	}
+
+	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+		rdmsrl(MSR_DIVIL_LBAR_FLSH0 + i, val);
+
+		if ((val & (FLSH_LBAR_EN|FLSH_NOR_NAND)) == (FLSH_LBAR_EN|FLSH_NOR_NAND))
+			err = cs553x_init_one(i, !!(val & FLSH_MEM_IO), val & 0xFFFFFFFF);
+	}
+
+	/* Register all devices together here. This means we can easily hack it to
+	   do mtdconcat etc. if we want to. */
+	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+		if (cs553x_mtd[i]) {
+			/* If any devices registered, return success. Else the last error. */
+			mtd_device_parse_register(cs553x_mtd[i], NULL, NULL,
+						  NULL, 0);
+			err = 0;
+		}
+	}
+
+	return err;
+}
+
+module_init(cs553x_init);
+
+static void __exit cs553x_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < NR_CS553X_CONTROLLERS; i++) {
+		struct mtd_info *mtd = cs553x_mtd[i];
+		struct nand_chip *this;
+		void __iomem *mmio_base;
+
+		if (!mtd)
+			continue;
+
+		this = mtd_to_nand(mtd);
+		mmio_base = this->IO_ADDR_R;
+
+		/* Release resources, unregister device */
+		nand_release(mtd);
+		kfree(mtd->name);
+		cs553x_mtd[i] = NULL;
+
+		/* unmap physical address */
+		iounmap(mmio_base);
+
+		/* Free the MTD device structure */
+		kfree(this);
+	}
+}
+
+module_exit(cs553x_cleanup);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("NAND controller driver for AMD CS5535/CS5536 companion chip");
diff --git a/drivers/mtd/nand/rawnand/davinci_nand.c b/drivers/mtd/nand/rawnand/davinci_nand.c
new file mode 100644
index 000000000000..fcc533261c06
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/davinci_nand.c
@@ -0,0 +1,862 @@ 
+/*
+ * davinci_nand.c - NAND Flash Driver for DaVinci family chips
+ *
+ * Copyright © 2006 Texas Instruments.
+ *
+ * Port to 2.6.23 Copyright © 2008 by:
+ *   Sander Huijsen <Shuijsen@optelecom-nkf.com>
+ *   Troy Kisky <troy.kisky@boundarydevices.com>
+ *   Dirk Behme <Dirk.Behme@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+
+#include <linux/platform_data/mtd-davinci.h>
+#include <linux/platform_data/mtd-davinci-aemif.h>
+
+/*
+ * This is a device driver for the NAND flash controller found on the
+ * various DaVinci family chips.  It handles up to four SoC chipselects,
+ * and some flavors of secondary chipselect (e.g. based on A12) as used
+ * with multichip packages.
+ *
+ * The 1-bit ECC hardware is supported, as well as the newer 4-bit ECC
+ * available on chips like the DM355 and OMAP-L137 and needed with the
+ * more error-prone MLC NAND chips.
+ *
+ * This driver assumes EM_WAIT connects all the NAND devices' RDY/nBUSY
+ * outputs in a "wire-AND" configuration, with no per-chip signals.
+ */
+struct davinci_nand_info {
+	struct nand_chip	chip;
+
+	struct device		*dev;
+	struct clk		*clk;
+
+	bool			is_readmode;
+
+	void __iomem		*base;
+	void __iomem		*vaddr;
+
+	uint32_t		ioaddr;
+	uint32_t		current_cs;
+
+	uint32_t		mask_chipsel;
+	uint32_t		mask_ale;
+	uint32_t		mask_cle;
+
+	uint32_t		core_chipsel;
+
+	struct davinci_aemif_timing	*timing;
+};
+
+static DEFINE_SPINLOCK(davinci_nand_lock);
+static bool ecc4_busy;
+
+static inline struct davinci_nand_info *to_davinci_nand(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct davinci_nand_info, chip);
+}
+
+static inline unsigned int davinci_nand_readl(struct davinci_nand_info *info,
+		int offset)
+{
+	return __raw_readl(info->base + offset);
+}
+
+static inline void davinci_nand_writel(struct davinci_nand_info *info,
+		int offset, unsigned long value)
+{
+	__raw_writel(value, info->base + offset);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Access to hardware control lines:  ALE, CLE, secondary chipselect.
+ */
+
+static void nand_davinci_hwcontrol(struct mtd_info *mtd, int cmd,
+				   unsigned int ctrl)
+{
+	struct davinci_nand_info	*info = to_davinci_nand(mtd);
+	uint32_t			addr = info->current_cs;
+	struct nand_chip		*nand = mtd_to_nand(mtd);
+
+	/* Did the control lines change? */
+	if (ctrl & NAND_CTRL_CHANGE) {
+		if ((ctrl & NAND_CTRL_CLE) == NAND_CTRL_CLE)
+			addr |= info->mask_cle;
+		else if ((ctrl & NAND_CTRL_ALE) == NAND_CTRL_ALE)
+			addr |= info->mask_ale;
+
+		nand->IO_ADDR_W = (void __iomem __force *)addr;
+	}
+
+	if (cmd != NAND_CMD_NONE)
+		iowrite8(cmd, nand->IO_ADDR_W);
+}
+
+static void nand_davinci_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct davinci_nand_info	*info = to_davinci_nand(mtd);
+	uint32_t			addr = info->ioaddr;
+
+	/* maybe kick in a second chipselect */
+	if (chip > 0)
+		addr |= info->mask_chipsel;
+	info->current_cs = addr;
+
+	info->chip.IO_ADDR_W = (void __iomem __force *)addr;
+	info->chip.IO_ADDR_R = info->chip.IO_ADDR_W;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 1-bit hardware ECC ... context maintained for each core chipselect
+ */
+
+static inline uint32_t nand_davinci_readecc_1bit(struct mtd_info *mtd)
+{
+	struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+	return davinci_nand_readl(info, NANDF1ECC_OFFSET
+			+ 4 * info->core_chipsel);
+}
+
+static void nand_davinci_hwctl_1bit(struct mtd_info *mtd, int mode)
+{
+	struct davinci_nand_info *info;
+	uint32_t nandcfr;
+	unsigned long flags;
+
+	info = to_davinci_nand(mtd);
+
+	/* Reset ECC hardware */
+	nand_davinci_readecc_1bit(mtd);
+
+	spin_lock_irqsave(&davinci_nand_lock, flags);
+
+	/* Restart ECC hardware */
+	nandcfr = davinci_nand_readl(info, NANDFCR_OFFSET);
+	nandcfr |= BIT(8 + info->core_chipsel);
+	davinci_nand_writel(info, NANDFCR_OFFSET, nandcfr);
+
+	spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/*
+ * Read hardware ECC value and pack into three bytes
+ */
+static int nand_davinci_calculate_1bit(struct mtd_info *mtd,
+				      const u_char *dat, u_char *ecc_code)
+{
+	unsigned int ecc_val = nand_davinci_readecc_1bit(mtd);
+	unsigned int ecc24 = (ecc_val & 0x0fff) | ((ecc_val & 0x0fff0000) >> 4);
+
+	/* invert so that erased block ecc is correct */
+	ecc24 = ~ecc24;
+	ecc_code[0] = (u_char)(ecc24);
+	ecc_code[1] = (u_char)(ecc24 >> 8);
+	ecc_code[2] = (u_char)(ecc24 >> 16);
+
+	return 0;
+}
+
+static int nand_davinci_correct_1bit(struct mtd_info *mtd, u_char *dat,
+				     u_char *read_ecc, u_char *calc_ecc)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint32_t eccNand = read_ecc[0] | (read_ecc[1] << 8) |
+					  (read_ecc[2] << 16);
+	uint32_t eccCalc = calc_ecc[0] | (calc_ecc[1] << 8) |
+					  (calc_ecc[2] << 16);
+	uint32_t diff = eccCalc ^ eccNand;
+
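+	/*
+	 * For a correctable single-bit error the upper and lower 12 bits
+	 * of diff are exact complements; the upper half then encodes the
+	 * failing position: byte offset in bits 23:15, bit number in
+	 * bits 14:12.
+	 */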
+	if (diff) {
+		if ((((diff >> 12) ^ diff) & 0xfff) == 0xfff) {
+			/* Correctable error */
+			if ((diff >> (12 + 3)) < chip->ecc.size) {
+				dat[diff >> (12 + 3)] ^= BIT((diff >> 12) & 7);
+				return 1;
+			} else {
+				return -EBADMSG;
+			}
+		} else if (!(diff & (diff - 1))) {
+			/* Single bit ECC error in the ECC itself,
+			 * nothing to fix */
+			return 1;
+		} else {
+			/* Uncorrectable error */
+			return -EBADMSG;
+		}
+
+	}
+	return 0;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * 4-bit hardware ECC ... context maintained over entire AEMIF
+ *
+ * This is a syndrome engine, but we avoid NAND_ECC_HW_SYNDROME
+ * since that forces use of a problematic "infix OOB" layout.
+ * Among other things, it trashes manufacturer bad block markers.
+ * Also, and specific to this hardware, it ECC-protects the "prepad"
+ * in the OOB ... while having ECC protection for parts of OOB would
+ * seem useful, the current MTD stack sometimes wants to update the
+ * OOB without recomputing ECC.
+ */
+
+static void nand_davinci_hwctl_4bit(struct mtd_info *mtd, int mode)
+{
+	struct davinci_nand_info *info = to_davinci_nand(mtd);
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&davinci_nand_lock, flags);
+
+	/* Start 4-bit ECC calculation for read/write */
+	val = davinci_nand_readl(info, NANDFCR_OFFSET);
+	val &= ~(0x03 << 4);
+	val |= (info->core_chipsel << 4) | BIT(12);
+	davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+	info->is_readmode = (mode == NAND_ECC_READ);
+
+	spin_unlock_irqrestore(&davinci_nand_lock, flags);
+}
+
+/* Read raw ECC code after writing to NAND. */
+static void
+nand_davinci_readecc_4bit(struct davinci_nand_info *info, u32 code[4])
+{
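+	/*
+	 * Each 4BIT_ECC register packs two 10-bit values, in bits 9:0 and
+	 * 25:16; mask off the unused bits in between.
+	 */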
+	const u32 mask = 0x03ff03ff;
+
+	code[0] = davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET) & mask;
+	code[1] = davinci_nand_readl(info, NAND_4BIT_ECC2_OFFSET) & mask;
+	code[2] = davinci_nand_readl(info, NAND_4BIT_ECC3_OFFSET) & mask;
+	code[3] = davinci_nand_readl(info, NAND_4BIT_ECC4_OFFSET) & mask;
+}
+
+/* Terminate read ECC; or return ECC (as bytes) of data written to NAND. */
+static int nand_davinci_calculate_4bit(struct mtd_info *mtd,
+		const u_char *dat, u_char *ecc_code)
+{
+	struct davinci_nand_info *info = to_davinci_nand(mtd);
+	u32 raw_ecc[4], *p;
+	unsigned i;
+
+	/* After a read, terminate ECC calculation by a dummy read
+	 * of some 4-bit ECC register.  ECC covers everything that
+	 * was read; correct() just uses the hardware state, so
+	 * ecc_code is not needed.
+	 */
+	if (info->is_readmode) {
+		davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET);
+		return 0;
+	}
+
+	/* Pack eight raw 10-bit ecc values into ten bytes, making
+	 * two passes which each convert four values (in upper and
+	 * lower halves of two 32-bit words) into five bytes.  The
+	 * ROM boot loader uses this same packing scheme.
+	 */
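+	/*
+	 * Each pass consumes p[0] and p[1] (four 10-bit values, 40 bits)
+	 * and emits five bytes, least-significant bits first.
+	 */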
+	nand_davinci_readecc_4bit(info, raw_ecc);
+	for (i = 0, p = raw_ecc; i < 2; i++, p += 2) {
+		*ecc_code++ =   p[0]        & 0xff;
+		*ecc_code++ = ((p[0] >>  8) & 0x03) | ((p[0] >> 14) & 0xfc);
+		*ecc_code++ = ((p[0] >> 22) & 0x0f) | ((p[1] <<  4) & 0xf0);
+		*ecc_code++ = ((p[1] >>  4) & 0x3f) | ((p[1] >> 10) & 0xc0);
+		*ecc_code++ =  (p[1] >> 18) & 0xff;
+	}
+
+	return 0;
+}
+
+/* Correct up to 4 bits in data we just read, using state left in the
+ * hardware plus the ecc_code computed when it was first written.
+ */
+static int nand_davinci_correct_4bit(struct mtd_info *mtd,
+		u_char *data, u_char *ecc_code, u_char *null)
+{
+	int i;
+	struct davinci_nand_info *info = to_davinci_nand(mtd);
+	unsigned short ecc10[8];
+	unsigned short *ecc16;
+	u32 syndrome[4];
+	u32 ecc_state;
+	unsigned num_errors, corrected;
+	unsigned long timeo;
+
+	/* Unpack ten bytes into eight 10 bit values.  We know we're
+	 * little-endian, and use type punning for less shifting/masking.
+	 */
+	if (WARN_ON(0x01 & (unsigned) ecc_code))
+		return -EINVAL;
+	ecc16 = (unsigned short *)ecc_code;
+
+	ecc10[0] =  (ecc16[0] >>  0) & 0x3ff;
+	ecc10[1] = ((ecc16[0] >> 10) & 0x3f) | ((ecc16[1] << 6) & 0x3c0);
+	ecc10[2] =  (ecc16[1] >>  4) & 0x3ff;
+	ecc10[3] = ((ecc16[1] >> 14) & 0x3)  | ((ecc16[2] << 2) & 0x3fc);
+	ecc10[4] =  (ecc16[2] >>  8)         | ((ecc16[3] << 8) & 0x300);
+	ecc10[5] =  (ecc16[3] >>  2) & 0x3ff;
+	ecc10[6] = ((ecc16[3] >> 12) & 0xf)  | ((ecc16[4] << 4) & 0x3f0);
+	ecc10[7] =  (ecc16[4] >>  6) & 0x3ff;
+
+	/* Tell ECC controller about the expected ECC codes. */
+	for (i = 7; i >= 0; i--)
+		davinci_nand_writel(info, NAND_4BIT_ECC_LOAD_OFFSET, ecc10[i]);
+
+	/* Allow time for syndrome calculation ... then read it.
+	 * A syndrome of all zeroes means no errors were detected.
+	 */
+	davinci_nand_readl(info, NANDFSR_OFFSET);
+	nand_davinci_readecc_4bit(info, syndrome);
+	if (!(syndrome[0] | syndrome[1] | syndrome[2] | syndrome[3]))
+		return 0;
+
+	/*
+	 * Clear any previous address calculation by doing a dummy read of an
+	 * error address register.
+	 */
+	davinci_nand_readl(info, NAND_ERR_ADD1_OFFSET);
+
+	/* Start address calculation, and wait for it to complete.
+	 * We _could_ start reading more data while this is working,
+	 * to speed up the overall page read.
+	 */
+	davinci_nand_writel(info, NANDFCR_OFFSET,
+			davinci_nand_readl(info, NANDFCR_OFFSET) | BIT(13));
+
+	/*
+	 * ECC_STATE field reads 0x3 (Error correction complete) immediately
+	 * after setting the 4BITECC_ADD_CALC_START bit. So if you immediately
+	 * begin trying to poll for the state, you may fall right out of your
+	 * loop without any of the correction calculations having taken place.
+	 * The recommendation from the hardware team is to initially delay as
+	 * long as ECC_STATE reads less than 4. After that, ECC HW has entered
+	 * correction state.
+	 */
+	timeo = jiffies + usecs_to_jiffies(100);
+	do {
+		ecc_state = (davinci_nand_readl(info,
+				NANDFSR_OFFSET) >> 8) & 0x0f;
+		cpu_relax();
+	} while ((ecc_state < 4) && time_before(jiffies, timeo));
+
+	for (;;) {
+		u32	fsr = davinci_nand_readl(info, NANDFSR_OFFSET);
+
+		switch ((fsr >> 8) & 0x0f) {
+		case 0:		/* no error, should not happen */
+			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+			return 0;
+		case 1:		/* five or more errors detected */
+			davinci_nand_readl(info, NAND_ERR_ERRVAL1_OFFSET);
+			return -EBADMSG;
+		case 2:		/* error addresses computed */
+		case 3:
+			num_errors = 1 + ((fsr >> 16) & 0x03);
+			goto correct;
+		default:	/* still working on it */
+			cpu_relax();
+			continue;
+		}
+	}
+
+correct:
+	/* correct each error */
+	for (i = 0, corrected = 0; i < num_errors; i++) {
+		int error_address, error_value;
+
+		if (i > 1) {
+			error_address = davinci_nand_readl(info,
+						NAND_ERR_ADD2_OFFSET);
+			error_value = davinci_nand_readl(info,
+						NAND_ERR_ERRVAL2_OFFSET);
+		} else {
+			error_address = davinci_nand_readl(info,
+						NAND_ERR_ADD1_OFFSET);
+			error_value = davinci_nand_readl(info,
+						NAND_ERR_ERRVAL1_OFFSET);
+		}
+
+		if (i & 1) {
+			error_address >>= 16;
+			error_value >>= 16;
+		}
+		error_address &= 0x3ff;
+		error_address = (512 + 7) - error_address;
+
+		if (error_address < 512) {
+			data[error_address] ^= error_value;
+			corrected++;
+		}
+	}
+
+	return corrected;
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * NOTE:  NAND boot requires ALE == EM_A[1], CLE == EM_A[2], so that's
+ * how these chips are normally wired.  This translates to both 8 and 16
+ * bit busses using ALE == BIT(3) in byte addresses, and CLE == BIT(4).
+ *
+ * For now we assume that configuration, or any other one which ignores
+ * the two LSBs for NAND access ... so we can issue 32-bit reads/writes
+ * and have that transparently morphed into multiple NAND operations.
+ */
+static void nand_davinci_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+		ioread32_rep(chip->IO_ADDR_R, buf, len >> 2);
+	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+		ioread16_rep(chip->IO_ADDR_R, buf, len >> 1);
+	else
+		ioread8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+static void nand_davinci_write_buf(struct mtd_info *mtd,
+		const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if ((0x03 & ((unsigned)buf)) == 0 && (0x03 & len) == 0)
+		iowrite32_rep(chip->IO_ADDR_R, buf, len >> 2);
+	else if ((0x01 & ((unsigned)buf)) == 0 && (0x01 & len) == 0)
+		iowrite16_rep(chip->IO_ADDR_R, buf, len >> 1);
+	else
+		iowrite8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+/*
+ * Check hardware register for wait status. Returns 1 if device is ready,
+ * 0 if it is still busy.
+ */
+static int nand_davinci_dev_ready(struct mtd_info *mtd)
+{
+	struct davinci_nand_info *info = to_davinci_nand(mtd);
+
+	return davinci_nand_readl(info, NANDFSR_OFFSET) & BIT(0);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* An ECC layout for using 4-bit ECC with small-page flash, storing
+ * ten ECC bytes plus the manufacturer's bad block marker byte, and
+ * not overlapping the default BBT markers.
+ */
+static int hwecc4_ooblayout_small_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	if (section > 2)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 5;
+	} else if (section == 1) {
+		oobregion->offset = 6;
+		oobregion->length = 2;
+	} else {
+		oobregion->offset = 13;
+		oobregion->length = 3;
+	}
+
+	return 0;
+}
+
+static int hwecc4_ooblayout_small_free(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 8;
+		oobregion->length = 5;
+	} else {
+		oobregion->offset = 16;
+		oobregion->length = mtd->oobsize - 16;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops hwecc4_small_ooblayout_ops = {
+	.ecc = hwecc4_ooblayout_small_ecc,
+	.free = hwecc4_ooblayout_small_free,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_nand_of_match[] = {
+	{.compatible = "ti,davinci-nand", },
+	{.compatible = "ti,keystone-nand", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, davinci_nand_of_match);
+
+static struct davinci_nand_pdata
+	*nand_davinci_get_pdata(struct platform_device *pdev)
+{
+	if (!dev_get_platdata(&pdev->dev) && pdev->dev.of_node) {
+		struct davinci_nand_pdata *pdata;
+		const char *mode;
+		u32 prop;
+
+		pdata =  devm_kzalloc(&pdev->dev,
+				sizeof(struct davinci_nand_pdata),
+				GFP_KERNEL);
+		pdev->dev.platform_data = pdata;
+		if (!pdata)
+			return ERR_PTR(-ENOMEM);
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-chipselect", &prop))
+			pdev->id = prop;
+		else
+			return ERR_PTR(-EINVAL);
+
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-mask-ale", &prop))
+			pdata->mask_ale = prop;
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-mask-cle", &prop))
+			pdata->mask_cle = prop;
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-mask-chipsel", &prop))
+			pdata->mask_chipsel = prop;
+		if (!of_property_read_string(pdev->dev.of_node,
+			"ti,davinci-ecc-mode", &mode)) {
+			if (!strncmp("none", mode, 4))
+				pdata->ecc_mode = NAND_ECC_NONE;
+			if (!strncmp("soft", mode, 4))
+				pdata->ecc_mode = NAND_ECC_SOFT;
+			if (!strncmp("hw", mode, 2))
+				pdata->ecc_mode = NAND_ECC_HW;
+		}
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-ecc-bits", &prop))
+			pdata->ecc_bits = prop;
+
+		if (!of_property_read_u32(pdev->dev.of_node,
+			"ti,davinci-nand-buswidth", &prop) && prop == 16)
+			pdata->options |= NAND_BUSWIDTH_16;
+
+		if (of_property_read_bool(pdev->dev.of_node,
+			"ti,davinci-nand-use-bbt"))
+			pdata->bbt_options = NAND_BBT_USE_FLASH;
+
+		if (of_device_is_compatible(pdev->dev.of_node,
+					    "ti,keystone-nand")) {
+			pdata->options |= NAND_NO_SUBPAGE_WRITE;
+		}
+	}
+
+	return dev_get_platdata(&pdev->dev);
+}
+#else
+static struct davinci_nand_pdata
+	*nand_davinci_get_pdata(struct platform_device *pdev)
+{
+	return dev_get_platdata(&pdev->dev);
+}
+#endif
+
+static int nand_davinci_probe(struct platform_device *pdev)
+{
+	struct davinci_nand_pdata	*pdata;
+	struct davinci_nand_info	*info;
+	struct resource			*res1;
+	struct resource			*res2;
+	void __iomem			*vaddr;
+	void __iomem			*base;
+	int				ret;
+	uint32_t			val;
+	struct mtd_info			*mtd;
+
+	pdata = nand_davinci_get_pdata(pdev);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+
+	/* insist on board-specific configuration */
+	if (!pdata)
+		return -ENODEV;
+
+	/* which external chipselect will we be managing? */
+	if (pdev->id < 0 || pdev->id > 3)
+		return -ENODEV;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, info);
+
+	res1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res1 || !res2) {
+		dev_err(&pdev->dev, "resource missing\n");
+		return -EINVAL;
+	}
+
+	vaddr = devm_ioremap_resource(&pdev->dev, res1);
+	if (IS_ERR(vaddr))
+		return PTR_ERR(vaddr);
+
+	/*
+	 * This register range is used to set up NAND settings. When the TI
+	 * AEMIF driver is in use, the same memory address range has already
+	 * been requested by AEMIF, so we cannot request it twice; just ioremap.
+	 * The AEMIF and NAND drivers do not use the same registers in this range.
+	 */
+	base = devm_ioremap(&pdev->dev, res2->start, resource_size(res2));
+	if (!base) {
+		dev_err(&pdev->dev, "ioremap failed for resource %pR\n", res2);
+		return -EADDRNOTAVAIL;
+	}
+
+	info->dev		= &pdev->dev;
+	info->base		= base;
+	info->vaddr		= vaddr;
+
+	mtd			= nand_to_mtd(&info->chip);
+	mtd->dev.parent		= &pdev->dev;
+	nand_set_flash_node(&info->chip, pdev->dev.of_node);
+
+	info->chip.IO_ADDR_R	= vaddr;
+	info->chip.IO_ADDR_W	= vaddr;
+	info->chip.chip_delay	= 0;
+	info->chip.select_chip	= nand_davinci_select_chip;
+
+	/* options such as NAND_BBT_USE_FLASH */
+	info->chip.bbt_options	= pdata->bbt_options;
+	/* options such as 16-bit widths */
+	info->chip.options	= pdata->options;
+	info->chip.bbt_td	= pdata->bbt_td;
+	info->chip.bbt_md	= pdata->bbt_md;
+	info->timing		= pdata->timing;
+
+	info->ioaddr		= (uint32_t __force) vaddr;
+
+	info->current_cs	= info->ioaddr;
+	info->core_chipsel	= pdev->id;
+	info->mask_chipsel	= pdata->mask_chipsel;
+
+	/* use nandboot-capable ALE/CLE masks by default */
+	info->mask_ale		= pdata->mask_ale ? : MASK_ALE;
+	info->mask_cle		= pdata->mask_cle ? : MASK_CLE;
+
+	/* Set address of hardware control function */
+	info->chip.cmd_ctrl	= nand_davinci_hwcontrol;
+	info->chip.dev_ready	= nand_davinci_dev_ready;
+
+	/* Speed up buffer I/O */
+	info->chip.read_buf     = nand_davinci_read_buf;
+	info->chip.write_buf    = nand_davinci_write_buf;
+
+	/* Use board-specific ECC config */
+	info->chip.ecc.mode	= pdata->ecc_mode;
+
+	ret = -EINVAL;
+
+	info->clk = devm_clk_get(&pdev->dev, "aemif");
+	if (IS_ERR(info->clk)) {
+		ret = PTR_ERR(info->clk);
+		dev_dbg(&pdev->dev, "unable to get AEMIF clock, err %d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(info->clk);
+	if (ret < 0) {
+		dev_dbg(&pdev->dev, "unable to enable AEMIF clock, err %d\n",
+			ret);
+		goto err_clk_enable;
+	}
+
+	spin_lock_irq(&davinci_nand_lock);
+
+	/* put CSxNAND into NAND mode */
+	val = davinci_nand_readl(info, NANDFCR_OFFSET);
+	val |= BIT(info->core_chipsel);
+	davinci_nand_writel(info, NANDFCR_OFFSET, val);
+
+	spin_unlock_irq(&davinci_nand_lock);
+
+	/* Scan to find existence of the device(s) */
+	ret = nand_scan_ident(mtd, pdata->mask_chipsel ? 2 : 1, NULL);
+	if (ret < 0) {
+		dev_dbg(&pdev->dev, "no NAND chip(s) found\n");
+		goto err;
+	}
+
+	switch (info->chip.ecc.mode) {
+	case NAND_ECC_NONE:
+		pdata->ecc_bits = 0;
+		break;
+	case NAND_ECC_SOFT:
+		pdata->ecc_bits = 0;
+		/*
+		 * This driver expects Hamming based ECC when ecc_mode is set
+		 * to NAND_ECC_SOFT. Force ecc.algo to NAND_ECC_HAMMING to
+		 * avoid adding an extra ->ecc_algo field to
+		 * davinci_nand_pdata.
+		 */
+		info->chip.ecc.algo = NAND_ECC_HAMMING;
+		break;
+	case NAND_ECC_HW:
+		if (pdata->ecc_bits == 4) {
+			/* No sanity checks:  CPUs must support this,
+			 * and the chips may not use NAND_BUSWIDTH_16.
+			 */
+
+			/* No sharing 4-bit hardware between chipselects yet */
+			spin_lock_irq(&davinci_nand_lock);
+			if (ecc4_busy)
+				ret = -EBUSY;
+			else
+				ecc4_busy = true;
+			spin_unlock_irq(&davinci_nand_lock);
+
+			if (ret == -EBUSY)
+				return ret;
+
+			info->chip.ecc.calculate = nand_davinci_calculate_4bit;
+			info->chip.ecc.correct = nand_davinci_correct_4bit;
+			info->chip.ecc.hwctl = nand_davinci_hwctl_4bit;
+			info->chip.ecc.bytes = 10;
+			info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK;
+		} else {
+			info->chip.ecc.calculate = nand_davinci_calculate_1bit;
+			info->chip.ecc.correct = nand_davinci_correct_1bit;
+			info->chip.ecc.hwctl = nand_davinci_hwctl_1bit;
+			info->chip.ecc.bytes = 3;
+		}
+		info->chip.ecc.size = 512;
+		info->chip.ecc.strength = pdata->ecc_bits;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Update ECC layout if needed ... for 1-bit HW ECC, the default
+	 * is OK, but it allocates 6 bytes when only 3 are needed (for
+	 * each 512 bytes).  For the 4-bit HW ECC, that default is not
+	 * usable:  10 bytes are needed, not 6.
+	 */
+	if (pdata->ecc_bits == 4) {
+		int	chunks = mtd->writesize / 512;
+
+		if (!chunks || mtd->oobsize < 16) {
+			dev_dbg(&pdev->dev, "too small\n");
+			ret = -EINVAL;
+			goto err;
+		}
+
+		/* For small page chips, preserve the manufacturer's
+		 * badblock marking data ... and make sure a flash BBT
+		 * table marker fits in the free bytes.
+		 */
+		if (chunks == 1) {
+			mtd_set_ooblayout(mtd, &hwecc4_small_ooblayout_ops);
+		} else if (chunks == 4 || chunks == 8) {
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+			info->chip.ecc.mode = NAND_ECC_HW_OOB_FIRST;
+		} else {
+			ret = -EIO;
+			goto err;
+		}
+	}
+
+	ret = nand_scan_tail(mtd);
+	if (ret < 0)
+		goto err;
+
+	if (pdata->parts)
+		ret = mtd_device_parse_register(mtd, NULL, NULL,
+					pdata->parts, pdata->nr_parts);
+	else
+		ret = mtd_device_register(mtd, NULL, 0);
+	if (ret < 0)
+		goto err;
+
+	val = davinci_nand_readl(info, NRCSR_OFFSET);
+	dev_info(&pdev->dev, "controller rev. %d.%d\n",
+	       (val >> 8) & 0xff, val & 0xff);
+
+	return 0;
+
+err:
+	clk_disable_unprepare(info->clk);
+
+err_clk_enable:
+	spin_lock_irq(&davinci_nand_lock);
+	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+		ecc4_busy = false;
+	spin_unlock_irq(&davinci_nand_lock);
+	return ret;
+}
+
+static int nand_davinci_remove(struct platform_device *pdev)
+{
+	struct davinci_nand_info *info = platform_get_drvdata(pdev);
+
+	spin_lock_irq(&davinci_nand_lock);
+	if (info->chip.ecc.mode == NAND_ECC_HW_SYNDROME)
+		ecc4_busy = false;
+	spin_unlock_irq(&davinci_nand_lock);
+
+	nand_release(nand_to_mtd(&info->chip));
+
+	clk_disable_unprepare(info->clk);
+
+	return 0;
+}
+
+static struct platform_driver nand_davinci_driver = {
+	.probe		= nand_davinci_probe,
+	.remove		= nand_davinci_remove,
+	.driver		= {
+		.name	= "davinci_nand",
+		.of_match_table = of_match_ptr(davinci_nand_of_match),
+	},
+};
+MODULE_ALIAS("platform:davinci_nand");
+
+module_platform_driver(nand_davinci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_DESCRIPTION("Davinci NAND flash driver");
+
diff --git a/drivers/mtd/nand/rawnand/denali.c b/drivers/mtd/nand/rawnand/denali.c
new file mode 100644
index 000000000000..0476ae8776d9
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/denali.c
@@ -0,0 +1,1663 @@ 
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+
+#include "denali.h"
+
+MODULE_LICENSE("GPL");
+
+/*
+ * We define a module parameter that allows the user to override
+ * the hardware and decide what timing mode should be used.
+ */
+#define NAND_DEFAULT_TIMINGS	-1
+
+static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
+module_param(onfi_timing_mode, int, S_IRUGO);
+MODULE_PARM_DESC(onfi_timing_mode,
+	   "Overrides default ONFI setting. -1 indicates use default timings");
+
+#define DENALI_NAND_NAME    "denali-nand"
+
+/*
+ * We define a macro here that combines all interrupts this driver uses into
+ * a single constant value, for convenience.
+ */
+#define DENALI_IRQ_ALL	(INTR_STATUS__DMA_CMD_COMP | \
+			INTR_STATUS__ECC_TRANSACTION_DONE | \
+			INTR_STATUS__ECC_ERR | \
+			INTR_STATUS__PROGRAM_FAIL | \
+			INTR_STATUS__LOAD_COMP | \
+			INTR_STATUS__PROGRAM_COMP | \
+			INTR_STATUS__TIME_OUT | \
+			INTR_STATUS__ERASE_FAIL | \
+			INTR_STATUS__RST_COMP | \
+			INTR_STATUS__ERASE_COMP)
+
+/*
+ * indicates whether the internal value for the flash bank is valid
+ */
+#define CHIP_SELECT_INVALID	-1
+
+#define SUPPORT_8BITECC		1
+
+/*
+ * This macro divides two integers and rounds fractional values up
+ * to the nearest integer value.
+ */
+#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
+
+/*
+ * this helper allows us to convert from an MTD structure to our own
+ * device context (denali) structure.
+ */
+static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
+}
+
+/*
+ * These constants are defined by the driver to enable common driver
+ * configuration options.
+ */
+#define SPARE_ACCESS		0x41
+#define MAIN_ACCESS		0x42
+#define MAIN_SPARE_ACCESS	0x43
+#define PIPELINE_ACCESS		0x2000
+
+#define DENALI_READ	0
+#define DENALI_WRITE	0x100
+
+/* types of device accesses. We can issue commands and get status */
+#define COMMAND_CYCLE	0
+#define ADDR_CYCLE	1
+#define STATUS_CYCLE	2
+
+/*
+ * this is a helper macro that allows us to
+ * format the bank into the proper bits for the controller
+ */
+#define BANK(x) ((x) << 24)
+
+/* forward declarations */
+static void clear_interrupts(struct denali_nand_info *denali);
+static uint32_t wait_for_irq(struct denali_nand_info *denali,
+							uint32_t irq_mask);
+static void denali_irq_enable(struct denali_nand_info *denali,
+							uint32_t int_mask);
+static uint32_t read_interrupt_status(struct denali_nand_info *denali);
+
+/*
+ * Certain operations for the denali NAND controller use an indexed mode to
+ * read/write data. The operation is performed by writing the address value
+ * of the command to the device memory followed by the data. This function
+ * abstracts this common operation.
+ */
+static void index_addr(struct denali_nand_info *denali,
+				uint32_t address, uint32_t data)
+{
+	iowrite32(address, denali->flash_mem);
+	iowrite32(data, denali->flash_mem + 0x10);
+}
+
+/* Perform an indexed read of the device */
+static void index_addr_read_data(struct denali_nand_info *denali,
+				 uint32_t address, uint32_t *pdata)
+{
+	iowrite32(address, denali->flash_mem);
+	*pdata = ioread32(denali->flash_mem + 0x10);
+}
+
+/*
+ * We need to buffer some data for some of the NAND core routines.
+ * These helpers manage that buffering.
+ */
+static void reset_buf(struct denali_nand_info *denali)
+{
+	denali->buf.head = denali->buf.tail = 0;
+}
+
+static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
+{
+	denali->buf.buf[denali->buf.tail++] = byte;
+}
+
+/* reads the status of the device */
+static void read_status(struct denali_nand_info *denali)
+{
+	uint32_t cmd;
+
+	/* initialize the data buffer to store status */
+	reset_buf(denali);
+
+	cmd = ioread32(denali->flash_reg + WRITE_PROTECT);
+	if (cmd)
+		write_byte_to_buf(denali, NAND_STATUS_WP);
+	else
+		write_byte_to_buf(denali, 0);
+}
+
+/* resets a specific device connected to the core */
+static void reset_bank(struct denali_nand_info *denali)
+{
+	uint32_t irq_status;
+	uint32_t irq_mask = INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT;
+
+	clear_interrupts(denali);
+
+	iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET);
+
+	irq_status = wait_for_irq(denali, irq_mask);
+
+	if (irq_status & INTR_STATUS__TIME_OUT)
+		dev_err(denali->dev, "reset bank failed.\n");
+}
+
+/* Reset the flash controller */
+static uint16_t denali_nand_reset(struct denali_nand_info *denali)
+{
+	int i;
+
+	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+		__FILE__, __LINE__, __func__);
+
+	for (i = 0; i < denali->max_banks; i++)
+		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+		denali->flash_reg + INTR_STATUS(i));
+
+	for (i = 0; i < denali->max_banks; i++) {
+		iowrite32(1 << i, denali->flash_reg + DEVICE_RESET);
+		while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) &
+			(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT)))
+			cpu_relax();
+		if (ioread32(denali->flash_reg + INTR_STATUS(i)) &
+			INTR_STATUS__TIME_OUT)
+			dev_dbg(denali->dev,
+			"NAND Reset operation timed out on bank %d\n", i);
+	}
+
+	for (i = 0; i < denali->max_banks; i++)
+		iowrite32(INTR_STATUS__RST_COMP | INTR_STATUS__TIME_OUT,
+			  denali->flash_reg + INTR_STATUS(i));
+
+	return PASS;
+}
+
+/*
+ * this routine calculates the ONFI timing values for a given mode and
+ * programs the clocking register accordingly. The mode is determined by
+ * the get_onfi_nand_para routine.
+ */
+static void nand_onfi_timing_set(struct denali_nand_info *denali,
+								uint16_t mode)
+{
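+	/*
+	 * Per-mode timing parameters in nanoseconds, indexed by ONFI
+	 * timing mode 0-5 (values follow the ONFI timing-mode tables).
+	 */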
+	uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
+	uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
+	uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
+	uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
+	uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
+	uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
+	uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
+	uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
+	uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
+	uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
+	uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
+	uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
+
+	uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
+	uint16_t dv_window = 0;
+	uint16_t en_lo, en_hi;
+	uint16_t acc_clks;
+	uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
+
+	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+		__FILE__, __LINE__, __func__);
+
+	en_lo = CEIL_DIV(Trp[mode], CLK_X);
+	en_hi = CEIL_DIV(Treh[mode], CLK_X);
+#if ONFI_BLOOM_TIME
+	if ((en_hi * CLK_X) < (Treh[mode] + 2))
+		en_hi++;
+#endif
+
+	if ((en_lo + en_hi) * CLK_X < Trc[mode])
+		en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
+
+	if ((en_lo + en_hi) < CLK_MULTI)
+		en_lo += CLK_MULTI - en_lo - en_hi;
+
+	while (dv_window < 8) {
+		data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
+
+		data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
+
+		data_invalid = data_invalid_rhoh < data_invalid_rloh ?
+					data_invalid_rhoh : data_invalid_rloh;
+
+		dv_window = data_invalid - Trea[mode];
+
+		if (dv_window < 8)
+			en_lo++;
+	}
+
+	acc_clks = CEIL_DIV(Trea[mode], CLK_X);
+
+	while (acc_clks * CLK_X - Trea[mode] < 3)
+		acc_clks++;
+
+	if (data_invalid - acc_clks * CLK_X < 2)
+		dev_warn(denali->dev, "%s, Line %d: Warning!\n",
+			 __FILE__, __LINE__);
+
+	addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
+	re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
+	re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
+	we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
+	cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
+	if (cs_cnt == 0)
+		cs_cnt = 1;
+
+	if (Tcea[mode]) {
+		while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode])
+			cs_cnt++;
+	}
+
+#if MODE5_WORKAROUND
+	if (mode == 5)
+		acc_clks = 5;
+#endif
+
+	/* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
+	if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 &&
+		ioread32(denali->flash_reg + DEVICE_ID) == 0x88)
+		acc_clks = 6;
+
+	iowrite32(acc_clks, denali->flash_reg + ACC_CLKS);
+	iowrite32(re_2_we, denali->flash_reg + RE_2_WE);
+	iowrite32(re_2_re, denali->flash_reg + RE_2_RE);
+	iowrite32(we_2_re, denali->flash_reg + WE_2_RE);
+	iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
+	iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
+	iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
+	iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
+}
+
+/* queries the NAND device to see what ONFI modes it supports. */
+static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
+{
+	int i;
+
+	/*
+	 * We need not reset here because the driver has already
+	 * reset all the banks before.
+	 */
+	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+		ONFI_TIMING_MODE__VALUE))
+		return FAIL;
+
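+	/*
+	 * Pick the fastest timing mode advertised by the chip: e.g. a part
+	 * reporting support for modes 0-4 reads back 0x1f here, and the
+	 * loop below selects mode 4.
+	 */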
+	for (i = 5; i > 0; i--) {
+		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
+			(0x01 << i))
+			break;
+	}
+
+	nand_onfi_timing_set(denali, i);
+
+	/*
+	 * By now, all the ONFI devices we know of support the page cache
+	 * read/write feature, so the pipeline_rw_ahead feature could be
+	 * enabled here (currently left disabled):
+	 */
+	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
+	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE);  */
+
+	return PASS;
+}
+
+static void get_samsung_nand_para(struct denali_nand_info *denali,
+							uint8_t device_id)
+{
+	if (device_id == 0xd3) { /* Samsung K9WAG08U1A */
+		/* Set timing register values according to datasheet */
+		iowrite32(5, denali->flash_reg + ACC_CLKS);
+		iowrite32(20, denali->flash_reg + RE_2_WE);
+		iowrite32(12, denali->flash_reg + WE_2_RE);
+		iowrite32(14, denali->flash_reg + ADDR_2_DATA);
+		iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT);
+		iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT);
+		iowrite32(2, denali->flash_reg + CS_SETUP_CNT);
+	}
+}
+
+static void get_toshiba_nand_para(struct denali_nand_info *denali)
+{
+	uint32_t tmp;
+
+	/*
+	 * Workaround to fix a controller bug which reports a wrong
+	 * spare area size for some kinds of Toshiba NAND devices
+	 */
+	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
+		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
+		iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
+			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+		iowrite32(tmp,
+				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+#if SUPPORT_15BITECC
+		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+	}
+}
+
+static void get_hynix_nand_para(struct denali_nand_info *denali,
+							uint8_t device_id)
+{
+	uint32_t main_size, spare_size;
+
+	switch (device_id) {
+	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
+	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
+		iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK);
+		iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
+		iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
+		main_size = 4096 *
+			ioread32(denali->flash_reg + DEVICES_CONNECTED);
+		spare_size = 224 *
+			ioread32(denali->flash_reg + DEVICES_CONNECTED);
+		iowrite32(main_size,
+				denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
+		iowrite32(spare_size,
+				denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
+		iowrite32(0, denali->flash_reg + DEVICE_WIDTH);
+#if SUPPORT_15BITECC
+		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+#elif SUPPORT_8BITECC
+		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+#endif
+		break;
+	default:
+		dev_warn(denali->dev,
+			 "Spectra: Unknown Hynix NAND (Device ID: 0x%x).\n"
+			 "Will use default parameter values instead.\n",
+			 device_id);
+	}
+}
+
+/*
+ * determines how many NAND chips are connected to the controller. Note for
+ * Intel CE4100 devices we don't support more than one device.
+ */
+static void find_valid_banks(struct denali_nand_info *denali)
+{
+	uint32_t id[denali->max_banks];
+	int i;
+
+	denali->total_used_banks = 1;
+	for (i = 0; i < denali->max_banks; i++) {
+		index_addr(denali, MODE_11 | (i << 24) | 0, 0x90);
+		index_addr(denali, MODE_11 | (i << 24) | 1, 0);
+		index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]);
+
+		dev_dbg(denali->dev,
+			"Return 1st ID for bank[%d]: %x\n", i, id[i]);
+
+		if (i == 0) {
+			if (!(id[i] & 0x0ff))
+				break; /* WTF? */
+		} else {
+			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
+				denali->total_used_banks++;
+			else
+				break;
+		}
+	}
+
+	if (denali->platform == INTEL_CE4100) {
+		/*
+		 * Platform limitations of the CE4100 device limit
+		 * users to a single chip solution for NAND.
+		 * Multichip support is not enabled.
+		 */
+		if (denali->total_used_banks != 1) {
+			dev_err(denali->dev,
+				"Sorry, Intel CE4100 only supports a single NAND device.\n");
+			BUG();
+		}
+	}
+	dev_dbg(denali->dev,
+		"denali->total_used_banks: %d\n", denali->total_used_banks);
+}
+
+/*
+ * Use the configuration feature register to determine the maximum number of
+ * banks that the hardware supports.
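+ * For example, if FEATURES__N_BANKS reads back as 2, a pre-5.1 controller
+ * exposes 2 << 2 = 8 banks while a rev 5.1 (or later) controller exposes
+ * 1 << 2 = 4 banks.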
+ */
+static void detect_max_banks(struct denali_nand_info *denali)
+{
+	uint32_t features = ioread32(denali->flash_reg + FEATURES);
+	/*
+	 * Read the revision register, so we can calculate the max_banks
+	 * properly: the encoding changed from rev 5.0 to 5.1
+	 */
+	u32 revision = MAKE_COMPARABLE_REVISION(
+				ioread32(denali->flash_reg + REVISION));
+
+	if (revision < REVISION_5_1)
+		denali->max_banks = 2 << (features & FEATURES__N_BANKS);
+	else
+		denali->max_banks = 1 << (features & FEATURES__N_BANKS);
+}
+
+static void detect_partition_feature(struct denali_nand_info *denali)
+{
+	/*
+	 * On the MRST platform, denali->fwblks represents the number of
+	 * blocks occupied by the firmware. The FW lives in a protected
+	 * partition that the MTD driver has no permission to access, so
+	 * let the driver know how many blocks it can't touch.
+	 */
+	if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
+		if ((ioread32(denali->flash_reg + PERM_SRC_ID(1)) &
+			PERM_SRC_ID__SRCID) == SPECTRA_PARTITION_ID) {
+			denali->fwblks =
+			    ((ioread32(denali->flash_reg + MIN_MAX_BANK(1)) &
+			      MIN_MAX_BANK__MIN_VALUE) *
+			     denali->blksperchip)
+			    +
+			    (ioread32(denali->flash_reg + MIN_BLK_ADDR(1)) &
+			    MIN_BLK_ADDR__VALUE);
+		} else {
+			denali->fwblks = SPECTRA_START_BLOCK;
+		}
+	} else {
+		denali->fwblks = SPECTRA_START_BLOCK;
+	}
+}
+
+static uint16_t denali_nand_timing_set(struct denali_nand_info *denali)
+{
+	uint16_t status = PASS;
+	uint32_t id_bytes[8], addr;
+	uint8_t maf_id, device_id;
+	int i;
+
+	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+			__FILE__, __LINE__, __func__);
+
+	/*
+	 * Use the READ ID method to get the device ID and other parameters.
+	 * For some NAND chips, the controller can't report the correct
+	 * device ID by reading from the DEVICE_ID register.
+	 */
+	addr = MODE_11 | BANK(denali->flash_bank);
+	index_addr(denali, addr | 0, 0x90);
+	index_addr(denali, addr | 1, 0);
+	for (i = 0; i < 8; i++)
+		index_addr_read_data(denali, addr | 2, &id_bytes[i]);
+	maf_id = id_bytes[0];
+	device_id = id_bytes[1];
+
+	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
+		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
+		if (FAIL == get_onfi_nand_para(denali))
+			return FAIL;
+	} else if (maf_id == 0xEC) { /* Samsung NAND */
+		get_samsung_nand_para(denali, device_id);
+	} else if (maf_id == 0x98) { /* Toshiba NAND */
+		get_toshiba_nand_para(denali);
+	} else if (maf_id == 0xAD) { /* Hynix NAND */
+		get_hynix_nand_para(denali, device_id);
+	}
+
+	dev_info(denali->dev,
+			"Dump timing register values:\n"
+			"acc_clks: %d, re_2_we: %d, re_2_re: %d\n"
+			"we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n"
+			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
+			ioread32(denali->flash_reg + ACC_CLKS),
+			ioread32(denali->flash_reg + RE_2_WE),
+			ioread32(denali->flash_reg + RE_2_RE),
+			ioread32(denali->flash_reg + WE_2_RE),
+			ioread32(denali->flash_reg + ADDR_2_DATA),
+			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
+			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
+			ioread32(denali->flash_reg + CS_SETUP_CNT));
+
+	find_valid_banks(denali);
+
+	detect_partition_feature(denali);
+
+	/*
+	 * If the user specified to override the default timings
+	 * with a specific ONFI mode, we apply those changes here.
+	 */
+	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
+		nand_onfi_timing_set(denali, onfi_timing_mode);
+
+	return status;
+}
+
+static void denali_set_intr_modes(struct denali_nand_info *denali,
+					uint16_t INT_ENABLE)
+{
+	dev_dbg(denali->dev, "%s, Line %d, Function: %s\n",
+		__FILE__, __LINE__, __func__);
+
+	if (INT_ENABLE)
+		iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
+	else
+		iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
+}
+
+/*
+ * validation function to verify that the controlling software is making
+ * a valid request
+ */
+static inline bool is_flash_bank_valid(int flash_bank)
+{
+	return flash_bank >= 0 && flash_bank < 4;
+}
+
+static void denali_irq_init(struct denali_nand_info *denali)
+{
+	uint32_t int_mask;
+	int i;
+
+	/* Disable global interrupts */
+	denali_set_intr_modes(denali, false);
+
+	int_mask = DENALI_IRQ_ALL;
+
+	/* Clear all status bits */
+	for (i = 0; i < denali->max_banks; ++i)
+		iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i));
+
+	denali_irq_enable(denali, int_mask);
+}
+
+static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
+{
+	denali_set_intr_modes(denali, false);
+	free_irq(irqnum, denali);
+}
+
+static void denali_irq_enable(struct denali_nand_info *denali,
+							uint32_t int_mask)
+{
+	int i;
+
+	for (i = 0; i < denali->max_banks; ++i)
+		iowrite32(int_mask, denali->flash_reg + INTR_EN(i));
+}
+
+/*
+ * This function returns only the interrupts that this driver cares about,
+ * to reduce the overhead of servicing unrelated interrupt sources.
+ */
+static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
+{
+	return read_interrupt_status(denali) & DENALI_IRQ_ALL;
+}
+
+/* Interrupts are cleared by writing a 1 to the appropriate status bit */
+static inline void clear_interrupt(struct denali_nand_info *denali,
+							uint32_t irq_mask)
+{
+	uint32_t intr_status_reg;
+
+	intr_status_reg = INTR_STATUS(denali->flash_bank);
+
+	iowrite32(irq_mask, denali->flash_reg + intr_status_reg);
+}
+
+static void clear_interrupts(struct denali_nand_info *denali)
+{
+	uint32_t status;
+
+	spin_lock_irq(&denali->irq_lock);
+
+	status = read_interrupt_status(denali);
+	clear_interrupt(denali, status);
+
+	denali->irq_status = 0x0;
+	spin_unlock_irq(&denali->irq_lock);
+}
+
+static uint32_t read_interrupt_status(struct denali_nand_info *denali)
+{
+	uint32_t intr_status_reg;
+
+	intr_status_reg = INTR_STATUS(denali->flash_bank);
+
+	return ioread32(denali->flash_reg + intr_status_reg);
+}
+
+/*
+ * This is the interrupt service routine. It handles all interrupts
+ * sent to this device. Note that on CE4100, this is a shared interrupt.
+ */
+static irqreturn_t denali_isr(int irq, void *dev_id)
+{
+	struct denali_nand_info *denali = dev_id;
+	uint32_t irq_status;
+	irqreturn_t result = IRQ_NONE;
+
+	spin_lock(&denali->irq_lock);
+
+	/* check to see if a valid NAND chip has been selected. */
+	if (is_flash_bank_valid(denali->flash_bank)) {
+		/*
+		 * check to see if controller generated the interrupt,
+		 * since this is a shared interrupt
+		 */
+		irq_status = denali_irq_detected(denali);
+		if (irq_status != 0) {
+			/* handle interrupt */
+			/* first acknowledge it */
+			clear_interrupt(denali, irq_status);
+			/*
+			 * store the status in the device context for someone
+			 * to read
+			 */
+			denali->irq_status |= irq_status;
+			/* notify anyone who cares that it happened */
+			complete(&denali->complete);
+			/* tell the OS that we've handled this */
+			result = IRQ_HANDLED;
+		}
+	}
+	spin_unlock(&denali->irq_lock);
+	return result;
+}
+#define BANK(x) ((x) << 24)
+
+static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
+{
+	unsigned long comp_res;
+	uint32_t intr_status;
+	unsigned long timeout = msecs_to_jiffies(1000);
+
+	do {
+		comp_res =
+			wait_for_completion_timeout(&denali->complete, timeout);
+		spin_lock_irq(&denali->irq_lock);
+		intr_status = denali->irq_status;
+
+		if (intr_status & irq_mask) {
+			denali->irq_status &= ~irq_mask;
+			spin_unlock_irq(&denali->irq_lock);
+			/* our interrupt was detected */
+			break;
+		}
+
+		/*
+		 * these are not the interrupts you are looking for -
+		 * need to wait again
+		 */
+		spin_unlock_irq(&denali->irq_lock);
+	} while (comp_res != 0);
+
+	if (comp_res == 0) {
+		/* timeout */
+		pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n",
+				intr_status, irq_mask);
+
+		intr_status = 0;
+	}
+	return intr_status;
+}
+
+/*
+ * This helper function sets up the registers controlling ECC and whether
+ * or not the spare area will be transferred.
+ */
+static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
+				bool transfer_spare)
+{
+	int ecc_en_flag, transfer_spare_flag;
+
+	/* set ECC, transfer spare bits if needed */
+	ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
+	transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
+
+	/* Enable spare area/ECC per user's request. */
+	iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
+	iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
+}
+
+/*
+ * sends a pipeline command operation to the controller. See the Denali NAND
+ * controller's user guide for more information (section 4.2.3.6).
+ */
+static int denali_send_pipeline_cmd(struct denali_nand_info *denali,
+				    bool ecc_en, bool transfer_spare,
+				    int access_type, int op)
+{
+	int status = PASS;
+	uint32_t page_count = 1;
+	uint32_t addr, cmd, irq_status, irq_mask;
+
+	if (op == DENALI_READ)
+		irq_mask = INTR_STATUS__LOAD_COMP;
+	else if (op == DENALI_WRITE)
+		irq_mask = 0;
+	else
+		BUG();
+
+	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);
+
+	clear_interrupts(denali);
+
+	addr = BANK(denali->flash_bank) | denali->page;
+
+	if (op == DENALI_WRITE && access_type != SPARE_ACCESS) {
+		cmd = MODE_01 | addr;
+		iowrite32(cmd, denali->flash_mem);
+	} else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) {
+		/* write to the spare area */
+		cmd = MODE_10 | addr;
+		index_addr(denali, cmd, access_type);
+
+		cmd = MODE_01 | addr;
+		iowrite32(cmd, denali->flash_mem);
+	} else if (op == DENALI_READ) {
+		/* setup page read request for access type */
+		cmd = MODE_10 | addr;
+		index_addr(denali, cmd, access_type);
+
+		/*
+		 * page 33 of the NAND controller spec indicates we should not
+		 * use the pipeline commands in Spare area only mode.
+		 * So we don't.
+		 */
+		if (access_type == SPARE_ACCESS) {
+			cmd = MODE_01 | addr;
+			iowrite32(cmd, denali->flash_mem);
+		} else {
+			index_addr(denali, cmd,
+					PIPELINE_ACCESS | op | page_count);
+
+			/*
+			 * Wait for the command to be accepted; we can always
+			 * use the status0 bit as the mask is identical for
+			 * each bank.
+			 */
+			irq_status = wait_for_irq(denali, irq_mask);
+
+			if (irq_status == 0) {
+				dev_err(denali->dev,
+					"cmd, page, addr on timeout (0x%x, 0x%x, 0x%x)\n",
+					cmd, denali->page, addr);
+				status = FAIL;
+			} else {
+				cmd = MODE_01 | addr;
+				iowrite32(cmd, denali->flash_mem);
+			}
+		}
+	}
+	return status;
+}
+
+/* helper function that simply writes a buffer to the flash */
+static int write_data_to_flash_mem(struct denali_nand_info *denali,
+				   const uint8_t *buf, int len)
+{
+	uint32_t *buf32;
+	int i;
+
+	/*
+	 * verify that the len is a multiple of 4.
+	 * see comment in read_data_from_flash_mem()
+	 */
+	BUG_ON((len % 4) != 0);
+
+	/* write the data to the flash memory */
+	buf32 = (uint32_t *)buf;
+	for (i = 0; i < len / 4; i++)
+		iowrite32(*buf32++, denali->flash_mem + 0x10);
+	return i * 4; /* intent is to return the number of bytes written */
+}
+
+/* helper function that simply reads a buffer from the flash */
+static int read_data_from_flash_mem(struct denali_nand_info *denali,
+				    uint8_t *buf, int len)
+{
+	uint32_t *buf32;
+	int i;
+
+	/*
+	 * we assume that len will be a multiple of 4, if not it would be nice
+	 * to know about it ASAP rather than have random failures...
+	 * This assumption is based on the fact that this function is designed
+	 * to be used to read flash pages, which are typically multiples of 4.
+	 */
+	BUG_ON((len % 4) != 0);
+
+	/* transfer the data from the flash */
+	buf32 = (uint32_t *)buf;
+	for (i = 0; i < len / 4; i++)
+		*buf32++ = ioread32(denali->flash_mem + 0x10);
+	return i * 4; /* intent is to return the number of bytes read */
+}
+
+/* writes OOB data to the device */
+static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	uint32_t irq_status;
+	uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
+						INTR_STATUS__PROGRAM_FAIL;
+	int status = 0;
+
+	denali->page = page;
+
+	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
+							DENALI_WRITE) == PASS) {
+		write_data_to_flash_mem(denali, buf, mtd->oobsize);
+
+		/* wait for operation to complete */
+		irq_status = wait_for_irq(denali, irq_mask);
+
+		if (irq_status == 0) {
+			dev_err(denali->dev, "OOB write failed\n");
+			status = -EIO;
+		}
+	} else {
+		dev_err(denali->dev, "unable to send pipeline command\n");
+		status = -EIO;
+	}
+	return status;
+}
+
+/* reads OOB data from the device */
+static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	uint32_t irq_mask = INTR_STATUS__LOAD_COMP;
+	uint32_t irq_status, addr, cmd;
+
+	denali->page = page;
+
+	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
+							DENALI_READ) == PASS) {
+		read_data_from_flash_mem(denali, buf, mtd->oobsize);
+
+		/*
+		 * Wait for the command to be accepted; we can always use
+		 * the status0 bit as the mask is identical for each bank.
+		 */
+		irq_status = wait_for_irq(denali, irq_mask);
+
+		if (irq_status == 0)
+			dev_err(denali->dev, "page on OOB timeout %d\n",
+					denali->page);
+
+		/*
+		 * We set the device back to MAIN_ACCESS here as I observed
+		 * instability with the controller if you do a block erase
+		 * and the last transaction was a SPARE_ACCESS. Block erase
+		 * is reliable (according to the MTD test infrastructure)
+		 * if you are in MAIN_ACCESS.
+		 */
+		addr = BANK(denali->flash_bank) | denali->page;
+		cmd = MODE_10 | addr;
+		index_addr(denali, cmd, MAIN_ACCESS);
+	}
+}
+
+/*
+ * This function examines a buffer to see if it contains data indicating
+ * that it is part of an erased region of flash.
+ */
+static bool is_erased(uint8_t *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (buf[i] != 0xFF)
+			return false;
+	return true;
+}
+#define ECC_SECTOR_SIZE 512
+
+#define ECC_SECTOR(x)	(((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
+#define ECC_BYTE(x)	(((x) & ECC_ERROR_ADDRESS__OFFSET))
+#define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
+#define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO__ERROR_TYPE))
+#define ECC_ERR_DEVICE(x)	(((x) & ERR_CORRECTION_INFO__DEVICE_NR) >> 8)
+#define ECC_LAST_ERR(x)		((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
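+
+/*
+ * For example, an ECC_ERROR_ADDRESS value of 0x2040 decodes to sector 2
+ * (ECC_SECTOR) and byte offset 0x040 within that sector (ECC_BYTE).
+ */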
+
+static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
+		       uint32_t irq_status, unsigned int *max_bitflips)
+{
+	bool check_erased_page = false;
+	unsigned int bitflips = 0;
+
+	if (irq_status & INTR_STATUS__ECC_ERR) {
+		/* read the ECC errors and correct them where possible */
+		uint32_t err_address, err_correction_info, err_byte,
+			 err_sector, err_device, err_correction_value;
+		denali_set_intr_modes(denali, false);
+
+		do {
+			err_address = ioread32(denali->flash_reg +
+						ECC_ERROR_ADDRESS);
+			err_sector = ECC_SECTOR(err_address);
+			err_byte = ECC_BYTE(err_address);
+
+			err_correction_info = ioread32(denali->flash_reg +
+						ERR_CORRECTION_INFO);
+			err_correction_value =
+				ECC_CORRECTION_VALUE(err_correction_info);
+			err_device = ECC_ERR_DEVICE(err_correction_info);
+
+			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
+				/*
+				 * If err_byte is larger than ECC_SECTOR_SIZE,
+				 * the error happened in the OOB area, so we
+				 * ignore it; there is no need to correct it.
+				 * err_device identifies which NAND device the
+				 * error bits belong to when more than one
+				 * NAND is connected.
+				 */
+				if (err_byte < ECC_SECTOR_SIZE) {
+					struct mtd_info *mtd =
+						nand_to_mtd(&denali->nand);
+					int offset;
+
+					offset = (err_sector *
+							ECC_SECTOR_SIZE +
+							err_byte) *
+							denali->devnum +
+							err_device;
+					/* correct the ECC error */
+					buf[offset] ^= err_correction_value;
+					mtd->ecc_stats.corrected++;
+					bitflips++;
+				}
+			} else {
+				/*
+				 * If the error is not correctable, we need to
+				 * look at the page to see if it is an erased
+				 * page; if so, it is not a real ECC error.
+				 */
+				check_erased_page = true;
+			}
+		} while (!ECC_LAST_ERR(err_correction_info));
+		/*
+		 * Once all ECC errors have been handled, the controller
+		 * triggers an ECC_TRANSACTION_DONE interrupt, so just wait
+		 * for it here.
+		 */
+		while (!(read_interrupt_status(denali) &
+				INTR_STATUS__ECC_TRANSACTION_DONE))
+			cpu_relax();
+		clear_interrupts(denali);
+		denali_set_intr_modes(denali, true);
+	}
+	*max_bitflips = bitflips;
+	return check_erased_page;
+}
+
+/* program the controller to either enable or disable DMA transfers */
+static void denali_enable_dma(struct denali_nand_info *denali, bool en)
+{
+	iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE);
+	ioread32(denali->flash_reg + DMA_ENABLE);
+}
+
+/* set up the HW to perform the data DMA */
+static void denali_setup_dma(struct denali_nand_info *denali, int op)
+{
+	uint32_t mode;
+	const int page_count = 1;
+	uint32_t addr = denali->buf.dma_buf;
+
+	mode = MODE_10 | BANK(denali->flash_bank);
+
+	/* DMA is a four step process */
+
+	/* 1. setup transfer type and # of pages */
+	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);
+
+	/* 2. set memory high address bits 23:8 */
+	index_addr(denali, mode | ((addr >> 16) << 8), 0x2200);
+
+	/* 3. set memory low address bits 23:8 */
+	index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300);
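+	/*
+	 * e.g. for a DMA buffer at 0x12345678, step 2 programs 0x1234 and
+	 * step 3 programs 0x5678, each placed in bits 23:8 of the indexed
+	 * address
+	 */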
+
+	/* 4. interrupt when complete, burst len = 64 bytes */
+	index_addr(denali, mode | 0x14000, 0x2400);
+}
+
+/*
+ * writes a page. user specifies type, and this function handles the
+ * configuration details.
+ */
+static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			const uint8_t *buf, bool raw_xfer)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	dma_addr_t addr = denali->buf.dma_buf;
+	size_t size = mtd->writesize + mtd->oobsize;
+	uint32_t irq_status;
+	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
+						INTR_STATUS__PROGRAM_FAIL;
+
+	/*
+	 * if it is a raw xfer, we want to disable ecc and send the spare area.
+	 * !raw_xfer - enable ecc
+	 * raw_xfer - transfer spare
+	 */
+	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+
+	/* copy buffer into DMA buffer */
+	memcpy(denali->buf.buf, buf, mtd->writesize);
+
+	if (raw_xfer) {
+		/* transfer the data to the spare area */
+		memcpy(denali->buf.buf + mtd->writesize,
+			chip->oob_poi,
+			mtd->oobsize);
+	}
+
+	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);
+
+	clear_interrupts(denali);
+	denali_enable_dma(denali, true);
+
+	denali_setup_dma(denali, DENALI_WRITE);
+
+	/* wait for operation to complete */
+	irq_status = wait_for_irq(denali, irq_mask);
+
+	if (irq_status == 0) {
+		dev_err(denali->dev, "timeout on write_page (type = %d)\n",
+			raw_xfer);
+		denali->status = NAND_STATUS_FAIL;
+	}
+
+	denali_enable_dma(denali, false);
+	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
+
+	return 0;
+}
+
+/* NAND core entry points */
+
+/*
+ * this is the callback that the NAND core calls to write a page. Since
+ * writing a page with ECC or without is similar, all the work is done
+ * by write_page above.
+ */
+static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const uint8_t *buf, int oob_required, int page)
+{
+	/*
+	 * for regular page writes, we let HW handle all the ECC
+	 * data written to the device.
+	 */
+	return write_page(mtd, chip, buf, false);
+}
+
+/*
+ * This is the callback that the NAND core calls to write a page without ECC.
+ * raw access is similar to ECC page writes, so all the work is done in the
+ * write_page() function above.
+ */
+static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				 const uint8_t *buf, int oob_required,
+				 int page)
+{
+	/*
+	 * for raw page writes, we want to disable ECC and simply write
+	 * whatever data is in the buffer.
+	 */
+	return write_page(mtd, chip, buf, true);
+}
+
+static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page)
+{
+	return write_oob_data(mtd, chip->oob_poi, page);
+}
+
+static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			   int page)
+{
+	read_oob_data(mtd, chip->oob_poi, page);
+
+	return 0;
+}
+
+static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			    uint8_t *buf, int oob_required, int page)
+{
+	unsigned int max_bitflips;
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+	dma_addr_t addr = denali->buf.dma_buf;
+	size_t size = mtd->writesize + mtd->oobsize;
+
+	uint32_t irq_status;
+	uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
+			    INTR_STATUS__ECC_ERR;
+	bool check_erased_page = false;
+
+	if (page != denali->page) {
+		dev_err(denali->dev,
+			"IN %s: page %d is not equal to denali->page %d",
+			__func__, page, denali->page);
+		BUG();
+	}
+
+	setup_ecc_for_xfer(denali, true, false);
+
+	denali_enable_dma(denali, true);
+	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+	clear_interrupts(denali);
+	denali_setup_dma(denali, DENALI_READ);
+
+	/* wait for operation to complete */
+	irq_status = wait_for_irq(denali, irq_mask);
+
+	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+	memcpy(buf, denali->buf.buf, mtd->writesize);
+
+	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
+	denali_enable_dma(denali, false);
+
+	if (check_erased_page) {
+		read_oob_data(mtd, chip->oob_poi, denali->page);
+
+		/* check ECC failures that may have occurred on erased pages */
+		if (!is_erased(buf, mtd->writesize))
+			mtd->ecc_stats.failed++;
+		if (!is_erased(chip->oob_poi, mtd->oobsize))
+			mtd->ecc_stats.failed++;
+	}
+	return max_bitflips;
+}
+
+static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	dma_addr_t addr = denali->buf.dma_buf;
+	size_t size = mtd->writesize + mtd->oobsize;
+	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
+
+	if (page != denali->page) {
+		dev_err(denali->dev,
+			"IN %s: page %d is not equal to denali->page %d",
+			__func__, page, denali->page);
+		BUG();
+	}
+
+	setup_ecc_for_xfer(denali, false, true);
+	denali_enable_dma(denali, true);
+
+	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+	clear_interrupts(denali);
+	denali_setup_dma(denali, DENALI_READ);
+
+	/* wait for operation to complete */
+	wait_for_irq(denali, irq_mask);
+
+	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);
+
+	denali_enable_dma(denali, false);
+
+	memcpy(buf, denali->buf.buf, mtd->writesize);
+	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);
+
+	return 0;
+}
+
+static uint8_t denali_read_byte(struct mtd_info *mtd)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	uint8_t result = 0xff;
+
+	if (denali->buf.head < denali->buf.tail)
+		result = denali->buf.buf[denali->buf.head++];
+
+	return result;
+}
+
+static void denali_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+	spin_lock_irq(&denali->irq_lock);
+	denali->flash_bank = chip;
+	spin_unlock_irq(&denali->irq_lock);
+}
+
+static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	int status = denali->status;
+
+	denali->status = 0;
+
+	return status;
+}
+
+static int denali_erase(struct mtd_info *mtd, int page)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+
+	uint32_t cmd, irq_status;
+
+	clear_interrupts(denali);
+
+	/* setup page read request for access type */
+	cmd = MODE_10 | BANK(denali->flash_bank) | page;
+	index_addr(denali, cmd, 0x1);
+
+	/* wait for erase to complete or failure to occur */
+	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
+					INTR_STATUS__ERASE_FAIL);
+
+	return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
+}
+
+static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
+			   int page)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	uint32_t addr, id;
+	int i;
+
+	switch (cmd) {
+	case NAND_CMD_PAGEPROG:
+		break;
+	case NAND_CMD_STATUS:
+		read_status(denali);
+		break;
+	case NAND_CMD_READID:
+	case NAND_CMD_PARAM:
+		reset_buf(denali);
+		/*
+		 * Sometimes the manufacturer ID read from the register is
+		 * not right, e.g. on some Micron MT29F32G08QAA MLC NAND
+		 * chips, so send the READID command to the NAND instead.
+		 */
+		addr = MODE_11 | BANK(denali->flash_bank);
+		index_addr(denali, addr | 0, 0x90);
+		index_addr(denali, addr | 1, col);
+		for (i = 0; i < 8; i++) {
+			index_addr_read_data(denali, addr | 2, &id);
+			write_byte_to_buf(denali, id);
+		}
+		break;
+	case NAND_CMD_READ0:
+	case NAND_CMD_SEQIN:
+		denali->page = page;
+		break;
+	case NAND_CMD_RESET:
+		reset_bank(denali);
+		break;
+	case NAND_CMD_READOOB:
+		/* TODO: Read OOB data */
+		break;
+	default:
+		pr_err(": unsupported command received 0x%x\n", cmd);
+		break;
+	}
+}
+/* end NAND core entry points */
+
+/* Initialization code to bring the device up to a known good state */
+static void denali_hw_init(struct denali_nand_info *denali)
+{
+	/*
+	 * Tell the driver how many bytes the controller will skip before
+	 * writing ECC code in the OOB. This register may already have been
+	 * set by firmware, so read the value out; if it is 0, just let it
+	 * be.
+	 */
+	denali->bbtskipbytes = ioread32(denali->flash_reg +
+						SPARE_AREA_SKIP_BYTES);
+	detect_max_banks(denali);
+	denali_nand_reset(denali);
+	iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
+	iowrite32(CHIP_EN_DONT_CARE__FLAG,
+			denali->flash_reg + CHIP_ENABLE_DONT_CARE);
+
+	iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);
+
+	/* Set default values for these registers at init time */
+	iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
+	iowrite32(1, denali->flash_reg + ECC_ENABLE);
+	denali_nand_timing_set(denali);
+	denali_irq_init(denali);
+}
+
+/*
+ * Although the controller spec says SLC ECC is forced to be 4-bit, the
+ * Denali controller in MRST only supports 15-bit and 8-bit ECC
+ * correction.
+ */
+#define ECC_8BITS	14
+#define ECC_15BITS	26
+
+static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = denali->bbtskipbytes;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int denali_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct denali_nand_info *denali = mtd_to_denali(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
+	oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
+	.ecc = denali_ooblayout_ecc,
+	.free = denali_ooblayout_free,
+};
+
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	8,
+	.len = 4,
+	.veroffs = 12,
+	.maxblocks = 4,
+	.pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	8,
+	.len = 4,
+	.veroffs = 12,
+	.maxblocks = 4,
+	.pattern = mirror_pattern,
+};
+
+/* initialize driver data structures */
+static void denali_drv_init(struct denali_nand_info *denali)
+{
+	denali->idx = 0;
+
+	/* setup interrupt handler */
+	/*
+	 * the completion object will be used to notify
+	 * the waiter that the interrupt has occurred
+	 */
+	init_completion(&denali->complete);
+
+	/*
+	 * the spinlock will be used to synchronize the ISR with any
+	 * code that might access the shared data (interrupt status)
+	 */
+	spin_lock_init(&denali->irq_lock);
+
+	/* indicate that MTD has not selected a valid bank yet */
+	denali->flash_bank = CHIP_SELECT_INVALID;
+
+	/* initialize our irq_status variable to indicate no interrupts */
+	denali->irq_status = 0;
+}
+
+int denali_init(struct denali_nand_info *denali)
+{
+	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
+	int ret;
+
+	if (denali->platform == INTEL_CE4100) {
+		/*
+		 * Due to a silicon limitation, we can only support
+		 * ONFI timing mode 1 and below.
+		 */
+		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
+			pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
+			return -EINVAL;
+		}
+	}
+
+	/* allocate a temporary buffer for nand_scan_ident() */
+	denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE,
+					GFP_DMA | GFP_KERNEL);
+	if (!denali->buf.buf)
+		return -ENOMEM;
+
+	mtd->dev.parent = denali->dev;
+	denali_hw_init(denali);
+	denali_drv_init(denali);
+
+	/*
+	 * denali_isr registration is done after all the hardware
+	 * initialization is finished
+	 */
+	if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
+			DENALI_NAND_NAME, denali)) {
+		pr_err("Spectra: Unable to allocate IRQ\n");
+		return -ENODEV;
+	}
+
+	/* now that our ISR is registered, we can enable interrupts */
+	denali_set_intr_modes(denali, true);
+	mtd->name = "denali-nand";
+
+	/* register the driver with the NAND core subsystem */
+	denali->nand.select_chip = denali_select_chip;
+	denali->nand.cmdfunc = denali_cmdfunc;
+	denali->nand.read_byte = denali_read_byte;
+	denali->nand.waitfunc = denali_waitfunc;
+
+	/*
+	 * Scan for NAND devices attached to the controller. This is the
+	 * first stage in a two-step process to register with the NAND
+	 * subsystem.
+	 */
+	if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
+		ret = -ENXIO;
+		goto failed_req_irq;
+	}
+
+	/* allocate the right size buffer now */
+	devm_kfree(denali->dev, denali->buf.buf);
+	denali->buf.buf = devm_kzalloc(denali->dev,
+			     mtd->writesize + mtd->oobsize,
+			     GFP_KERNEL);
+	if (!denali->buf.buf) {
+		ret = -ENOMEM;
+		goto failed_req_irq;
+	}
+
+	/* Is 32-bit DMA supported? */
+	ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		pr_err("Spectra: no usable DMA configuration\n");
+		goto failed_req_irq;
+	}
+
+	denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
+			     mtd->writesize + mtd->oobsize,
+			     DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
+		dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
+		ret = -EIO;
+		goto failed_req_irq;
+	}
+
+	/*
+	 * Support for multiple NAND devices: MTD knows nothing about
+	 * multiple NANDs, so we should tell it the real page size and
+	 * anything else necessary.
+	 */
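+	/*
+	 * For example, with two chips reported by DEVICES_CONNECTED
+	 * (devnum == 2), a 2KiB/64B physical page is exposed to MTD as a
+	 * 4KiB/128B logical page, and the ECC geometry further down is
+	 * scaled to match.
+	 */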
+	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
+	denali->nand.chipsize <<= (denali->devnum - 1);
+	denali->nand.page_shift += (denali->devnum - 1);
+	denali->nand.pagemask = (denali->nand.chipsize >>
+						denali->nand.page_shift) - 1;
+	denali->nand.bbt_erase_shift += (denali->devnum - 1);
+	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
+	denali->nand.chip_shift += (denali->devnum - 1);
+	mtd->writesize <<= (denali->devnum - 1);
+	mtd->oobsize <<= (denali->devnum - 1);
+	mtd->erasesize <<= (denali->devnum - 1);
+	mtd->size = denali->nand.numchips * denali->nand.chipsize;
+	denali->bbtskipbytes *= denali->devnum;
+
+	/*
+	 * second stage of the NAND scan
+	 * this stage requires information regarding ECC and
+	 * bad block management.
+	 */
+
+	/* Bad block management */
+	denali->nand.bbt_td = &bbt_main_descr;
+	denali->nand.bbt_md = &bbt_mirror_descr;
+
+	/* skip the scan for now until we have OOB read and write support */
+	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
+	denali->nand.options |= NAND_SKIP_BBTSCAN;
+	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
+
+	/* no subpage writes on denali */
+	denali->nand.options |= NAND_NO_SUBPAGE_WRITE;
+
+	/*
+	 * The Denali controller only supports 15-bit and 8-bit ECC in MRST,
+	 * so just let the controller do 15-bit ECC for MLC and 8-bit ECC
+	 * for SLC if possible.
+	 */
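+	/*
+	 * For example, a 4KiB-page MLC part with 224 bytes of OOB has room
+	 * for 8 sectors x 26 bytes = 208 bytes of 15-bit ECC alongside the
+	 * usual handful of skipped bytes, so the 15-bit setting is chosen
+	 * below.
+	 */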
+	if (!nand_is_slc(&denali->nand) &&
+			(mtd->oobsize > (denali->bbtskipbytes +
+			ECC_15BITS * (mtd->writesize /
+			ECC_SECTOR_SIZE)))) {
+		/* if the MLC OOB size is large enough, use 15-bit ECC */
+		denali->nand.ecc.strength = 15;
+		denali->nand.ecc.bytes = ECC_15BITS;
+		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
+	} else if (mtd->oobsize < (denali->bbtskipbytes +
+			ECC_8BITS * (mtd->writesize /
+			ECC_SECTOR_SIZE))) {
+		pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes");
+		goto failed_req_irq;
+	} else {
+		denali->nand.ecc.strength = 8;
+		denali->nand.ecc.bytes = ECC_8BITS;
+		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
+	}
+
+	mtd_set_ooblayout(mtd, &denali_ooblayout_ops);
+	denali->nand.ecc.bytes *= denali->devnum;
+	denali->nand.ecc.strength *= denali->devnum;
+
+	/*
+	 * Let the driver know the total number of blocks and how many
+	 * blocks are contained in each NAND chip. blksperchip helps the
+	 * driver know how many blocks are taken by the FW.
+	 */
+	denali->totalblks = mtd->size >> denali->nand.phys_erase_shift;
+	denali->blksperchip = denali->totalblks / denali->nand.numchips;
+
+	/* override the default read/write operations */
+	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
+	denali->nand.ecc.read_page = denali_read_page;
+	denali->nand.ecc.read_page_raw = denali_read_page_raw;
+	denali->nand.ecc.write_page = denali_write_page;
+	denali->nand.ecc.write_page_raw = denali_write_page_raw;
+	denali->nand.ecc.read_oob = denali_read_oob;
+	denali->nand.ecc.write_oob = denali_write_oob;
+	denali->nand.erase = denali_erase;
+
+	if (nand_scan_tail(mtd)) {
+		ret = -ENXIO;
+		goto failed_req_irq;
+	}
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(denali->dev, "Spectra: Failed to register MTD: %d\n",
+				ret);
+		goto failed_req_irq;
+	}
+	return 0;
+
+failed_req_irq:
+	denali_irq_cleanup(denali->irq, denali);
+
+	return ret;
+}
+EXPORT_SYMBOL(denali_init);
+
+/* driver exit point */
+void denali_remove(struct denali_nand_info *denali)
+{
+	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
+	/*
+	 * Pre-compute DMA buffer size to avoid any problems in case
+	 * nand_release() ever changes in a way that mtd->writesize and
+	 * mtd->oobsize are not reliable after this call.
+	 */
+	int bufsize = mtd->writesize + mtd->oobsize;
+
+	nand_release(mtd);
+	denali_irq_cleanup(denali->irq, denali);
+	dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
+			 DMA_BIDIRECTIONAL);
+}
+EXPORT_SYMBOL(denali_remove);
diff --git a/drivers/mtd/nand/rawnand/denali.h b/drivers/mtd/nand/rawnand/denali.h
new file mode 100644
index 000000000000..37618b532317
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/denali.h
@@ -0,0 +1,484 @@ 
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright (c) 2009 - 2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ */
+
+#ifndef __DENALI_H__
+#define __DENALI_H__
+
+#include <linux/mtd/rawnand.h>
+
+#define DEVICE_RESET				0x0
+#define     DEVICE_RESET__BANK0				0x0001
+#define     DEVICE_RESET__BANK1				0x0002
+#define     DEVICE_RESET__BANK2				0x0004
+#define     DEVICE_RESET__BANK3				0x0008
+
+#define TRANSFER_SPARE_REG			0x10
+#define     TRANSFER_SPARE_REG__FLAG			0x0001
+
+#define LOAD_WAIT_CNT				0x20
+#define     LOAD_WAIT_CNT__VALUE			0xffff
+
+#define PROGRAM_WAIT_CNT			0x30
+#define     PROGRAM_WAIT_CNT__VALUE			0xffff
+
+#define ERASE_WAIT_CNT				0x40
+#define     ERASE_WAIT_CNT__VALUE			0xffff
+
+#define INT_MON_CYCCNT				0x50
+#define     INT_MON_CYCCNT__VALUE			0xffff
+
+#define RB_PIN_ENABLED				0x60
+#define     RB_PIN_ENABLED__BANK0			0x0001
+#define     RB_PIN_ENABLED__BANK1			0x0002
+#define     RB_PIN_ENABLED__BANK2			0x0004
+#define     RB_PIN_ENABLED__BANK3			0x0008
+
+#define MULTIPLANE_OPERATION			0x70
+#define     MULTIPLANE_OPERATION__FLAG			0x0001
+
+#define MULTIPLANE_READ_ENABLE			0x80
+#define     MULTIPLANE_READ_ENABLE__FLAG		0x0001
+
+#define COPYBACK_DISABLE			0x90
+#define     COPYBACK_DISABLE__FLAG			0x0001
+
+#define CACHE_WRITE_ENABLE			0xa0
+#define     CACHE_WRITE_ENABLE__FLAG			0x0001
+
+#define CACHE_READ_ENABLE			0xb0
+#define     CACHE_READ_ENABLE__FLAG			0x0001
+
+#define PREFETCH_MODE				0xc0
+#define     PREFETCH_MODE__PREFETCH_EN			0x0001
+#define     PREFETCH_MODE__PREFETCH_BURST_LENGTH	0xfff0
+
+#define CHIP_ENABLE_DONT_CARE			0xd0
+#define     CHIP_EN_DONT_CARE__FLAG			0x01
+
+#define ECC_ENABLE				0xe0
+#define     ECC_ENABLE__FLAG				0x0001
+
+#define GLOBAL_INT_ENABLE			0xf0
+#define     GLOBAL_INT_EN_FLAG				0x01
+
+#define WE_2_RE					0x100
+#define     WE_2_RE__VALUE				0x003f
+
+#define ADDR_2_DATA				0x110
+#define     ADDR_2_DATA__VALUE				0x003f
+
+#define RE_2_WE					0x120
+#define     RE_2_WE__VALUE				0x003f
+
+#define ACC_CLKS				0x130
+#define     ACC_CLKS__VALUE				0x000f
+
+#define NUMBER_OF_PLANES			0x140
+#define     NUMBER_OF_PLANES__VALUE			0x0007
+
+#define PAGES_PER_BLOCK				0x150
+#define     PAGES_PER_BLOCK__VALUE			0xffff
+
+#define DEVICE_WIDTH				0x160
+#define     DEVICE_WIDTH__VALUE				0x0003
+
+#define DEVICE_MAIN_AREA_SIZE			0x170
+#define     DEVICE_MAIN_AREA_SIZE__VALUE		0xffff
+
+#define DEVICE_SPARE_AREA_SIZE			0x180
+#define     DEVICE_SPARE_AREA_SIZE__VALUE		0xffff
+
+#define TWO_ROW_ADDR_CYCLES			0x190
+#define     TWO_ROW_ADDR_CYCLES__FLAG			0x0001
+
+#define MULTIPLANE_ADDR_RESTRICT		0x1a0
+#define     MULTIPLANE_ADDR_RESTRICT__FLAG		0x0001
+
+#define ECC_CORRECTION				0x1b0
+#define     ECC_CORRECTION__VALUE			0x001f
+
+#define READ_MODE				0x1c0
+#define     READ_MODE__VALUE				0x000f
+
+#define WRITE_MODE				0x1d0
+#define     WRITE_MODE__VALUE				0x000f
+
+#define COPYBACK_MODE				0x1e0
+#define     COPYBACK_MODE__VALUE			0x000f
+
+#define RDWR_EN_LO_CNT				0x1f0
+#define     RDWR_EN_LO_CNT__VALUE			0x001f
+
+#define RDWR_EN_HI_CNT				0x200
+#define     RDWR_EN_HI_CNT__VALUE			0x001f
+
+#define MAX_RD_DELAY				0x210
+#define     MAX_RD_DELAY__VALUE				0x000f
+
+#define CS_SETUP_CNT				0x220
+#define     CS_SETUP_CNT__VALUE				0x001f
+
+#define SPARE_AREA_SKIP_BYTES			0x230
+#define     SPARE_AREA_SKIP_BYTES__VALUE		0x003f
+
+#define SPARE_AREA_MARKER			0x240
+#define     SPARE_AREA_MARKER__VALUE			0xffff
+
+#define DEVICES_CONNECTED			0x250
+#define     DEVICES_CONNECTED__VALUE			0x0007
+
+#define DIE_MASK				0x260
+#define     DIE_MASK__VALUE				0x00ff
+
+#define FIRST_BLOCK_OF_NEXT_PLANE		0x270
+#define     FIRST_BLOCK_OF_NEXT_PLANE__VALUE		0xffff
+
+#define WRITE_PROTECT				0x280
+#define     WRITE_PROTECT__FLAG				0x0001
+
+#define RE_2_RE					0x290
+#define     RE_2_RE__VALUE				0x003f
+
+#define MANUFACTURER_ID				0x300
+#define     MANUFACTURER_ID__VALUE			0x00ff
+
+#define DEVICE_ID				0x310
+#define     DEVICE_ID__VALUE				0x00ff
+
+#define DEVICE_PARAM_0				0x320
+#define     DEVICE_PARAM_0__VALUE			0x00ff
+
+#define DEVICE_PARAM_1				0x330
+#define     DEVICE_PARAM_1__VALUE			0x00ff
+
+#define DEVICE_PARAM_2				0x340
+#define     DEVICE_PARAM_2__VALUE			0x00ff
+
+#define LOGICAL_PAGE_DATA_SIZE			0x350
+#define     LOGICAL_PAGE_DATA_SIZE__VALUE		0xffff
+
+#define LOGICAL_PAGE_SPARE_SIZE			0x360
+#define     LOGICAL_PAGE_SPARE_SIZE__VALUE		0xffff
+
+#define REVISION				0x370
+#define     REVISION__VALUE				0xffff
+#define MAKE_COMPARABLE_REVISION(x)		swab16((x) & REVISION__VALUE)
+#define REVISION_5_1				0x00000501
+
+#define ONFI_DEVICE_FEATURES			0x380
+#define     ONFI_DEVICE_FEATURES__VALUE			0x003f
+
+#define ONFI_OPTIONAL_COMMANDS			0x390
+#define     ONFI_OPTIONAL_COMMANDS__VALUE		0x003f
+
+#define ONFI_TIMING_MODE			0x3a0
+#define     ONFI_TIMING_MODE__VALUE			0x003f
+
+#define ONFI_PGM_CACHE_TIMING_MODE		0x3b0
+#define     ONFI_PGM_CACHE_TIMING_MODE__VALUE		0x003f
+
+#define ONFI_DEVICE_NO_OF_LUNS			0x3c0
+#define     ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS		0x00ff
+#define     ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE		0x0100
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L	0x3d0
+#define     ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE	0xffff
+
+#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U	0x3e0
+#define     ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE	0xffff
+
+#define FEATURES					0x3f0
+#define     FEATURES__N_BANKS				0x0003
+#define     FEATURES__ECC_MAX_ERR			0x003c
+#define     FEATURES__DMA				0x0040
+#define     FEATURES__CMD_DMA				0x0080
+#define     FEATURES__PARTITION				0x0100
+#define     FEATURES__XDMA_SIDEBAND			0x0200
+#define     FEATURES__GPREG				0x0400
+#define     FEATURES__INDEX_ADDR			0x0800
+
+#define TRANSFER_MODE				0x400
+#define     TRANSFER_MODE__VALUE			0x0003
+
+#define INTR_STATUS(__bank)	(0x410 + ((__bank) * 0x50))
+#define INTR_EN(__bank)		(0x420 + ((__bank) * 0x50))
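+/* e.g. bank 2: INTR_STATUS(2) is register 0x4b0 and INTR_EN(2) is 0x4c0 */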
+
+#define     INTR_STATUS__ECC_TRANSACTION_DONE		0x0001
+#define     INTR_STATUS__ECC_ERR			0x0002
+#define     INTR_STATUS__DMA_CMD_COMP			0x0004
+#define     INTR_STATUS__TIME_OUT			0x0008
+#define     INTR_STATUS__PROGRAM_FAIL			0x0010
+#define     INTR_STATUS__ERASE_FAIL			0x0020
+#define     INTR_STATUS__LOAD_COMP			0x0040
+#define     INTR_STATUS__PROGRAM_COMP			0x0080
+#define     INTR_STATUS__ERASE_COMP			0x0100
+#define     INTR_STATUS__PIPE_CPYBCK_CMD_COMP		0x0200
+#define     INTR_STATUS__LOCKED_BLK			0x0400
+#define     INTR_STATUS__UNSUP_CMD			0x0800
+#define     INTR_STATUS__INT_ACT			0x1000
+#define     INTR_STATUS__RST_COMP			0x2000
+#define     INTR_STATUS__PIPE_CMD_ERR			0x4000
+#define     INTR_STATUS__PAGE_XFER_INC			0x8000
+
+#define     INTR_EN__ECC_TRANSACTION_DONE		0x0001
+#define     INTR_EN__ECC_ERR				0x0002
+#define     INTR_EN__DMA_CMD_COMP			0x0004
+#define     INTR_EN__TIME_OUT				0x0008
+#define     INTR_EN__PROGRAM_FAIL			0x0010
+#define     INTR_EN__ERASE_FAIL				0x0020
+#define     INTR_EN__LOAD_COMP				0x0040
+#define     INTR_EN__PROGRAM_COMP			0x0080
+#define     INTR_EN__ERASE_COMP				0x0100
+#define     INTR_EN__PIPE_CPYBCK_CMD_COMP		0x0200
+#define     INTR_EN__LOCKED_BLK				0x0400
+#define     INTR_EN__UNSUP_CMD				0x0800
+#define     INTR_EN__INT_ACT				0x1000
+#define     INTR_EN__RST_COMP				0x2000
+#define     INTR_EN__PIPE_CMD_ERR			0x4000
+#define     INTR_EN__PAGE_XFER_INC			0x8000
+
+#define PAGE_CNT(__bank)	(0x430 + ((__bank) * 0x50))
+#define ERR_PAGE_ADDR(__bank)	(0x440 + ((__bank) * 0x50))
+#define ERR_BLOCK_ADDR(__bank)	(0x450 + ((__bank) * 0x50))
+
+#define DATA_INTR				0x550
+#define     DATA_INTR__WRITE_SPACE_AV			0x0001
+#define     DATA_INTR__READ_DATA_AV			0x0002
+
+#define DATA_INTR_EN				0x560
+#define     DATA_INTR_EN__WRITE_SPACE_AV		0x0001
+#define     DATA_INTR_EN__READ_DATA_AV			0x0002
+
+#define GPREG_0					0x570
+#define     GPREG_0__VALUE				0xffff
+
+#define GPREG_1					0x580
+#define     GPREG_1__VALUE				0xffff
+
+#define GPREG_2					0x590
+#define     GPREG_2__VALUE				0xffff
+
+#define GPREG_3					0x5a0
+#define     GPREG_3__VALUE				0xffff
+
+#define ECC_THRESHOLD				0x600
+#define     ECC_THRESHOLD__VALUE			0x03ff
+
+#define ECC_ERROR_BLOCK_ADDRESS			0x610
+#define     ECC_ERROR_BLOCK_ADDRESS__VALUE		0xffff
+
+#define ECC_ERROR_PAGE_ADDRESS			0x620
+#define     ECC_ERROR_PAGE_ADDRESS__VALUE		0x0fff
+#define     ECC_ERROR_PAGE_ADDRESS__BANK		0xf000
+
+#define ECC_ERROR_ADDRESS			0x630
+#define     ECC_ERROR_ADDRESS__OFFSET			0x0fff
+#define     ECC_ERROR_ADDRESS__SECTOR_NR		0xf000
+
+#define ERR_CORRECTION_INFO			0x640
+#define     ERR_CORRECTION_INFO__BYTEMASK		0x00ff
+#define     ERR_CORRECTION_INFO__DEVICE_NR		0x0f00
+#define     ERR_CORRECTION_INFO__ERROR_TYPE		0x4000
+#define     ERR_CORRECTION_INFO__LAST_ERR_INFO		0x8000
+
+#define DMA_ENABLE				0x700
+#define     DMA_ENABLE__FLAG				0x0001
+
+#define IGNORE_ECC_DONE				0x710
+#define     IGNORE_ECC_DONE__FLAG			0x0001
+
+#define DMA_INTR				0x720
+#define     DMA_INTR__TARGET_ERROR			0x0001
+#define     DMA_INTR__DESC_COMP_CHANNEL0		0x0002
+#define     DMA_INTR__DESC_COMP_CHANNEL1		0x0004
+#define     DMA_INTR__DESC_COMP_CHANNEL2		0x0008
+#define     DMA_INTR__DESC_COMP_CHANNEL3		0x0010
+#define     DMA_INTR__MEMCOPY_DESC_COMP		0x0020
+
+#define DMA_INTR_EN				0x730
+#define     DMA_INTR_EN__TARGET_ERROR			0x0001
+#define     DMA_INTR_EN__DESC_COMP_CHANNEL0		0x0002
+#define     DMA_INTR_EN__DESC_COMP_CHANNEL1		0x0004
+#define     DMA_INTR_EN__DESC_COMP_CHANNEL2		0x0008
+#define     DMA_INTR_EN__DESC_COMP_CHANNEL3		0x0010
+#define     DMA_INTR_EN__MEMCOPY_DESC_COMP		0x0020
+
+#define TARGET_ERR_ADDR_LO			0x740
+#define     TARGET_ERR_ADDR_LO__VALUE			0xffff
+
+#define TARGET_ERR_ADDR_HI			0x750
+#define     TARGET_ERR_ADDR_HI__VALUE			0xffff
+
+#define CHNL_ACTIVE				0x760
+#define     CHNL_ACTIVE__CHANNEL0			0x0001
+#define     CHNL_ACTIVE__CHANNEL1			0x0002
+#define     CHNL_ACTIVE__CHANNEL2			0x0004
+#define     CHNL_ACTIVE__CHANNEL3			0x0008
+
+#define ACTIVE_SRC_ID				0x800
+#define     ACTIVE_SRC_ID__VALUE			0x00ff
+
+#define PTN_INTR					0x810
+#define     PTN_INTR__CONFIG_ERROR			0x0001
+#define     PTN_INTR__ACCESS_ERROR_BANK0		0x0002
+#define     PTN_INTR__ACCESS_ERROR_BANK1		0x0004
+#define     PTN_INTR__ACCESS_ERROR_BANK2		0x0008
+#define     PTN_INTR__ACCESS_ERROR_BANK3		0x0010
+#define     PTN_INTR__REG_ACCESS_ERROR			0x0020
+
+#define PTN_INTR_EN				0x820
+#define     PTN_INTR_EN__CONFIG_ERROR			0x0001
+#define     PTN_INTR_EN__ACCESS_ERROR_BANK0		0x0002
+#define     PTN_INTR_EN__ACCESS_ERROR_BANK1		0x0004
+#define     PTN_INTR_EN__ACCESS_ERROR_BANK2		0x0008
+#define     PTN_INTR_EN__ACCESS_ERROR_BANK3		0x0010
+#define     PTN_INTR_EN__REG_ACCESS_ERROR		0x0020
+
+#define PERM_SRC_ID(__bank)	(0x830 + ((__bank) * 0x40))
+#define     PERM_SRC_ID__SRCID				0x00ff
+#define     PERM_SRC_ID__DIRECT_ACCESS_ACTIVE		0x0800
+#define     PERM_SRC_ID__WRITE_ACTIVE			0x2000
+#define     PERM_SRC_ID__READ_ACTIVE			0x4000
+#define     PERM_SRC_ID__PARTITION_VALID		0x8000
+
+#define MIN_BLK_ADDR(__bank)	(0x840 + ((__bank) * 0x40))
+#define     MIN_BLK_ADDR__VALUE				0xffff
+
+#define MAX_BLK_ADDR(__bank)	(0x850 + ((__bank) * 0x40))
+#define     MAX_BLK_ADDR__VALUE				0xffff
+
+#define MIN_MAX_BANK(__bank)	(0x860 + ((__bank) * 0x40))
+#define     MIN_MAX_BANK__MIN_VALUE			0x0003
+#define     MIN_MAX_BANK__MAX_VALUE			0x000c
+
+
+/* ffsdefs.h */
+#define CLEAR 0                 /*use this to clear a field instead of "fail"*/
+#define SET   1                 /*use this to set a field instead of "pass"*/
+#define FAIL 1                  /*failed flag*/
+#define PASS 0                  /*success flag*/
+#define ERR -1                  /*error flag*/
+
+/* lld.h */
+#define GOOD_BLOCK 0
+#define DEFECTIVE_BLOCK 1
+#define READ_ERROR 2
+
+#define CLK_X  5
+#define CLK_MULTI 4
+
+/* spectraswconfig.h */
+#define CMD_DMA 0
+
+#define SPECTRA_PARTITION_ID    0
+/**** Block Table and Reserved Block Parameters *****/
+#define SPECTRA_START_BLOCK     3
+#define NUM_FREE_BLOCKS_GATE    30
+
+/* KBV - Updated to LNW scratch register address */
+#define SCRATCH_REG_ADDR    CONFIG_MTD_NAND_DENALI_SCRATCH_REG_ADDR
+#define SCRATCH_REG_SIZE    64
+
+#define GLOB_HWCTL_DEFAULT_BLKS    2048
+
+#define SUPPORT_15BITECC        1
+#define SUPPORT_8BITECC         1
+
+#define CUSTOM_CONF_PARAMS      0
+
+#define ONFI_BLOOM_TIME         1
+#define MODE5_WORKAROUND        0
+
+
+#define MODE_00    0x00000000
+#define MODE_01    0x04000000
+#define MODE_10    0x08000000
+#define MODE_11    0x0C000000
+
+
+#define DATA_TRANSFER_MODE              0
+#define PROTECTION_PER_BLOCK            1
+#define LOAD_WAIT_COUNT                 2
+#define PROGRAM_WAIT_COUNT              3
+#define ERASE_WAIT_COUNT                4
+#define INT_MONITOR_CYCLE_COUNT         5
+#define READ_BUSY_PIN_ENABLED           6
+#define MULTIPLANE_OPERATION_SUPPORT    7
+#define PRE_FETCH_MODE                  8
+#define CE_DONT_CARE_SUPPORT            9
+#define COPYBACK_SUPPORT                10
+#define CACHE_WRITE_SUPPORT             11
+#define CACHE_READ_SUPPORT              12
+#define NUM_PAGES_IN_BLOCK              13
+#define ECC_ENABLE_SELECT               14
+#define WRITE_ENABLE_2_READ_ENABLE      15
+#define ADDRESS_2_DATA                  16
+#define READ_ENABLE_2_WRITE_ENABLE      17
+#define TWO_ROW_ADDRESS_CYCLES          18
+#define MULTIPLANE_ADDRESS_RESTRICT     19
+#define ACC_CLOCKS                      20
+#define READ_WRITE_ENABLE_LOW_COUNT     21
+#define READ_WRITE_ENABLE_HIGH_COUNT    22
+
+#define ECC_SECTOR_SIZE     512
+
+struct nand_buf {
+	int head;
+	int tail;
+	uint8_t *buf;
+	dma_addr_t dma_buf;
+};
+
+#define INTEL_CE4100	1
+#define INTEL_MRST	2
+#define DT		3
+
+struct denali_nand_info {
+	struct nand_chip nand;
+	int flash_bank; /* currently selected chip */
+	int status;
+	int platform;
+	struct nand_buf buf;
+	struct device *dev;
+	int total_used_banks;
+	uint32_t block;  /* stored for future use */
+	uint16_t page;
+	void __iomem *flash_reg;  /* Mapped io reg base address */
+	void __iomem *flash_mem;  /* Mapped io reg base address */
+
+	/* elements used by ISR */
+	struct completion complete;
+	spinlock_t irq_lock;
+	uint32_t irq_status;
+	int irq_debug_array[32];
+	int idx;
+	int irq;
+
+	uint32_t devnum;	/* represent how many nands connected */
+	uint32_t fwblks; /* represent how many blocks FW used */
+	uint32_t totalblks;
+	uint32_t blksperchip;
+	uint32_t bbtskipbytes;
+	uint32_t max_banks;
+};
+
+extern int denali_init(struct denali_nand_info *denali);
+extern void denali_remove(struct denali_nand_info *denali);
+
+#endif /* __DENALI_H__ */
diff --git a/drivers/mtd/nand/rawnand/denali_dt.c b/drivers/mtd/nand/rawnand/denali_dt.c
new file mode 100644
index 000000000000..0cb1e8d9fbfc
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/denali_dt.c
@@ -0,0 +1,131 @@
+/*
+ * NAND Flash Controller Device Driver for DT
+ *
+ * Copyright © 2011, Picochip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+struct denali_dt {
+	struct denali_nand_info	denali;
+	struct clk		*clk;
+};
+
+static const struct of_device_id denali_nand_dt_ids[] = {
+		{ .compatible = "denali,denali-nand-dt" },
+		{ /* sentinel */ }
+	};
+
+MODULE_DEVICE_TABLE(of, denali_nand_dt_ids);
+
+static u64 denali_dma_mask;
+
+static int denali_dt_probe(struct platform_device *ofdev)
+{
+	struct resource *denali_reg, *nand_data;
+	struct denali_dt *dt;
+	struct denali_nand_info *denali;
+	int ret;
+	const struct of_device_id *of_id;
+
+	of_id = of_match_device(denali_nand_dt_ids, &ofdev->dev);
+	if (of_id) {
+		ofdev->id_entry = of_id->data;
+	} else {
+		pr_err("Failed to find the right device id.\n");
+		return -ENOMEM;
+	}
+
+	dt = devm_kzalloc(&ofdev->dev, sizeof(*dt), GFP_KERNEL);
+	if (!dt)
+		return -ENOMEM;
+	denali = &dt->denali;
+
+	denali->platform = DT;
+	denali->dev = &ofdev->dev;
+	denali->irq = platform_get_irq(ofdev, 0);
+	if (denali->irq < 0) {
+		dev_err(&ofdev->dev, "no irq defined\n");
+		return denali->irq;
+	}
+
+	denali_reg = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "denali_reg");
+	denali->flash_reg = devm_ioremap_resource(&ofdev->dev, denali_reg);
+	if (IS_ERR(denali->flash_reg))
+		return PTR_ERR(denali->flash_reg);
+
+	nand_data = platform_get_resource_byname(ofdev, IORESOURCE_MEM, "nand_data");
+	denali->flash_mem = devm_ioremap_resource(&ofdev->dev, nand_data);
+	if (IS_ERR(denali->flash_mem))
+		return PTR_ERR(denali->flash_mem);
+
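+	/* Take the DMA mask from the optional "dma-mask" DT property when provided */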
+	if (!of_property_read_u32(ofdev->dev.of_node,
+		"dma-mask", (u32 *)&denali_dma_mask)) {
+		denali->dev->dma_mask = &denali_dma_mask;
+	} else {
+		denali->dev->dma_mask = NULL;
+	}
+
+	dt->clk = devm_clk_get(&ofdev->dev, NULL);
+	if (IS_ERR(dt->clk)) {
+		dev_err(&ofdev->dev, "no clk available\n");
+		return PTR_ERR(dt->clk);
+	}
+	clk_prepare_enable(dt->clk);
+
+	ret = denali_init(denali);
+	if (ret)
+		goto out_disable_clk;
+
+	platform_set_drvdata(ofdev, dt);
+	return 0;
+
+out_disable_clk:
+	clk_disable_unprepare(dt->clk);
+
+	return ret;
+}
+
+static int denali_dt_remove(struct platform_device *ofdev)
+{
+	struct denali_dt *dt = platform_get_drvdata(ofdev);
+
+	denali_remove(&dt->denali);
+	clk_disable(dt->clk);
+
+	return 0;
+}
+
+static struct platform_driver denali_dt_driver = {
+	.probe		= denali_dt_probe,
+	.remove		= denali_dt_remove,
+	.driver		= {
+		.name	= "denali-nand-dt",
+		.of_match_table	= denali_nand_dt_ids,
+	},
+};
+
+module_platform_driver(denali_dt_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("DT driver for Denali NAND controller");
diff --git a/drivers/mtd/nand/rawnand/denali_pci.c b/drivers/mtd/nand/rawnand/denali_pci.c
new file mode 100644
index 000000000000..de31514df282
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/denali_pci.c
@@ -0,0 +1,121 @@
+/*
+ * NAND Flash Controller Device Driver
+ * Copyright © 2009-2010, Intel Corporation and its suppliers.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include "denali.h"
+
+#define DENALI_NAND_NAME    "denali-nand-pci"
+
+/* List of platforms this NAND controller has been integrated into */
+static const struct pci_device_id denali_pci_ids[] = {
+	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
+	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
+	{ /* end: all zeroes */ }
+};
+MODULE_DEVICE_TABLE(pci, denali_pci_ids);
+
+static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int ret;
+	resource_size_t csr_base, mem_base;
+	unsigned long csr_len, mem_len;
+	struct denali_nand_info *denali;
+
+	denali = devm_kzalloc(&dev->dev, sizeof(*denali), GFP_KERNEL);
+	if (!denali)
+		return -ENOMEM;
+
+	ret = pcim_enable_device(dev);
+	if (ret) {
+		dev_err(&dev->dev, "Spectra: pci_enable_device failed.\n");
+		return ret;
+	}
+
+	if (id->driver_data == INTEL_CE4100) {
+		denali->platform = INTEL_CE4100;
+		mem_base = pci_resource_start(dev, 0);
+		mem_len = pci_resource_len(dev, 1);
+		csr_base = pci_resource_start(dev, 1);
+		csr_len = pci_resource_len(dev, 1);
+	} else {
+		denali->platform = INTEL_MRST;
+		csr_base = pci_resource_start(dev, 0);
+		csr_len = pci_resource_len(dev, 0);
+		mem_base = pci_resource_start(dev, 1);
+		mem_len = pci_resource_len(dev, 1);
+		if (!mem_len) {
+			mem_base = csr_base + csr_len;
+			mem_len = csr_len;
+		}
+	}
+
+	pci_set_master(dev);
+	denali->dev = &dev->dev;
+	denali->irq = dev->irq;
+
+	ret = pci_request_regions(dev, DENALI_NAND_NAME);
+	if (ret) {
+		dev_err(&dev->dev, "Spectra: Unable to request memory regions\n");
+		return ret;
+	}
+
+	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
+	if (!denali->flash_reg) {
+		dev_err(&dev->dev, "Spectra: Unable to remap memory region\n");
+		return -ENOMEM;
+	}
+
+	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
+	if (!denali->flash_mem) {
+		dev_err(&dev->dev, "Spectra: ioremap_nocache failed!");
+		ret = -ENOMEM;
+		goto failed_remap_reg;
+	}
+
+	ret = denali_init(denali);
+	if (ret)
+		goto failed_remap_mem;
+
+	pci_set_drvdata(dev, denali);
+
+	return 0;
+
+failed_remap_mem:
+	iounmap(denali->flash_mem);
+failed_remap_reg:
+	iounmap(denali->flash_reg);
+	return ret;
+}
+
+/* driver exit point */
+static void denali_pci_remove(struct pci_dev *dev)
+{
+	struct denali_nand_info *denali = pci_get_drvdata(dev);
+
+	denali_remove(denali);
+	iounmap(denali->flash_reg);
+	iounmap(denali->flash_mem);
+}
+
+static struct pci_driver denali_pci_driver = {
+	.name = DENALI_NAND_NAME,
+	.id_table = denali_pci_ids,
+	.probe = denali_pci_probe,
+	.remove = denali_pci_remove,
+};
+
+module_pci_driver(denali_pci_driver);
diff --git a/drivers/mtd/nand/rawnand/diskonchip.c b/drivers/mtd/nand/rawnand/diskonchip.c
new file mode 100644
index 000000000000..c3aa53caab5c
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/diskonchip.c
@@ -0,0 +1,1712 @@
+/*
+ * drivers/mtd/nand/diskonchip.c
+ *
+ * (C) 2003 Red Hat, Inc.
+ * (C) 2004 Dan Brown <dan_brown@ieee.org>
+ * (C) 2004 Kalev Lember <kalev@smartlink.ee>
+ *
+ * Author: David Woodhouse <dwmw2@infradead.org>
+ * Additional Diskonchip 2000 and Millennium support by Dan Brown <dan_brown@ieee.org>
+ * Diskonchip Millennium Plus support by Kalev Lember <kalev@smartlink.ee>
+ *
+ * Error correction code lifted from the old docecc code
+ * Author: Fabrice Bellard (fabrice.bellard@netgem.com)
+ * Copyright (C) 2000 Netgem S.A.
+ * converted to the generic Reed-Solomon library by Thomas Gleixner <tglx@linutronix.de>
+ *
+ * Interface to generic NAND code for M-Systems DiskOnChip devices
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/rslib.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/doc2000.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/inftl.h>
+#include <linux/module.h>
+
+/* Where to look for the devices? */
+#ifndef CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS
+#define CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS 0
+#endif
+
+static unsigned long doc_locations[] __initdata = {
+#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__)
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_PROBE_HIGH
+	0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000,
+	0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000,
+	0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000,
+	0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000,
+	0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000,
+#else
+	0xc8000, 0xca000, 0xcc000, 0xce000,
+	0xd0000, 0xd2000, 0xd4000, 0xd6000,
+	0xd8000, 0xda000, 0xdc000, 0xde000,
+	0xe0000, 0xe2000, 0xe4000, 0xe6000,
+	0xe8000, 0xea000, 0xec000, 0xee000,
+#endif
+#endif
+	0xffffffff };
+
+static struct mtd_info *doclist = NULL;
+
+struct doc_priv {
+	void __iomem *virtadr;
+	unsigned long physadr;
+	u_char ChipID;
+	u_char CDSNControl;
+	int chips_per_floor;	/* The number of chips detected on each floor */
+	int curfloor;
+	int curchip;
+	int mh0_page;
+	int mh1_page;
+	struct mtd_info *nextdoc;
+
+	/* Handle the last stage of initialization (BBT scan, partitioning) */
+	int (*late_init)(struct mtd_info *mtd);
+};
+
+/* This is the ecc value computed by the HW ecc generator upon writing an empty
+   page, one with all 0xff for data. */
+static u_char empty_write_ecc[6] = { 0x4b, 0x00, 0xe2, 0x0e, 0x93, 0xf7 };
+
+#define INFTL_BBT_RESERVED_BLOCKS 4
+
+#define DoC_is_MillenniumPlus(doc) ((doc)->ChipID == DOC_ChipID_DocMilPlus16 || (doc)->ChipID == DOC_ChipID_DocMilPlus32)
+#define DoC_is_Millennium(doc) ((doc)->ChipID == DOC_ChipID_DocMil)
+#define DoC_is_2000(doc) ((doc)->ChipID == DOC_ChipID_Doc2k)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+			      unsigned int bitmask);
+static void doc200x_select_chip(struct mtd_info *mtd, int chip);
+
+static int debug = 0;
+module_param(debug, int, 0);
+
+static int try_dword = 1;
+module_param(try_dword, int, 0);
+
+static int no_ecc_failures = 0;
+module_param(no_ecc_failures, int, 0);
+
+static int no_autopart = 0;
+module_param(no_autopart, int, 0);
+
+static int show_firmware_partition = 0;
+module_param(show_firmware_partition, int, 0);
+
+#ifdef CONFIG_MTD_NAND_DISKONCHIP_BBTWRITE
+static int inftl_bbt_write = 1;
+#else
+static int inftl_bbt_write = 0;
+#endif
+module_param(inftl_bbt_write, int, 0);
+
+static unsigned long doc_config_location = CONFIG_MTD_NAND_DISKONCHIP_PROBE_ADDRESS;
+module_param(doc_config_location, ulong, 0);
+MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip");
+
+/* Sector size for HW ECC */
+#define SECTOR_SIZE 512
+/* The sector bytes are packed into NB_DATA 10 bit words */
+#define NB_DATA (((SECTOR_SIZE + 1) * 8 + 6) / 10)
+/* Number of roots */
+#define NROOTS 4
+/* First consecutive root */
+#define FCR 510
+/* Number of symbols */
+#define NN 1023
+
+/* the Reed Solomon control structure */
+static struct rs_control *rs_decoder;
+
+/*
+ * The HW decoder in the DoC ASICs provides us an error syndrome,
+ * which we must convert to a standard syndrome usable by the generic
+ * Reed-Solomon library code.
+ *
+ * Fabrice Bellard figured this out in the old docecc code. I added
+ * some comments, improved a minor bit and converted it to make use
+ * of the generic Reed-Solomon library. tglx
+ */
+static int doc_ecc_decode(struct rs_control *rs, uint8_t *data, uint8_t *ecc)
+{
+	int i, j, nerr, errpos[8];
+	uint8_t parity;
+	uint16_t ds[4], s[5], tmp, errval[8], syn[4];
+
+	memset(syn, 0, sizeof(syn));
+	/* Convert the ecc bytes into words */
+	ds[0] = ((ecc[4] & 0xff) >> 0) | ((ecc[5] & 0x03) << 8);
+	ds[1] = ((ecc[5] & 0xfc) >> 2) | ((ecc[2] & 0x0f) << 6);
+	ds[2] = ((ecc[2] & 0xf0) >> 4) | ((ecc[3] & 0x3f) << 4);
+	ds[3] = ((ecc[3] & 0xc0) >> 6) | ((ecc[0] & 0xff) << 2);
+	parity = ecc[1];
+
+	/* Initialize the syndrome buffer */
+	for (i = 0; i < NROOTS; i++)
+		s[i] = ds[0];
+	/*
+	 *  Evaluate
+	 *  s[i] = ds[3]x^3 + ds[2]x^2 + ds[1]x^1 + ds[0]
+	 *  where x = alpha^(FCR + i)
+	 */
+	for (j = 1; j < NROOTS; j++) {
+		if (ds[j] == 0)
+			continue;
+		tmp = rs->index_of[ds[j]];
+		for (i = 0; i < NROOTS; i++)
+			s[i] ^= rs->alpha_to[rs_modnn(rs, tmp + (FCR + i) * j)];
+	}
+
+	/* Calc syn[i] = s[i] / alpha^(v + i) */
+	for (i = 0; i < NROOTS; i++) {
+		if (s[i])
+			syn[i] = rs_modnn(rs, rs->index_of[s[i]] + (NN - FCR - i));
+	}
+	/* Call the decoder library */
+	nerr = decode_rs16(rs, NULL, NULL, 1019, syn, 0, errpos, 0, errval);
+
+	/* Uncorrectable errors? */
+	if (nerr < 0)
+		return nerr;
+
+	/*
+	 * Correct the errors. The bit positions are a bit of magic,
+	 * but they are given by the design of the de/encoder circuit
+	 * in the DoC ASICs.
+	 */
+	for (i = 0; i < nerr; i++) {
+		int index, bitpos, pos = 1015 - errpos[i];
+		uint8_t val;
+		if (pos >= NB_DATA && pos < 1019)
+			continue;
+		if (pos < NB_DATA) {
+			/* extract bit position (MSB first) */
+			pos = 10 * (NB_DATA - 1 - pos) - 6;
+			/* now correct the following 10 bits. At most two bytes
+			   can be modified since pos is even */
+			index = (pos >> 3) ^ 1;
+			bitpos = pos & 7;
+			if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+				val = (uint8_t) (errval[i] >> (2 + bitpos));
+				parity ^= val;
+				if (index < SECTOR_SIZE)
+					data[index] ^= val;
+			}
+			index = ((pos >> 3) + 1) ^ 1;
+			bitpos = (bitpos + 10) & 7;
+			if (bitpos == 0)
+				bitpos = 8;
+			if ((index >= 0 && index < SECTOR_SIZE) || index == (SECTOR_SIZE + 1)) {
+				val = (uint8_t) (errval[i] << (8 - bitpos));
+				parity ^= val;
+				if (index < SECTOR_SIZE)
+					data[index] ^= val;
+			}
+		}
+	}
+	/* If the parity is wrong, no rescue possible */
+	return parity ? -EBADMSG : nerr;
+}
+
+static void DoC_Delay(struct doc_priv *doc, unsigned short cycles)
+{
+	volatile char dummy;
+	int i;
+
+	for (i = 0; i < cycles; i++) {
+		if (DoC_is_Millennium(doc))
+			dummy = ReadDOC(doc->virtadr, NOP);
+		else if (DoC_is_MillenniumPlus(doc))
+			dummy = ReadDOC(doc->virtadr, Mplus_NOP);
+		else
+			dummy = ReadDOC(doc->virtadr, DOCStatus);
+	}
+
+}
+
+#define CDSN_CTRL_FR_B_MASK	(CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1)
+
+/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */
+static int _DoC_WaitReady(struct doc_priv *doc)
+{
+	void __iomem *docptr = doc->virtadr;
+	unsigned long timeo = jiffies + (HZ * 10);
+
+	if (debug)
+		printk("_DoC_WaitReady...\n");
+	/* Out-of-line routine to wait for chip response */
+	if (DoC_is_MillenniumPlus(doc)) {
+		while ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+			if (time_after(jiffies, timeo)) {
+				printk("_DoC_WaitReady timed out.\n");
+				return -EIO;
+			}
+			udelay(1);
+			cond_resched();
+		}
+	} else {
+		while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+			if (time_after(jiffies, timeo)) {
+				printk("_DoC_WaitReady timed out.\n");
+				return -EIO;
+			}
+			udelay(1);
+			cond_resched();
+		}
+	}
+
+	return 0;
+}
+
+static inline int DoC_WaitReady(struct doc_priv *doc)
+{
+	void __iomem *docptr = doc->virtadr;
+	int ret = 0;
+
+	if (DoC_is_MillenniumPlus(doc)) {
+		DoC_Delay(doc, 4);
+
+		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK)
+			/* Call the out-of-line routine to wait */
+			ret = _DoC_WaitReady(doc);
+	} else {
+		DoC_Delay(doc, 4);
+
+		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B))
+			/* Call the out-of-line routine to wait */
+			ret = _DoC_WaitReady(doc);
+		DoC_Delay(doc, 2);
+	}
+
+	if (debug)
+		printk("DoC_WaitReady OK\n");
+	return ret;
+}
+
+static void doc2000_write_byte(struct mtd_info *mtd, u_char datum)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	if (debug)
+		printk("write_byte %02x\n", datum);
+	WriteDOC(datum, docptr, CDSNSlowIO);
+	WriteDOC(datum, docptr, 2k_CDSN_IO);
+}
+
+static u_char doc2000_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	u_char ret;
+
+	ReadDOC(docptr, CDSNSlowIO);
+	DoC_Delay(doc, 2);
+	ret = ReadDOC(docptr, 2k_CDSN_IO);
+	if (debug)
+		printk("read_byte returns %02x\n", ret);
+	return ret;
+}
+
+static void doc2000_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+	if (debug)
+		printk("writebuf of %d bytes: ", len);
+	for (i = 0; i < len; i++) {
+		WriteDOC_(buf[i], docptr, DoC_2k_CDSN_IO + i);
+		if (debug && i < 16)
+			printk("%02x ", buf[i]);
+	}
+	if (debug)
+		printk("\n");
+}
+
+static void doc2000_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	if (debug)
+		printk("readbuf of %d bytes: ", len);
+
+	for (i = 0; i < len; i++) {
+		buf[i] = ReadDOC(docptr, 2k_CDSN_IO + i);
+	}
+}
+
+static void doc2000_readbuf_dword(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	if (debug)
+		printk("readbuf_dword of %d bytes: ", len);
+
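+	/* Fall back to byte-wide reads when the buffer or length is not 32-bit aligned */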
+	if (unlikely((((unsigned long)buf) | len) & 3)) {
+		for (i = 0; i < len; i++) {
+			*(uint8_t *) (&buf[i]) = ReadDOC(docptr, 2k_CDSN_IO + i);
+		}
+	} else {
+		for (i = 0; i < len; i += 4) {
+			*(uint32_t *) (&buf[i]) = readl(docptr + DoC_2k_CDSN_IO + i);
+		}
+	}
+}
+
+static uint16_t __init doc200x_ident_chip(struct mtd_info *mtd, int nr)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	uint16_t ret;
+
+	doc200x_select_chip(mtd, nr);
+	doc200x_hwcontrol(mtd, NAND_CMD_READID,
+			  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+	doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+	/* We can't use dev_ready here, but at least we wait for the
+	 * command to complete
+	 */
+	udelay(50);
+
+	ret = this->read_byte(mtd) << 8;
+	ret |= this->read_byte(mtd);
+
+	if (doc->ChipID == DOC_ChipID_Doc2k && try_dword && !nr) {
+		/* First chip probe. See if we get same results by 32-bit access */
+		union {
+			uint32_t dword;
+			uint8_t byte[4];
+		} ident;
+		void __iomem *docptr = doc->virtadr;
+
+		doc200x_hwcontrol(mtd, NAND_CMD_READID,
+				  NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+		doc200x_hwcontrol(mtd, 0, NAND_CTRL_ALE | NAND_CTRL_CHANGE);
+		doc200x_hwcontrol(mtd, NAND_CMD_NONE,
+				  NAND_NCE | NAND_CTRL_CHANGE);
+
+		udelay(50);
+
+		ident.dword = readl(docptr + DoC_2k_CDSN_IO);
+		if (((ident.byte[0] << 8) | ident.byte[1]) == ret) {
+			printk(KERN_INFO "DiskOnChip 2000 responds to DWORD access\n");
+			this->read_buf = &doc2000_readbuf_dword;
+		}
+	}
+
+	return ret;
+}
+
+static void __init doc2000_count_chips(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	uint16_t mfrid;
+	int i;
+
+	/* Max 4 chips per floor on DiskOnChip 2000 */
+	doc->chips_per_floor = 4;
+
+	/* Find out what the first chip is */
+	mfrid = doc200x_ident_chip(mtd, 0);
+
+	/* Find how many chips in each floor. */
+	for (i = 1; i < 4; i++) {
+		if (doc200x_ident_chip(mtd, i) != mfrid)
+			break;
+	}
+	doc->chips_per_floor = i;
+	printk(KERN_DEBUG "Detected %d chips per floor.\n", i);
+}
+
+static int doc200x_wait(struct mtd_info *mtd, struct nand_chip *this)
+{
+	struct doc_priv *doc = nand_get_controller_data(this);
+
+	int status;
+
+	DoC_WaitReady(doc);
+	this->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+	DoC_WaitReady(doc);
+	status = (int)this->read_byte(mtd);
+
+	return status;
+}
+
+static void doc2001_write_byte(struct mtd_info *mtd, u_char datum)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	WriteDOC(datum, docptr, CDSNSlowIO);
+	WriteDOC(datum, docptr, Mil_CDSN_IO);
+	WriteDOC(datum, docptr, WritePipeTerm);
+}
+
+static u_char doc2001_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	//ReadDOC(docptr, CDSNSlowIO);
+	/* 11.4.5 -- delay twice to allow extended length cycle */
+	DoC_Delay(doc, 2);
+	ReadDOC(docptr, ReadPipeInit);
+	//return ReadDOC(docptr, Mil_CDSN_IO);
+	return ReadDOC(docptr, LastDataRead);
+}
+
+static void doc2001_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	for (i = 0; i < len; i++)
+		WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+	/* Terminate write pipeline */
+	WriteDOC(0x00, docptr, WritePipeTerm);
+}
+
+static void doc2001_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	/* Start read pipeline */
+	ReadDOC(docptr, ReadPipeInit);
+
+	for (i = 0; i < len - 1; i++)
+		buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff));
+
+	/* Terminate read pipeline */
+	buf[i] = ReadDOC(docptr, LastDataRead);
+}
+
+static u_char doc2001plus_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	u_char ret;
+
+	ReadDOC(docptr, Mplus_ReadPipeInit);
+	ReadDOC(docptr, Mplus_ReadPipeInit);
+	ret = ReadDOC(docptr, Mplus_LastDataRead);
+	if (debug)
+		printk("read_byte returns %02x\n", ret);
+	return ret;
+}
+
+static void doc2001plus_writebuf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	if (debug)
+		printk("writebuf of %d bytes: ", len);
+	for (i = 0; i < len; i++) {
+		WriteDOC_(buf[i], docptr, DoC_Mil_CDSN_IO + i);
+		if (debug && i < 16)
+			printk("%02x ", buf[i]);
+	}
+	if (debug)
+		printk("\n");
+}
+
+static void doc2001plus_readbuf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	if (debug)
+		printk("readbuf of %d bytes: ", len);
+
+	/* Start read pipeline */
+	ReadDOC(docptr, Mplus_ReadPipeInit);
+	ReadDOC(docptr, Mplus_ReadPipeInit);
+
+	for (i = 0; i < len - 2; i++) {
+		buf[i] = ReadDOC(docptr, Mil_CDSN_IO);
+		if (debug && i < 16)
+			printk("%02x ", buf[i]);
+	}
+
+	/* Terminate read pipeline */
+	buf[len - 2] = ReadDOC(docptr, Mplus_LastDataRead);
+	if (debug && i < 16)
+		printk("%02x ", buf[len - 2]);
+	buf[len - 1] = ReadDOC(docptr, Mplus_LastDataRead);
+	if (debug && i < 16)
+		printk("%02x ", buf[len - 1]);
+	if (debug)
+		printk("\n");
+}
+
+static void doc2001plus_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int floor = 0;
+
+	if (debug)
+		printk("select chip (%d)\n", chip);
+
+	if (chip == -1) {
+		/* Disable flash internally */
+		WriteDOC(0, docptr, Mplus_FlashSelect);
+		return;
+	}
+
+	floor = chip / doc->chips_per_floor;
+	chip -= (floor * doc->chips_per_floor);
+
+	/* Assert ChipEnable and deassert WriteProtect */
+	WriteDOC((DOC_FLASH_CE), docptr, Mplus_FlashSelect);
+	this->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+	doc->curchip = chip;
+	doc->curfloor = floor;
+}
+
+static void doc200x_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int floor = 0;
+
+	if (debug)
+		printk("select chip (%d)\n", chip);
+
+	if (chip == -1)
+		return;
+
+	floor = chip / doc->chips_per_floor;
+	chip -= (floor * doc->chips_per_floor);
+
+	/* 11.4.4 -- deassert CE before changing chip */
+	doc200x_hwcontrol(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+
+	WriteDOC(floor, docptr, FloorSelect);
+	WriteDOC(chip, docptr, CDSNDeviceSelect);
+
+	doc200x_hwcontrol(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+	doc->curchip = chip;
+	doc->curfloor = floor;
+}
+
+#define CDSN_CTRL_MSK (CDSN_CTRL_CE | CDSN_CTRL_CLE | CDSN_CTRL_ALE)
+
+static void doc200x_hwcontrol(struct mtd_info *mtd, int cmd,
+			      unsigned int ctrl)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		doc->CDSNControl &= ~CDSN_CTRL_MSK;
+		doc->CDSNControl |= ctrl & CDSN_CTRL_MSK;
+		if (debug)
+			printk("hwcontrol(%d): %02x\n", cmd, doc->CDSNControl);
+		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+		/* 11.4.3 -- 4 NOPs after CDSNControl write */
+		DoC_Delay(doc, 4);
+	}
+	if (cmd != NAND_CMD_NONE) {
+		if (DoC_is_2000(doc))
+			doc2000_write_byte(mtd, cmd);
+		else
+			doc2001_write_byte(mtd, cmd);
+	}
+}
+
+static void doc2001plus_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	/*
+	 * Must terminate write pipeline before sending any commands
+	 * to the device.
+	 */
+	if (command == NAND_CMD_PAGEPROG) {
+		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+		WriteDOC(0x00, docptr, Mplus_WritePipeTerm);
+	}
+
+	/*
+	 * Write out the command to the device.
+	 */
+	if (command == NAND_CMD_SEQIN) {
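+		/* Select the read area (first/second 256 bytes or OOB) before issuing SEQIN */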
+		int readcmd;
+
+		if (column >= mtd->writesize) {
+			/* OOB area */
+			column -= mtd->writesize;
+			readcmd = NAND_CMD_READOOB;
+		} else if (column < 256) {
+			/* First 256 bytes --> READ0 */
+			readcmd = NAND_CMD_READ0;
+		} else {
+			column -= 256;
+			readcmd = NAND_CMD_READ1;
+		}
+		WriteDOC(readcmd, docptr, Mplus_FlashCmd);
+	}
+	WriteDOC(command, docptr, Mplus_FlashCmd);
+	WriteDOC(0, docptr, Mplus_WritePipeTerm);
+	WriteDOC(0, docptr, Mplus_WritePipeTerm);
+
+	if (column != -1 || page_addr != -1) {
+		/* Serially input address */
+		if (column != -1) {
+			/* Adjust columns for 16 bit buswidth */
+			if (this->options & NAND_BUSWIDTH_16 &&
+					!nand_opcode_8bits(command))
+				column >>= 1;
+			WriteDOC(column, docptr, Mplus_FlashAddress);
+		}
+		if (page_addr != -1) {
+			WriteDOC((unsigned char)(page_addr & 0xff), docptr, Mplus_FlashAddress);
+			WriteDOC((unsigned char)((page_addr >> 8) & 0xff), docptr, Mplus_FlashAddress);
+			/* One more address cycle for higher density devices */
+			if (this->chipsize & 0x0c000000) {
+				WriteDOC((unsigned char)((page_addr >> 16) & 0x0f), docptr, Mplus_FlashAddress);
+				printk("high density\n");
+			}
+		}
+		WriteDOC(0, docptr, Mplus_WritePipeTerm);
+		WriteDOC(0, docptr, Mplus_WritePipeTerm);
+		/* deassert ALE */
+		if (command == NAND_CMD_READ0 || command == NAND_CMD_READ1 ||
+		    command == NAND_CMD_READOOB || command == NAND_CMD_READID)
+			WriteDOC(0, docptr, Mplus_FlashControl);
+	}
+
+	/*
+	 * program and erase have their own busy handlers
+	 * status and sequential in needs no delay
+	 */
+	switch (command) {
+
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_STATUS:
+		return;
+
+	case NAND_CMD_RESET:
+		if (this->dev_ready)
+			break;
+		udelay(this->chip_delay);
+		WriteDOC(NAND_CMD_STATUS, docptr, Mplus_FlashCmd);
+		WriteDOC(0, docptr, Mplus_WritePipeTerm);
+		WriteDOC(0, docptr, Mplus_WritePipeTerm);
+		while (!(this->read_byte(mtd) & 0x40)) ;
+		return;
+
+		/* This applies to read commands */
+	default:
+		/*
+		 * If we don't have access to the busy pin, we apply the given
+		 * command delay
+		 */
+		if (!this->dev_ready) {
+			udelay(this->chip_delay);
+			return;
+		}
+	}
+
+	/* Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine. */
+	ndelay(100);
+	/* wait until command is processed */
+	while (!this->dev_ready(mtd)) ;
+}
+
+static int doc200x_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	if (DoC_is_MillenniumPlus(doc)) {
+		/* 11.4.2 -- must NOP four times before checking FR/B# */
+		DoC_Delay(doc, 4);
+		if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) {
+			if (debug)
+				printk("not ready\n");
+			return 0;
+		}
+		if (debug)
+			printk("was ready\n");
+		return 1;
+	} else {
+		/* 11.4.2 -- must NOP four times before checking FR/B# */
+		DoC_Delay(doc, 4);
+		if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) {
+			if (debug)
+				printk("not ready\n");
+			return 0;
+		}
+		/* 11.4.2 -- Must NOP twice if it's ready */
+		DoC_Delay(doc, 2);
+		if (debug)
+			printk("was ready\n");
+		return 1;
+	}
+}
+
+static int doc200x_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+	/* This is our last resort if we couldn't find or create a BBT.  Just
+	   pretend all blocks are good. */
+	return 0;
+}
+
+static void doc200x_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	/* Prime the ECC engine */
+	switch (mode) {
+	case NAND_ECC_READ:
+		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+		WriteDOC(DOC_ECC_EN, docptr, ECCConf);
+		break;
+	case NAND_ECC_WRITE:
+		WriteDOC(DOC_ECC_RESET, docptr, ECCConf);
+		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf);
+		break;
+	}
+}
+
+static void doc2001plus_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+
+	/* Prime the ECC engine */
+	switch (mode) {
+	case NAND_ECC_READ:
+		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+		WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf);
+		break;
+	case NAND_ECC_WRITE:
+		WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf);
+		WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf);
+		break;
+	}
+}
+
+/* This code is only called on write */
+static int doc200x_calculate_ecc(struct mtd_info *mtd, const u_char *dat, unsigned char *ecc_code)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+	int emptymatch = 1;
+
+	/* flush the pipeline */
+	if (DoC_is_2000(doc)) {
+		WriteDOC(doc->CDSNControl & ~CDSN_CTRL_FLASH_IO, docptr, CDSNControl);
+		WriteDOC(0, docptr, 2k_CDSN_IO);
+		WriteDOC(0, docptr, 2k_CDSN_IO);
+		WriteDOC(0, docptr, 2k_CDSN_IO);
+		WriteDOC(doc->CDSNControl, docptr, CDSNControl);
+	} else if (DoC_is_MillenniumPlus(doc)) {
+		WriteDOC(0, docptr, Mplus_NOP);
+		WriteDOC(0, docptr, Mplus_NOP);
+		WriteDOC(0, docptr, Mplus_NOP);
+	} else {
+		WriteDOC(0, docptr, NOP);
+		WriteDOC(0, docptr, NOP);
+		WriteDOC(0, docptr, NOP);
+	}
+
+	for (i = 0; i < 6; i++) {
+		if (DoC_is_MillenniumPlus(doc))
+			ecc_code[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+		else
+			ecc_code[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+		if (ecc_code[i] != empty_write_ecc[i])
+			emptymatch = 0;
+	}
+	if (DoC_is_MillenniumPlus(doc))
+		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+	else
+		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+#if 0
+	/* If emptymatch=1, we might have an all-0xff data buffer.  Check. */
+	if (emptymatch) {
+		/* Note: this somewhat expensive test should not be triggered
+		   often.  It could be optimized away by examining the data in
+		   the writebuf routine, and remembering the result. */
+		for (i = 0; i < 512; i++) {
+			if (dat[i] == 0xff)
+				continue;
+			emptymatch = 0;
+			break;
+		}
+	}
+	/* If emptymatch still =1, we do have an all-0xff data buffer.
+	   Return all-0xff ecc value instead of the computed one, so
+	   it'll look just like a freshly-erased page. */
+	if (emptymatch)
+		memset(ecc_code, 0xff, 6);
+#endif
+	return 0;
+}
+
+static int doc200x_correct_data(struct mtd_info *mtd, u_char *dat,
+				u_char *read_ecc, u_char *isnull)
+{
+	int i, ret = 0;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	void __iomem *docptr = doc->virtadr;
+	uint8_t calc_ecc[6];
+	volatile u_char dummy;
+
+	/* flush the pipeline */
+	if (DoC_is_2000(doc)) {
+		dummy = ReadDOC(docptr, 2k_ECCStatus);
+		dummy = ReadDOC(docptr, 2k_ECCStatus);
+		dummy = ReadDOC(docptr, 2k_ECCStatus);
+	} else if (DoC_is_MillenniumPlus(doc)) {
+		dummy = ReadDOC(docptr, Mplus_ECCConf);
+		dummy = ReadDOC(docptr, Mplus_ECCConf);
+		dummy = ReadDOC(docptr, Mplus_ECCConf);
+	} else {
+		dummy = ReadDOC(docptr, ECCConf);
+		dummy = ReadDOC(docptr, ECCConf);
+		dummy = ReadDOC(docptr, ECCConf);
+	}
+
+	/* Error occurred ? */
+	if (dummy & 0x80) {
+		for (i = 0; i < 6; i++) {
+			if (DoC_is_MillenniumPlus(doc))
+				calc_ecc[i] = ReadDOC_(docptr, DoC_Mplus_ECCSyndrome0 + i);
+			else
+				calc_ecc[i] = ReadDOC_(docptr, DoC_ECCSyndrome0 + i);
+		}
+
+		ret = doc_ecc_decode(rs_decoder, dat, calc_ecc);
+		if (ret > 0)
+			printk(KERN_ERR "doc200x_correct_data corrected %d errors\n", ret);
+	}
+	if (DoC_is_MillenniumPlus(doc))
+		WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf);
+	else
+		WriteDOC(DOC_ECC_DIS, docptr, ECCConf);
+	if (no_ecc_failures && mtd_is_eccerr(ret)) {
+		printk(KERN_ERR "suppressing ECC failure\n");
+		ret = 0;
+	}
+	return ret;
+}
+
+//u_char mydatabuf[528];
+
+static int doc200x_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = 6;
+
+	return 0;
+}
+
+static int doc200x_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	/*
+	 * The strange out-of-order free bytes definition is a (possibly
+	 * unneeded) attempt to retain compatibility.  It used to read:
+	 *	.oobfree = { {8, 8} }
+	 * Since that leaves two bytes unusable, it was changed.  But the
+	 * following scheme might affect existing jffs2 installs by moving the
+	 * cleanmarker:
+	 *	.oobfree = { {6, 10} }
+	 * jffs2 seems to handle the above gracefully, but the current scheme
+	 * seems safer. The only problem with it is that any code retrieving
+	 * free bytes position must be able to handle out-of-order segments.
+	 */
+	if (!section) {
+		oobregion->offset = 8;
+		oobregion->length = 8;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = 2;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops doc200x_ooblayout_ops = {
+	.ecc = doc200x_ooblayout_ecc,
+	.free = doc200x_ooblayout_free,
+};
+
+/* Find the (I)NFTL Media Header, and optionally also the mirror media header.
+   On successful return, buf will contain a copy of the media header for
+   further processing.  id is the string to scan for, and will presumably be
+   either "ANAND" or "BNAND".  If findmirror=1, also look for the mirror media
+   header.  The page #s of the found media headers are placed in mh0_page and
+   mh1_page in the DOC private structure. */
+static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const char *id, int findmirror)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	unsigned offs;
+	int ret;
+	size_t retlen;
+
+	for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
+		ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+		if (retlen != mtd->writesize)
+			continue;
+		if (ret) {
+			printk(KERN_WARNING "ECC error scanning DOC at 0x%x\n", offs);
+		}
+		if (memcmp(buf, id, 6))
+			continue;
+		printk(KERN_INFO "Found DiskOnChip %s Media Header at 0x%x\n", id, offs);
+		if (doc->mh0_page == -1) {
+			doc->mh0_page = offs >> this->page_shift;
+			if (!findmirror)
+				return 1;
+			continue;
+		}
+		doc->mh1_page = offs >> this->page_shift;
+		return 2;
+	}
+	if (doc->mh0_page == -1) {
+		printk(KERN_WARNING "DiskOnChip %s Media Header not found.\n", id);
+		return 0;
+	}
+	/* Only one mediaheader was found.  We want buf to contain a
+	   mediaheader on return, so we'll have to re-read the one we found. */
+	offs = doc->mh0_page << this->page_shift;
+	ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
+	if (retlen != mtd->writesize) {
+		/* Insanity.  Give up. */
+		printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
+		return 0;
+	}
+	return 1;
+}
+
+static inline int __init nftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	int ret = 0;
+	u_char *buf;
+	struct NFTLMediaHeader *mh;
+	const unsigned psize = 1 << this->page_shift;
+	int numparts = 0;
+	unsigned blocks, maxblocks;
+	int offs, numheaders;
+
+	buf = kmalloc(mtd->writesize, GFP_KERNEL);
+	if (!buf) {
+		return 0;
+	}
+	if (!(numheaders = find_media_headers(mtd, buf, "ANAND", 1)))
+		goto out;
+	mh = (struct NFTLMediaHeader *)buf;
+
+	le16_to_cpus(&mh->NumEraseUnits);
+	le16_to_cpus(&mh->FirstPhysicalEUN);
+	le32_to_cpus(&mh->FormattedSize);
+
+	printk(KERN_INFO "    DataOrgID        = %s\n"
+			 "    NumEraseUnits    = %d\n"
+			 "    FirstPhysicalEUN = %d\n"
+			 "    FormattedSize    = %d\n"
+			 "    UnitSizeFactor   = %d\n",
+		mh->DataOrgID, mh->NumEraseUnits,
+		mh->FirstPhysicalEUN, mh->FormattedSize,
+		mh->UnitSizeFactor);
+
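+	/* Constraints: at most 32768 virtual blocks, and the media header
+	   plus BBT must fit within a single virtual block */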
+	blocks = mtd->size >> this->phys_erase_shift;
+	maxblocks = min(32768U, mtd->erasesize - psize);
+
+	if (mh->UnitSizeFactor == 0x00) {
+		/* Auto-determine UnitSizeFactor.  The constraints are:
+		   - There can be at most 32768 virtual blocks.
+		   - There can be at most (virtual block size - page size)
+		   virtual blocks (because MediaHeader+BBT must fit in 1).
+		 */
+		mh->UnitSizeFactor = 0xff;
+		while (blocks > maxblocks) {
+			blocks >>= 1;
+			maxblocks = min(32768U, (maxblocks << 1) + psize);
+			mh->UnitSizeFactor--;
+		}
+		printk(KERN_WARNING "UnitSizeFactor=0x00 detected.  Correct value is assumed to be 0x%02x.\n", mh->UnitSizeFactor);
+	}
+
+	/* NOTE: The lines below modify internal variables of the NAND and MTD
+	   layers; variables which have already been configured by nand_scan.
+	   Unfortunately, we didn't know before this point what these values
+	   should be.  Thus, this code is somewhat dependent on the exact
+	   implementation of the NAND layer.  */
+	if (mh->UnitSizeFactor != 0xff) {
+		this->bbt_erase_shift += (0xff - mh->UnitSizeFactor);
+		mtd->erasesize <<= (0xff - mh->UnitSizeFactor);
+		printk(KERN_INFO "Setting virtual erase size to %d\n", mtd->erasesize);
+		blocks = mtd->size >> this->bbt_erase_shift;
+		maxblocks = min(32768U, mtd->erasesize - psize);
+	}
+
+	if (blocks > maxblocks) {
+		printk(KERN_ERR "UnitSizeFactor of 0x%02x is inconsistent with device size.  Aborting.\n", mh->UnitSizeFactor);
+		goto out;
+	}
+
+	/* Skip past the media headers. */
+	offs = max(doc->mh0_page, doc->mh1_page);
+	offs <<= this->page_shift;
+	offs += mtd->erasesize;
+
+	if (show_firmware_partition == 1) {
+		parts[0].name = " DiskOnChip Firmware / Media Header partition";
+		parts[0].offset = 0;
+		parts[0].size = offs;
+		numparts = 1;
+	}
+
+	parts[numparts].name = " DiskOnChip BDTL partition";
+	parts[numparts].offset = offs;
+	parts[numparts].size = (mh->NumEraseUnits - numheaders) << this->bbt_erase_shift;
+
+	offs += parts[numparts].size;
+	numparts++;
+
+	if (offs < mtd->size) {
+		parts[numparts].name = " DiskOnChip Remainder partition";
+		parts[numparts].offset = offs;
+		parts[numparts].size = mtd->size - offs;
+		numparts++;
+	}
+
+	ret = numparts;
+ out:
+	kfree(buf);
+	return ret;
+}
+
+/* This is a stripped-down copy of the code in inftlmount.c */
+static inline int __init inftl_partscan(struct mtd_info *mtd, struct mtd_partition *parts)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	int ret = 0;
+	u_char *buf;
+	struct INFTLMediaHeader *mh;
+	struct INFTLPartition *ip;
+	int numparts = 0;
+	int blocks;
+	int vshift, lastvunit = 0;
+	int i;
+	int end = mtd->size;
+
+	if (inftl_bbt_write)
+		end -= (INFTL_BBT_RESERVED_BLOCKS << this->phys_erase_shift);
+
+	buf = kmalloc(mtd->writesize, GFP_KERNEL);
+	if (!buf) {
+		return 0;
+	}
+
+	if (!find_media_headers(mtd, buf, "BNAND", 0))
+		goto out;
+	doc->mh1_page = doc->mh0_page + (4096 >> this->page_shift);
+	mh = (struct INFTLMediaHeader *)buf;
+
+	le32_to_cpus(&mh->NoOfBootImageBlocks);
+	le32_to_cpus(&mh->NoOfBinaryPartitions);
+	le32_to_cpus(&mh->NoOfBDTLPartitions);
+	le32_to_cpus(&mh->BlockMultiplierBits);
+	le32_to_cpus(&mh->FormatFlags);
+	le32_to_cpus(&mh->PercentUsed);
+
+	printk(KERN_INFO "    bootRecordID          = %s\n"
+			 "    NoOfBootImageBlocks   = %d\n"
+			 "    NoOfBinaryPartitions  = %d\n"
+			 "    NoOfBDTLPartitions    = %d\n"
+			 "    BlockMultiplerBits    = %d\n"
+			 "    FormatFlgs            = %d\n"
+			 "    OsakVersion           = %d.%d.%d.%d\n"
+			 "    PercentUsed           = %d\n",
+		mh->bootRecordID, mh->NoOfBootImageBlocks,
+		mh->NoOfBinaryPartitions,
+		mh->NoOfBDTLPartitions,
+		mh->BlockMultiplierBits, mh->FormatFlags,
+		((unsigned char *) &mh->OsakVersion)[0] & 0xf,
+		((unsigned char *) &mh->OsakVersion)[1] & 0xf,
+		((unsigned char *) &mh->OsakVersion)[2] & 0xf,
+		((unsigned char *) &mh->OsakVersion)[3] & 0xf,
+		mh->PercentUsed);
+
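+	/* vshift is log2 of the virtual (INFTL) block size */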
+	vshift = this->phys_erase_shift + mh->BlockMultiplierBits;
+
+	blocks = mtd->size >> vshift;
+	if (blocks > 32768) {
+		printk(KERN_ERR "BlockMultiplierBits=%d is inconsistent with device size.  Aborting.\n", mh->BlockMultiplierBits);
+		goto out;
+	}
+
+	blocks = doc->chips_per_floor << (this->chip_shift - this->phys_erase_shift);
+	if (inftl_bbt_write && (blocks > mtd->erasesize)) {
+		printk(KERN_ERR "Writeable BBTs spanning more than one erase block are not yet supported.  FIX ME!\n");
+		goto out;
+	}
+
+	/* Scan the partitions */
+	for (i = 0; (i < 4); i++) {
+		ip = &(mh->Partitions[i]);
+		le32_to_cpus(&ip->virtualUnits);
+		le32_to_cpus(&ip->firstUnit);
+		le32_to_cpus(&ip->lastUnit);
+		le32_to_cpus(&ip->flags);
+		le32_to_cpus(&ip->spareUnits);
+		le32_to_cpus(&ip->Reserved0);
+
+		printk(KERN_INFO	"    PARTITION[%d] ->\n"
+			"        virtualUnits    = %d\n"
+			"        firstUnit       = %d\n"
+			"        lastUnit        = %d\n"
+			"        flags           = 0x%x\n"
+			"        spareUnits      = %d\n",
+			i, ip->virtualUnits, ip->firstUnit,
+			ip->lastUnit, ip->flags,
+			ip->spareUnits);
+
+		if ((show_firmware_partition == 1) &&
+		    (i == 0) && (ip->firstUnit > 0)) {
+			parts[0].name = " DiskOnChip IPL / Media Header partition";
+			parts[0].offset = 0;
+			parts[0].size = mtd->erasesize * ip->firstUnit;
+			numparts = 1;
+		}
+
+		if (ip->flags & INFTL_BINARY)
+			parts[numparts].name = " DiskOnChip BDK partition";
+		else
+			parts[numparts].name = " DiskOnChip BDTL partition";
+		parts[numparts].offset = ip->firstUnit << vshift;
+		parts[numparts].size = (1 + ip->lastUnit - ip->firstUnit) << vshift;
+		numparts++;
+		if (ip->lastUnit > lastvunit)
+			lastvunit = ip->lastUnit;
+		if (ip->flags & INFTL_LAST)
+			break;
+	}
+	lastvunit++;
+	if ((lastvunit << vshift) < end) {
+		parts[numparts].name = " DiskOnChip Remainder partition";
+		parts[numparts].offset = lastvunit << vshift;
+		parts[numparts].size = end - parts[numparts].offset;
+		numparts++;
+	}
+	ret = numparts;
+ out:
+	kfree(buf);
+	return ret;
+}
+
+static int __init nftl_scan_bbt(struct mtd_info *mtd)
+{
+	int ret, numparts;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	struct mtd_partition parts[2];
+
+	memset((char *)parts, 0, sizeof(parts));
+	/* On NFTL, we have to find the media headers before we can read the
+	   BBTs, since they're stored in the media header eraseblocks. */
+	numparts = nftl_partscan(mtd, parts);
+	if (!numparts)
+		return -EIO;
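+	/* The primary BBT descriptor points at the page right after the NFTL media header */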
+	this->bbt_td->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+				NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+				NAND_BBT_VERSION;
+	this->bbt_td->veroffs = 7;
+	this->bbt_td->pages[0] = doc->mh0_page + 1;
+	if (doc->mh1_page != -1) {
+		this->bbt_md->options = NAND_BBT_ABSPAGE | NAND_BBT_8BIT |
+					NAND_BBT_SAVECONTENT | NAND_BBT_WRITE |
+					NAND_BBT_VERSION;
+		this->bbt_md->veroffs = 7;
+		this->bbt_md->pages[0] = doc->mh1_page + 1;
+	} else {
+		this->bbt_md = NULL;
+	}
+
+	ret = this->scan_bbt(mtd);
+	if (ret)
+		return ret;
+
+	return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
+}
+
+static int __init inftl_scan_bbt(struct mtd_info *mtd)
+{
+	int ret, numparts;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+	struct mtd_partition parts[5];
+
+	if (this->numchips > doc->chips_per_floor) {
+		printk(KERN_ERR "Multi-floor INFTL devices not yet supported.\n");
+		return -EIO;
+	}
+
+	if (DoC_is_MillenniumPlus(doc)) {
+		this->bbt_td->options = NAND_BBT_2BIT | NAND_BBT_ABSPAGE;
+		if (inftl_bbt_write)
+			this->bbt_td->options |= NAND_BBT_WRITE;
+		this->bbt_td->pages[0] = 2;
+		this->bbt_md = NULL;
+	} else {
+		this->bbt_td->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+		if (inftl_bbt_write)
+			this->bbt_td->options |= NAND_BBT_WRITE;
+		this->bbt_td->offs = 8;
+		this->bbt_td->len = 8;
+		this->bbt_td->veroffs = 7;
+		this->bbt_td->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+		this->bbt_td->reserved_block_code = 0x01;
+		this->bbt_td->pattern = "MSYS_BBT";
+
+		this->bbt_md->options = NAND_BBT_LASTBLOCK | NAND_BBT_8BIT | NAND_BBT_VERSION;
+		if (inftl_bbt_write)
+			this->bbt_md->options |= NAND_BBT_WRITE;
+		this->bbt_md->offs = 8;
+		this->bbt_md->len = 8;
+		this->bbt_md->veroffs = 7;
+		this->bbt_md->maxblocks = INFTL_BBT_RESERVED_BLOCKS;
+		this->bbt_md->reserved_block_code = 0x01;
+		this->bbt_md->pattern = "TBB_SYSM";
+	}
+
+	ret = this->scan_bbt(mtd);
+	if (ret)
+		return ret;
+
+	memset((char *)parts, 0, sizeof(parts));
+	numparts = inftl_partscan(mtd, parts);
+	/* At least for now, require the INFTL Media Header.  We could probably
+	   do without it for non-INFTL use, since all it gives us is
+	   autopartitioning, but I want to give it more thought. */
+	if (!numparts)
+		return -EIO;
+	return mtd_device_register(mtd, parts, no_autopart ? 0 : numparts);
+}
+
+static inline int __init doc2000_init(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+
+	this->read_byte = doc2000_read_byte;
+	this->write_buf = doc2000_writebuf;
+	this->read_buf = doc2000_readbuf;
+	doc->late_init = nftl_scan_bbt;
+
+	doc->CDSNControl = CDSN_CTRL_FLASH_IO | CDSN_CTRL_ECC_IO;
+	doc2000_count_chips(mtd);
+	mtd->name = "DiskOnChip 2000 (NFTL Model)";
+	return (4 * doc->chips_per_floor);
+}
+
+static inline int __init doc2001_init(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+
+	this->read_byte = doc2001_read_byte;
+	this->write_buf = doc2001_writebuf;
+	this->read_buf = doc2001_readbuf;
+
+	ReadDOC(doc->virtadr, ChipID);
+	ReadDOC(doc->virtadr, ChipID);
+	ReadDOC(doc->virtadr, ChipID);
+	if (ReadDOC(doc->virtadr, ChipID) != DOC_ChipID_DocMil) {
+		/* It's not a Millennium; it's one of the newer
+		   DiskOnChip 2000 units with a similar ASIC.
+		   Treat it like a Millennium, except that it
+		   can have multiple chips. */
+		doc2000_count_chips(mtd);
+		mtd->name = "DiskOnChip 2000 (INFTL Model)";
+		doc->late_init = inftl_scan_bbt;
+		return (4 * doc->chips_per_floor);
+	} else {
+		/* Bog-standard Millennium */
+		doc->chips_per_floor = 1;
+		mtd->name = "DiskOnChip Millennium";
+		doc->late_init = nftl_scan_bbt;
+		return 1;
+	}
+}
+
+static inline int __init doc2001plus_init(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct doc_priv *doc = nand_get_controller_data(this);
+
+	this->read_byte = doc2001plus_read_byte;
+	this->write_buf = doc2001plus_writebuf;
+	this->read_buf = doc2001plus_readbuf;
+	doc->late_init = inftl_scan_bbt;
+	this->cmd_ctrl = NULL;
+	this->select_chip = doc2001plus_select_chip;
+	this->cmdfunc = doc2001plus_command;
+	this->ecc.hwctl = doc2001plus_enable_hwecc;
+
+	doc->chips_per_floor = 1;
+	mtd->name = "DiskOnChip Millennium Plus";
+
+	return 1;
+}
+
+static int __init doc_probe(unsigned long physadr)
+{
+	unsigned char ChipID;
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	struct doc_priv *doc;
+	void __iomem *virtadr;
+	unsigned char save_control;
+	unsigned char tmp, tmpb, tmpc;
+	int reg, len, numchips;
+	int ret = 0;
+
+	if (!request_mem_region(physadr, DOC_IOREMAP_LEN, "DiskOnChip"))
+		return -EBUSY;
+	virtadr = ioremap(physadr, DOC_IOREMAP_LEN);
+	if (!virtadr) {
+		printk(KERN_ERR "Diskonchip ioremap failed: 0x%x bytes at 0x%lx\n", DOC_IOREMAP_LEN, physadr);
+		ret = -EIO;
+		goto error_ioremap;
+	}
+
+	/* It's not possible to cleanly detect the DiskOnChip - the
+	 * bootup procedure will put the device into reset mode, and
+	 * it's not possible to talk to it without actually writing
+	 * to the DOCControl register. So we store the current contents
+	 * of the DOCControl register's location, in case we later decide
+	 * that it's not a DiskOnChip, and want to put it back how we
+	 * found it.
+	 */
+	save_control = ReadDOC(virtadr, DOCControl);
+
+	/* Reset the DiskOnChip ASIC */
+	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, virtadr, DOCControl);
+
+	/* Enable the DiskOnChip ASIC */
+	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+	WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, virtadr, DOCControl);
+
+	ChipID = ReadDOC(virtadr, ChipID);
+
+	switch (ChipID) {
+	case DOC_ChipID_Doc2k:
+		reg = DoC_2k_ECCStatus;
+		break;
+	case DOC_ChipID_DocMil:
+		reg = DoC_ECCConf;
+		break;
+	case DOC_ChipID_DocMilPlus16:
+	case DOC_ChipID_DocMilPlus32:
+	case 0:
+		/* Possible Millennium Plus, need to do more checks */
+		/* Possibly release from power down mode */
+		for (tmp = 0; (tmp < 4); tmp++)
+			ReadDOC(virtadr, Mplus_Power);
+
+		/* Reset the Millennium Plus ASIC */
+		tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+		WriteDOC(tmp, virtadr, Mplus_DOCControl);
+		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+
+		mdelay(1);
+		/* Enable the Millennium Plus ASIC */
+		tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | DOC_MODE_BDECT;
+		WriteDOC(tmp, virtadr, Mplus_DOCControl);
+		WriteDOC(~tmp, virtadr, Mplus_CtrlConfirm);
+		mdelay(1);
+
+		ChipID = ReadDOC(virtadr, ChipID);
+
+		switch (ChipID) {
+		case DOC_ChipID_DocMilPlus16:
+			reg = DoC_Mplus_Toggle;
+			break;
+		case DOC_ChipID_DocMilPlus32:
+			printk(KERN_ERR "DiskOnChip Millennium Plus 32MB is not supported, ignoring.\n");
+		default:
+			ret = -ENODEV;
+			goto notfound;
+		}
+		break;
+
+	default:
+		ret = -ENODEV;
+		goto notfound;
+	}
+	/* Check the TOGGLE bit in the ECC register */
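+	/* A real DoC toggles this bit on every read: it must differ between
+	   the first and second reads and match again on the third */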
+	tmp = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+	tmpb = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+	tmpc = ReadDOC_(virtadr, reg) & DOC_TOGGLE_BIT;
+	if ((tmp == tmpb) || (tmp != tmpc)) {
+		printk(KERN_WARNING "Possible DiskOnChip at 0x%lx failed TOGGLE test, dropping.\n", physadr);
+		ret = -ENODEV;
+		goto notfound;
+	}
+
+	for (mtd = doclist; mtd; mtd = doc->nextdoc) {
+		unsigned char oldval;
+		unsigned char newval;
+		nand = mtd_to_nand(mtd);
+		doc = nand_get_controller_data(nand);
+		/* Use the alias resolution register to determine if this is
+		   in fact the same DOC aliased to a new address.  If writes
+		   to one chip's alias resolution register change the value on
+		   the other chip, they're the same chip. */
+		if (ChipID == DOC_ChipID_DocMilPlus16) {
+			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+			newval = ReadDOC(virtadr, Mplus_AliasResolution);
+		} else {
+			oldval = ReadDOC(doc->virtadr, AliasResolution);
+			newval = ReadDOC(virtadr, AliasResolution);
+		}
+		if (oldval != newval)
+			continue;
+		if (ChipID == DOC_ChipID_DocMilPlus16) {
+			WriteDOC(~newval, virtadr, Mplus_AliasResolution);
+			oldval = ReadDOC(doc->virtadr, Mplus_AliasResolution);
+			WriteDOC(newval, virtadr, Mplus_AliasResolution);	// restore it
+		} else {
+			WriteDOC(~newval, virtadr, AliasResolution);
+			oldval = ReadDOC(doc->virtadr, AliasResolution);
+			WriteDOC(newval, virtadr, AliasResolution);	// restore it
+		}
+		newval = ~newval;
+		if (oldval == newval) {
+			printk(KERN_DEBUG "Found alias of DOC at 0x%lx to 0x%lx\n", doc->physadr, physadr);
+			goto notfound;
+		}
+	}
+
+	printk(KERN_NOTICE "DiskOnChip found at 0x%lx\n", physadr);
+
+	len = sizeof(struct nand_chip) + sizeof(struct doc_priv) +
+	      (2 * sizeof(struct nand_bbt_descr));
+	nand = kzalloc(len, GFP_KERNEL);
+	if (!nand) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mtd			= nand_to_mtd(nand);
+	doc			= (struct doc_priv *) (nand + 1);
+	nand->bbt_td		= (struct nand_bbt_descr *) (doc + 1);
+	nand->bbt_md		= nand->bbt_td + 1;
+
+	mtd->owner		= THIS_MODULE;
+	mtd_set_ooblayout(mtd, &doc200x_ooblayout_ops);
+
+	nand_set_controller_data(nand, doc);
+	nand->select_chip	= doc200x_select_chip;
+	nand->cmd_ctrl		= doc200x_hwcontrol;
+	nand->dev_ready		= doc200x_dev_ready;
+	nand->waitfunc		= doc200x_wait;
+	nand->block_bad		= doc200x_block_bad;
+	nand->ecc.hwctl		= doc200x_enable_hwecc;
+	nand->ecc.calculate	= doc200x_calculate_ecc;
+	nand->ecc.correct	= doc200x_correct_data;
+
+	nand->ecc.mode		= NAND_ECC_HW_SYNDROME;
+	nand->ecc.size		= 512;
+	nand->ecc.bytes		= 6;
+	nand->ecc.strength	= 2;
+	nand->ecc.options	= NAND_ECC_GENERIC_ERASED_CHECK;
+	nand->bbt_options	= NAND_BBT_USE_FLASH;
+	/* Skip the automatic BBT scan so we can run it manually */
+	nand->options		|= NAND_SKIP_BBTSCAN;
+
+	doc->physadr		= physadr;
+	doc->virtadr		= virtadr;
+	doc->ChipID		= ChipID;
+	doc->curfloor		= -1;
+	doc->curchip		= -1;
+	doc->mh0_page		= -1;
+	doc->mh1_page		= -1;
+	doc->nextdoc		= doclist;
+
+	if (ChipID == DOC_ChipID_Doc2k)
+		numchips = doc2000_init(mtd);
+	else if (ChipID == DOC_ChipID_DocMilPlus16)
+		numchips = doc2001plus_init(mtd);
+	else
+		numchips = doc2001_init(mtd);
+
+	if ((ret = nand_scan(mtd, numchips)) || (ret = doc->late_init(mtd))) {
+		/* DBB note: I believe nand_release is necessary here, as
+		   buffers may have been allocated in nand_base.  Check with
+		   Thomas. FIX ME! */
+		/* nand_release will call mtd_device_unregister, but we
+		   haven't yet added it.  This is handled without incident by
+		   mtd_device_unregister, as far as I can tell. */
+		nand_release(mtd);
+		kfree(nand);
+		goto fail;
+	}
+
+	/* Success! */
+	doclist = mtd;
+	return 0;
+
+ notfound:
+	/* Put back the contents of the DOCControl register, in case it's not
+	   actually a DiskOnChip.  */
+	WriteDOC(save_control, virtadr, DOCControl);
+ fail:
+	iounmap(virtadr);
+
+error_ioremap:
+	release_mem_region(physadr, DOC_IOREMAP_LEN);
+
+	return ret;
+}
+
+static void release_nanddoc(void)
+{
+	struct mtd_info *mtd, *nextmtd;
+	struct nand_chip *nand;
+	struct doc_priv *doc;
+
+	for (mtd = doclist; mtd; mtd = nextmtd) {
+		nand = mtd_to_nand(mtd);
+		doc = nand_get_controller_data(nand);
+
+		nextmtd = doc->nextdoc;
+		nand_release(mtd);
+		iounmap(doc->virtadr);
+		release_mem_region(doc->physadr, DOC_IOREMAP_LEN);
+		kfree(nand);
+	}
+}
+
+static int __init init_nanddoc(void)
+{
+	int i, ret = 0;
+
+	/* We could create the decoder on demand, if memory is a concern.
+	 * This way we have it handy, if an error happens
+	 *
+	 * Symbolsize is 10 (bits)
+	 * Primitive polynomial is x^10+x^3+1
+	 * first consecutive root is 510
+	 * primitive element to generate roots = 1
+	 * generator polynomial degree = 4
+	 */
+	rs_decoder = init_rs(10, 0x409, FCR, 1, NROOTS);
+	if (!rs_decoder) {
+		printk(KERN_ERR "DiskOnChip: Could not create a RS decoder\n");
+		return -ENOMEM;
+	}
+
+	if (doc_config_location) {
+		printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location);
+		ret = doc_probe(doc_config_location);
+		if (ret < 0)
+			goto outerr;
+	} else {
+		for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
+			doc_probe(doc_locations[i]);
+		}
+	}
+	/* No banner message any more. Print a message if no DiskOnChip
+	   found, so the user knows we at least tried. */
+	if (!doclist) {
+		printk(KERN_INFO "No valid DiskOnChip devices found\n");
+		ret = -ENODEV;
+		goto outerr;
+	}
+	return 0;
+ outerr:
+	free_rs(rs_decoder);
+	return ret;
+}
+
+static void __exit cleanup_nanddoc(void)
+{
+	/* Cleanup the nand/DoC resources */
+	release_nanddoc();
+
+	/* Free the reed solomon resources */
+	if (rs_decoder) {
+		free_rs(rs_decoder);
+	}
+}
+
+module_init(init_nanddoc);
+module_exit(cleanup_nanddoc);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
+MODULE_DESCRIPTION("M-Systems DiskOnChip 2000, Millennium and Millennium Plus device driver");
diff --git a/drivers/mtd/nand/rawnand/docg4.c b/drivers/mtd/nand/rawnand/docg4.c
new file mode 100644
index 000000000000..e038130b7206
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/docg4.c
@@ -0,0 +1,1410 @@ 
+/*
+ *  Copyright © 2012 Mike Dunn <mikedunn@newsguy.com>
+ *
+ * mtd nand driver for M-Systems DiskOnChip G4
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Tested on the Palm Treo 680.  The G4 is also present on Toshiba Portege, Asus
+ * P526, some HTC smartphones (Wizard, Prophet, ...), O2 XDA Zinc, maybe others.
+ * Should work on these as well.  Let me know!
+ *
+ * TODO:
+ *
+ *  Mechanism for management of password-protected areas
+ *
+ *  Hamming ecc when reading oob only
+ *
+ *  According to the M-Sys documentation, this device is also available in a
+ *  "dual-die" configuration having a 256MB capacity, but no mechanism for
+ *  detecting this variant is documented.  Currently this driver assumes 128MB
+ *  capacity.
+ *
+ *  Support for multiple cascaded devices ("floors").  Not sure which gadgets
+ *  contain multiple G4s in a cascaded configuration, if any.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/export.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/bch.h>
+#include <linux/bitrev.h>
+#include <linux/jiffies.h>
+
+/*
+ * In "reliable mode" consecutive 2k pages are used in parallel (in some
+ * fashion) to store the same data.  The data can be read back from the
+ * even-numbered pages in the normal manner; odd-numbered pages will appear to
+ * contain junk.  Systems that boot from the docg4 typically write the secondary
+ * program loader (SPL) code in this mode.  The SPL is loaded by the initial
+ * program loader (IPL, stored in the docg4's 2k NOR-like region that is mapped
+ * to the reset vector address).  This module parameter enables you to use this
+ * driver to write the SPL.  When in this mode, no more than 2k of data can be
+ * written at a time, because the addresses do not increment in the normal
+ * manner, and the starting offset must be within an even-numbered 2k region;
+ * i.e., invalid starting offsets are 0x800, 0xa00, 0xc00, 0xe00, 0x1800,
+ * 0x1a00, ...  Reliable mode is a special case and should not be used unless
+ * you know what you're doing.
+ */
+static bool reliable_mode;
+module_param(reliable_mode, bool, 0);
+MODULE_PARM_DESC(reliable_mode, "pages are programmed in reliable mode");
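+
+/*
+ * Reliable mode is typically enabled at module load time, for example with
+ * "modprobe docg4 reliable_mode=1".
+ */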
+
+/*
+ * You'll want to ignore badblocks if you're reading a partition that contains
+ * data written by the TrueFFS library (i.e., by PalmOS, Windows, etc), since
+ * it does not use mtd nand's method for marking bad blocks (using oob area).
+ * This will also skip the check of the "page written" flag.
+ */
+static bool ignore_badblocks;
+module_param(ignore_badblocks, bool, 0);
+MODULE_PARM_DESC(ignore_badblocks, "no badblock checking performed");
+
+struct docg4_priv {
+	struct mtd_info	*mtd;
+	struct device *dev;
+	void __iomem *virtadr;
+	int status;
+	struct {
+		unsigned int command;
+		int column;
+		int page;
+	} last_command;
+	uint8_t oob_buf[16];
+	uint8_t ecc_buf[7];
+	int oob_page;
+	struct bch_control *bch;
+};
+
+/*
+ * Defines prefixed with DOCG4 are unique to the diskonchip G4.  All others are
+ * shared with other diskonchip devices (P3, G3 at least).
+ *
+ * Functions with names prefixed with docg4_ are mtd / nand interface functions
+ * (though they may also be called internally).  All others are internal.
+ */
+
+#define DOC_IOSPACE_DATA		0x0800
+
+/* register offsets */
+#define DOC_CHIPID			0x1000
+#define DOC_DEVICESELECT		0x100a
+#define DOC_ASICMODE			0x100c
+#define DOC_DATAEND			0x101e
+#define DOC_NOP				0x103e
+
+#define DOC_FLASHSEQUENCE		0x1032
+#define DOC_FLASHCOMMAND		0x1034
+#define DOC_FLASHADDRESS		0x1036
+#define DOC_FLASHCONTROL		0x1038
+#define DOC_ECCCONF0			0x1040
+#define DOC_ECCCONF1			0x1042
+#define DOC_HAMMINGPARITY		0x1046
+#define DOC_BCH_SYNDROM(idx)		(0x1048 + idx)
+
+#define DOC_ASICMODECONFIRM		0x1072
+#define DOC_CHIPID_INV			0x1074
+#define DOC_POWERMODE			0x107c
+
+#define DOCG4_MYSTERY_REG		0x1050
+
+/* apparently used only to write oob bytes 6 and 7 */
+#define DOCG4_OOB_6_7			0x1052
+
+/* DOC_FLASHSEQUENCE register commands */
+#define DOC_SEQ_RESET			0x00
+#define DOCG4_SEQ_PAGE_READ		0x03
+#define DOCG4_SEQ_FLUSH			0x29
+#define DOCG4_SEQ_PAGEWRITE		0x16
+#define DOCG4_SEQ_PAGEPROG		0x1e
+#define DOCG4_SEQ_BLOCKERASE		0x24
+#define DOCG4_SEQ_SETMODE		0x45
+
+/* DOC_FLASHCOMMAND register commands */
+#define DOCG4_CMD_PAGE_READ             0x00
+#define DOC_CMD_ERASECYCLE2		0xd0
+#define DOCG4_CMD_FLUSH                 0x70
+#define DOCG4_CMD_READ2                 0x30
+#define DOC_CMD_PROG_BLOCK_ADDR		0x60
+#define DOCG4_CMD_PAGEWRITE		0x80
+#define DOC_CMD_PROG_CYCLE2		0x10
+#define DOCG4_CMD_FAST_MODE		0xa3 /* functionality guessed */
+#define DOC_CMD_RELIABLE_MODE		0x22
+#define DOC_CMD_RESET			0xff
+
+/* DOC_POWERMODE register bits */
+#define DOC_POWERDOWN_READY		0x80
+
+/* DOC_FLASHCONTROL register bits */
+#define DOC_CTRL_CE			0x10
+#define DOC_CTRL_UNKNOWN		0x40
+#define DOC_CTRL_FLASHREADY		0x01
+
+/* DOC_ECCCONF0 register bits */
+#define DOC_ECCCONF0_READ_MODE		0x8000
+#define DOC_ECCCONF0_UNKNOWN		0x2000
+#define DOC_ECCCONF0_ECC_ENABLE	        0x1000
+#define DOC_ECCCONF0_DATA_BYTES_MASK	0x07ff
+
+/* DOC_ECCCONF1 register bits */
+#define DOC_ECCCONF1_BCH_SYNDROM_ERR	0x80
+#define DOC_ECCCONF1_ECC_ENABLE         0x07
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN	0x20
+
+/* DOC_ASICMODE register bits */
+#define DOC_ASICMODE_RESET		0x00
+#define DOC_ASICMODE_NORMAL		0x01
+#define DOC_ASICMODE_POWERDOWN		0x02
+#define DOC_ASICMODE_MDWREN		0x04
+#define DOC_ASICMODE_BDETCT_RESET	0x08
+#define DOC_ASICMODE_RSTIN_RESET	0x10
+#define DOC_ASICMODE_RAM_WE		0x20
+
+/* good status values read after read/write/erase operations */
+#define DOCG4_PROGSTATUS_GOOD          0x51
+#define DOCG4_PROGSTATUS_GOOD_2        0xe0
+
+/*
+ * On read operations (page and oob-only), the first byte read from I/O reg is a
+ * status.  On error, it reads 0x73; otherwise, it reads either 0x71 (first read
+ * after reset only) or 0x51, so bit 1 is presumed to be an error indicator.
+ */
+#define DOCG4_READ_ERROR           0x02 /* bit 1 indicates read error */
+
+/* anatomy of the device */
+#define DOCG4_CHIP_SIZE        0x8000000
+#define DOCG4_PAGE_SIZE        0x200
+#define DOCG4_PAGES_PER_BLOCK  0x200
+#define DOCG4_BLOCK_SIZE       (DOCG4_PAGES_PER_BLOCK * DOCG4_PAGE_SIZE)
+#define DOCG4_NUMBLOCKS        (DOCG4_CHIP_SIZE / DOCG4_BLOCK_SIZE)
+#define DOCG4_OOB_SIZE         0x10
+#define DOCG4_CHIP_SHIFT       27    /* log_2(DOCG4_CHIP_SIZE) */
+#define DOCG4_PAGE_SHIFT       9     /* log_2(DOCG4_PAGE_SIZE) */
+#define DOCG4_ERASE_SHIFT      18    /* log_2(DOCG4_BLOCK_SIZE) */
+
+/* all but the last byte is included in ecc calculation */
+#define DOCG4_BCH_SIZE         (DOCG4_PAGE_SIZE + DOCG4_OOB_SIZE - 1)
+
+#define DOCG4_USERDATA_LEN     520 /* 512 byte page plus 8 oob avail to user */
+
+/* expected values from the ID registers */
+#define DOCG4_IDREG1_VALUE     0x0400
+#define DOCG4_IDREG2_VALUE     0xfbff
+
+/* primitive polynomial used to build the Galois field used by hw ecc gen */
+#define DOCG4_PRIMITIVE_POLY   0x4443
+
+#define DOCG4_M                14  /* Galois field is of order 2^14 */
+#define DOCG4_T                4   /* BCH alg corrects up to 4 bit errors */
+
+#define DOCG4_FACTORY_BBT_PAGE 16 /* page where read-only factory bbt lives */
+#define DOCG4_REDUNDANT_BBT_PAGE 24 /* page where redundant factory bbt lives */
+
+/*
+ * Bytes 0, 1 are used as badblock marker.
+ * Bytes 2 - 6 are available to the user.
+ * Byte 7 is hamming ecc for first 7 oob bytes only.
+ * Bytes 8 - 14 are hw-generated ecc covering entire page + oob bytes 0 - 14.
+ * Byte 15 (the last) is used by the driver as a "page written" flag.
+ */
+static int docg4_ooblayout_ecc(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 7;
+	oobregion->length = 9;
+
+	return 0;
+}
+
+static int docg4_ooblayout_free(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 2;
+	oobregion->length = 5;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops docg4_ooblayout_ops = {
+	.ecc = docg4_ooblayout_ecc,
+	.free = docg4_ooblayout_free,
+};
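+
+/*
+ * Note that the ecc region above (offset 7, length 9) spans oob bytes 7 - 15:
+ * the hamming byte, the 7 hw-generated ecc bytes, and the "page written" flag.
+ * The free region (offset 2, length 5) covers the user-available bytes 2 - 6.
+ */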
+
+/*
+ * The device has a nop register which M-Sys claims is for the purpose of
+ * inserting precise delays.  But beware; at least some operations fail if the
+ * nop writes are replaced with a generic delay!
+ */
+static inline void write_nop(void __iomem *docptr)
+{
+	writew(0, docptr + DOC_NOP);
+}
+
+static void docg4_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	uint16_t *p = (uint16_t *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		p[i] = readw(nand->IO_ADDR_R);
+}
+
+static void docg4_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	uint16_t *p = (uint16_t *) buf;
+	len >>= 1;
+
+	for (i = 0; i < len; i++)
+		writew(p[i], nand->IO_ADDR_W);
+}
+
+static int poll_status(struct docg4_priv *doc)
+{
+	/*
+	 * Busy-wait for the FLASHREADY bit to be set in the FLASHCONTROL
+	 * register.  Operations known to take a long time (e.g., block erase)
+	 * should sleep for a while before calling this.
+	 */
+
+	uint16_t flash_status;
+	unsigned long timeo;
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	/* hardware quirk requires reading twice initially */
+	flash_status = readw(docptr + DOC_FLASHCONTROL);
+
+	timeo = jiffies + msecs_to_jiffies(200); /* generous timeout */
+	do {
+		cpu_relax();
+		flash_status = readb(docptr + DOC_FLASHCONTROL);
+	} while (!(flash_status & DOC_CTRL_FLASHREADY) &&
+		 time_before(jiffies, timeo));
+
+	if (unlikely(!(flash_status & DOC_CTRL_FLASHREADY))) {
+		dev_err(doc->dev, "%s: timed out!\n", __func__);
+		return NAND_STATUS_FAIL;
+	}
+
+	return 0;
+}
+
+
+static int docg4_wait(struct mtd_info *mtd, struct nand_chip *nand)
+{
+
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	int status = NAND_STATUS_WP;       /* inverse logic?? */
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	/* report any previously unreported error */
+	if (doc->status) {
+		status |= doc->status;
+		doc->status = 0;
+		return status;
+	}
+
+	status |= poll_status(doc);
+	return status;
+}
+
+static void docg4_select_chip(struct mtd_info *mtd, int chip)
+{
+	/*
+	 * Select among multiple cascaded chips ("floors").  Multiple floors are
+	 * not yet supported, so the only valid non-negative value is 0.
+	 */
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev, "%s: chip %d\n", __func__, chip);
+
+	if (chip < 0)
+		return;		/* deselected */
+
+	if (chip > 0)
+		dev_warn(doc->dev, "multiple floors currently unsupported\n");
+
+	writew(0, docptr + DOC_DEVICESELECT);
+}
+
+static void reset(struct mtd_info *mtd)
+{
+	/* full device reset */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+
+	writew(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN,
+	       docptr + DOC_ASICMODE);
+	writew(~(DOC_ASICMODE_RESET | DOC_ASICMODE_MDWREN),
+	       docptr + DOC_ASICMODECONFIRM);
+	write_nop(docptr);
+
+	writew(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN,
+	       docptr + DOC_ASICMODE);
+	writew(~(DOC_ASICMODE_NORMAL | DOC_ASICMODE_MDWREN),
+	       docptr + DOC_ASICMODECONFIRM);
+
+	writew(DOC_ECCCONF1_ECC_ENABLE, docptr + DOC_ECCCONF1);
+
+	poll_status(doc);
+}
+
+static void read_hw_ecc(void __iomem *docptr, uint8_t *ecc_buf)
+{
+	/* read the 7 hw-generated ecc bytes */
+
+	int i;
+	for (i = 0; i < 7; i++) { /* hw quirk; read twice */
+		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+		ecc_buf[i] = readb(docptr + DOC_BCH_SYNDROM(i));
+	}
+}
+
+static int correct_data(struct mtd_info *mtd, uint8_t *buf, int page)
+{
+	/*
+	 * Called after a page read when hardware reports bitflips.
+	 * Up to four bitflips can be corrected.
+	 */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	int i, numerrs, errpos[4];
+	const uint8_t blank_read_hwecc[8] = {
+		0xcf, 0x72, 0xfc, 0x1b, 0xa9, 0xc7, 0xb9, 0 };
+
+	read_hw_ecc(docptr, doc->ecc_buf); /* read 7 hw-generated ecc bytes */
+
+	/* check if read error is due to a blank page */
+	if (!memcmp(doc->ecc_buf, blank_read_hwecc, 7))
+		return 0;	/* yes */
+
+	/* skip additional check of "written flag" if ignore_badblocks */
+	if (ignore_badblocks == false) {
+
+		/*
+		 * If the hw ecc bytes are not those of a blank page, there's
+		 * still a chance that the page is blank, but was read with
+		 * errors.  Check the "written flag" in last oob byte, which
+		 * is set to zero when a page is written.  If more than half
+		 * the bits are set, assume a blank page.  Unfortunately, the
+		 * bit flip(s) are not reported in stats.
+		 */
+
+		if (nand->oob_poi[15]) {
+			int bit, numsetbits = 0;
+			unsigned long written_flag = nand->oob_poi[15];
+			for_each_set_bit(bit, &written_flag, 8)
+				numsetbits++;
+			if (numsetbits > 4) { /* assume blank */
+				dev_warn(doc->dev,
+					 "error(s) in blank page "
+					 "at offset %08x\n",
+					 page * DOCG4_PAGE_SIZE);
+				return 0;
+			}
+		}
+	}
+
+	/*
+	 * The hardware ecc unit produces oob_ecc ^ calc_ecc.  The kernel's bch
+	 * algorithm is used to decode this.  However the hw operates on page
+	 * data in a bit order that is the reverse of that of the bch alg,
+	 * requiring that the bits be reversed on the result.  Thanks to Ivan
+	 * Djelic for his analysis!
+	 */
+	for (i = 0; i < 7; i++)
+		doc->ecc_buf[i] = bitrev8(doc->ecc_buf[i]);
+
+	numerrs = decode_bch(doc->bch, NULL, DOCG4_USERDATA_LEN, NULL,
+			     doc->ecc_buf, NULL, errpos);
+
+	if (numerrs == -EBADMSG) {
+		dev_warn(doc->dev, "uncorrectable errors at offset %08x\n",
+			 page * DOCG4_PAGE_SIZE);
+		return -EBADMSG;
+	}
+
+	BUG_ON(numerrs < 0);	/* -EINVAL, or anything other than -EBADMSG */
+
+	/* undo last step in BCH alg (modulo mirroring not needed) */
+	for (i = 0; i < numerrs; i++)
+		errpos[i] = (errpos[i] & ~7)|(7-(errpos[i] & 7));
+
+	/* fix the errors */
+	for (i = 0; i < numerrs; i++) {
+
+		/* ignore if error within oob ecc bytes */
+		if (errpos[i] > DOCG4_USERDATA_LEN * 8)
+			continue;
+
+		/* if error within oob area preceding ecc bytes... */
+		if (errpos[i] > DOCG4_PAGE_SIZE * 8)
+			change_bit(errpos[i] - DOCG4_PAGE_SIZE * 8,
+				   (unsigned long *)nand->oob_poi);
+
+		else    /* error in page data */
+			change_bit(errpos[i], (unsigned long *)buf);
+	}
+
+	dev_notice(doc->dev, "%d error(s) corrected at offset %08x\n",
+		   numerrs, page * DOCG4_PAGE_SIZE);
+
+	return numerrs;
+}
+
+static uint8_t docg4_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+
+	dev_dbg(doc->dev, "%s\n", __func__);
+
+	if (doc->last_command.command == NAND_CMD_STATUS) {
+		int status;
+
+		/*
+		 * Previous nand command was status request, so nand
+		 * infrastructure code expects to read the status here.  If an
+		 * error occurred in a previous operation, report it.
+		 */
+		doc->last_command.command = 0;
+
+		if (doc->status) {
+			status = doc->status;
+			doc->status = 0;
+		}
+
+		/* why is NAND_STATUS_WP inverse logic?? */
+		else
+			status = NAND_STATUS_WP | NAND_STATUS_READY;
+
+		return status;
+	}
+
+	dev_warn(doc->dev, "unexpected call to read_byte()\n");
+
+	return 0;
+}
+
+static void write_addr(struct docg4_priv *doc, uint32_t docg4_addr)
+{
+	/* write the four address bytes packed in docg4_addr to the device */
+
+	void __iomem *docptr = doc->virtadr;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+	docg4_addr >>= 8;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+	docg4_addr >>= 8;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+	docg4_addr >>= 8;
+	writeb(docg4_addr & 0xff, docptr + DOC_FLASHADDRESS);
+}
+
+static int read_progstatus(struct docg4_priv *doc)
+{
+	/*
+	 * This apparently checks the status of programming.  Done after an
+	 * erasure, and after page data is written.  On error, the status is
+	 * saved, to be later retrieved by the nand infrastructure code.
+	 */
+	void __iomem *docptr = doc->virtadr;
+
+	/* status is read from the I/O reg */
+	uint16_t status1 = readw(docptr + DOC_IOSPACE_DATA);
+	uint16_t status2 = readw(docptr + DOC_IOSPACE_DATA);
+	uint16_t status3 = readw(docptr + DOCG4_MYSTERY_REG);
+
+	dev_dbg(doc->dev, "docg4: %s: %02x %02x %02x\n",
+	      __func__, status1, status2, status3);
+
+	if (status1 != DOCG4_PROGSTATUS_GOOD
+	    || status2 != DOCG4_PROGSTATUS_GOOD_2
+	    || status3 != DOCG4_PROGSTATUS_GOOD_2) {
+		doc->status = NAND_STATUS_FAIL;
+		dev_warn(doc->dev, "read_progstatus failed: "
+			 "%02x, %02x, %02x\n", status1, status2, status3);
+		return -EIO;
+	}
+	return 0;
+}
+
+static int pageprog(struct mtd_info *mtd)
+{
+	/*
+	 * Final step in writing a page.  Writes the contents of its
+	 * internal buffer out to the flash array, or some such.
+	 */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	int retval = 0;
+
+	dev_dbg(doc->dev, "docg4: %s\n", __func__);
+
+	writew(DOCG4_SEQ_PAGEPROG, docptr + DOC_FLASHSEQUENCE);
+	writew(DOC_CMD_PROG_CYCLE2, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* Just busy-wait; usleep_range() slows things down noticeably. */
+	poll_status(doc);
+
+	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	retval = read_progstatus(doc);
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+	poll_status(doc);
+	write_nop(docptr);
+
+	return retval;
+}
+
+static void sequence_reset(struct mtd_info *mtd)
+{
+	/* common starting sequence for all operations */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+
+	writew(DOC_CTRL_UNKNOWN | DOC_CTRL_CE, docptr + DOC_FLASHCONTROL);
+	writew(DOC_SEQ_RESET, docptr + DOC_FLASHSEQUENCE);
+	writew(DOC_CMD_RESET, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+	poll_status(doc);
+	write_nop(docptr);
+}
+
+static void read_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+	/* first step in reading a page */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev,
+	      "docg4: %s: g4 page %08x\n", __func__, docg4_addr);
+
+	sequence_reset(mtd);
+
+	writew(DOCG4_SEQ_PAGE_READ, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_PAGE_READ, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+
+	write_addr(doc, docg4_addr);
+
+	write_nop(docptr);
+	writew(DOCG4_CMD_READ2, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	poll_status(doc);
+}
+
+static void write_page_prologue(struct mtd_info *mtd, uint32_t docg4_addr)
+{
+	/* first step in writing a page */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev,
+	      "docg4: %s: g4 addr: %x\n", __func__, docg4_addr);
+	sequence_reset(mtd);
+
+	if (unlikely(reliable_mode)) {
+		writew(DOCG4_SEQ_SETMODE, docptr + DOC_FLASHSEQUENCE);
+		writew(DOCG4_CMD_FAST_MODE, docptr + DOC_FLASHCOMMAND);
+		writew(DOC_CMD_RELIABLE_MODE, docptr + DOC_FLASHCOMMAND);
+		write_nop(docptr);
+	}
+
+	writew(DOCG4_SEQ_PAGEWRITE, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_PAGEWRITE, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_addr(doc, docg4_addr);
+	write_nop(docptr);
+	write_nop(docptr);
+	poll_status(doc);
+}
+
+static uint32_t mtd_to_docg4_address(int page, int column)
+{
+	/*
+	 * Convert mtd address to format used by the device, 32 bit packed.
+	 *
+	 * Some notes on G4 addressing... The M-Sys documentation on this device
+	 * claims that pages are 2K in length, and indeed, the format of the
+	 * address used by the device reflects that.  But within each page are
+	 * four 512 byte "sub-pages", each with its own oob data that is
+	 * read/written immediately after the 512 bytes of page data.  This oob
+	 * data contains the ecc bytes for the preceding 512 bytes.
+	 *
+	 * Rather than tell the mtd nand infrastructure that page size is 2k,
+	 * with four sub-pages each, we engage in a little subterfuge and tell
+	 * the infrastructure code that pages are 512 bytes in size.  This is
+	 * done because during the course of reverse-engineering the device, I
+	 * never observed an instance where an entire 2K "page" was read or
+	 * written as a unit.  Each "sub-page" is always addressed individually,
+	 * its data read/written, and ecc handled before the next "sub-page" is
+	 * addressed.
+	 *
+	 * This requires us to convert addresses passed by the mtd nand
+	 * infrastructure code to those used by the device.
+	 *
+	 * The address that is written to the device consists of four bytes: the
+	 * first two are the 2k page number, and the second two are the index
+	 * into the page.  The index is in terms of 16-bit half-words and
+	 * includes the preceding oob data, so e.g., the index into the second
+	 * "sub-page" is 0x108, and the full device address of the start of mtd
+	 * page 0x201 is 0x00800108.
+	 */
+	int g4_page = page / 4;	                      /* device's 2K page */
+	int g4_index = (page % 4) * 0x108 + column/2; /* offset into page */
+	return (g4_page << 16) | g4_index;	      /* pack */
+}
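+
+/*
+ * Worked example of the conversion above: mtd page 0x201, column 0 gives
+ * g4_page = 0x201 / 4 = 0x80 and g4_index = (0x201 % 4) * 0x108 = 0x108,
+ * which pack to the device address 0x00800108 mentioned in the comment.
+ */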
+
+static void docg4_command(struct mtd_info *mtd, unsigned command, int column,
+			  int page_addr)
+{
+	/* handle standard nand commands */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	uint32_t g4_addr = mtd_to_docg4_address(page_addr, column);
+
+	dev_dbg(doc->dev, "%s %x, page_addr=%x, column=%x\n",
+	      __func__, command, page_addr, column);
+
+	/*
+	 * Save the command and its arguments.  This enables emulation of
+	 * standard flash devices, and also some optimizations.
+	 */
+	doc->last_command.command = command;
+	doc->last_command.column = column;
+	doc->last_command.page = page_addr;
+
+	switch (command) {
+
+	case NAND_CMD_RESET:
+		reset(mtd);
+		break;
+
+	case NAND_CMD_READ0:
+		read_page_prologue(mtd, g4_addr);
+		break;
+
+	case NAND_CMD_STATUS:
+		/* next call to read_byte() will expect a status */
+		break;
+
+	case NAND_CMD_SEQIN:
+		if (unlikely(reliable_mode)) {
+			uint16_t g4_page = g4_addr >> 16;
+
+			/* writes to odd-numbered 2k pages are invalid */
+			if (g4_page & 0x01)
+				dev_warn(doc->dev,
+					 "invalid reliable mode address\n");
+		}
+
+		write_page_prologue(mtd, g4_addr);
+
+		/* hack for deferred write of oob bytes */
+		if (doc->oob_page == page_addr)
+			memcpy(nand->oob_poi, doc->oob_buf, 16);
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		pageprog(mtd);
+		break;
+
+	/* we don't expect these, based on review of nand_base.c */
+	case NAND_CMD_READOOB:
+	case NAND_CMD_READID:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+		dev_warn(doc->dev, "docg4_command: "
+			 "unexpected nand command 0x%x\n", command);
+		break;
+
+	}
+}
+
+static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
+		     uint8_t *buf, int page, bool use_ecc)
+{
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	uint16_t status, edc_err, *buf16;
+	int bits_corrected = 0;
+
+	dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
+
+	writew(DOC_ECCCONF0_READ_MODE |
+	       DOC_ECCCONF0_ECC_ENABLE |
+	       DOC_ECCCONF0_UNKNOWN |
+	       DOCG4_BCH_SIZE,
+	       docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* the 1st byte from the I/O reg is a status; the rest is page data */
+	status = readw(docptr + DOC_IOSPACE_DATA);
+	if (status & DOCG4_READ_ERROR) {
+		dev_err(doc->dev,
+			"docg4_read_page: bad status: 0x%02x\n", status);
+		writew(0, docptr + DOC_DATAEND);
+		return -EIO;
+	}
+
+	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+	docg4_read_buf(mtd, buf, DOCG4_PAGE_SIZE); /* read the page data */
+
+	/* this device always reads oob after page data */
+	/* first 14 oob bytes read from I/O reg */
+	docg4_read_buf(mtd, nand->oob_poi, 14);
+
+	/* last 2 read from another reg */
+	buf16 = (uint16_t *)(nand->oob_poi + 14);
+	*buf16 = readw(docptr + DOCG4_MYSTERY_REG);
+
+	write_nop(docptr);
+
+	if (likely(use_ecc == true)) {
+
+		/* read the register that tells us if bitflip(s) detected  */
+		edc_err = readw(docptr + DOC_ECCCONF1);
+		edc_err = readw(docptr + DOC_ECCCONF1);
+		dev_dbg(doc->dev, "%s: edc_err = 0x%02x\n", __func__, edc_err);
+
+		/* If bitflips are reported, attempt to correct with ecc */
+		if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
+			bits_corrected = correct_data(mtd, buf, page);
+			if (bits_corrected == -EBADMSG)
+				mtd->ecc_stats.failed++;
+			else
+				mtd->ecc_stats.corrected += bits_corrected;
+		}
+	}
+
+	writew(0, docptr + DOC_DATAEND);
+	if (bits_corrected == -EBADMSG)	  /* uncorrectable errors */
+		return 0;
+	return bits_corrected;
+}
+
+
+static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+			       uint8_t *buf, int oob_required, int page)
+{
+	return read_page(mtd, nand, buf, page, false);
+}
+
+static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
+			   uint8_t *buf, int oob_required, int page)
+{
+	return read_page(mtd, nand, buf, page, true);
+}
+
+static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
+			  int page)
+{
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	uint16_t status;
+
+	dev_dbg(doc->dev, "%s: page %x\n", __func__, page);
+
+	docg4_command(mtd, NAND_CMD_READ0, nand->ecc.size, page);
+
+	writew(DOC_ECCCONF0_READ_MODE | DOCG4_OOB_SIZE, docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* the 1st byte from the I/O reg is a status; the rest is oob data */
+	status = readw(docptr + DOC_IOSPACE_DATA);
+	if (status & DOCG4_READ_ERROR) {
+		dev_warn(doc->dev,
+			 "docg4_read_oob failed: status = 0x%02x\n", status);
+		return -EIO;
+	}
+
+	dev_dbg(doc->dev, "%s: status = 0x%x\n", __func__, status);
+
+	docg4_read_buf(mtd, nand->oob_poi, 16);
+
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+
+	return 0;
+}
+
+static int docg4_erase_block(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	uint16_t g4_page;
+
+	dev_dbg(doc->dev, "%s: page %04x\n", __func__, page);
+
+	sequence_reset(mtd);
+
+	writew(DOCG4_SEQ_BLOCKERASE, docptr + DOC_FLASHSEQUENCE);
+	writew(DOC_CMD_PROG_BLOCK_ADDR, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+
+	/* only 2 bytes of address are written to specify erase block */
+	g4_page = (uint16_t)(page / 4);  /* to g4's 2k page addressing */
+	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+	g4_page >>= 8;
+	writeb(g4_page & 0xff, docptr + DOC_FLASHADDRESS);
+	write_nop(docptr);
+
+	/* start the erasure */
+	writew(DOC_CMD_ERASECYCLE2, docptr + DOC_FLASHCOMMAND);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	usleep_range(500, 1000); /* erasure is long; take a snooze */
+	poll_status(doc);
+	writew(DOCG4_SEQ_FLUSH, docptr + DOC_FLASHSEQUENCE);
+	writew(DOCG4_CMD_FLUSH, docptr + DOC_FLASHCOMMAND);
+	writew(DOC_ECCCONF0_READ_MODE | 4, docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+	write_nop(docptr);
+
+	read_progstatus(doc);
+
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+	poll_status(doc);
+	write_nop(docptr);
+
+	return nand->waitfunc(mtd, nand);
+}
+
+static int write_page(struct mtd_info *mtd, struct nand_chip *nand,
+		       const uint8_t *buf, bool use_ecc)
+{
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	uint8_t ecc_buf[8];
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	writew(DOC_ECCCONF0_ECC_ENABLE |
+	       DOC_ECCCONF0_UNKNOWN |
+	       DOCG4_BCH_SIZE,
+	       docptr + DOC_ECCCONF0);
+	write_nop(docptr);
+
+	/* write the page data */
+	docg4_write_buf16(mtd, buf, DOCG4_PAGE_SIZE);
+
+	/* oob bytes 0 through 5 are written to I/O reg */
+	docg4_write_buf16(mtd, nand->oob_poi, 6);
+
+	/* oob byte 6 written to a separate reg */
+	writew(nand->oob_poi[6], docptr + DOCG4_OOB_6_7);
+
+	write_nop(docptr);
+	write_nop(docptr);
+
+	/* write hw-generated ecc bytes to oob */
+	if (likely(use_ecc == true)) {
+		/* oob byte 7 is hamming code */
+		uint8_t hamming = readb(docptr + DOC_HAMMINGPARITY);
+		hamming = readb(docptr + DOC_HAMMINGPARITY); /* 2nd read */
+		writew(hamming, docptr + DOCG4_OOB_6_7);
+		write_nop(docptr);
+
+		/* read the 7 bch bytes from ecc regs */
+		read_hw_ecc(docptr, ecc_buf);
+		ecc_buf[7] = 0;         /* clear the "page written" flag */
+	}
+
+	/* write user-supplied bytes to oob */
+	else {
+		writew(nand->oob_poi[7], docptr + DOCG4_OOB_6_7);
+		write_nop(docptr);
+		memcpy(ecc_buf, &nand->oob_poi[8], 8);
+	}
+
+	docg4_write_buf16(mtd, ecc_buf, 8);
+	write_nop(docptr);
+	write_nop(docptr);
+	writew(0, docptr + DOC_DATAEND);
+	write_nop(docptr);
+
+	return 0;
+}
+
+static int docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
+				const uint8_t *buf, int oob_required, int page)
+{
+	return write_page(mtd, nand, buf, false);
+}
+
+static int docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
+			     const uint8_t *buf, int oob_required, int page)
+{
+	return write_page(mtd, nand, buf, true);
+}
+
+static int docg4_write_oob(struct mtd_info *mtd, struct nand_chip *nand,
+			   int page)
+{
+	/*
+	 * Writing oob-only is not really supported, because MLC nand must write
+	 * oob bytes at the same time as page data.  Nonetheless, we save the
+	 * oob buffer contents here, and then write it along with the page data
+	 * if the same page is subsequently written.  This allows user space
+	 * utilities that write the oob data prior to the page data to work
+	 * (e.g., nandwrite).  The disadvantage is that, if the intention was to
+	 * write oob only, the operation is quietly ignored.  Also, oob can get
+	 * corrupted if two concurrent processes are running nandwrite.
+	 */
+
+	/* note that bytes 7..14 are hw generated hamming/ecc and overwritten */
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	doc->oob_page = page;
+	memcpy(doc->oob_buf, nand->oob_poi, 16);
+	return 0;
+}
+
+static int __init read_factory_bbt(struct mtd_info *mtd)
+{
+	/*
+	 * The device contains a read-only factory bad block table.  Read it and
+	 * update the memory-based bbt accordingly.
+	 */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	uint32_t g4_addr = mtd_to_docg4_address(DOCG4_FACTORY_BBT_PAGE, 0);
+	uint8_t *buf;
+	int i, block;
+	__u32 eccfailed_stats = mtd->ecc_stats.failed;
+
+	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	read_page_prologue(mtd, g4_addr);
+	docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
+
+	/*
+	 * If no memory-based bbt was created, exit.  This will happen if module
+	 * parameter ignore_badblocks is set.  Then why even call this function?
+	 * For an unknown reason, block erase always fails if it's the first
+	 * operation after device power-up.  The above read ensures it never is.
+	 * Ugly, I know.
+	 */
+	if (nand->bbt == NULL)  /* no memory-based bbt */
+		goto exit;
+
+	if (mtd->ecc_stats.failed > eccfailed_stats) {
+		/*
+		 * Whoops, an ecc failure occurred reading the factory bbt.
+		 * It is stored redundantly, so we get another chance.
+		 */
+		eccfailed_stats = mtd->ecc_stats.failed;
+		docg4_read_page(mtd, nand, buf, 0, DOCG4_REDUNDANT_BBT_PAGE);
+		if (mtd->ecc_stats.failed > eccfailed_stats) {
+			dev_warn(doc->dev,
+				 "The factory bbt could not be read!\n");
+			goto exit;
+		}
+	}
+
+	/*
+	 * Parse factory bbt and update memory-based bbt.  Factory bbt format is
+	 * simple: one bit per block, block numbers increase left to right (msb
+	 * to lsb).  Bit clear means bad block.
+	 */
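+	/*
+	 * For example, if buf[i] reads back as 0x7f, bit 7 is clear, so bit 7
+	 * of ~buf[i] is set and the loop below marks the first block of this
+	 * group of eight (block + 7 - 7) as bad.
+	 */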
+	for (i = block = 0; block < DOCG4_NUMBLOCKS; block += 8, i++) {
+		int bitnum;
+		unsigned long bits = ~buf[i];
+		for_each_set_bit(bitnum, &bits, 8) {
+			int badblock = block + 7 - bitnum;
+			nand->bbt[badblock / 4] |=
+				0x03 << ((badblock % 4) * 2);
+			mtd->ecc_stats.badblocks++;
+			dev_notice(doc->dev, "factory-marked bad block: %d\n",
+				   badblock);
+		}
+	}
+ exit:
+	kfree(buf);
+	return 0;
+}
+
+static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	/*
+	 * Mark a block as bad.  Bad blocks are marked in the oob area of the
+	 * first page of the block.  The default scan_bbt() in the nand
+	 * infrastructure code works fine for building the memory-based bbt
+	 * during initialization, as does the nand infrastructure function that
+	 * checks if a block is bad by reading the bbt.  This function replaces
+	 * the nand default because writes to oob-only are not supported.
+	 */
+
+	int ret, i;
+	uint8_t *buf;
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	struct nand_bbt_descr *bbtd = nand->badblock_pattern;
+	int page = (int)(ofs >> nand->page_shift);
+	uint32_t g4_addr = mtd_to_docg4_address(page, 0);
+
+	dev_dbg(doc->dev, "%s: %08llx\n", __func__, ofs);
+
+	if (unlikely(ofs & (DOCG4_BLOCK_SIZE - 1)))
+		dev_warn(doc->dev, "%s: ofs %llx not start of block!\n",
+			 __func__, ofs);
+
+	/* allocate blank buffer for page data */
+	buf = kzalloc(DOCG4_PAGE_SIZE, GFP_KERNEL);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	/* write bit-wise negation of pattern to oob buffer */
+	memset(nand->oob_poi, 0xff, mtd->oobsize);
+	for (i = 0; i < bbtd->len; i++)
+		nand->oob_poi[bbtd->offs + i] = ~bbtd->pattern[i];
+
+	/* write first page of block */
+	write_page_prologue(mtd, g4_addr);
+	docg4_write_page(mtd, nand, buf, 1, page);
+	ret = pageprog(mtd);
+
+	kfree(buf);
+
+	return ret;
+}
+
+static int docg4_block_neverbad(struct mtd_info *mtd, loff_t ofs)
+{
+	/* only called when module_param ignore_badblocks is set */
+	return 0;
+}
+
+static int docg4_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	/*
+	 * Put the device into "deep power-down" mode.  Note that CE# must be
+	 * deasserted for this to take effect.  The xscale, e.g., can be
+	 * configured to float this signal when the processor enters power-down,
+	 * and a suitable pull-up ensures its deassertion.
+	 */
+
+	int i;
+	uint8_t pwr_down;
+	struct docg4_priv *doc = platform_get_drvdata(pdev);
+	void __iomem *docptr = doc->virtadr;
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	/* poll the register that tells us we're ready to go to sleep */
+	for (i = 0; i < 10; i++) {
+		pwr_down = readb(docptr + DOC_POWERMODE);
+		if (pwr_down & DOC_POWERDOWN_READY)
+			break;
+		usleep_range(1000, 4000);
+	}
+
+	if (pwr_down & DOC_POWERDOWN_READY) {
+		dev_err(doc->dev, "suspend failed; "
+			"timeout polling DOC_POWERDOWN_READY\n");
+		return -EIO;
+	}
+
+	writew(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN,
+	       docptr + DOC_ASICMODE);
+	writew(~(DOC_ASICMODE_POWERDOWN | DOC_ASICMODE_MDWREN),
+	       docptr + DOC_ASICMODECONFIRM);
+
+	write_nop(docptr);
+
+	return 0;
+}
+
+static int docg4_resume(struct platform_device *pdev)
+{
+
+	/*
+	 * Exit power-down.  Twelve consecutive reads of the address below
+	 * accomplish this, assuming CE# has been asserted.
+	 */
+
+	struct docg4_priv *doc = platform_get_drvdata(pdev);
+	void __iomem *docptr = doc->virtadr;
+	int i;
+
+	dev_dbg(doc->dev, "%s...\n", __func__);
+
+	for (i = 0; i < 12; i++)
+		readb(docptr + 0x1fff);
+
+	return 0;
+}
+
+static void __init init_mtd_structs(struct mtd_info *mtd)
+{
+	/* initialize mtd and nand data structures */
+
+	/*
+	 * Note that some of the following initializations are not usually
+	 * required within a nand driver because they are performed by the nand
+	 * infrastructure code as part of nand_scan().  In this case they need
+	 * to be initialized here because we skip the call to nand_scan_ident() (the
+	 * first half of nand_scan()).  The call to nand_scan_ident() is skipped
+	 * because for this device the chip id is not read in the manner of a
+	 * standard nand device.  Unfortunately, nand_scan_ident() does other
+	 * things as well, such as call nand_set_defaults().
+	 */
+
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+
+	mtd->size = DOCG4_CHIP_SIZE;
+	mtd->name = "Msys_Diskonchip_G4";
+	mtd->writesize = DOCG4_PAGE_SIZE;
+	mtd->erasesize = DOCG4_BLOCK_SIZE;
+	mtd->oobsize = DOCG4_OOB_SIZE;
+	mtd_set_ooblayout(mtd, &docg4_ooblayout_ops);
+	nand->chipsize = DOCG4_CHIP_SIZE;
+	nand->chip_shift = DOCG4_CHIP_SHIFT;
+	nand->bbt_erase_shift = nand->phys_erase_shift = DOCG4_ERASE_SHIFT;
+	nand->chip_delay = 20;
+	nand->page_shift = DOCG4_PAGE_SHIFT;
+	nand->pagemask = 0x3ffff;
+	nand->badblockpos = NAND_LARGE_BADBLOCK_POS;
+	nand->badblockbits = 8;
+	nand->ecc.mode = NAND_ECC_HW_SYNDROME;
+	nand->ecc.size = DOCG4_PAGE_SIZE;
+	nand->ecc.prepad = 8;
+	nand->ecc.bytes	= 8;
+	nand->ecc.strength = DOCG4_T;
+	nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
+	nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
+	nand->controller = &nand->hwcontrol;
+	nand_hw_control_init(nand->controller);
+
+	/* methods */
+	nand->cmdfunc = docg4_command;
+	nand->waitfunc = docg4_wait;
+	nand->select_chip = docg4_select_chip;
+	nand->read_byte = docg4_read_byte;
+	nand->block_markbad = docg4_block_markbad;
+	nand->read_buf = docg4_read_buf;
+	nand->write_buf = docg4_write_buf16;
+	nand->erase = docg4_erase_block;
+	nand->ecc.read_page = docg4_read_page;
+	nand->ecc.write_page = docg4_write_page;
+	nand->ecc.read_page_raw = docg4_read_page_raw;
+	nand->ecc.write_page_raw = docg4_write_page_raw;
+	nand->ecc.read_oob = docg4_read_oob;
+	nand->ecc.write_oob = docg4_write_oob;
+
+	/*
+	 * The way the nand infrastructure code is written, a memory-based bbt
+	 * is not created if NAND_SKIP_BBTSCAN is set.  With no memory bbt,
+	 * nand->block_bad() is used.  So when ignoring bad blocks, we skip the
+	 * scan and define a dummy block_bad() which always returns 0.
+	 */
+	if (ignore_badblocks) {
+		nand->options |= NAND_SKIP_BBTSCAN;
+		nand->block_bad	= docg4_block_neverbad;
+	}
+
+}
+
+static int __init read_id_reg(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct docg4_priv *doc = nand_get_controller_data(nand);
+	void __iomem *docptr = doc->virtadr;
+	uint16_t id1, id2;
+
+	/* check for presence of g4 chip by reading id registers */
+	id1 = readw(docptr + DOC_CHIPID);
+	id1 = readw(docptr + DOCG4_MYSTERY_REG);
+	id2 = readw(docptr + DOC_CHIPID_INV);
+	id2 = readw(docptr + DOCG4_MYSTERY_REG);
+
+	if (id1 == DOCG4_IDREG1_VALUE && id2 == DOCG4_IDREG2_VALUE) {
+		dev_info(doc->dev,
+			 "NAND device: 128MiB Diskonchip G4 detected\n");
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL };
+
+static int __init probe_docg4(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	void __iomem *virtadr;
+	struct docg4_priv *doc;
+	int len, retval;
+	struct resource *r;
+	struct device *dev = &pdev->dev;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (r == NULL) {
+		dev_err(dev, "no io memory resource defined!\n");
+		return -ENODEV;
+	}
+
+	virtadr = ioremap(r->start, resource_size(r));
+	if (!virtadr) {
+		dev_err(dev, "Diskonchip ioremap failed: %pR\n", r);
+		return -EIO;
+	}
+
+	len = sizeof(struct nand_chip) + sizeof(struct docg4_priv);
+	nand = kzalloc(len, GFP_KERNEL);
+	if (nand == NULL) {
+		retval = -ENOMEM;
+		goto fail_unmap;
+	}
+
+	mtd = nand_to_mtd(nand);
+	doc = (struct docg4_priv *) (nand + 1);
+	nand_set_controller_data(nand, doc);
+	mtd->dev.parent = &pdev->dev;
+	doc->virtadr = virtadr;
+	doc->dev = dev;
+
+	init_mtd_structs(mtd);
+
+	/* initialize kernel bch algorithm */
+	doc->bch = init_bch(DOCG4_M, DOCG4_T, DOCG4_PRIMITIVE_POLY);
+	if (doc->bch == NULL) {
+		retval = -EINVAL;
+		goto fail;
+	}
+
+	platform_set_drvdata(pdev, doc);
+
+	reset(mtd);
+	retval = read_id_reg(mtd);
+	if (retval == -ENODEV) {
+		dev_warn(dev, "No diskonchip G4 device found.\n");
+		goto fail;
+	}
+
+	retval = nand_scan_tail(mtd);
+	if (retval)
+		goto fail;
+
+	retval = read_factory_bbt(mtd);
+	if (retval)
+		goto fail;
+
+	retval = mtd_device_parse_register(mtd, part_probes, NULL, NULL, 0);
+	if (retval)
+		goto fail;
+
+	doc->mtd = mtd;
+	return 0;
+
+fail:
+	nand_release(mtd); /* deletes partitions and mtd devices */
+	free_bch(doc->bch);
+	kfree(nand);
+
+fail_unmap:
+	iounmap(virtadr);
+
+	return retval;
+}
+
+static int __exit cleanup_docg4(struct platform_device *pdev)
+{
+	struct docg4_priv *doc = platform_get_drvdata(pdev);
+	nand_release(doc->mtd);
+	free_bch(doc->bch);
+	kfree(mtd_to_nand(doc->mtd));
+	iounmap(doc->virtadr);
+	return 0;
+}
+
+static struct platform_driver docg4_driver = {
+	.driver		= {
+		.name	= "docg4",
+	},
+	.suspend	= docg4_suspend,
+	.resume		= docg4_resume,
+	.remove		= __exit_p(cleanup_docg4),
+};
+
+module_platform_driver_probe(docg4_driver, probe_docg4);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Dunn");
+MODULE_DESCRIPTION("M-Systems DiskOnChip G4 device driver");
diff --git a/drivers/mtd/nand/rawnand/fsl_elbc_nand.c b/drivers/mtd/nand/rawnand/fsl_elbc_nand.c
new file mode 100644
index 000000000000..7d8453eb4d0f
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/fsl_elbc_nand.c
@@ -0,0 +1,977 @@ 
+/* Freescale Enhanced Local Bus Controller NAND driver
+ *
+ * Copyright © 2006-2007, 2010 Freescale Semiconductor
+ *
+ * Authors: Nick Spence <nick.spence@freescale.com>,
+ *          Scott Wood <scottwood@freescale.com>
+ *          Jack Lan <jack.lan@freescale.com>
+ *          Roy Zang <tie-fei.zang@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/io.h>
+#include <asm/fsl_lbc.h>
+
+#define MAX_BANKS 8
+#define ERR_BYTE 0xFF /* Value returned for read bytes when read failed */
+#define FCM_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait for FCM */
+
+/* mtd information per set */
+
+struct fsl_elbc_mtd {
+	struct nand_chip chip;
+	struct fsl_lbc_ctrl *ctrl;
+
+	struct device *dev;
+	int bank;               /* Chip select bank number           */
+	u8 __iomem *vbase;      /* Chip select base virtual address  */
+	int page_size;          /* NAND page size (0=512, 1=2048)    */
+	unsigned int fmr;       /* FCM Flash Mode Register value     */
+};
+
+/* Freescale eLBC FCM controller information */
+
+struct fsl_elbc_fcm_ctrl {
+	struct nand_hw_control controller;
+	struct fsl_elbc_mtd *chips[MAX_BANKS];
+
+	u8 __iomem *addr;        /* Address of assigned FCM buffer        */
+	unsigned int page;       /* Last page written to / read from      */
+	unsigned int read_bytes; /* Number of bytes read during command   */
+	unsigned int column;     /* Saved column from SEQIN               */
+	unsigned int index;      /* Pointer to next byte to 'read'        */
+	unsigned int status;     /* status read from LTESR after last op  */
+	unsigned int mdr;        /* UPM/FCM Data Register value           */
+	unsigned int use_mdr;    /* Non zero if the MDR is to be set      */
+	unsigned int oob;        /* Non zero if operating on OOB data     */
+	unsigned int counter;	 /* counter for the initializations	  */
+	unsigned int max_bitflips;  /* Saved during READ0 cmd		  */
+};
+
+/* These map to the positions used by the FCM hardware ECC generator */
+
+static int fsl_elbc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (16 * section) + 6;
+	if (priv->fmr & FMR_ECCM)
+		oobregion->offset += 2;
+
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int fsl_elbc_ooblayout_free(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+
+	if (section > chip->ecc.steps)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		if (mtd->writesize > 512)
+			oobregion->offset++;
+		oobregion->length = (priv->fmr & FMR_ECCM) ? 7 : 5;
+	} else {
+		oobregion->offset = (16 * section) -
+				    ((priv->fmr & FMR_ECCM) ? 5 : 7);
+		if (section < chip->ecc.steps)
+			oobregion->length = 13;
+		else
+			oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_elbc_ooblayout_ops = {
+	.ecc = fsl_elbc_ooblayout_ecc,
+	.free = fsl_elbc_ooblayout_free,
+};
+
+/*
+ * ELBC may use HW ECC, so the OOB offsets that the NAND core uses for the bbt
+ * would interfere with the ECC positions; that's why we implement our own
+ * descriptors.  OOB {11, 5} works for both SP and LP chips, with ECCM = 1 and
+ * ECCM = 0.
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+		   NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	11,
+	.len = 4,
+	.veroffs = 15,
+	.maxblocks = 4,
+	.pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+		   NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	11,
+	.len = 4,
+	.veroffs = 15,
+	.maxblocks = 4,
+	.pattern = mirror_pattern,
+};
+
+/*=================================*/
+
+/*
+ * Set up the FCM hardware block and page address fields, and the fcm
+ * structure addr field to point to the correct FCM buffer in memory
+ */
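+/*
+ * For example, with a large page chip and page_addr 0x41, the bits above the
+ * lowest six (0x41 >> 6 = 1) land in FBAR, the lowest six bits (1) land in
+ * FPAR[PI], and buf_num = (0x41 & 1) << 2 = 4, so the FCM buffer at
+ * vbase + 4 KiB is used.
+ */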
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+	int buf_num;
+
+	elbc_fcm_ctrl->page = page_addr;
+
+	if (priv->page_size) {
+		/*
+		 * large page size chip : FPAR[PI] saves the lowest 6 bits,
+		 *                        FBAR[BLK] saves the other bits.
+		 */
+		out_be32(&lbc->fbar, page_addr >> 6);
+		out_be32(&lbc->fpar,
+		         ((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
+		         (oob ? FPAR_LP_MS : 0) | column);
+		buf_num = (page_addr & 1) << 2;
+	} else {
+		/*
+		 * small page size chip : FPAR[PI] saves the lowest 5 bits,
+		 *                        FBAR[BLK] saves the other bits.
+		 */
+		out_be32(&lbc->fbar, page_addr >> 5);
+		out_be32(&lbc->fpar,
+		         ((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
+		         (oob ? FPAR_SP_MS : 0) | column);
+		buf_num = page_addr & 7;
+	}
+
+	elbc_fcm_ctrl->addr = priv->vbase + buf_num * 1024;
+	elbc_fcm_ctrl->index = column;
+
+	/* for OOB data point to the second half of the buffer */
+	if (oob)
+		elbc_fcm_ctrl->index += priv->page_size ? 2048 : 512;
+
+	dev_vdbg(priv->dev, "set_addr: bank=%d, "
+			    "elbc_fcm_ctrl->addr=0x%p (0x%p), "
+	                    "index %x, pes %d ps %d\n",
+		 buf_num, elbc_fcm_ctrl->addr, priv->vbase,
+		 elbc_fcm_ctrl->index,
+	         chip->phys_erase_shift, chip->page_shift);
+}
+
+/*
+ * execute FCM command and wait for it to complete
+ */
+static int fsl_elbc_run_command(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+	/* Setup the FMR[OP] to execute without write protection */
+	out_be32(&lbc->fmr, priv->fmr | 3);
+	if (elbc_fcm_ctrl->use_mdr)
+		out_be32(&lbc->mdr, elbc_fcm_ctrl->mdr);
+
+	dev_vdbg(priv->dev,
+	         "fsl_elbc_run_command: fmr=%08x fir=%08x fcr=%08x\n",
+	         in_be32(&lbc->fmr), in_be32(&lbc->fir), in_be32(&lbc->fcr));
+	dev_vdbg(priv->dev,
+	         "fsl_elbc_run_command: fbar=%08x fpar=%08x "
+	         "fbcr=%08x bank=%d\n",
+	         in_be32(&lbc->fbar), in_be32(&lbc->fpar),
+	         in_be32(&lbc->fbcr), priv->bank);
+
+	ctrl->irq_status = 0;
+	/* execute special operation */
+	out_be32(&lbc->lsor, priv->bank);
+
+	/* wait for FCM complete flag or timeout */
+	wait_event_timeout(ctrl->irq_wait, ctrl->irq_status,
+	                   FCM_TIMEOUT_MSECS * HZ/1000);
+	elbc_fcm_ctrl->status = ctrl->irq_status;
+	/* store mdr value in case it was needed */
+	if (elbc_fcm_ctrl->use_mdr)
+		elbc_fcm_ctrl->mdr = in_be32(&lbc->mdr);
+
+	elbc_fcm_ctrl->use_mdr = 0;
+
+	if (elbc_fcm_ctrl->status != LTESR_CC) {
+		dev_info(priv->dev,
+		         "command failed: fir %x fcr %x status %x mdr %x\n",
+		         in_be32(&lbc->fir), in_be32(&lbc->fcr),
+			 elbc_fcm_ctrl->status, elbc_fcm_ctrl->mdr);
+		return -EIO;
+	}
+
+	if (chip->ecc.mode != NAND_ECC_HW)
+		return 0;
+
+	elbc_fcm_ctrl->max_bitflips = 0;
+
+	if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
+		uint32_t lteccr = in_be32(&lbc->lteccr);
+		/*
+		 * if command was a full page read and the ELBC
+		 * has the LTECCR register, then bits 12-15 (ppc order) of
+		 * LTECCR indicate which 512 byte sub-pages had fixed errors.
+		 * bits 28-31 are uncorrectable errors, marked elsewhere.
+		 * for small page nand only 1 bit is used.
+		 * if the ELBC doesn't have the lteccr register it reads 0
+		 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
+		 * count the number of sub-pages with bitflips and update
+		 * ecc_stats.corrected accordingly.
+		 */
+		if (lteccr & 0x000F000F)
+			out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
+		if (lteccr & 0x000F0000) {
+			mtd->ecc_stats.corrected++;
+			elbc_fcm_ctrl->max_bitflips = 1;
+		}
+	}
+
+	return 0;
+}
+
+static void fsl_elbc_do_read(struct nand_chip *chip, int oob)
+{
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+	if (priv->page_size) {
+		out_be32(&lbc->fir,
+		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_CA  << FIR_OP1_SHIFT) |
+		         (FIR_OP_PA  << FIR_OP2_SHIFT) |
+		         (FIR_OP_CM1 << FIR_OP3_SHIFT) |
+		         (FIR_OP_RBW << FIR_OP4_SHIFT));
+
+		out_be32(&lbc->fcr, (NAND_CMD_READ0 << FCR_CMD0_SHIFT) |
+		                    (NAND_CMD_READSTART << FCR_CMD1_SHIFT));
+	} else {
+		out_be32(&lbc->fir,
+		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_CA  << FIR_OP1_SHIFT) |
+		         (FIR_OP_PA  << FIR_OP2_SHIFT) |
+		         (FIR_OP_RBW << FIR_OP3_SHIFT));
+
+		if (oob)
+			out_be32(&lbc->fcr, NAND_CMD_READOOB << FCR_CMD0_SHIFT);
+		else
+			out_be32(&lbc->fcr, NAND_CMD_READ0 << FCR_CMD0_SHIFT);
+	}
+}
+
+/* cmdfunc send commands to the FCM */
+static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+                             int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+
+	elbc_fcm_ctrl->use_mdr = 0;
+
+	/* clear the read buffer */
+	elbc_fcm_ctrl->read_bytes = 0;
+	if (command != NAND_CMD_PAGEPROG)
+		elbc_fcm_ctrl->index = 0;
+
+	switch (command) {
+	/* READ0 and READ1 read the entire buffer to use hardware ECC. */
+	case NAND_CMD_READ1:
+		column += 256;
+
+	/* fall-through */
+	case NAND_CMD_READ0:
+		dev_dbg(priv->dev,
+		        "fsl_elbc_cmdfunc: NAND_CMD_READ0, page_addr:"
+		        " 0x%x, column: 0x%x.\n", page_addr, column);
+
+
+		out_be32(&lbc->fbcr, 0); /* read entire page to enable ECC */
+		set_addr(mtd, 0, page_addr, 0);
+
+		elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+		elbc_fcm_ctrl->index += column;
+
+		fsl_elbc_do_read(chip, 0);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* READOOB reads only the OOB because no ECC is performed. */
+	case NAND_CMD_READOOB:
+		dev_vdbg(priv->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_READOOB, page_addr:"
+			 " 0x%x, column: 0x%x.\n", page_addr, column);
+
+		out_be32(&lbc->fbcr, mtd->oobsize - column);
+		set_addr(mtd, column, page_addr, 1);
+
+		elbc_fcm_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+		fsl_elbc_do_read(chip, 1);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	case NAND_CMD_READID:
+	case NAND_CMD_PARAM:
+		dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
+
+		out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+		                    (FIR_OP_UA  << FIR_OP1_SHIFT) |
+		                    (FIR_OP_RBW << FIR_OP2_SHIFT));
+		out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+		/*
+		 * Although READID currently needs only 8 bytes, always read
+		 * the maximum of 256 bytes (needed for PARAM).
+		 */
+		out_be32(&lbc->fbcr, 256);
+		elbc_fcm_ctrl->read_bytes = 256;
+		elbc_fcm_ctrl->use_mdr = 1;
+		elbc_fcm_ctrl->mdr = column;
+		set_addr(mtd, 0, 0, 0);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* ERASE1 stores the block and page address */
+	case NAND_CMD_ERASE1:
+		dev_vdbg(priv->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_ERASE1, "
+		         "page_addr: 0x%x.\n", page_addr);
+		set_addr(mtd, 0, page_addr, 0);
+		return;
+
+	/* ERASE2 uses the block and page address from ERASE1 */
+	case NAND_CMD_ERASE2:
+		dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_ERASE2.\n");
+
+		out_be32(&lbc->fir,
+		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_PA  << FIR_OP1_SHIFT) |
+		         (FIR_OP_CM2 << FIR_OP2_SHIFT) |
+		         (FIR_OP_CW1 << FIR_OP3_SHIFT) |
+		         (FIR_OP_RS  << FIR_OP4_SHIFT));
+
+		out_be32(&lbc->fcr,
+		         (NAND_CMD_ERASE1 << FCR_CMD0_SHIFT) |
+		         (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
+		         (NAND_CMD_ERASE2 << FCR_CMD2_SHIFT));
+
+		out_be32(&lbc->fbcr, 0);
+		elbc_fcm_ctrl->read_bytes = 0;
+		elbc_fcm_ctrl->use_mdr = 1;
+
+		fsl_elbc_run_command(mtd);
+		return;
+
+	/* SEQIN sets up the addr buffer and all registers except the length */
+	case NAND_CMD_SEQIN: {
+		__be32 fcr;
+		dev_vdbg(priv->dev,
+			 "fsl_elbc_cmdfunc: NAND_CMD_SEQIN/PAGE_PROG, "
+		         "page_addr: 0x%x, column: 0x%x.\n",
+		         page_addr, column);
+
+		elbc_fcm_ctrl->column = column;
+		elbc_fcm_ctrl->use_mdr = 1;
+
+		if (column >= mtd->writesize) {
+			/* OOB area */
+			column -= mtd->writesize;
+			elbc_fcm_ctrl->oob = 1;
+		} else {
+			WARN_ON(column != 0);
+			elbc_fcm_ctrl->oob = 0;
+		}
+
+		fcr = (NAND_CMD_STATUS   << FCR_CMD1_SHIFT) |
+		      (NAND_CMD_SEQIN    << FCR_CMD2_SHIFT) |
+		      (NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
+
+		if (priv->page_size) {
+			out_be32(&lbc->fir,
+			         (FIR_OP_CM2 << FIR_OP0_SHIFT) |
+			         (FIR_OP_CA  << FIR_OP1_SHIFT) |
+			         (FIR_OP_PA  << FIR_OP2_SHIFT) |
+			         (FIR_OP_WB  << FIR_OP3_SHIFT) |
+			         (FIR_OP_CM3 << FIR_OP4_SHIFT) |
+			         (FIR_OP_CW1 << FIR_OP5_SHIFT) |
+			         (FIR_OP_RS  << FIR_OP6_SHIFT));
+		} else {
+			out_be32(&lbc->fir,
+			         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+			         (FIR_OP_CM2 << FIR_OP1_SHIFT) |
+			         (FIR_OP_CA  << FIR_OP2_SHIFT) |
+			         (FIR_OP_PA  << FIR_OP3_SHIFT) |
+			         (FIR_OP_WB  << FIR_OP4_SHIFT) |
+			         (FIR_OP_CM3 << FIR_OP5_SHIFT) |
+			         (FIR_OP_CW1 << FIR_OP6_SHIFT) |
+			         (FIR_OP_RS  << FIR_OP7_SHIFT));
+
+			if (elbc_fcm_ctrl->oob)
+				/* OOB area --> READOOB */
+				fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
+			else
+				/* First 256 bytes --> READ0 */
+				fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
+		}
+
+		out_be32(&lbc->fcr, fcr);
+		set_addr(mtd, column, page_addr, elbc_fcm_ctrl->oob);
+		return;
+	}
+
+	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+	case NAND_CMD_PAGEPROG: {
+		dev_vdbg(priv->dev,
+		         "fsl_elbc_cmdfunc: NAND_CMD_PAGEPROG "
+			 "writing %d bytes.\n", elbc_fcm_ctrl->index);
+
+		/* if the write did not start at 0 or is not a full page
+		 * then set the exact length, otherwise use a full page
+		 * write so the HW generates the ECC.
+		 */
+		if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
+		    elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
+			out_be32(&lbc->fbcr,
+				elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
+		else
+			out_be32(&lbc->fbcr, 0);
+
+		fsl_elbc_run_command(mtd);
+		return;
+	}
+
+	/* CMD_STATUS must read the status byte while CEB is active */
+	/* Note - it does not wait for the ready line */
+	case NAND_CMD_STATUS:
+		out_be32(&lbc->fir,
+		         (FIR_OP_CM0 << FIR_OP0_SHIFT) |
+		         (FIR_OP_RBW << FIR_OP1_SHIFT));
+		out_be32(&lbc->fcr, NAND_CMD_STATUS << FCR_CMD0_SHIFT);
+		out_be32(&lbc->fbcr, 1);
+		set_addr(mtd, 0, 0, 0);
+		elbc_fcm_ctrl->read_bytes = 1;
+
+		fsl_elbc_run_command(mtd);
+
+		/* The chip always seems to report that it is
+		 * write-protected, even when it is not.
+		 */
+		setbits8(elbc_fcm_ctrl->addr, NAND_STATUS_WP);
+		return;
+
+	/* RESET without waiting for the ready line */
+	case NAND_CMD_RESET:
+		dev_dbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_RESET.\n");
+		out_be32(&lbc->fir, FIR_OP_CM0 << FIR_OP0_SHIFT);
+		out_be32(&lbc->fcr, NAND_CMD_RESET << FCR_CMD0_SHIFT);
+		fsl_elbc_run_command(mtd);
+		return;
+
+	default:
+		dev_err(priv->dev,
+		        "fsl_elbc_cmdfunc: error, unsupported command 0x%x.\n",
+		        command);
+	}
+}
+
+static void fsl_elbc_select_chip(struct mtd_info *mtd, int chip)
+{
+	/* The hardware does not seem to support multiple
+	 * chips per bank.
+	 */
+}
+
+/*
+ * Write buf to the FCM Controller Data Buffer
+ */
+static void fsl_elbc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+	unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+	if (len <= 0) {
+		dev_err(priv->dev, "write_buf of %d bytes", len);
+		elbc_fcm_ctrl->status = 0;
+		return;
+	}
+
+	if ((unsigned int)len > bufsize - elbc_fcm_ctrl->index) {
+		dev_err(priv->dev,
+		        "write_buf beyond end of buffer "
+		        "(%d requested, %u available)\n",
+			len, bufsize - elbc_fcm_ctrl->index);
+		len = bufsize - elbc_fcm_ctrl->index;
+	}
+
+	memcpy_toio(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], buf, len);
+	/*
+	 * This is a workaround for the weird eLBC hangs during NAND writes.
+	 * Scott Wood says: "...perhaps difference in how long it takes a
+	 * write to make it through the localbus compared to a write to IMMR
+	 * is causing problems, and sync isn't helping for some reason."
+	 * Reading back the last byte helps, though.
+	 */
+	in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index] + len - 1);
+
+	elbc_fcm_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the FCM hardware buffer if it has any data left;
+ * otherwise log an error and return ERR_BYTE.
+ */
+static u8 fsl_elbc_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+	/* If there are still bytes in the FCM, then use the next byte. */
+	if (elbc_fcm_ctrl->index < elbc_fcm_ctrl->read_bytes)
+		return in_8(&elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index++]);
+
+	dev_err(priv->dev, "read_byte beyond end of buffer\n");
+	return ERR_BYTE;
+}
+
+/*
+ * Read from the FCM Controller Data Buffer
+ */
+static void fsl_elbc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+	int avail;
+
+	if (len < 0)
+		return;
+
+	avail = min((unsigned int)len,
+			elbc_fcm_ctrl->read_bytes - elbc_fcm_ctrl->index);
+	memcpy_fromio(buf, &elbc_fcm_ctrl->addr[elbc_fcm_ctrl->index], avail);
+	elbc_fcm_ctrl->index += avail;
+
+	if (len > avail)
+		dev_err(priv->dev,
+		        "read_buf beyond end of buffer "
+		        "(%d requested, %d available)\n",
+		        len, avail);
+}
+
+/* This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_elbc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+
+	if (elbc_fcm_ctrl->status != LTESR_CC)
+		return NAND_STATUS_FAIL;
+
+	/* The chip always seems to report that it is
+	 * write-protected, even when it is not.
+	 */
+	return (elbc_fcm_ctrl->mdr & 0xff) | NAND_STATUS_WP;
+}
+
+static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+	unsigned int al;
+
+	/* calculate FMR Address Length field */
+	al = 0;
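+	/* one extra address byte for each byte of page address above 16 bits */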
+	if (chip->pagemask & 0xffff0000)
+		al++;
+	if (chip->pagemask & 0xff000000)
+		al++;
+
+	priv->fmr |= al << FMR_AL_SHIFT;
+
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
+	        chip->numchips);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->chipsize = %lld\n",
+	        chip->chipsize);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->pagemask = %8x\n",
+	        chip->pagemask);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_delay = %d\n",
+	        chip->chip_delay);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->badblockpos = %d\n",
+	        chip->badblockpos);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->chip_shift = %d\n",
+	        chip->chip_shift);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->page_shift = %d\n",
+	        chip->page_shift);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->phys_erase_shift = %d\n",
+	        chip->phys_erase_shift);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.mode = %d\n",
+	        chip->ecc.mode);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.steps = %d\n",
+	        chip->ecc.steps);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.bytes = %d\n",
+	        chip->ecc.bytes);
+	dev_dbg(priv->dev, "fsl_elbc_init: nand->ecc.total = %d\n",
+	        chip->ecc.total);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->ooblayout = %p\n",
+		mtd->ooblayout);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->flags = %08x\n", mtd->flags);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->size = %lld\n", mtd->size);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->erasesize = %d\n",
+	        mtd->erasesize);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->writesize = %d\n",
+	        mtd->writesize);
+	dev_dbg(priv->dev, "fsl_elbc_init: mtd->oobsize = %d\n",
+	        mtd->oobsize);
+
+	/* adjust Option Register and ECC to match Flash page size */
+	if (mtd->writesize == 512) {
+		priv->page_size = 0;
+		clrbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+	} else if (mtd->writesize == 2048) {
+		priv->page_size = 1;
+		setbits32(&lbc->bank[priv->bank].or, OR_FCM_PGS);
+	} else {
+		dev_err(priv->dev,
+		        "fsl_elbc_init: page size %d is not supported\n",
+		        mtd->writesize);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			      uint8_t *buf, int oob_required, int page)
+{
+	struct fsl_elbc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+
+	fsl_elbc_read_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
+		mtd->ecc_stats.failed++;
+
+	return elbc_fcm_ctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const uint8_t *buf, int oob_required, int page)
+{
+	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_elbc_write_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+				uint32_t offset, uint32_t data_len,
+				const uint8_t *buf, int oob_required, int page)
+{
+	fsl_elbc_write_buf(mtd, buf, mtd->writesize);
+	fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
+{
+	struct fsl_lbc_ctrl *ctrl = priv->ctrl;
+	struct fsl_lbc_regs __iomem *lbc = ctrl->regs;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
+	struct nand_chip *chip = &priv->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	dev_dbg(priv->dev, "eLBC Set Information for bank %d\n", priv->bank);
+
+	/* Fill in fsl_elbc_mtd structure */
+	mtd->dev.parent = priv->dev;
+	nand_set_flash_node(chip, priv->dev->of_node);
+
+	/* set timeout to maximum */
+	priv->fmr = 15 << FMR_CWTO_SHIFT;
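+	/* banks configured for large pages (OR[PGS]) use FMR[ECCM] = 1 */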
+	if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+		priv->fmr |= FMR_ECCM;
+
+	/* fill in nand_chip structure */
+	/* set up function call table */
+	chip->read_byte = fsl_elbc_read_byte;
+	chip->write_buf = fsl_elbc_write_buf;
+	chip->read_buf = fsl_elbc_read_buf;
+	chip->select_chip = fsl_elbc_select_chip;
+	chip->cmdfunc = fsl_elbc_cmdfunc;
+	chip->waitfunc = fsl_elbc_wait;
+
+	chip->bbt_td = &bbt_main_descr;
+	chip->bbt_md = &bbt_mirror_descr;
+
+	/* set up nand options */
+	chip->bbt_options = NAND_BBT_USE_FLASH;
+
+	chip->controller = &elbc_fcm_ctrl->controller;
+	nand_set_controller_data(chip, priv);
+
+	chip->ecc.read_page = fsl_elbc_read_page;
+	chip->ecc.write_page = fsl_elbc_write_page;
+	chip->ecc.write_subpage = fsl_elbc_write_subpage;
+
+	/* If CS Base Register selects full hardware ECC then use it */
+	if ((in_be32(&lbc->bank[priv->bank].br) & BR_DECC) ==
+	    BR_DECC_CHK_GEN) {
+		chip->ecc.mode = NAND_ECC_HW;
+		mtd_set_ooblayout(mtd, &fsl_elbc_ooblayout_ops);
+		chip->ecc.size = 512;
+		chip->ecc.bytes = 3;
+		chip->ecc.strength = 1;
+	} else {
+		/* otherwise fall back to default software ECC */
+		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_HAMMING;
+	}
+
+	return 0;
+}
+
+static int fsl_elbc_chip_remove(struct fsl_elbc_mtd *priv)
+{
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = priv->ctrl->nand;
+	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+
+	nand_release(mtd);
+
+	kfree(mtd->name);
+
+	if (priv->vbase)
+		iounmap(priv->vbase);
+
+	elbc_fcm_ctrl->chips[priv->bank] = NULL;
+	kfree(priv);
+	return 0;
+}
+
+static DEFINE_MUTEX(fsl_elbc_nand_mutex);
+
+static int fsl_elbc_nand_probe(struct platform_device *pdev)
+{
+	struct fsl_lbc_regs __iomem *lbc;
+	struct fsl_elbc_mtd *priv;
+	struct resource res;
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl;
+	static const char *part_probe_types[]
+		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
+	int ret;
+	int bank;
+	struct device *dev;
+	struct device_node *node = pdev->dev.of_node;
+	struct mtd_info *mtd;
+
+	if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs)
+		return -ENODEV;
+	lbc = fsl_lbc_ctrl_dev->regs;
+	dev = fsl_lbc_ctrl_dev->dev;
+
+	/* get, allocate and map the memory resource */
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		dev_err(dev, "failed to get resource\n");
+		return ret;
+	}
+
+	/* find which chip select it is connected to */
+	for (bank = 0; bank < MAX_BANKS; bank++)
+		if ((in_be32(&lbc->bank[bank].br) & BR_V) &&
+		    (in_be32(&lbc->bank[bank].br) & BR_MSEL) == BR_MS_FCM &&
+		    (in_be32(&lbc->bank[bank].br) &
+		     in_be32(&lbc->bank[bank].or) & BR_BA)
+		     == fsl_lbc_addr(res.start))
+			break;
+
+	if (bank >= MAX_BANKS) {
+		dev_err(dev, "address did not match any chip selects\n");
+		return -ENODEV;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_lock(&fsl_elbc_nand_mutex);
+	if (!fsl_lbc_ctrl_dev->nand) {
+		elbc_fcm_ctrl = kzalloc(sizeof(*elbc_fcm_ctrl), GFP_KERNEL);
+		if (!elbc_fcm_ctrl) {
+			mutex_unlock(&fsl_elbc_nand_mutex);
+			ret = -ENOMEM;
+			goto err;
+		}
+		elbc_fcm_ctrl->counter++;
+
+		nand_hw_control_init(&elbc_fcm_ctrl->controller);
+		fsl_lbc_ctrl_dev->nand = elbc_fcm_ctrl;
+	} else {
+		elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+	}
+	mutex_unlock(&fsl_elbc_nand_mutex);
+
+	elbc_fcm_ctrl->chips[bank] = priv;
+	priv->bank = bank;
+	priv->ctrl = fsl_lbc_ctrl_dev;
+	priv->dev = &pdev->dev;
+	dev_set_drvdata(priv->dev, priv);
+
+	priv->vbase = ioremap(res.start, resource_size(&res));
+	if (!priv->vbase) {
+		dev_err(dev, "failed to map chip region\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	mtd = nand_to_mtd(&priv->chip);
+	mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+	if (!mtd->name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = fsl_elbc_chip_init(priv);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		goto err;
+
+	ret = fsl_elbc_chip_init_tail(mtd);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		goto err;
+
+	/* First look for RedBoot table or partitions on the command
+	 * line, these take precedence over device tree information */
+	mtd_device_parse_register(mtd, part_probe_types, NULL,
+				  NULL, 0);
+
+	printk(KERN_INFO "eLBC NAND device at 0x%llx, bank %d\n",
+	       (unsigned long long)res.start, priv->bank);
+	return 0;
+
+err:
+	fsl_elbc_chip_remove(priv);
+	return ret;
+}
+
+static int fsl_elbc_nand_remove(struct platform_device *pdev)
+{
+	struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = fsl_lbc_ctrl_dev->nand;
+	struct fsl_elbc_mtd *priv = dev_get_drvdata(&pdev->dev);
+
+	fsl_elbc_chip_remove(priv);
+
+	mutex_lock(&fsl_elbc_nand_mutex);
+	elbc_fcm_ctrl->counter--;
+	if (!elbc_fcm_ctrl->counter) {
+		fsl_lbc_ctrl_dev->nand = NULL;
+		kfree(elbc_fcm_ctrl);
+	}
+	mutex_unlock(&fsl_elbc_nand_mutex);
+
+	return 0;
+}
+
+static const struct of_device_id fsl_elbc_nand_match[] = {
+	{ .compatible = "fsl,elbc-fcm-nand", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fsl_elbc_nand_match);
+
+static struct platform_driver fsl_elbc_nand_driver = {
+	.driver = {
+		.name = "fsl,elbc-fcm-nand",
+		.of_match_table = fsl_elbc_nand_match,
+	},
+	.probe = fsl_elbc_nand_probe,
+	.remove = fsl_elbc_nand_remove,
+};
+
+module_platform_driver(fsl_elbc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Enhanced Local Bus Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/rawnand/fsl_ifc_nand.c b/drivers/mtd/nand/rawnand/fsl_ifc_nand.c
new file mode 100644
index 000000000000..bcf7f0b8abf9
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/fsl_ifc_nand.c
@@ -0,0 +1,1095 @@ 
+/*
+ * Freescale Integrated Flash Controller NAND driver
+ *
+ * Copyright 2011-2012 Freescale Semiconductor, Inc
+ *
+ * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/fsl_ifc.h>
+
+#define ERR_BYTE		0xFF /* Value returned for read
+					bytes when read failed	*/
+#define IFC_TIMEOUT_MSECS	500  /* Maximum number of mSecs to wait
+					for IFC NAND Machine	*/
+
+struct fsl_ifc_ctrl;
+
+/* mtd information per set */
+struct fsl_ifc_mtd {
+	struct nand_chip chip;
+	struct fsl_ifc_ctrl *ctrl;
+
+	struct device *dev;
+	int bank;		/* Chip select bank number		*/
+	unsigned int bufnum_mask; /* bufnum = page & bufnum_mask */
+	u8 __iomem *vbase;      /* Chip select base virtual address	*/
+};
+
+/* overview of the fsl ifc controller */
+struct fsl_ifc_nand_ctrl {
+	struct nand_hw_control controller;
+	struct fsl_ifc_mtd *chips[FSL_IFC_BANK_COUNT];
+
+	void __iomem *addr;	/* Address of assigned IFC buffer	*/
+	unsigned int page;	/* Last page written to / read from	*/
+	unsigned int read_bytes;/* Number of bytes read during command	*/
+	unsigned int column;	/* Saved column from SEQIN		*/
+	unsigned int index;	/* Pointer to next byte to 'read'	*/
+	unsigned int oob;	/* Non zero if operating on OOB data	*/
+	unsigned int eccread;	/* Non zero for a full-page ECC read	*/
+	unsigned int counter;	/* counter for the initializations	*/
+	unsigned int max_bitflips;  /* Saved during READ0 cmd		*/
+};
+
+static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
+
+/*
+ * Generic flash bbt descriptors
+ */
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+		   NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	2, /* 0 on 8-bit small page */
+	.len = 4,
+	.veroffs = 6,
+	.maxblocks = 4,
+	.pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
+		   NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	2, /* 0 on 8-bit small page */
+	.len = 4,
+	.veroffs = 6,
+	.maxblocks = 4,
+	.pattern = mirror_pattern,
+};
+
+static int fsl_ifc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 8;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int fsl_ifc_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (mtd->writesize == 512 &&
+	    !(chip->options & NAND_BUSWIDTH_16)) {
+		if (!section) {
+			oobregion->offset = 0;
+			oobregion->length = 5;
+		} else {
+			oobregion->offset = 6;
+			oobregion->length = 2;
+		}
+
+		return 0;
+	}
+
+	if (!section) {
+		oobregion->offset = 2;
+		oobregion->length = 6;
+	} else {
+		oobregion->offset = chip->ecc.total + 8;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsl_ifc_ooblayout_ops = {
+	.ecc = fsl_ifc_ooblayout_ecc,
+	.free = fsl_ifc_ooblayout_free,
+};
+
+/*
+ * Set up the IFC hardware block and page address fields, and the ifc nand
+ * structure addr field to point to the correct IFC buffer in memory
+ */
+static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+	int buf_num;
+
+	ifc_nand_ctrl->page = page_addr;
+	/* Program ROW0/COL0 */
+	ifc_out32(page_addr, &ifc->ifc_nand.row0);
+	ifc_out32((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0);
+
+	buf_num = page_addr & priv->bufnum_mask;
+
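+	/*
+	 * Each buffer spans 2 * writesize bytes of the mapped chip select:
+	 * main data first, then the OOB area at offset writesize.
+	 */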
+	ifc_nand_ctrl->addr = priv->vbase + buf_num * (mtd->writesize * 2);
+	ifc_nand_ctrl->index = column;
+
+	/* for OOB data point to the second half of the buffer */
+	if (oob)
+		ifc_nand_ctrl->index += mtd->writesize;
+}
+
+static int is_blank(struct mtd_info *mtd, unsigned int bufnum)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2);
+	u32 __iomem *mainarea = (u32 __iomem *)addr;
+	u8 __iomem *oob = addr + mtd->writesize;
+	struct mtd_oob_region oobregion = { };
+	int i, section = 0;
+
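+	/* an erased page reads back as all 0xff in both main and ECC bytes */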
+	for (i = 0; i < mtd->writesize / 4; i++) {
+		if (__raw_readl(&mainarea[i]) != 0xffffffff)
+			return 0;
+	}
+
+	mtd_ooblayout_ecc(mtd, section++, &oobregion);
+	while (oobregion.length) {
+		for (i = 0; i < oobregion.length; i++) {
+			if (__raw_readb(&oob[oobregion.offset + i]) != 0xff)
+				return 0;
+		}
+
+		mtd_ooblayout_ecc(mtd, section++, &oobregion);
+	}
+
+	return 1;
+}
+
+/*
+ * Return the number of ECC errors for the given buffer, extracted from
+ * the packed eccstat words (8 bits per buffer, low 4 bits used; a value
+ * of 15 means the errors were uncorrectable).
+ */
+static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl,
+			  u32 *eccstat, unsigned int bufnum)
+{
+	u32 reg = eccstat[bufnum / 4];
+	int errors;
+
+	errors = (reg >> ((3 - bufnum % 4) * 8)) & 15;
+
+	return errors;
+}
+
+/*
+ * execute IFC NAND command and wait for it to complete
+ */
+static void fsl_ifc_run_command(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+	u32 eccstat[4];
+	int i;
+
+	/* set the chip select for NAND Transaction */
+	ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT,
+		  &ifc->ifc_nand.nand_csel);
+
+	dev_vdbg(priv->dev,
+			"%s: fir0=%08x fcr0=%08x\n",
+			__func__,
+			ifc_in32(&ifc->ifc_nand.nand_fir0),
+			ifc_in32(&ifc->ifc_nand.nand_fcr0));
+
+	ctrl->nand_stat = 0;
+
+	/* start read/write seq */
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt);
+
+	/* wait for command complete flag or timeout */
+	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+	/* ctrl->nand_stat will be updated from IRQ context */
+	if (!ctrl->nand_stat)
+		dev_err(priv->dev, "Controller is not responding\n");
+	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_FTOER)
+		dev_err(priv->dev, "NAND Flash Timeout Error\n");
+	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
+		dev_err(priv->dev, "NAND Flash Write Protect Error\n");
+
+	nctrl->max_bitflips = 0;
+
+	if (nctrl->eccread) {
+		int errors;
+		int bufnum = nctrl->page & priv->bufnum_mask;
+		int sector = bufnum * chip->ecc.steps;
+		int sector_end = sector + chip->ecc.steps - 1;
+
+		for (i = sector / 4; i <= sector_end / 4; i++)
+			eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]);
+
+		for (i = sector; i <= sector_end; i++) {
+			errors = check_read_ecc(mtd, ctrl, eccstat, i);
+
+			if (errors == 15) {
+				/*
+				 * Uncorrectable error: acceptable only if the
+				 * whole page is blank.
+				 *
+				 * ECCER reporting is disabled because of
+				 * erratum IFC-A002770, so report the error
+				 * here if ECCSTAT shows an uncorrectable
+				 * error.
+				 */
+				if (!is_blank(mtd, bufnum))
+					ctrl->nand_stat |=
+						IFC_NAND_EVTER_STAT_ECCER;
+				break;
+			}
+
+			mtd->ecc_stats.corrected += errors;
+			nctrl->max_bitflips = max_t(unsigned int,
+						    nctrl->max_bitflips,
+						    errors);
+		}
+
+		nctrl->eccread = 0;
+	}
+}
+
+static void fsl_ifc_do_read(struct nand_chip *chip,
+			    int oob,
+			    struct mtd_info *mtd)
+{
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+	/* Program FIR/IFC_NAND_FCR0 for Small/Large page */
+	if (mtd->writesize > 512) {
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+		ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
+	} else {
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_RA0  << IFC_NAND_FIR0_OP2_SHIFT) |
+			  (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(0x0, &ifc->ifc_nand.nand_fir1);
+
+		if (oob)
+			ifc_out32(NAND_CMD_READOOB <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
+		else
+			ifc_out32(NAND_CMD_READ0 <<
+				  IFC_NAND_FCR0_CMD0_SHIFT,
+				  &ifc->ifc_nand.nand_fcr0);
+	}
+}
+
+/* cmdfunc sends commands to the IFC NAND Machine */
+static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
+			     int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+
+	/* clear the read buffer */
+	ifc_nand_ctrl->read_bytes = 0;
+	if (command != NAND_CMD_PAGEPROG)
+		ifc_nand_ctrl->index = 0;
+
+	switch (command) {
+	/* READ0 read the entire buffer to use hardware ECC. */
+	case NAND_CMD_READ0:
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+		set_addr(mtd, 0, page_addr, 0);
+
+		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+		ifc_nand_ctrl->index += column;
+
+		if (chip->ecc.mode == NAND_ECC_HW)
+			ifc_nand_ctrl->eccread = 1;
+
+		fsl_ifc_do_read(chip, 0, mtd);
+		fsl_ifc_run_command(mtd);
+		return;
+
+	/* READOOB reads only the OOB because no ECC is performed. */
+	case NAND_CMD_READOOB:
+		ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr);
+		set_addr(mtd, column, page_addr, 1);
+
+		ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize;
+
+		fsl_ifc_do_read(chip, 1, mtd);
+		fsl_ifc_run_command(mtd);
+
+		return;
+
+	case NAND_CMD_READID:
+	case NAND_CMD_PARAM: {
+		int timing = IFC_FIR_OP_RB;
+		if (command == NAND_CMD_PARAM)
+			timing = IFC_FIR_OP_RBCD;
+
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (timing << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(column, &ifc->ifc_nand.row3);
+
+		/*
+		 * Although READID currently needs only 8 bytes, always read
+		 * the maximum of 256 bytes (needed for PARAM).
+		 */
+		ifc_out32(256, &ifc->ifc_nand.nand_fbcr);
+		ifc_nand_ctrl->read_bytes = 256;
+
+		set_addr(mtd, 0, 0, 0);
+		fsl_ifc_run_command(mtd);
+		return;
+	}
+
+	/* ERASE1 stores the block and page address */
+	case NAND_CMD_ERASE1:
+		set_addr(mtd, 0, page_addr, 0);
+		return;
+
+	/* ERASE2 uses the block and page address from ERASE1 */
+	case NAND_CMD_ERASE2:
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+			  (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+
+		ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) |
+			  (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT),
+			  &ifc->ifc_nand.nand_fcr0);
+
+		ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+		ifc_nand_ctrl->read_bytes = 0;
+		fsl_ifc_run_command(mtd);
+		return;
+
+	/* SEQIN sets up the addr buffer and all registers except the length */
+	case NAND_CMD_SEQIN: {
+		u32 nand_fcr0;
+		ifc_nand_ctrl->column = column;
+		ifc_nand_ctrl->oob = 0;
+
+		if (mtd->writesize > 512) {
+			nand_fcr0 =
+				(NAND_CMD_SEQIN << IFC_NAND_FCR0_CMD0_SHIFT) |
+				(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) |
+				(NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT);
+
+			ifc_out32(
+				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) |
+				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) |
+				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT),
+				&ifc->ifc_nand.nand_fir0);
+			ifc_out32(
+				(IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
+		} else {
+			nand_fcr0 = ((NAND_CMD_PAGEPROG <<
+					IFC_NAND_FCR0_CMD1_SHIFT) |
+				    (NAND_CMD_SEQIN <<
+					IFC_NAND_FCR0_CMD2_SHIFT) |
+				    (NAND_CMD_STATUS <<
+					IFC_NAND_FCR0_CMD3_SHIFT));
+
+			ifc_out32(
+				(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+				(IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) |
+				(IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) |
+				(IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) |
+				(IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT),
+				&ifc->ifc_nand.nand_fir0);
+			ifc_out32(
+				(IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) |
+				(IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) |
+				(IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) |
+				(IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT),
+				&ifc->ifc_nand.nand_fir1);
+
+			if (column >= mtd->writesize)
+				nand_fcr0 |=
+				NAND_CMD_READOOB << IFC_NAND_FCR0_CMD0_SHIFT;
+			else
+				nand_fcr0 |=
+				NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT;
+		}
+
+		if (column >= mtd->writesize) {
+			/* OOB area --> READOOB */
+			column -= mtd->writesize;
+			ifc_nand_ctrl->oob = 1;
+		}
+		ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0);
+		set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob);
+		return;
+	}
+
+	/* PAGEPROG reuses all of the setup from SEQIN and adds the length */
+	case NAND_CMD_PAGEPROG: {
+		if (ifc_nand_ctrl->oob) {
+			ifc_out32(ifc_nand_ctrl->index -
+				  ifc_nand_ctrl->column,
+				  &ifc->ifc_nand.nand_fbcr);
+		} else {
+			ifc_out32(0, &ifc->ifc_nand.nand_fbcr);
+		}
+
+		fsl_ifc_run_command(mtd);
+		return;
+	}
+
+	case NAND_CMD_STATUS: {
+		void __iomem *addr;
+
+		ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+			  (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT),
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+		set_addr(mtd, 0, 0, 0);
+		ifc_nand_ctrl->read_bytes = 1;
+
+		fsl_ifc_run_command(mtd);
+
+		/*
+		 * The chip always seems to report that it is
+		 * write-protected, even when it is not.
+		 */
+		addr = ifc_nand_ctrl->addr;
+		if (chip->options & NAND_BUSWIDTH_16)
+			ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr);
+		else
+			ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr);
+		return;
+	}
+
+	case NAND_CMD_RESET:
+		ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT,
+			  &ifc->ifc_nand.nand_fir0);
+		ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT,
+			  &ifc->ifc_nand.nand_fcr0);
+		fsl_ifc_run_command(mtd);
+		return;
+
+	default:
+		dev_err(priv->dev, "%s: error, unsupported command 0x%x.\n",
+					__func__, command);
+	}
+}
+
+static void fsl_ifc_select_chip(struct mtd_info *mtd, int chip)
+{
+	/* The hardware does not seem to support multiple
+	 * chips per bank.
+	 */
+}
+
+/*
+ * Write buf to the IFC NAND Controller Data Buffer
+ */
+static void fsl_ifc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	unsigned int bufsize = mtd->writesize + mtd->oobsize;
+
+	if (len <= 0) {
+		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+		return;
+	}
+
+	if ((unsigned int)len > bufsize - ifc_nand_ctrl->index) {
+		dev_err(priv->dev,
+			"%s: beyond end of buffer (%d requested, %u available)\n",
+			__func__, len, bufsize - ifc_nand_ctrl->index);
+		len = bufsize - ifc_nand_ctrl->index;
+	}
+
+	memcpy_toio(ifc_nand_ctrl->addr + ifc_nand_ctrl->index, buf, len);
+	ifc_nand_ctrl->index += len;
+}
+
+/*
+ * Read a byte from the IFC hardware buffer.
+ * Read function for an 8-bit bus width.
+ */
+static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	unsigned int offset;
+
+	/*
+	 * If there are still bytes in the IFC buffer, then use the
+	 * next byte.
+	 */
+	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+		offset = ifc_nand_ctrl->index++;
+		return ifc_in8(ifc_nand_ctrl->addr + offset);
+	}
+
+	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+	return ERR_BYTE;
+}
+
+/*
+ * Read two bytes from the IFC hardware buffer.
+ * Read function for a 16-bit bus width.
+ */
+static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	uint16_t data;
+
+	/*
+	 * If there are still bytes in the IFC buffer, then use the
+	 * next byte.
+	 */
+	if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) {
+		data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index);
+		ifc_nand_ctrl->index += 2;
+		return (uint8_t) data;
+	}
+
+	dev_err(priv->dev, "%s: beyond end of buffer\n", __func__);
+	return ERR_BYTE;
+}
+
+/*
+ * Read from the IFC Controller Data Buffer
+ */
+static void fsl_ifc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	int avail;
+
+	if (len < 0) {
+		dev_err(priv->dev, "%s: len %d bytes", __func__, len);
+		return;
+	}
+
+	avail = min((unsigned int)len,
+			ifc_nand_ctrl->read_bytes - ifc_nand_ctrl->index);
+	memcpy_fromio(buf, ifc_nand_ctrl->addr + ifc_nand_ctrl->index, avail);
+	ifc_nand_ctrl->index += avail;
+
+	if (len > avail)
+		dev_err(priv->dev,
+			"%s: beyond end of buffer (%d requested, %d available)\n",
+			__func__, len, avail);
+}
+
+/*
+ * This function is called after Program and Erase Operations to
+ * check for success or failure.
+ */
+static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs;
+	u32 nand_fsr;
+
+	/* Use READ_STATUS command, but wait for the device to be ready */
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		  (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT),
+		  &ifc->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT,
+		  &ifc->ifc_nand.nand_fcr0);
+	ifc_out32(1, &ifc->ifc_nand.nand_fbcr);
+	set_addr(mtd, 0, 0, 0);
+	ifc_nand_ctrl->read_bytes = 1;
+
+	fsl_ifc_run_command(mtd);
+
+	nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr);
+
+	/*
+	 * The chip always seems to report that it is
+	 * write-protected, even when it is not.
+	 */
+	return nand_fsr | NAND_STATUS_WP;
+}
+
+static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			     uint8_t *buf, int oob_required, int page)
+{
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
+
+	fsl_ifc_read_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
+		dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
+
+	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+		mtd->ecc_stats.failed++;
+
+	return nctrl->max_bitflips;
+}
+
+/* ECC will be calculated automatically, and errors will be detected in
+ * waitfunc.
+ */
+static int fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			       const uint8_t *buf, int oob_required, int page)
+{
+	fsl_ifc_write_buf(mtd, buf, mtd->writesize);
+	fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static int fsl_ifc_chip_init_tail(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_ifc_mtd *priv = nand_get_controller_data(chip);
+
+	dev_dbg(priv->dev, "%s: nand->numchips = %d\n", __func__,
+							chip->numchips);
+	dev_dbg(priv->dev, "%s: nand->chipsize = %lld\n", __func__,
+							chip->chipsize);
+	dev_dbg(priv->dev, "%s: nand->pagemask = %8x\n", __func__,
+							chip->pagemask);
+	dev_dbg(priv->dev, "%s: nand->chip_delay = %d\n", __func__,
+							chip->chip_delay);
+	dev_dbg(priv->dev, "%s: nand->badblockpos = %d\n", __func__,
+							chip->badblockpos);
+	dev_dbg(priv->dev, "%s: nand->chip_shift = %d\n", __func__,
+							chip->chip_shift);
+	dev_dbg(priv->dev, "%s: nand->page_shift = %d\n", __func__,
+							chip->page_shift);
+	dev_dbg(priv->dev, "%s: nand->phys_erase_shift = %d\n", __func__,
+							chip->phys_erase_shift);
+	dev_dbg(priv->dev, "%s: nand->ecc.mode = %d\n", __func__,
+							chip->ecc.mode);
+	dev_dbg(priv->dev, "%s: nand->ecc.steps = %d\n", __func__,
+							chip->ecc.steps);
+	dev_dbg(priv->dev, "%s: nand->ecc.bytes = %d\n", __func__,
+							chip->ecc.bytes);
+	dev_dbg(priv->dev, "%s: nand->ecc.total = %d\n", __func__,
+							chip->ecc.total);
+	dev_dbg(priv->dev, "%s: mtd->ooblayout = %p\n", __func__,
+							mtd->ooblayout);
+	dev_dbg(priv->dev, "%s: mtd->flags = %08x\n", __func__, mtd->flags);
+	dev_dbg(priv->dev, "%s: mtd->size = %lld\n", __func__, mtd->size);
+	dev_dbg(priv->dev, "%s: mtd->erasesize = %d\n", __func__,
+							mtd->erasesize);
+	dev_dbg(priv->dev, "%s: mtd->writesize = %d\n", __func__,
+							mtd->writesize);
+	dev_dbg(priv->dev, "%s: mtd->oobsize = %d\n", __func__,
+							mtd->oobsize);
+
+	return 0;
+}
+
+static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv)
+{
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+	uint32_t csor = 0, csor_8k = 0, csor_ext = 0;
+	uint32_t cs = priv->bank;
+
+	/* Save CSOR and CSOR_ext */
+	csor = ifc_in32(&ifc_global->csor_cs[cs].csor);
+	csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext);
+
+	/* change the page size to 8K and the spare size to 1K */
+	csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000;
+	ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor);
+	ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext);
+
+	/* READID */
+	ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) |
+		    (IFC_FIR_OP_UA  << IFC_NAND_FIR0_OP1_SHIFT) |
+		    (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT),
+		    &ifc_runtime->ifc_nand.nand_fir0);
+	ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT,
+		    &ifc_runtime->ifc_nand.nand_fcr0);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.row3);
+
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr);
+
+	/* Program ROW0/COL0 */
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.row0);
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.col0);
+
+	/* set the chip select for NAND Transaction */
+	ifc_out32(cs << IFC_NAND_CSEL_SHIFT,
+		&ifc_runtime->ifc_nand.nand_csel);
+
+	/* start read seq */
+	ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT,
+		&ifc_runtime->ifc_nand.nandseq_strt);
+
+	/* wait for command complete flag or timeout */
+	wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat,
+			   msecs_to_jiffies(IFC_TIMEOUT_MSECS));
+
+	if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
+		printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n");
+
+	/* Restore CSOR and CSOR_ext */
+	ifc_out32(csor, &ifc_global->csor_cs[cs].csor);
+	ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext);
+}
+
+static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
+{
+	struct fsl_ifc_ctrl *ctrl = priv->ctrl;
+	struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs;
+	struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs;
+	struct nand_chip *chip = &priv->chip;
+	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+	u32 csor;
+
+	/* Fill in fsl_ifc_mtd structure */
+	mtd->dev.parent = priv->dev;
+	nand_set_flash_node(chip, priv->dev->of_node);
+
+	/* fill in nand_chip structure */
+	/* set up function call table */
+	if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr))
+		& CSPR_PORT_SIZE_16)
+		chip->read_byte = fsl_ifc_read_byte16;
+	else
+		chip->read_byte = fsl_ifc_read_byte;
+
+	chip->write_buf = fsl_ifc_write_buf;
+	chip->read_buf = fsl_ifc_read_buf;
+	chip->select_chip = fsl_ifc_select_chip;
+	chip->cmdfunc = fsl_ifc_cmdfunc;
+	chip->waitfunc = fsl_ifc_wait;
+
+	chip->bbt_td = &bbt_main_descr;
+	chip->bbt_md = &bbt_mirror_descr;
+
+	ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr);
+
+	/* set up nand options */
+	chip->bbt_options = NAND_BBT_USE_FLASH;
+	chip->options = NAND_NO_SUBPAGE_WRITE;
+
+	if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)
+		& CSPR_PORT_SIZE_16) {
+		chip->read_byte = fsl_ifc_read_byte16;
+		chip->options |= NAND_BUSWIDTH_16;
+	} else {
+		chip->read_byte = fsl_ifc_read_byte;
+	}
+
+	chip->controller = &ifc_nand_ctrl->controller;
+	nand_set_controller_data(chip, priv);
+
+	chip->ecc.read_page = fsl_ifc_read_page;
+	chip->ecc.write_page = fsl_ifc_write_page;
+
+	csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor);
+
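+	/*
+	 * bufnum_mask follows from the page size: the controller buffer RAM
+	 * holds 16 x 512-byte, 4 x 2K, 2 x 4K or a single 8K page.
+	 */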
+	switch (csor & CSOR_NAND_PGS_MASK) {
+	case CSOR_NAND_PGS_512:
+		if (!(chip->options & NAND_BUSWIDTH_16)) {
+			/* Avoid conflict with bad block marker */
+			bbt_main_descr.offs = 0;
+			bbt_mirror_descr.offs = 0;
+		}
+
+		priv->bufnum_mask = 15;
+		break;
+
+	case CSOR_NAND_PGS_2K:
+		priv->bufnum_mask = 3;
+		break;
+
+	case CSOR_NAND_PGS_4K:
+		priv->bufnum_mask = 1;
+		break;
+
+	case CSOR_NAND_PGS_8K:
+		priv->bufnum_mask = 0;
+		break;
+
+	default:
+		dev_err(priv->dev, "bad csor %#x: bad page size\n", csor);
+		return -ENODEV;
+	}
+
+	/* Must also set CSOR_NAND_ECC_ENC_EN if DEC_EN set */
+	if (csor & CSOR_NAND_ECC_DEC_EN) {
+		chip->ecc.mode = NAND_ECC_HW;
+		mtd_set_ooblayout(mtd, &fsl_ifc_ooblayout_ops);
+
+		/* Hardware generates ECC per 512 Bytes */
+		chip->ecc.size = 512;
+		if ((csor & CSOR_NAND_ECC_MODE_MASK) == CSOR_NAND_ECC_MODE_4) {
+			chip->ecc.bytes = 8;
+			chip->ecc.strength = 4;
+		} else {
+			chip->ecc.bytes = 16;
+			chip->ecc.strength = 8;
+		}
+	} else {
+		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_HAMMING;
+	}
+
+	if (ctrl->version == FSL_IFC_VERSION_1_1_0)
+		fsl_ifc_sram_init(priv);
+
+	return 0;
+}
+
+static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv)
+{
+	struct mtd_info *mtd = nand_to_mtd(&priv->chip);
+
+	nand_release(mtd);
+
+	kfree(mtd->name);
+
+	if (priv->vbase)
+		iounmap(priv->vbase);
+
+	ifc_nand_ctrl->chips[priv->bank] = NULL;
+
+	return 0;
+}
+
+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank,
+		      phys_addr_t addr)
+{
+	u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr);
+
+	if (!(cspr & CSPR_V))
+		return 0;
+	if ((cspr & CSPR_MSEL) != CSPR_MSEL_NAND)
+		return 0;
+
+	return (cspr & CSPR_BA) == convert_ifc_address(addr);
+}
+
+static DEFINE_MUTEX(fsl_ifc_nand_mutex);
+
+static int fsl_ifc_nand_probe(struct platform_device *dev)
+{
+	struct fsl_ifc_runtime __iomem *ifc;
+	struct fsl_ifc_mtd *priv;
+	struct resource res;
+	static const char *part_probe_types[]
+		= { "cmdlinepart", "RedBoot", "ofpart", NULL };
+	int ret;
+	int bank;
+	struct device_node *node = dev->dev.of_node;
+	struct mtd_info *mtd;
+
+	if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs)
+		return -ENODEV;
+	ifc = fsl_ifc_ctrl_dev->rregs;
+
+	/* get, allocate and map the memory resource */
+	ret = of_address_to_resource(node, 0, &res);
+	if (ret) {
+		dev_err(&dev->dev, "%s: failed to get resource\n", __func__);
+		return ret;
+	}
+
+	/* find which chip select it is connected to */
+	for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) {
+		if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start))
+			break;
+	}
+
+	if (bank >= fsl_ifc_ctrl_dev->banks) {
+		dev_err(&dev->dev, "%s: address did not match any chip selects\n",
+			__func__);
+		return -ENODEV;
+	}
+
+	priv = devm_kzalloc(&dev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	mutex_lock(&fsl_ifc_nand_mutex);
+	if (!fsl_ifc_ctrl_dev->nand) {
+		ifc_nand_ctrl = kzalloc(sizeof(*ifc_nand_ctrl), GFP_KERNEL);
+		if (!ifc_nand_ctrl) {
+			mutex_unlock(&fsl_ifc_nand_mutex);
+			return -ENOMEM;
+		}
+
+		ifc_nand_ctrl->read_bytes = 0;
+		ifc_nand_ctrl->index = 0;
+		ifc_nand_ctrl->addr = NULL;
+		fsl_ifc_ctrl_dev->nand = ifc_nand_ctrl;
+
+		nand_hw_control_init(&ifc_nand_ctrl->controller);
+	} else {
+		ifc_nand_ctrl = fsl_ifc_ctrl_dev->nand;
+	}
+	mutex_unlock(&fsl_ifc_nand_mutex);
+
+	ifc_nand_ctrl->chips[bank] = priv;
+	priv->bank = bank;
+	priv->ctrl = fsl_ifc_ctrl_dev;
+	priv->dev = &dev->dev;
+
+	priv->vbase = ioremap(res.start, resource_size(&res));
+	if (!priv->vbase) {
+		dev_err(priv->dev, "%s: failed to map chip region\n", __func__);
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dev_set_drvdata(priv->dev, priv);
+
+	ifc_out32(IFC_NAND_EVTER_EN_OPC_EN |
+		  IFC_NAND_EVTER_EN_FTOER_EN |
+		  IFC_NAND_EVTER_EN_WPER_EN,
+		  &ifc->ifc_nand.nand_evter_en);
+
+	/* enable NAND Machine Interrupts */
+	ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN |
+		  IFC_NAND_EVTER_INTR_FTOERIR_EN |
+		  IFC_NAND_EVTER_INTR_WPERIR_EN,
+		  &ifc->ifc_nand.nand_evter_intr_en);
+
+	mtd = nand_to_mtd(&priv->chip);
+	mtd->name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start);
+	if (!mtd->name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = fsl_ifc_chip_init(priv);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		goto err;
+
+	ret = fsl_ifc_chip_init_tail(mtd);
+	if (ret)
+		goto err;
+
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		goto err;
+
+	/* First look for RedBoot table or partitions on the command
+	 * line, these take precedence over device tree information */
+	mtd_device_parse_register(mtd, part_probe_types, NULL, NULL, 0);
+
+	dev_info(priv->dev, "IFC NAND device at 0x%llx, bank %d\n",
+		 (unsigned long long)res.start, priv->bank);
+	return 0;
+
+err:
+	fsl_ifc_chip_remove(priv);
+	return ret;
+}
+
+static int fsl_ifc_nand_remove(struct platform_device *dev)
+{
+	struct fsl_ifc_mtd *priv = dev_get_drvdata(&dev->dev);
+
+	fsl_ifc_chip_remove(priv);
+
+	mutex_lock(&fsl_ifc_nand_mutex);
+	ifc_nand_ctrl->counter--;
+	if (!ifc_nand_ctrl->counter) {
+		fsl_ifc_ctrl_dev->nand = NULL;
+		kfree(ifc_nand_ctrl);
+	}
+	mutex_unlock(&fsl_ifc_nand_mutex);
+
+	return 0;
+}
+
+static const struct of_device_id fsl_ifc_nand_match[] = {
+	{
+		.compatible = "fsl,ifc-nand",
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match);
+
+static struct platform_driver fsl_ifc_nand_driver = {
+	.driver = {
+		.name	= "fsl,ifc-nand",
+		.of_match_table = fsl_ifc_nand_match,
+	},
+	.probe       = fsl_ifc_nand_probe,
+	.remove      = fsl_ifc_nand_remove,
+};
+
+module_platform_driver(fsl_ifc_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale");
+MODULE_DESCRIPTION("Freescale Integrated Flash Controller MTD NAND driver");
diff --git a/drivers/mtd/nand/rawnand/fsl_upm.c b/drivers/mtd/nand/rawnand/fsl_upm.c
new file mode 100644
index 000000000000..a88e2cf66e0f
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/fsl_upm.c
@@ -0,0 +1,363 @@ 
+/*
+ * Freescale UPM NAND driver.
+ *
+ * Copyright © 2007-2008  MontaVista Software, Inc.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <asm/fsl_lbc.h>
+
+#define FSL_UPM_WAIT_RUN_PATTERN  0x1
+#define FSL_UPM_WAIT_WRITE_BYTE   0x2
+#define FSL_UPM_WAIT_WRITE_BUFFER 0x4
+
+struct fsl_upm_nand {
+	struct device *dev;
+	struct nand_chip chip;
+	int last_ctrl;
+	struct mtd_partition *parts;
+	struct fsl_upm upm;
+	uint8_t upm_addr_offset;
+	uint8_t upm_cmd_offset;
+	void __iomem *io_base;
+	int rnb_gpio[NAND_MAX_CHIPS];
+	uint32_t mchip_offsets[NAND_MAX_CHIPS];
+	uint32_t mchip_count;
+	uint32_t mchip_number;
+	int chip_delay;
+	uint32_t wait_flags;
+};
+
+static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo)
+{
+	return container_of(mtd_to_nand(mtdinfo), struct fsl_upm_nand,
+			    chip);
+}
+
+static int fun_chip_ready(struct mtd_info *mtd)
+{
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+	if (gpio_get_value(fun->rnb_gpio[fun->mchip_number]))
+		return 1;
+
+	dev_vdbg(fun->dev, "busy\n");
+	return 0;
+}
+
+static void fun_wait_rnb(struct fsl_upm_nand *fun)
+{
+	if (fun->rnb_gpio[fun->mchip_number] >= 0) {
+		struct mtd_info *mtd = nand_to_mtd(&fun->chip);
+		int cnt = 1000000;
+
+		while (--cnt && !fun_chip_ready(mtd))
+			cpu_relax();
+		if (!cnt)
+			dev_err(fun->dev, "tired waiting for RNB\n");
+	} else {
+		ndelay(100);
+	}
+}
+
+static void fun_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+	u32 mar;
+
+	if (!(ctrl & fun->last_ctrl)) {
+		fsl_upm_end_pattern(&fun->upm);
+
+		if (cmd == NAND_CMD_NONE)
+			return;
+
+		fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE);
+	}
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		if (ctrl & NAND_ALE)
+			fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset);
+		else if (ctrl & NAND_CLE)
+			fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset);
+	}
+
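+	/* place cmd in the top upm.width bits of MAR and OR in the chip offset */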
+	mar = (cmd << (32 - fun->upm.width)) |
+		fun->mchip_offsets[fun->mchip_number];
+	fsl_upm_run_pattern(&fun->upm, chip->IO_ADDR_R, mar);
+
+	if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN)
+		fun_wait_rnb(fun);
+}
+
+static void fun_select_chip(struct mtd_info *mtd, int mchip_nr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+	if (mchip_nr == -1) {
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+	} else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) {
+		fun->mchip_number = mchip_nr;
+		chip->IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr];
+		chip->IO_ADDR_W = chip->IO_ADDR_R;
+	} else {
+		BUG();
+	}
+}
+
+static uint8_t fun_read_byte(struct mtd_info *mtd)
+{
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+
+	return in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+	int i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = in_8(fun->chip.IO_ADDR_R);
+}
+
+static void fun_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct fsl_upm_nand *fun = to_fsl_upm_nand(mtd);
+	int i;
+
+	for (i = 0; i < len; i++) {
+		out_8(fun->chip.IO_ADDR_W, buf[i]);
+		if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE)
+			fun_wait_rnb(fun);
+	}
+	if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER)
+		fun_wait_rnb(fun);
+}
+
+static int fun_chip_init(struct fsl_upm_nand *fun,
+			 const struct device_node *upm_np,
+			 const struct resource *io_res)
+{
+	struct mtd_info *mtd = nand_to_mtd(&fun->chip);
+	int ret;
+	struct device_node *flash_np;
+
+	fun->chip.IO_ADDR_R = fun->io_base;
+	fun->chip.IO_ADDR_W = fun->io_base;
+	fun->chip.cmd_ctrl = fun_cmd_ctrl;
+	fun->chip.chip_delay = fun->chip_delay;
+	fun->chip.read_byte = fun_read_byte;
+	fun->chip.read_buf = fun_read_buf;
+	fun->chip.write_buf = fun_write_buf;
+	fun->chip.ecc.mode = NAND_ECC_SOFT;
+	fun->chip.ecc.algo = NAND_ECC_HAMMING;
+	if (fun->mchip_count > 1)
+		fun->chip.select_chip = fun_select_chip;
+
+	if (fun->rnb_gpio[0] >= 0)
+		fun->chip.dev_ready = fun_chip_ready;
+
+	mtd->dev.parent = fun->dev;
+
+	flash_np = of_get_next_child(upm_np, NULL);
+	if (!flash_np)
+		return -ENODEV;
+
+	nand_set_flash_node(&fun->chip, flash_np);
+	mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%s", (u64)io_res->start,
+			      flash_np->name);
+	if (!mtd->name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = nand_scan(mtd, fun->mchip_count);
+	if (ret)
+		goto err;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+err:
+	of_node_put(flash_np);
+	if (ret)
+		kfree(mtd->name);
+	return ret;
+}
+
+static int fun_probe(struct platform_device *ofdev)
+{
+	struct fsl_upm_nand *fun;
+	struct resource io_res;
+	const __be32 *prop;
+	int rnb_gpio;
+	int ret;
+	int size;
+	int i;
+
+	fun = kzalloc(sizeof(*fun), GFP_KERNEL);
+	if (!fun)
+		return -ENOMEM;
+
+	ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res);
+	if (ret) {
+		dev_err(&ofdev->dev, "can't get IO base\n");
+		goto err1;
+	}
+
+	ret = fsl_upm_find(io_res.start, &fun->upm);
+	if (ret) {
+		dev_err(&ofdev->dev, "can't find UPM\n");
+		goto err1;
+	}
+
+	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset",
+			       &size);
+	if (!prop || size != sizeof(uint32_t)) {
+		dev_err(&ofdev->dev, "can't get UPM address offset\n");
+		ret = -EINVAL;
+		goto err1;
+	}
+	fun->upm_addr_offset = *prop;
+
+	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size);
+	if (!prop || size != sizeof(uint32_t)) {
+		dev_err(&ofdev->dev, "can't get UPM command offset\n");
+		ret = -EINVAL;
+		goto err1;
+	}
+	fun->upm_cmd_offset = *prop;
+
+	prop = of_get_property(ofdev->dev.of_node,
+			       "fsl,upm-addr-line-cs-offsets", &size);
+	if (prop && (size / sizeof(uint32_t)) > 0) {
+		fun->mchip_count = size / sizeof(uint32_t);
+		if (fun->mchip_count >= NAND_MAX_CHIPS) {
+			dev_err(&ofdev->dev, "too many chips\n");
+			ret = -EINVAL;
+			goto err1;
+		}
+		for (i = 0; i < fun->mchip_count; i++)
+			fun->mchip_offsets[i] = be32_to_cpu(prop[i]);
+	} else {
+		fun->mchip_count = 1;
+	}
+
+	for (i = 0; i < fun->mchip_count; i++) {
+		fun->rnb_gpio[i] = -1;
+		rnb_gpio = of_get_gpio(ofdev->dev.of_node, i);
+		if (rnb_gpio >= 0) {
+			ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev));
+			if (ret) {
+				dev_err(&ofdev->dev,
+					"can't request RNB gpio #%d\n", i);
+				goto err2;
+			}
+			gpio_direction_input(rnb_gpio);
+			fun->rnb_gpio[i] = rnb_gpio;
+		} else if (rnb_gpio == -EINVAL) {
+			dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i);
+			ret = -EINVAL;
+			goto err2;
+		}
+	}
+
+	prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL);
+	if (prop)
+		fun->chip_delay = be32_to_cpup(prop);
+	else
+		fun->chip_delay = 50;
+
+	prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size);
+	if (prop && size == sizeof(uint32_t))
+		fun->wait_flags = be32_to_cpup(prop);
+	else
+		fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN |
+				  FSL_UPM_WAIT_WRITE_BYTE;
+
+	fun->io_base = devm_ioremap_nocache(&ofdev->dev, io_res.start,
+					    resource_size(&io_res));
+	if (!fun->io_base) {
+		ret = -ENOMEM;
+		goto err2;
+	}
+
+	fun->dev = &ofdev->dev;
+	fun->last_ctrl = NAND_CLE;
+
+	ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res);
+	if (ret)
+		goto err2;
+
+	dev_set_drvdata(&ofdev->dev, fun);
+
+	return 0;
+err2:
+	for (i = 0; i < fun->mchip_count; i++) {
+		if (fun->rnb_gpio[i] < 0)
+			break;
+		gpio_free(fun->rnb_gpio[i]);
+	}
+err1:
+	kfree(fun);
+
+	return ret;
+}
+
+static int fun_remove(struct platform_device *ofdev)
+{
+	struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev);
+	struct mtd_info *mtd = nand_to_mtd(&fun->chip);
+	int i;
+
+	nand_release(mtd);
+	kfree(mtd->name);
+
+	for (i = 0; i < fun->mchip_count; i++) {
+		if (fun->rnb_gpio[i] < 0)
+			break;
+		gpio_free(fun->rnb_gpio[i]);
+	}
+
+	kfree(fun);
+
+	return 0;
+}
+
+static const struct of_device_id of_fun_match[] = {
+	{ .compatible = "fsl,upm-nand" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_fun_match);
+
+static struct platform_driver of_fun_driver = {
+	.driver = {
+		.name = "fsl,upm-nand",
+		.of_match_table = of_fun_match,
+	},
+	.probe		= fun_probe,
+	.remove		= fun_remove,
+};
+
+module_platform_driver(of_fun_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
+MODULE_DESCRIPTION("Driver for NAND chips working through Freescale "
+		   "LocalBus User-Programmable Machine");
diff --git a/drivers/mtd/nand/rawnand/fsmc_nand.c b/drivers/mtd/nand/rawnand/fsmc_nand.c
new file mode 100644
index 000000000000..5c08694aa153
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/fsmc_nand.c
@@ -0,0 +1,1100 @@ 
+/*
+ * drivers/mtd/nand/fsmc_nand.c
+ *
+ * ST Microelectronics
+ * Flexible Static Memory Controller (FSMC)
+ * Driver for NAND portions
+ *
+ * Copyright © 2010 ST Microelectronics
+ * Vipin Kumar <vipin.kumar@st.com>
+ * Ashish Priyadarshi
+ *
+ * Based on drivers/mtd/nand/nomadik_nand.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/resource.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mtd/fsmc.h>
+#include <linux/amba/bus.h>
+#include <mtd/mtd-abi.h>
+
+static int fsmc_ecc1_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 2;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static int fsmc_ecc1_ooblayout_free(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 8;
+
+	if (section < chip->ecc.steps - 1)
+		oobregion->length = 8;
+	else
+		oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc1_ooblayout_ops = {
+	.ecc = fsmc_ecc1_ooblayout_ecc,
+	.free = fsmc_ecc1_ooblayout_free,
+};
+
+/*
+ * ECC placement definitions in oobfree type format.
+ * There are 13 bytes of ECC for every 512-byte block, which must be read
+ * consecutively, immediately after the 512-byte data block, for the hardware
+ * to generate the error bit offsets within the 512 bytes of data.
+ */
+static int fsmc_ecc4_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->length = chip->ecc.bytes;
+
+	if (!section && mtd->writesize <= 512)
+		oobregion->offset = 0;
+	else
+		oobregion->offset = (section * 16) + 2;
+
+	return 0;
+}
+
+static int fsmc_ecc4_ooblayout_free(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 15;
+
+	if (section < chip->ecc.steps - 1)
+		oobregion->length = 3;
+	else
+		oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops fsmc_ecc4_ooblayout_ops = {
+	.ecc = fsmc_ecc4_ooblayout_ecc,
+	.free = fsmc_ecc4_ooblayout_free,
+};
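+
+/*
+ * Worked example (illustrative): on a 2048-byte page with a 64-byte OOB and
+ * 512-byte ECC steps, chip->ecc.steps = 4 and chip->ecc.bytes = 13, so the
+ * callbacks above describe
+ *   ECC:  bytes 2-14, 18-30, 34-46 and 50-62 (13 bytes per step),
+ *   free: bytes 15-17, 31-33, 47-49 and 63.
+ * Bytes 0-1 are covered by neither region (they typically hold the bad block
+ * marker).
+ */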
+
+/**
+ * struct fsmc_nand_data - structure for FSMC NAND device state
+ *
+ * @pid:		Part ID in the AMBA PrimeCell format
+ * @nand:		Chip related info for a NAND flash.
+ * @partitions:		Partition info for a NAND Flash.
+ * @nr_partitions:	Total number of partitions of the NAND flash.
+ *
+ * @bank:		Bank number for probed device.
+ * @dev:		Parent device.
+ * @mode:		Data access mode (word access or DMA).
+ * @clk:		Clock structure for FSMC.
+ *
+ * @read_dma_chan:	DMA channel for read access
+ * @write_dma_chan:	DMA channel for write access to NAND
+ * @dma_access_complete: Completion structure
+ *
+ * @dev_timings:	NAND timings provided by the platform/DT.
+ *
+ * @data_pa:		NAND Physical port for Data.
+ * @data_va:		NAND port for Data.
+ * @cmd_va:		NAND port for Command.
+ * @addr_va:		NAND port for Address.
+ * @regs_va:		FSMC regs base address.
+ *
+ * @select_chip:	Platform callback used to select a bank/chip.
+ */
+struct fsmc_nand_data {
+	u32			pid;
+	struct nand_chip	nand;
+	struct mtd_partition	*partitions;
+	unsigned int		nr_partitions;
+
+	unsigned int		bank;
+	struct device		*dev;
+	enum access_mode	mode;
+	struct clk		*clk;
+
+	/* DMA related objects */
+	struct dma_chan		*read_dma_chan;
+	struct dma_chan		*write_dma_chan;
+	struct completion	dma_access_complete;
+
+	struct fsmc_nand_timings *dev_timings;
+
+	dma_addr_t		data_pa;
+	void __iomem		*data_va;
+	void __iomem		*cmd_va;
+	void __iomem		*addr_va;
+	void __iomem		*regs_va;
+
+	void			(*select_chip)(uint32_t bank, uint32_t busw);
+};
+
+static inline struct fsmc_nand_data *mtd_to_fsmc(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct fsmc_nand_data, nand);
+}
+
+/* Assert CS signal based on chipnr */
+static void fsmc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsmc_nand_data *host;
+
+	host = mtd_to_fsmc(mtd);
+
+	switch (chipnr) {
+	case -1:
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+		break;
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+		if (host->select_chip)
+			host->select_chip(chipnr,
+					chip->options & NAND_BUSWIDTH_16);
+		break;
+
+	default:
+		dev_err(host->dev, "unsupported chip-select %d\n", chipnr);
+	}
+}
+
+/*
+ * fsmc_cmd_ctrl - for facilitating hardware access
+ * This routine allows hardware-specific access to the control lines (ALE, CLE)
+ */
+static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+	void __iomem *regs = host->regs_va;
+	unsigned int bank = host->bank;
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		u32 pc;
+
+		if (ctrl & NAND_CLE) {
+			this->IO_ADDR_R = host->cmd_va;
+			this->IO_ADDR_W = host->cmd_va;
+		} else if (ctrl & NAND_ALE) {
+			this->IO_ADDR_R = host->addr_va;
+			this->IO_ADDR_W = host->addr_va;
+		} else {
+			this->IO_ADDR_R = host->data_va;
+			this->IO_ADDR_W = host->data_va;
+		}
+
+		pc = readl(FSMC_NAND_REG(regs, bank, PC));
+		if (ctrl & NAND_NCE)
+			pc |= FSMC_ENABLE;
+		else
+			pc &= ~FSMC_ENABLE;
+		writel_relaxed(pc, FSMC_NAND_REG(regs, bank, PC));
+	}
+
+	mb();
+
+	if (cmd != NAND_CMD_NONE)
+		writeb_relaxed(cmd, this->IO_ADDR_W);
+}
+
+/*
+ * fsmc_nand_setup - FSMC (Flexible Static Memory Controller) init routine
+ *
+ * This routine initializes timing parameters related to NAND memory access in
+ * FSMC registers
+ */
+static void fsmc_nand_setup(void __iomem *regs, uint32_t bank,
+			   uint32_t busw, struct fsmc_nand_timings *timings)
+{
+	uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON;
+	uint32_t tclr, tar, thiz, thold, twait, tset;
+	struct fsmc_nand_timings *tims;
+	struct fsmc_nand_timings default_timings = {
+		.tclr	= FSMC_TCLR_1,
+		.tar	= FSMC_TAR_1,
+		.thiz	= FSMC_THIZ_1,
+		.thold	= FSMC_THOLD_4,
+		.twait	= FSMC_TWAIT_6,
+		.tset	= FSMC_TSET_0,
+	};
+
+	if (timings)
+		tims = timings;
+	else
+		tims = &default_timings;
+
+	tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT;
+	tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT;
+	thiz = (tims->thiz & FSMC_THIZ_MASK) << FSMC_THIZ_SHIFT;
+	thold = (tims->thold & FSMC_THOLD_MASK) << FSMC_THOLD_SHIFT;
+	twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT;
+	tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT;
+
+	if (busw)
+		writel_relaxed(value | FSMC_DEVWID_16,
+				FSMC_NAND_REG(regs, bank, PC));
+	else
+		writel_relaxed(value | FSMC_DEVWID_8,
+				FSMC_NAND_REG(regs, bank, PC));
+
+	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | tclr | tar,
+			FSMC_NAND_REG(regs, bank, PC));
+	writel_relaxed(thiz | thold | twait | tset,
+			FSMC_NAND_REG(regs, bank, COMM));
+	writel_relaxed(thiz | thold | twait | tset,
+			FSMC_NAND_REG(regs, bank, ATTRIB));
+}
+
+/*
+ * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers
+ */
+static void fsmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+	void __iomem *regs = host->regs_va;
+	uint32_t bank = host->bank;
+
+	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCPLEN_256,
+			FSMC_NAND_REG(regs, bank, PC));
+	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) & ~FSMC_ECCEN,
+			FSMC_NAND_REG(regs, bank, PC));
+	writel_relaxed(readl(FSMC_NAND_REG(regs, bank, PC)) | FSMC_ECCEN,
+			FSMC_NAND_REG(regs, bank, PC));
+}
+
+/*
+ * fsmc_read_hwecc_ecc4 - Hardware ECC calculator for the ecc4 option supported
+ * by the FSMC. The ECC is 13 bytes for 512 bytes of data (supports error
+ * correction of up to 8 bits)
+ */
+static int fsmc_read_hwecc_ecc4(struct mtd_info *mtd, const uint8_t *data,
+				uint8_t *ecc)
+{
+	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+	void __iomem *regs = host->regs_va;
+	uint32_t bank = host->bank;
+	uint32_t ecc_tmp;
+	unsigned long deadline = jiffies + FSMC_BUSY_WAIT_TIMEOUT;
+
+	do {
+		if (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) & FSMC_CODE_RDY)
+			break;
+		else
+			cond_resched();
+	} while (!time_after_eq(jiffies, deadline));
+
+	if (time_after_eq(jiffies, deadline)) {
+		dev_err(host->dev, "calculate ecc timed out\n");
+		return -ETIMEDOUT;
+	}
+
+	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+	ecc[0] = (uint8_t) (ecc_tmp >> 0);
+	ecc[1] = (uint8_t) (ecc_tmp >> 8);
+	ecc[2] = (uint8_t) (ecc_tmp >> 16);
+	ecc[3] = (uint8_t) (ecc_tmp >> 24);
+
+	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+	ecc[4] = (uint8_t) (ecc_tmp >> 0);
+	ecc[5] = (uint8_t) (ecc_tmp >> 8);
+	ecc[6] = (uint8_t) (ecc_tmp >> 16);
+	ecc[7] = (uint8_t) (ecc_tmp >> 24);
+
+	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+	ecc[8] = (uint8_t) (ecc_tmp >> 0);
+	ecc[9] = (uint8_t) (ecc_tmp >> 8);
+	ecc[10] = (uint8_t) (ecc_tmp >> 16);
+	ecc[11] = (uint8_t) (ecc_tmp >> 24);
+
+	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
+	ecc[12] = (uint8_t) (ecc_tmp >> 16);
+
+	return 0;
+}
+
+/*
+ * fsmc_read_hwecc_ecc1 - Hardware ECC calculator for the ecc1 option supported
+ * by the FSMC. The ECC is 3 bytes for 512 bytes of data (supports error
+ * correction of up to 1 bit)
+ */
+static int fsmc_read_hwecc_ecc1(struct mtd_info *mtd, const uint8_t *data,
+				uint8_t *ecc)
+{
+	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+	void __iomem *regs = host->regs_va;
+	uint32_t bank = host->bank;
+	uint32_t ecc_tmp;
+
+	ecc_tmp = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+	ecc[0] = (uint8_t) (ecc_tmp >> 0);
+	ecc[1] = (uint8_t) (ecc_tmp >> 8);
+	ecc[2] = (uint8_t) (ecc_tmp >> 16);
+
+	return 0;
+}
+
+/* Count the number of bits written as 0 in buff, up to a maximum of max_bits */
+static int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+	int k, written_bits = 0;
+
+	for (k = 0; k < size; k++) {
+		written_bits += hweight8(~buff[k]);
+		if (written_bits > max_bits)
+			break;
+	}
+
+	return written_bits;
+}
+
+static void dma_complete(void *param)
+{
+	struct fsmc_nand_data *host = param;
+
+	complete(&host->dma_access_complete);
+}
+
+static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
+		enum dma_data_direction direction)
+{
+	struct dma_chan *chan;
+	struct dma_device *dma_dev;
+	struct dma_async_tx_descriptor *tx;
+	dma_addr_t dma_dst, dma_src, dma_addr;
+	dma_cookie_t cookie;
+	unsigned long flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	int ret;
+	unsigned long time_left;
+
+	if (direction == DMA_TO_DEVICE)
+		chan = host->write_dma_chan;
+	else if (direction == DMA_FROM_DEVICE)
+		chan = host->read_dma_chan;
+	else
+		return -EINVAL;
+
+	dma_dev = chan->device;
+	dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
+
+	if (direction == DMA_TO_DEVICE) {
+		dma_src = dma_addr;
+		dma_dst = host->data_pa;
+	} else {
+		dma_src = host->data_pa;
+		dma_dst = dma_addr;
+	}
+
+	tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
+			len, flags);
+	if (!tx) {
+		dev_err(host->dev, "device_prep_dma_memcpy error\n");
+		ret = -EIO;
+		goto unmap_dma;
+	}
+
+	tx->callback = dma_complete;
+	tx->callback_param = host;
+	cookie = tx->tx_submit(tx);
+
+	ret = dma_submit_error(cookie);
+	if (ret) {
+		dev_err(host->dev, "dma_submit_error %d\n", cookie);
+		goto unmap_dma;
+	}
+
+	dma_async_issue_pending(chan);
+
+	time_left =
+	wait_for_completion_timeout(&host->dma_access_complete,
+				msecs_to_jiffies(3000));
+	if (time_left == 0) {
+		dmaengine_terminate_all(chan);
+		dev_err(host->dev, "wait_for_completion_timeout\n");
+		ret = -ETIMEDOUT;
+		goto unmap_dma;
+	}
+
+	ret = 0;
+
+unmap_dma:
+	dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+	return ret;
+}
+
+/*
+ * fsmc_write_buf - write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ */
+static void fsmc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+			IS_ALIGNED(len, sizeof(uint32_t))) {
+		uint32_t *p = (uint32_t *)buf;
+		len = len >> 2;
+		for (i = 0; i < len; i++)
+			writel_relaxed(p[i], chip->IO_ADDR_W);
+	} else {
+		for (i = 0; i < len; i++)
+			writeb_relaxed(buf[i], chip->IO_ADDR_W);
+	}
+}
+
+/*
+ * fsmc_read_buf - read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ */
+static void fsmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (IS_ALIGNED((uint32_t)buf, sizeof(uint32_t)) &&
+			IS_ALIGNED(len, sizeof(uint32_t))) {
+		uint32_t *p = (uint32_t *)buf;
+		len = len >> 2;
+		for (i = 0; i < len; i++)
+			p[i] = readl_relaxed(chip->IO_ADDR_R);
+	} else {
+		for (i = 0; i < len; i++)
+			buf[i] = readb_relaxed(chip->IO_ADDR_R);
+	}
+}
+
+/*
+ * fsmc_read_buf_dma - read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ */
+static void fsmc_read_buf_dma(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct fsmc_nand_data *host  = mtd_to_fsmc(mtd);
+
+	dma_xfer(host, buf, len, DMA_FROM_DEVICE);
+}
+
+/*
+ * fsmc_write_buf_dma - write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ */
+static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
+		int len)
+{
+	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+
+	dma_xfer(host, (void *)buf, len, DMA_TO_DEVICE);
+}
+
+/*
+ * fsmc_read_page_hwecc
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @buf:	buffer to store read data
+ * @oob_required:	caller expects OOB data read to chip->oob_poi
+ * @page:	page number to read
+ *
+ * This routine is needed for fsmc version 8, as reading from the NAND chip has
+ * to be performed in a strict sequence:
+ * data (512 bytes) -> ecc (13 bytes)
+ * After this read, the fsmc hardware generates and reports the erroneous data
+ * bits (up to a maximum of 8 bits)
+ */
+static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				 uint8_t *buf, int oob_required, int page)
+{
+	int i, j, s, stat, eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *p = buf;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	uint8_t *ecc_code = chip->buffers->ecccode;
+	int off, len, group = 0;
+	/*
+	 * ecc_oob is intentionally declared as uint16_t. On 16-bit devices we
+	 * end up reading 14 bytes (7 words) from the OOB area; the local array
+	 * keeps the buffer word aligned.
+	 */
+	uint16_t ecc_oob[7];
+	uint8_t *oob = (uint8_t *)&ecc_oob[0];
+	unsigned int max_bitflips = 0;
+
+	for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
+		chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
+		chip->ecc.hwctl(mtd, NAND_ECC_READ);
+		chip->read_buf(mtd, p, eccsize);
+
+		for (j = 0; j < eccbytes;) {
+			struct mtd_oob_region oobregion;
+			int ret;
+
+			ret = mtd_ooblayout_ecc(mtd, group++, &oobregion);
+			if (ret)
+				return ret;
+
+			off = oobregion.offset;
+			len = oobregion.length;
+
+			/*
+			 * len is intentionally rounded up to a multiple of 2
+			 * so that at least 13 bytes are read even on 16-bit
+			 * NAND devices
+			 */
+			if (chip->options & NAND_BUSWIDTH_16)
+				len = roundup(len, 2);
+
+			chip->cmdfunc(mtd, NAND_CMD_READOOB, off, page);
+			chip->read_buf(mtd, oob + j, len);
+			j += len;
+		}
+
+		memcpy(&ecc_code[i], oob, chip->ecc.bytes);
+		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+
+	return max_bitflips;
+}
+
+/*
+ * fsmc_bch8_correct_data
+ * @mtd:	mtd info structure
+ * @dat:	buffer of read data
+ * @read_ecc:	ecc read from device spare area
+ * @calc_ecc:	ecc calculated from read data
+ *
+ * calc_ecc is a 104-bit value containing up to 8 error offsets of
+ * 13 bits each within the 512 bytes of read data.
+ */
+static int fsmc_bch8_correct_data(struct mtd_info *mtd, uint8_t *dat,
+			     uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct fsmc_nand_data *host = mtd_to_fsmc(mtd);
+	void __iomem *regs = host->regs_va;
+	unsigned int bank = host->bank;
+	uint32_t err_idx[8];
+	uint32_t num_err, i;
+	uint32_t ecc1, ecc2, ecc3, ecc4;
+
+	num_err = (readl_relaxed(FSMC_NAND_REG(regs, bank, STS)) >> 10) & 0xF;
+
+	/* no bit flipping */
+	if (likely(num_err == 0))
+		return 0;
+
+	/* too many errors */
+	if (unlikely(num_err > 8)) {
+		/*
+		 * This is a temporary erase check. Reading a freshly erased
+		 * page would result in an ECC error, because the OOB data is
+		 * also erased to 0xFF and the ECC calculated for all-0xFF data
+		 * is not FF..FF.
+		 * This workaround skips correction when the data is FF..FF.
+		 *
+		 * Logic:
+		 * For every page, the bits written as 0 are counted until
+		 * their number exceeds 8 (the maximum correction capability
+		 * of the FSMC for each 512 + 13 bytes).
+		 */
+
+		int bits_ecc = count_written_bits(read_ecc, chip->ecc.bytes, 8);
+		int bits_data = count_written_bits(dat, chip->ecc.size, 8);
+
+		if ((bits_ecc + bits_data) <= 8) {
+			if (bits_data)
+				memset(dat, 0xff, chip->ecc.size);
+			return bits_data;
+		}
+
+		return -EBADMSG;
+	}
+
+	/*
+	 * ------------------- calc_ecc[] bit wise -----------|--13 bits--|
+	 * |---idx[7]--|--.....-----|---idx[2]--||---idx[1]--||---idx[0]--|
+	 *
+	 * calc_ecc is a 104-bit value containing up to 8 error offsets of
+	 * 13 bits each. The hardware ECC registers are read below and the
+	 * error offset indexes are extracted into the err_idx array.
+	 */
+	ecc1 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC1));
+	ecc2 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC2));
+	ecc3 = readl_relaxed(FSMC_NAND_REG(regs, bank, ECC3));
+	ecc4 = readl_relaxed(FSMC_NAND_REG(regs, bank, STS));
+
+	err_idx[0] = (ecc1 >> 0) & 0x1FFF;
+	err_idx[1] = (ecc1 >> 13) & 0x1FFF;
+	err_idx[2] = (((ecc2 >> 0) & 0x7F) << 6) | ((ecc1 >> 26) & 0x3F);
+	err_idx[3] = (ecc2 >> 7) & 0x1FFF;
+	err_idx[4] = (((ecc3 >> 0) & 0x1) << 12) | ((ecc2 >> 20) & 0xFFF);
+	err_idx[5] = (ecc3 >> 1) & 0x1FFF;
+	err_idx[6] = (ecc3 >> 14) & 0x1FFF;
+	err_idx[7] = (((ecc4 >> 16) & 0xFF) << 5) | ((ecc3 >> 27) & 0x1F);
+
+	i = 0;
+	while (num_err--) {
+		change_bit(0, (unsigned long *)&err_idx[i]);
+		change_bit(1, (unsigned long *)&err_idx[i]);
+
+		if (err_idx[i] < chip->ecc.size * 8) {
+			change_bit(err_idx[i], (unsigned long *)dat);
+			i++;
+		}
+	}
+	return i;
+}
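+
+/*
+ * Register packing, for reference: the eight 13-bit error indexes are packed
+ * back to back across ECC1, ECC2, ECC3 and STS[23:16], e.g.
+ *   err_idx[0] = ECC1[12:0]
+ *   err_idx[2] = {ECC2[6:0], ECC1[31:26]}
+ *   err_idx[7] = {STS[23:16], ECC3[31:27]}
+ * Before use, each index gets its two least significant bits toggled (an XOR
+ * with 0x3), and only indexes that fall inside the 512-byte data area
+ * (index < chip->ecc.size * 8) are corrected.
+ */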
+
+static bool filter(struct dma_chan *chan, void *slave)
+{
+	chan->private = slave;
+	return true;
+}
+
+#ifdef CONFIG_OF
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+				     struct device_node *np)
+{
+	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	u32 val;
+	int ret;
+
+	/* Set default NAND width to 8 bits */
+	pdata->width = 8;
+	if (!of_property_read_u32(np, "bank-width", &val)) {
+		if (val == 2) {
+			pdata->width = 16;
+		} else if (val != 1) {
+			dev_err(&pdev->dev, "invalid bank-width %u\n", val);
+			return -EINVAL;
+		}
+	}
+	if (of_get_property(np, "nand-skip-bbtscan", NULL))
+		pdata->options = NAND_SKIP_BBTSCAN;
+
+	pdata->nand_timings = devm_kzalloc(&pdev->dev,
+				sizeof(*pdata->nand_timings), GFP_KERNEL);
+	if (!pdata->nand_timings)
+		return -ENOMEM;
+	ret = of_property_read_u8_array(np, "timings", (u8 *)pdata->nand_timings,
+						sizeof(*pdata->nand_timings));
+	if (ret) {
+		dev_info(&pdev->dev, "No timings specified in the device tree, using default timings\n");
+		pdata->nand_timings = NULL;
+	}
+
+	/* Set default NAND bank to 0 */
+	pdata->bank = 0;
+	if (!of_property_read_u32(np, "bank", &val)) {
+		if (val > 3) {
+			dev_err(&pdev->dev, "invalid bank %u\n", val);
+			return -EINVAL;
+		}
+		pdata->bank = val;
+	}
+	return 0;
+}
+#else
+static int fsmc_nand_probe_config_dt(struct platform_device *pdev,
+				     struct device_node *np)
+{
+	return -ENOSYS;
+}
+#endif
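+
+/*
+ * Illustrative device tree fragment for the properties parsed by
+ * fsmc_nand_probe_config_dt() and the named resources used in
+ * fsmc_nand_probe() below (all addresses and timing values are placeholders;
+ * the timing byte order is assumed to follow the tclr, tar, thiz, thold,
+ * twait, tset fields used by fsmc_nand_setup()):
+ *
+ *	nand@80000000 {
+ *		compatible = "st,spear600-fsmc-nand";
+ *		reg = <0xd1800000 0x1000>, <0x80000000 0x0010>,
+ *		      <0x80020000 0x0010>, <0x80010000 0x0010>;
+ *		reg-names = "fsmc_regs", "nand_data", "nand_addr", "nand_cmd";
+ *		bank-width = <1>;
+ *		bank = <0>;
+ *		nand-skip-bbtscan;
+ *		timings = /bits/ 8 <2 2 1 4 6 0>;
+ *	};
+ */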
+
+/*
+ * fsmc_nand_probe - Probe function
+ * @pdev:       platform device structure
+ */
+static int __init fsmc_nand_probe(struct platform_device *pdev)
+{
+	struct fsmc_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	struct device_node __maybe_unused *np = pdev->dev.of_node;
+	struct fsmc_nand_data *host;
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	struct resource *res;
+	dma_cap_mask_t mask;
+	int ret = 0;
+	u32 pid;
+	int i;
+
+	if (np) {
+		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+		if (!pdata)
+			return -ENOMEM;
+		pdev->dev.platform_data = pdata;
+		ret = fsmc_nand_probe_config_dt(pdev, np);
+		if (ret) {
+			dev_err(&pdev->dev, "invalid device tree configuration\n");
+			return -ENODEV;
+		}
+	}
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform data is NULL\n");
+		return -EINVAL;
+	}
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data");
+	host->data_va = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(host->data_va))
+		return PTR_ERR(host->data_va);
+
+	host->data_pa = (dma_addr_t)res->start;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_addr");
+	host->addr_va = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(host->addr_va))
+		return PTR_ERR(host->addr_va);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_cmd");
+	host->cmd_va = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(host->cmd_va))
+		return PTR_ERR(host->cmd_va);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fsmc_regs");
+	host->regs_va = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(host->regs_va))
+		return PTR_ERR(host->regs_va);
+
+	host->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		dev_err(&pdev->dev, "failed to fetch block clock\n");
+		return PTR_ERR(host->clk);
+	}
+
+	ret = clk_prepare_enable(host->clk);
+	if (ret)
+		goto err_clk_prepare_enable;
+
+	/*
+	 * This device ID is actually a common AMBA ID as used on the
+	 * AMBA PrimeCell bus. However it is not a PrimeCell.
+	 */
+	for (pid = 0, i = 0; i < 4; i++)
+		pid |= (readl(host->regs_va + resource_size(res) - 0x20 + 4 * i) & 255) << (i * 8);
+	host->pid = pid;
+	dev_info(&pdev->dev, "FSMC device partno %03x, manufacturer %02x, "
+		 "revision %02x, config %02x\n",
+		 AMBA_PART_BITS(pid), AMBA_MANF_BITS(pid),
+		 AMBA_REV_BITS(pid), AMBA_CONFIG_BITS(pid));
+
+	host->bank = pdata->bank;
+	host->select_chip = pdata->select_bank;
+	host->partitions = pdata->partitions;
+	host->nr_partitions = pdata->nr_partitions;
+	host->dev = &pdev->dev;
+	host->dev_timings = pdata->nand_timings;
+	host->mode = pdata->mode;
+
+	if (host->mode == USE_DMA_ACCESS)
+		init_completion(&host->dma_access_complete);
+
+	/* Link all private pointers */
+	mtd = nand_to_mtd(&host->nand);
+	nand = &host->nand;
+	nand_set_controller_data(nand, host);
+	nand_set_flash_node(nand, np);
+
+	mtd->dev.parent = &pdev->dev;
+	nand->IO_ADDR_R = host->data_va;
+	nand->IO_ADDR_W = host->data_va;
+	nand->cmd_ctrl = fsmc_cmd_ctrl;
+	nand->chip_delay = 30;
+
+	/*
+	 * Setup default ECC mode. nand_dt_init() called from nand_scan_ident()
+	 * can overwrite this value if the DT provides a different value.
+	 */
+	nand->ecc.mode = NAND_ECC_HW;
+	nand->ecc.hwctl = fsmc_enable_hwecc;
+	nand->ecc.size = 512;
+	nand->options = pdata->options;
+	nand->select_chip = fsmc_select_chip;
+	nand->badblockbits = 7;
+
+	if (pdata->width == FSMC_NAND_BW16)
+		nand->options |= NAND_BUSWIDTH_16;
+
+	switch (host->mode) {
+	case USE_DMA_ACCESS:
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_MEMCPY, mask);
+		host->read_dma_chan = dma_request_channel(mask, filter,
+				pdata->read_dma_priv);
+		if (!host->read_dma_chan) {
+			dev_err(&pdev->dev, "Unable to get read dma channel\n");
+			goto err_req_read_chnl;
+		}
+		host->write_dma_chan = dma_request_channel(mask, filter,
+				pdata->write_dma_priv);
+		if (!host->write_dma_chan) {
+			dev_err(&pdev->dev, "Unable to get write dma channel\n");
+			goto err_req_write_chnl;
+		}
+		nand->read_buf = fsmc_read_buf_dma;
+		nand->write_buf = fsmc_write_buf_dma;
+		break;
+
+	default:
+	case USE_WORD_ACCESS:
+		nand->read_buf = fsmc_read_buf;
+		nand->write_buf = fsmc_write_buf;
+		break;
+	}
+
+	fsmc_nand_setup(host->regs_va, host->bank,
+			nand->options & NAND_BUSWIDTH_16,
+			host->dev_timings);
+
+	if (AMBA_REV_BITS(host->pid) >= 8) {
+		nand->ecc.read_page = fsmc_read_page_hwecc;
+		nand->ecc.calculate = fsmc_read_hwecc_ecc4;
+		nand->ecc.correct = fsmc_bch8_correct_data;
+		nand->ecc.bytes = 13;
+		nand->ecc.strength = 8;
+	}
+
+	/*
+	 * Scan to find existence of the device
+	 */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		ret = -ENXIO;
+		dev_err(&pdev->dev, "No NAND Device found!\n");
+		goto err_scan_ident;
+	}
+
+	if (AMBA_REV_BITS(host->pid) >= 8) {
+		switch (mtd->oobsize) {
+		case 16:
+		case 64:
+		case 128:
+		case 224:
+		case 256:
+			break;
+		default:
+			dev_warn(&pdev->dev, "No oob scheme defined for oobsize %d\n",
+				 mtd->oobsize);
+			ret = -EINVAL;
+			goto err_probe;
+		}
+
+		mtd_set_ooblayout(mtd, &fsmc_ecc4_ooblayout_ops);
+	} else {
+		switch (nand->ecc.mode) {
+		case NAND_ECC_HW:
+			dev_info(&pdev->dev, "Using 1-bit HW ECC scheme\n");
+			nand->ecc.calculate = fsmc_read_hwecc_ecc1;
+			nand->ecc.correct = nand_correct_data;
+			nand->ecc.bytes = 3;
+			nand->ecc.strength = 1;
+			break;
+
+		case NAND_ECC_SOFT:
+			if (nand->ecc.algo == NAND_ECC_BCH) {
+				dev_info(&pdev->dev, "Using 4-bit SW BCH ECC scheme\n");
+				break;
+			}
+			/* fall through - any other soft ECC algo is unsupported */
+
+		default:
+			dev_err(&pdev->dev, "Unsupported ECC mode!\n");
+			ret = -EINVAL;
+			goto err_probe;
+		}
+
+		/*
+		 * Don't set a layout for BCH4 SW ECC. It will be generated
+		 * later in nand_bch_init().
+		 */
+		if (nand->ecc.mode == NAND_ECC_HW) {
+			switch (mtd->oobsize) {
+			case 16:
+			case 64:
+			case 128:
+				mtd_set_ooblayout(mtd,
+						  &fsmc_ecc1_ooblayout_ops);
+				break;
+			default:
+				dev_warn(&pdev->dev,
+					 "No oob scheme defined for oobsize %d\n",
+					 mtd->oobsize);
+				ret = -EINVAL;
+				goto err_probe;
+			}
+		}
+	}
+
+	/* Second stage of scan to fill MTD data-structures */
+	if (nand_scan_tail(mtd)) {
+		ret = -ENXIO;
+		goto err_probe;
+	}
+
+	/*
+	 * The partition information is accessed in the following order of
+	 * precedence:
+	 *
+	 * command line passed by the bootloader,
+	 * platform data,
+	 * default partition information present in the driver.
+	 */
+	mtd->name = "nand";
+	ret = mtd_device_register(mtd, host->partitions, host->nr_partitions);
+	if (ret)
+		goto err_probe;
+
+	platform_set_drvdata(pdev, host);
+	dev_info(&pdev->dev, "FSMC NAND driver registration successful\n");
+	return 0;
+
+err_probe:
+err_scan_ident:
+	if (host->mode == USE_DMA_ACCESS)
+		dma_release_channel(host->write_dma_chan);
+err_req_write_chnl:
+	if (host->mode == USE_DMA_ACCESS)
+		dma_release_channel(host->read_dma_chan);
+err_req_read_chnl:
+	clk_disable_unprepare(host->clk);
+err_clk_prepare_enable:
+	clk_put(host->clk);
+	return ret;
+}
+
+/*
+ * Clean up routine
+ */
+static int fsmc_nand_remove(struct platform_device *pdev)
+{
+	struct fsmc_nand_data *host = platform_get_drvdata(pdev);
+
+	if (host) {
+		nand_release(nand_to_mtd(&host->nand));
+
+		if (host->mode == USE_DMA_ACCESS) {
+			dma_release_channel(host->write_dma_chan);
+			dma_release_channel(host->read_dma_chan);
+		}
+		clk_disable_unprepare(host->clk);
+		clk_put(host->clk);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int fsmc_nand_suspend(struct device *dev)
+{
+	struct fsmc_nand_data *host = dev_get_drvdata(dev);
+	if (host)
+		clk_disable_unprepare(host->clk);
+	return 0;
+}
+
+static int fsmc_nand_resume(struct device *dev)
+{
+	struct fsmc_nand_data *host = dev_get_drvdata(dev);
+	if (host) {
+		clk_prepare_enable(host->clk);
+		fsmc_nand_setup(host->regs_va, host->bank,
+				host->nand.options & NAND_BUSWIDTH_16,
+				host->dev_timings);
+	}
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(fsmc_nand_pm_ops, fsmc_nand_suspend, fsmc_nand_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id fsmc_nand_id_table[] = {
+	{ .compatible = "st,spear600-fsmc-nand" },
+	{ .compatible = "stericsson,fsmc-nand" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fsmc_nand_id_table);
+#endif
+
+static struct platform_driver fsmc_nand_driver = {
+	.remove = fsmc_nand_remove,
+	.driver = {
+		.name = "fsmc-nand",
+		.of_match_table = of_match_ptr(fsmc_nand_id_table),
+		.pm = &fsmc_nand_pm_ops,
+	},
+};
+
+module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vipin Kumar <vipin.kumar@st.com>, Ashish Priyadarshi");
+MODULE_DESCRIPTION("NAND driver for SPEAr Platforms");
diff --git a/drivers/mtd/nand/rawnand/gpio.c b/drivers/mtd/nand/rawnand/gpio.c
new file mode 100644
index 000000000000..21b19efe1ac7
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpio.c
@@ -0,0 +1,322 @@ 
+/*
+ * drivers/mtd/nand/gpio.c
+ *
+ * Updated, and converted to generic GPIO based driver by Russell King.
+ *
+ * Written by Ben Dooks <ben@simtec.co.uk>
+ *   Based on 2.4 version by Mark Whittaker
+ *
+ * © 2004 Simtec Electronics
+ *
+ * Device driver for NAND flash that uses a memory mapped interface to
+ * read/write the NAND commands and data, and GPIO pins for control signals
+ * (the DT binding refers to this as "GPIO assisted NAND flash")
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+
+struct gpiomtd {
+	void __iomem		*io_sync;
+	struct nand_chip	nand_chip;
+	struct gpio_nand_platdata plat;
+};
+
+static inline struct gpiomtd *gpio_nand_getpriv(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct gpiomtd, nand_chip);
+}
+
+
+#ifdef CONFIG_ARM
+/* gpio_nand_dosync()
+ *
+ * Make sure the GPIO state changes occur in-order with writes to the NAND
+ * memory region.
+ * Needed on PXA due to bus-reordering within the SoC itself (see the section
+ * on I/O ordering in the PXA manual, section 2.3, p. 35).
+ */
+static void gpio_nand_dosync(struct gpiomtd *gpiomtd)
+{
+	unsigned long tmp;
+
+	if (gpiomtd->io_sync) {
+		/*
+		 * Linux memory barriers don't cater for what's required here.
+		 * What's required is what's here - a read from a separate
+		 * region with a dependency on that read.
+		 */
+		tmp = readl(gpiomtd->io_sync);
+		asm volatile("mov %1, %0\n" : "=r" (tmp) : "r" (tmp));
+	}
+}
+#else
+static inline void gpio_nand_dosync(struct gpiomtd *gpiomtd) {}
+#endif
+
+static void gpio_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+
+	gpio_nand_dosync(gpiomtd);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		gpio_set_value(gpiomtd->plat.gpio_nce, !(ctrl & NAND_NCE));
+		gpio_set_value(gpiomtd->plat.gpio_cle, !!(ctrl & NAND_CLE));
+		gpio_set_value(gpiomtd->plat.gpio_ale, !!(ctrl & NAND_ALE));
+		gpio_nand_dosync(gpiomtd);
+	}
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	writeb(cmd, gpiomtd->nand_chip.IO_ADDR_W);
+	gpio_nand_dosync(gpiomtd);
+}
+
+static int gpio_nand_devready(struct mtd_info *mtd)
+{
+	struct gpiomtd *gpiomtd = gpio_nand_getpriv(mtd);
+
+	return gpio_get_value(gpiomtd->plat.gpio_rdy);
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+	{ .compatible = "gpio-control-nand" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+				   struct gpio_nand_platdata *plat)
+{
+	u32 val;
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+		if (val == 2) {
+			plat->options |= NAND_BUSWIDTH_16;
+		} else if (val != 1) {
+			dev_err(dev, "invalid bank-width %u\n", val);
+			return -EINVAL;
+		}
+	}
+
+	plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+	plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+	plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+	plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+	plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+	if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+		plat->chip_delay = val;
+
+	return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+	struct resource *r;
+	u64 addr;
+
+	if (of_property_read_u64(pdev->dev.of_node,
+				       "gpio-control-nand,io-sync-reg", &addr))
+		return NULL;
+
+	r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+	if (!r)
+		return NULL;
+
+	r->start = addr;
+	r->end = r->start + 0x3;
+	r->flags = IORESOURCE_MEM;
+
+	return r;
+}
+#else /* CONFIG_OF */
+static inline int gpio_nand_get_config_of(const struct device *dev,
+					  struct gpio_nand_platdata *plat)
+{
+	return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif /* CONFIG_OF */
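+
+/*
+ * Illustrative binding fragment for the OF helpers above (GPIO order is
+ * rdy, nce, ale, cle, nwp, matching the of_get_gpio() indexes; all values
+ * are placeholders):
+ *
+ *	gpio-nand {
+ *		compatible = "gpio-control-nand";
+ *		reg = <0x30000000 0x1000>;
+ *		bank-width = <2>;
+ *		chip-delay = <25>;
+ *		gpios = <&gpio 0 0>, <&gpio 1 0>, <&gpio 2 0>,
+ *			<&gpio 3 0>, <&gpio 4 0>;
+ *		gpio-control-nand,io-sync-reg = <0x0 0x4d000000>;
+ *	};
+ */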
+
+static inline int gpio_nand_get_config(const struct device *dev,
+				       struct gpio_nand_platdata *plat)
+{
+	int ret = gpio_nand_get_config_of(dev, plat);
+
+	if (!ret)
+		return ret;
+
+	if (dev_get_platdata(dev)) {
+		memcpy(plat, dev_get_platdata(dev), sizeof(*plat));
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+	struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+	if (r)
+		return r;
+
+	return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
+static int gpio_nand_remove(struct platform_device *pdev)
+{
+	struct gpiomtd *gpiomtd = platform_get_drvdata(pdev);
+
+	nand_release(nand_to_mtd(&gpiomtd->nand_chip));
+
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+	gpio_set_value(gpiomtd->plat.gpio_nce, 1);
+
+	return 0;
+}
+
+static int gpio_nand_probe(struct platform_device *pdev)
+{
+	struct gpiomtd *gpiomtd;
+	struct nand_chip *chip;
+	struct mtd_info *mtd;
+	struct resource *res;
+	int ret = 0;
+
+	if (!pdev->dev.of_node && !dev_get_platdata(&pdev->dev))
+		return -EINVAL;
+
+	gpiomtd = devm_kzalloc(&pdev->dev, sizeof(*gpiomtd), GFP_KERNEL);
+	if (!gpiomtd)
+		return -ENOMEM;
+
+	chip = &gpiomtd->nand_chip;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(chip->IO_ADDR_R))
+		return PTR_ERR(chip->IO_ADDR_R);
+
+	res = gpio_nand_get_io_sync(pdev);
+	if (res) {
+		gpiomtd->io_sync = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(gpiomtd->io_sync))
+			return PTR_ERR(gpiomtd->io_sync);
+	}
+
+	ret = gpio_nand_get_config(&pdev->dev, &gpiomtd->plat);
+	if (ret)
+		return ret;
+
+	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nce, "NAND NCE");
+	if (ret)
+		return ret;
+	gpio_direction_output(gpiomtd->plat.gpio_nce, 1);
+
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp)) {
+		ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_nwp,
+					"NAND NWP");
+		if (ret)
+			return ret;
+	}
+
+	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_ale, "NAND ALE");
+	if (ret)
+		return ret;
+	gpio_direction_output(gpiomtd->plat.gpio_ale, 0);
+
+	ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_cle, "NAND CLE");
+	if (ret)
+		return ret;
+	gpio_direction_output(gpiomtd->plat.gpio_cle, 0);
+
+	if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) {
+		ret = devm_gpio_request(&pdev->dev, gpiomtd->plat.gpio_rdy,
+					"NAND RDY");
+		if (ret)
+			return ret;
+		gpio_direction_input(gpiomtd->plat.gpio_rdy);
+		chip->dev_ready = gpio_nand_devready;
+	}
+
+	nand_set_flash_node(chip, pdev->dev.of_node);
+	chip->IO_ADDR_W		= chip->IO_ADDR_R;
+	chip->ecc.mode		= NAND_ECC_SOFT;
+	chip->ecc.algo		= NAND_ECC_HAMMING;
+	chip->options		= gpiomtd->plat.options;
+	chip->chip_delay	= gpiomtd->plat.chip_delay;
+	chip->cmd_ctrl		= gpio_nand_cmd_ctrl;
+
+	mtd			= nand_to_mtd(chip);
+	mtd->dev.parent		= &pdev->dev;
+
+	platform_set_drvdata(pdev, gpiomtd);
+
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_direction_output(gpiomtd->plat.gpio_nwp, 1);
+
+	if (nand_scan(mtd, 1)) {
+		ret = -ENXIO;
+		goto err_wp;
+	}
+
+	if (gpiomtd->plat.adjust_parts)
+		gpiomtd->plat.adjust_parts(&gpiomtd->plat, mtd->size);
+
+	ret = mtd_device_register(mtd, gpiomtd->plat.parts,
+				  gpiomtd->plat.num_parts);
+	if (!ret)
+		return 0;
+
+err_wp:
+	if (gpio_is_valid(gpiomtd->plat.gpio_nwp))
+		gpio_set_value(gpiomtd->plat.gpio_nwp, 0);
+
+	return ret;
+}
+
+static struct platform_driver gpio_nand_driver = {
+	.probe		= gpio_nand_probe,
+	.remove		= gpio_nand_remove,
+	.driver		= {
+		.name	= "gpio-nand",
+		.of_match_table = of_match_ptr(gpio_nand_id_table),
+	},
+};
+
+module_platform_driver(gpio_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("GPIO NAND Driver");
diff --git a/drivers/mtd/nand/rawnand/gpmi-nand/Makefile b/drivers/mtd/nand/rawnand/gpmi-nand/Makefile
new file mode 100644
index 000000000000..3a462487c35e
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpmi-nand/Makefile
@@ -0,0 +1,3 @@ 
+obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi_nand.o
+gpmi_nand-objs += gpmi-nand.o
+gpmi_nand-objs += gpmi-lib.o
diff --git a/drivers/mtd/nand/rawnand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/rawnand/gpmi-nand/bch-regs.h
new file mode 100644
index 000000000000..05bb91f2f4c4
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpmi-nand/bch-regs.h
@@ -0,0 +1,128 @@ 
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_BCH_REGS_H
+#define __GPMI_NAND_BCH_REGS_H
+
+#define HW_BCH_CTRL				0x00000000
+#define HW_BCH_CTRL_SET				0x00000004
+#define HW_BCH_CTRL_CLR				0x00000008
+#define HW_BCH_CTRL_TOG				0x0000000c
+
+#define BM_BCH_CTRL_COMPLETE_IRQ_EN		(1 << 8)
+#define BM_BCH_CTRL_COMPLETE_IRQ		(1 << 0)
+
+#define HW_BCH_STATUS0				0x00000010
+#define HW_BCH_MODE				0x00000020
+#define HW_BCH_ENCODEPTR			0x00000030
+#define HW_BCH_DATAPTR				0x00000040
+#define HW_BCH_METAPTR				0x00000050
+#define HW_BCH_LAYOUTSELECT			0x00000070
+
+#define HW_BCH_FLASH0LAYOUT0			0x00000080
+
+#define BP_BCH_FLASH0LAYOUT0_NBLOCKS		24
+#define BM_BCH_FLASH0LAYOUT0_NBLOCKS	(0xff << BP_BCH_FLASH0LAYOUT0_NBLOCKS)
+#define BF_BCH_FLASH0LAYOUT0_NBLOCKS(v)		\
+	(((v) << BP_BCH_FLASH0LAYOUT0_NBLOCKS) & BM_BCH_FLASH0LAYOUT0_NBLOCKS)
+
+#define BP_BCH_FLASH0LAYOUT0_META_SIZE		16
+#define BM_BCH_FLASH0LAYOUT0_META_SIZE	(0xff << BP_BCH_FLASH0LAYOUT0_META_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_META_SIZE(v)	\
+	(((v) << BP_BCH_FLASH0LAYOUT0_META_SIZE)\
+					 & BM_BCH_FLASH0LAYOUT0_META_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT0_ECC0		12
+#define BM_BCH_FLASH0LAYOUT0_ECC0	(0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0		11
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
+#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x)				\
+	(GPMI_IS_MX6(x)					\
+		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)	\
+			& MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0)	\
+		: (((v) << BP_BCH_FLASH0LAYOUT0_ECC0)		\
+			& BM_BCH_FLASH0LAYOUT0_ECC0)		\
+	)
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14	10
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14			\
+				(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT0_GF(v, x)				\
+	((GPMI_IS_MX6(x) && ((v) == 14))			\
+		? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT0_GF_13_14)	\
+			& MX6Q_BM_BCH_FLASH0LAYOUT0_GF_13_14)	\
+		: 0						\
+	)
+
+#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE		0
+#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE		\
+			(0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE	\
+			(0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
+#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x)				\
+	(GPMI_IS_MX6(x)						\
+		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)	\
+		: ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE)		\
+	)
+
+#define HW_BCH_FLASH0LAYOUT1			0x00000090
+
+#define BP_BCH_FLASH0LAYOUT1_PAGE_SIZE		16
+#define BM_BCH_FLASH0LAYOUT1_PAGE_SIZE		\
+			(0xffff << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(v)	\
+	(((v) << BP_BCH_FLASH0LAYOUT1_PAGE_SIZE) \
+					 & BM_BCH_FLASH0LAYOUT1_PAGE_SIZE)
+
+#define BP_BCH_FLASH0LAYOUT1_ECCN		12
+#define BM_BCH_FLASH0LAYOUT1_ECCN	(0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN		11
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN	(0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
+#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x)				\
+	(GPMI_IS_MX6(x)					\
+		? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)	\
+			& MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN)	\
+		: (((v) << BP_BCH_FLASH0LAYOUT1_ECCN)		\
+			& BM_BCH_FLASH0LAYOUT1_ECCN)		\
+	)
+
+#define MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14	10
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14			\
+				(0x1 << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)
+#define BF_BCH_FLASH0LAYOUT1_GF(v, x)				\
+	((GPMI_IS_MX6(x) && ((v) == 14))			\
+		? (((1) << MX6Q_BP_BCH_FLASH0LAYOUT1_GF_13_14)	\
+			& MX6Q_BM_BCH_FLASH0LAYOUT1_GF_13_14)	\
+		: 0						\
+	)
+
+#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE		0
+#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE		\
+			(0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE	\
+			(0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
+#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x)				\
+	(GPMI_IS_MX6(x)						\
+		? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)	\
+		: ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE)		\
+	)
+
+#define HW_BCH_VERSION				0x00000160
+#endif
diff --git a/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-lib.c
new file mode 100644
index 000000000000..0f68a99fc4ad
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-lib.c
@@ -0,0 +1,1508 @@ 
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+
+#include "gpmi-nand.h"
+#include "gpmi-regs.h"
+#include "bch-regs.h"
+
+static struct timing_threshod timing_default_threshold = {
+	.max_data_setup_cycles       = (BM_GPMI_TIMING0_DATA_SETUP >>
+						BP_GPMI_TIMING0_DATA_SETUP),
+	.internal_data_setup_in_ns   = 0,
+	.max_sample_delay_factor     = (BM_GPMI_CTRL1_RDN_DELAY >>
+						BP_GPMI_CTRL1_RDN_DELAY),
+	.max_dll_clock_period_in_ns  = 32,
+	.max_dll_delay_in_ns         = 16,
+};
+
+#define MXS_SET_ADDR		0x4
+#define MXS_CLR_ADDR		0x8
+/*
+ * Clear the bit and poll until it reads back as cleared.  This is usually
+ * called with a reset address and a mask of either SFTRST (bit 31) or
+ * CLKGATE (bit 30).
+ */
+static int clear_poll_bit(void __iomem *addr, u32 mask)
+{
+	int timeout = 0x400;
+
+	/* clear the bit */
+	writel(mask, addr + MXS_CLR_ADDR);
+
+	/*
+	 * SFTRST needs 3 GPMI clocks to settle; the reference manual
+	 * recommends waiting 1us.
+	 */
+	udelay(1);
+
+	/* poll until the bit becomes clear */
+	while ((readl(addr) & mask) && --timeout)
+		/* nothing */;
+
+	return !timeout;
+}
+
+#define MODULE_CLKGATE		(1 << 30)
+#define MODULE_SFTRST		(1 << 31)
+/*
+ * The current mxs_reset_block() will do two things:
+ *  [1] enable the module.
+ *  [2] reset the module.
+ *
+ * In most cases this is fine.
+ * But on the MX23 there is a hardware bug in the BCH block (see erratum #2847):
+ * if you try to soft reset the BCH block, it becomes unusable until the next
+ * hard reset. This case occurs in NAND boot mode: when the board boots from
+ * NAND, the ROM of the chip initializes the BCH block itself, so if the driver
+ * resets the BCH again, the BCH will not work anymore and you will see a DMA
+ * timeout. The bug has been fixed in later chips such as the MX28.
+ *
+ * To avoid this bug, we add a new parameter `just_enable` to
+ * mxs_reset_block() and rewrite it here.
+ */
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+{
+	int ret;
+	int timeout = 0x400;
+
+	/* clear and poll SFTRST */
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (unlikely(ret))
+		goto error;
+
+	/* clear CLKGATE */
+	writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
+
+	if (!just_enable) {
+		/* set SFTRST to reset the block */
+		writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
+		udelay(1);
+
+		/* poll CLKGATE becoming set */
+		while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
+			/* nothing */;
+		if (unlikely(!timeout))
+			goto error;
+	}
+
+	/* clear and poll SFTRST */
+	ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
+	if (unlikely(ret))
+		goto error;
+
+	/* clear and poll CLKGATE */
+	ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
+	if (unlikely(ret))
+		goto error;
+
+	return 0;
+
+error:
+	pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
+	return -ETIMEDOUT;
+}
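+
+/*
+ * Usage note: both gpmi_init() and bch_set_geometry() below call
+ * gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)), so on the MX23 the BCH
+ * block is only ungated and enabled, never soft reset, as required by the
+ * erratum described above.
+ */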
+
+static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
+{
+	struct clk *clk;
+	int ret;
+	int i;
+
+	for (i = 0; i < GPMI_CLK_MAX; i++) {
+		clk = this->resources.clock[i];
+		if (!clk)
+			break;
+
+		if (v) {
+			ret = clk_prepare_enable(clk);
+			if (ret)
+				goto err_clk;
+		} else {
+			clk_disable_unprepare(clk);
+		}
+	}
+	return 0;
+
+err_clk:
+	for (; i > 0; i--)
+		clk_disable_unprepare(this->resources.clock[i - 1]);
+	return ret;
+}
+
+#define gpmi_enable_clk(x) __gpmi_enable_clk(x, true)
+#define gpmi_disable_clk(x) __gpmi_enable_clk(x, false)
+
+int gpmi_init(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	int ret;
+
+	ret = gpmi_enable_clk(this);
+	if (ret)
+		goto err_out;
+	ret = gpmi_reset_block(r->gpmi_regs, false);
+	if (ret)
+		goto err_out;
+
+	/*
+	 * Reset BCH here, too. We got failures otherwise :(
+	 * See later BCH reset for explanation of MX23 handling
+	 */
+	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+	if (ret)
+		goto err_out;
+
+
+	/* Choose NAND mode. */
+	writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+	/* Set the IRQ polarity. */
+	writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
+				r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* Disable Write-Protection. */
+	writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* Select BCH ECC. */
+	writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/*
+	 * Decouple the chip select from dma channel. We use dma0 for all
+	 * the chips.
+	 */
+	writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	gpmi_disable_clk(this);
+	return 0;
+err_out:
+	return ret;
+}
+
+/*
+ * This function is only called when an error occurs; it dumps the GPMI and
+ * BCH state for debugging.
+ */
+void gpmi_dump_info(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	struct bch_geometry *geo = &this->bch_geometry;
+	u32 reg;
+	int i;
+
+	dev_err(this->dev, "Show GPMI registers :\n");
+	for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
+		reg = readl(r->gpmi_regs + i * 0x10);
+		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+	}
+
+	/* start to print out the BCH info */
+	dev_err(this->dev, "Show BCH registers :\n");
+	for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
+		reg = readl(r->bch_regs + i * 0x10);
+		dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
+	}
+	dev_err(this->dev, "BCH Geometry :\n"
+		"GF length              : %u\n"
+		"ECC Strength           : %u\n"
+		"Page Size in Bytes     : %u\n"
+		"Metadata Size in Bytes : %u\n"
+		"ECC Chunk Size in Bytes: %u\n"
+		"ECC Chunk Count        : %u\n"
+		"Payload Size in Bytes  : %u\n"
+		"Auxiliary Size in Bytes: %u\n"
+		"Auxiliary Status Offset: %u\n"
+		"Block Mark Byte Offset : %u\n"
+		"Block Mark Bit Offset  : %u\n",
+		geo->gf_len,
+		geo->ecc_strength,
+		geo->page_size,
+		geo->metadata_size,
+		geo->ecc_chunk_size,
+		geo->ecc_chunk_count,
+		geo->payload_size,
+		geo->auxiliary_size,
+		geo->auxiliary_status_offset,
+		geo->block_mark_byte_offset,
+		geo->block_mark_bit_offset);
+}
+
+/* Configures the geometry for BCH.  */
+int bch_set_geometry(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	struct bch_geometry *bch_geo = &this->bch_geometry;
+	unsigned int block_count;
+	unsigned int block_size;
+	unsigned int metadata_size;
+	unsigned int ecc_strength;
+	unsigned int page_size;
+	unsigned int gf_len;
+	int ret;
+
+	if (common_nfc_set_geometry(this))
+		return !0;
+
+	block_count   = bch_geo->ecc_chunk_count - 1;
+	block_size    = bch_geo->ecc_chunk_size;
+	metadata_size = bch_geo->metadata_size;
+	ecc_strength  = bch_geo->ecc_strength >> 1;
+	page_size     = bch_geo->page_size;
+	gf_len        = bch_geo->gf_len;
+
+	ret = gpmi_enable_clk(this);
+	if (ret)
+		goto err_out;
+
+	/*
+	 * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on
+	 * this chip, otherwise it will lock up. So we skip resetting the BCH
+	 * on the MX23. On the other hand, the MX28 needs the reset, because
+	 * one case has been seen where the BCH produced ECC errors constantly
+	 * after 10000 consecutive reboots. The latter case has not been seen
+	 * on the MX23 yet, but we do not know whether it could happen there
+	 * as well.
+	 */
+	ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+	if (ret)
+		goto err_out;
+
+	/* Configure layout 0. */
+	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
+			| BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
+			| BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
+			| BF_BCH_FLASH0LAYOUT0_GF(gf_len, this)
+			| BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
+			r->bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+	writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
+			| BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
+			| BF_BCH_FLASH0LAYOUT1_GF(gf_len, this)
+			| BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
+			r->bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+	/* Set *all* chip selects to use layout 0. */
+	writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
+
+	/* Enable interrupts. */
+	writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
+				r->bch_regs + HW_BCH_CTRL_SET);
+
+	gpmi_disable_clk(this);
+	return 0;
+err_out:
+	return ret;
+}
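+
+/*
+ * Example (illustrative geometry): with eight 512-byte ECC chunks and an ECC
+ * strength of 16, the layout registers are programmed with NBLOCKS = 7
+ * (chunk count minus one) and ECC0/ECCN = 8 (strength divided by two). On
+ * i.MX6 the DATA0/DATAN size fields take the chunk size divided by four (see
+ * BF_BCH_FLASH0LAYOUT0_DATA0_SIZE() in bch-regs.h), elsewhere the raw byte
+ * count is used.
+ */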
+
+/* Converts time in nanoseconds to cycles. */
+static unsigned int ns_to_cycles(unsigned int time,
+			unsigned int period, unsigned int min)
+{
+	unsigned int k;
+
+	k = (time + period - 1) / period;
+	return max(k, min);
+}
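+
+/*
+ * Example (illustrative values): with a 10 ns GPMI clock period, a requested
+ * 25 ns setup time rounds up to (25 + 10 - 1) / 10 = 3 cycles, while a 0 ns
+ * request is clamped to 'min' (one cycle for data setup/hold, see below).
+ */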
+
+#define DEF_MIN_PROP_DELAY	5
+#define DEF_MAX_PROP_DELAY	9
+/* Apply timing to current hardware conditions. */
+static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
+					struct gpmi_nfc_hardware_timing *hw)
+{
+	struct timing_threshod *nfc = &timing_default_threshold;
+	struct resources *r = &this->resources;
+	struct nand_chip *nand = &this->nand;
+	struct nand_timing target = this->timing;
+	bool improved_timing_is_available;
+	unsigned long clock_frequency_in_hz;
+	unsigned int clock_period_in_ns;
+	bool dll_use_half_periods;
+	unsigned int dll_delay_shift;
+	unsigned int max_sample_delay_in_ns;
+	unsigned int address_setup_in_cycles;
+	unsigned int data_setup_in_ns;
+	unsigned int data_setup_in_cycles;
+	unsigned int data_hold_in_cycles;
+	int ideal_sample_delay_in_ns;
+	unsigned int sample_delay_factor;
+	int tEYE;
+	unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
+	unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
+
+	/*
+	 * If there are multiple chips, we need to relax the timings to allow
+	 * for signal distortion due to higher capacitance.
+	 */
+	if (nand->numchips > 2) {
+		target.data_setup_in_ns    += 10;
+		target.data_hold_in_ns     += 10;
+		target.address_setup_in_ns += 10;
+	} else if (nand->numchips > 1) {
+		target.data_setup_in_ns    += 5;
+		target.data_hold_in_ns     += 5;
+		target.address_setup_in_ns += 5;
+	}
+
+	/* Check if improved timing information is available. */
+	improved_timing_is_available =
+		(target.tREA_in_ns  >= 0) &&
+		(target.tRLOH_in_ns >= 0) &&
+		(target.tRHOH_in_ns >= 0);
+
+	/* Inspect the clock. */
+	nfc->clock_frequency_in_hz = clk_get_rate(r->clock[0]);
+	clock_frequency_in_hz = nfc->clock_frequency_in_hz;
+	clock_period_in_ns    = NSEC_PER_SEC / clock_frequency_in_hz;
+
+	/*
+	 * The NFC quantizes setup and hold parameters in terms of clock cycles.
+	 * Here, we quantize the setup and hold timing parameters to the
+	 * next-highest clock period to make sure we apply at least the
+	 * specified times.
+	 *
+	 * For data setup and data hold, the hardware interprets a value of zero
+	 * as the largest possible delay. This is not what's intended by a zero
+	 * in the input parameter, so we impose a minimum of one cycle.
+	 */
+	data_setup_in_cycles    = ns_to_cycles(target.data_setup_in_ns,
+							clock_period_in_ns, 1);
+	data_hold_in_cycles     = ns_to_cycles(target.data_hold_in_ns,
+							clock_period_in_ns, 1);
+	address_setup_in_cycles = ns_to_cycles(target.address_setup_in_ns,
+							clock_period_in_ns, 0);
+
+	/*
+	 * The clock's period affects the sample delay in a number of ways:
+	 *
+	 * (1) The NFC HAL tells us the maximum clock period the sample delay
+	 *     DLL can tolerate. If the clock period is greater than half that
+	 *     maximum, we must configure the DLL to be driven by half periods.
+	 *
+	 * (2) We need to convert from an ideal sample delay, in ns, to a
+	 *     "sample delay factor," which the NFC uses. This factor depends on
+	 *     whether we're driving the DLL with full or half periods.
+	 *     Paraphrasing the reference manual:
+	 *
+	 *         AD = SDF x 0.125 x RP
+	 *
+	 * where:
+	 *
+	 *     AD   is the applied delay, in ns.
+	 *     SDF  is the sample delay factor, which is dimensionless.
+	 *     RP   is the reference period, in ns, which is a full clock period
+	 *          if the DLL is being driven by full periods, or half that if
+	 *          the DLL is being driven by half periods.
+	 *
+	 * Let's re-arrange this in a way that's more useful to us:
+	 *
+	 *                        8
+	 *         SDF  =  AD x ----
+	 *                       RP
+	 *
+	 * The reference period is either the clock period or half that, so this
+	 * is:
+	 *
+	 *                        8       AD x DDF
+	 *         SDF  =  AD x -----  =  --------
+	 *                      f x P        P
+	 *
+	 * where:
+	 *
+	 *       f  is 1 or 1/2, depending on how we're driving the DLL.
+	 *       P  is the clock period.
+	 *     DDF  is the DLL Delay Factor, a dimensionless value that
+	 *          incorporates all the constants in the conversion.
+	 *
+	 * DDF will be either 8 or 16, both of which are powers of two. We can
+	 * reduce the cost of this conversion by using bit shifts instead of
+	 * multiplication or division. Thus:
+	 *
+	 *                 AD << DDS
+	 *         SDF  =  ---------
+	 *                     P
+	 *
+	 *     or
+	 *
+	 *         AD  =  (SDF >> DDS) x P
+	 *
+	 * where:
+	 *
+	 *     DDS  is the DLL Delay Shift, the logarithm to base 2 of the DDF.
+	 */
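+	/*
+	 * A worked example with assumed numbers: at roughly 104MHz the clock
+	 * period P is about 9.6ns. If that is no more than half the maximum
+	 * DLL clock period, the DLL is driven by full periods (DDS = 3), and
+	 * an ideal applied delay AD of 6ns gives SDF = (6 << 3) / 9.6 ~= 5.
+	 */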
+	if (clock_period_in_ns > (nfc->max_dll_clock_period_in_ns >> 1)) {
+		dll_use_half_periods = true;
+		dll_delay_shift      = 3 + 1;
+	} else {
+		dll_use_half_periods = false;
+		dll_delay_shift      = 3;
+	}
+
+	/*
+	 * Compute the maximum sample delay the NFC allows, under current
+	 * conditions. If the clock is running too slowly, no sample delay is
+	 * possible.
+	 */
+	if (clock_period_in_ns > nfc->max_dll_clock_period_in_ns)
+		max_sample_delay_in_ns = 0;
+	else {
+		/*
+		 * Compute the delay implied by the largest sample delay factor
+		 * the NFC allows.
+		 */
+		max_sample_delay_in_ns =
+			(nfc->max_sample_delay_factor * clock_period_in_ns) >>
+								dll_delay_shift;
+
+		/*
+		 * Check if the implied sample delay is larger than the NFC
+		 * actually allows.
+		 */
+		if (max_sample_delay_in_ns > nfc->max_dll_delay_in_ns)
+			max_sample_delay_in_ns = nfc->max_dll_delay_in_ns;
+	}
+
+	/*
+	 * Check if improved timing information is available. If not, we have to
+	 * use a less-sophisticated algorithm.
+	 */
+	if (!improved_timing_is_available) {
+		/*
+		 * Fold the read setup time required by the NFC into the ideal
+		 * sample delay.
+		 */
+		ideal_sample_delay_in_ns = target.gpmi_sample_delay_in_ns +
+						nfc->internal_data_setup_in_ns;
+
+		/*
+		 * The ideal sample delay may be greater than the maximum
+		 * allowed by the NFC. If so, we can trade off sample delay time
+		 * for more data setup time.
+		 *
+		 * In each iteration of the following loop, we add a cycle to
+		 * the data setup time and subtract a corresponding amount from
+		 * the sample delay until we've satisfied the constraints or
+		 * can't do any better.
+		 */
+		while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+			data_setup_in_cycles++;
+			ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+			if (ideal_sample_delay_in_ns < 0)
+				ideal_sample_delay_in_ns = 0;
+
+		}
+
+		/*
+		 * Compute the sample delay factor that corresponds most closely
+		 * to the ideal sample delay. If the result is too large for the
+		 * NFC, use the maximum value.
+		 *
+		 * Notice that we use the ns_to_cycles function to compute the
+		 * sample delay factor. We do this because the form of the
+		 * computation is the same as that for calculating cycles.
+		 */
+		sample_delay_factor =
+			ns_to_cycles(
+				ideal_sample_delay_in_ns << dll_delay_shift,
+							clock_period_in_ns, 0);
+
+		if (sample_delay_factor > nfc->max_sample_delay_factor)
+			sample_delay_factor = nfc->max_sample_delay_factor;
+
+		/* Skip to the part where we return our results. */
+		goto return_results;
+	}
+
+	/*
+	 * If control arrives here, we have more detailed timing information,
+	 * so we can use a better algorithm.
+	 */
+
+	/*
+	 * Fold the read setup time required by the NFC into the maximum
+	 * propagation delay.
+	 */
+	max_prop_delay_in_ns += nfc->internal_data_setup_in_ns;
+
+	/*
+	 * Earlier, we computed the number of clock cycles required to satisfy
+	 * the data setup time. Now, we need to know the actual nanoseconds.
+	 */
+	data_setup_in_ns = clock_period_in_ns * data_setup_in_cycles;
+
+	/*
+	 * Compute tEYE, the width of the data eye when reading from the NAND
+	 * Flash. The eye width is fundamentally determined by the data setup
+	 * time, perturbed by propagation delays and some characteristics of the
+	 * NAND Flash device.
+	 *
+	 * start of the eye = max_prop_delay + tREA
+	 * end of the eye   = min_prop_delay + tRHOH + data_setup
+	 */
+	tEYE = (int)min_prop_delay_in_ns + (int)target.tRHOH_in_ns +
+							(int)data_setup_in_ns;
+
+	tEYE -= (int)max_prop_delay_in_ns + (int)target.tREA_in_ns;
+
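+	/*
+	 * Illustration with assumed values, ignoring the internal data setup
+	 * folded into max_prop_delay above: with min/max propagation delays
+	 * of 5ns/9ns, tRHOH = 15ns, tREA = 20ns and 30ns of data setup,
+	 * tEYE = 5 + 15 + 30 - (9 + 20) = 21ns.
+	 */
+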
+	/*
+	 * The eye must be open. If it's not, we can try to open it by
+	 * increasing its main forcer, the data setup time.
+	 *
+	 * In each iteration of the following loop, we increase the data setup
+	 * time by a single clock cycle. We do this until either the eye is
+	 * open or we run into NFC limits.
+	 */
+	while ((tEYE <= 0) &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+		/* Give a cycle to data setup. */
+		data_setup_in_cycles++;
+		/* Synchronize the data setup time with the cycles. */
+		data_setup_in_ns += clock_period_in_ns;
+		/* Adjust tEYE accordingly. */
+		tEYE += clock_period_in_ns;
+	}
+
+	/*
+	 * When control arrives here, the eye is open. The ideal time to sample
+	 * the data is in the center of the eye:
+	 *
+	 *     end of the eye + start of the eye
+	 *     ---------------------------------  -  data_setup
+	 *                    2
+	 *
+	 * After some algebra, this simplifies to the code immediately below.
+	 */
+	ideal_sample_delay_in_ns =
+		((int)max_prop_delay_in_ns +
+			(int)target.tREA_in_ns +
+				(int)min_prop_delay_in_ns +
+					(int)target.tRHOH_in_ns -
+						(int)data_setup_in_ns) >> 1;
+
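+	/*
+	 * Spelling out the algebra: the centre of the eye is (start + end) / 2,
+	 * with start = max_prop_delay + tREA and
+	 * end = min_prop_delay + tRHOH + data_setup; subtracting data_setup
+	 * leaves (max_prop + tREA + min_prop + tRHOH - data_setup) / 2, which
+	 * is what the shift-by-one above computes.
+	 */
+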
+	/*
+	 * The following figure illustrates some aspects of a NAND Flash read:
+	 *
+	 *
+	 *           __                   _____________________________________
+	 * RDN         \_________________/
+	 *
+	 *                                         <---- tEYE ----->
+	 *                                        /-----------------\
+	 * Read Data ----------------------------<                   >---------
+	 *                                        \-----------------/
+	 *             ^                 ^                 ^              ^
+	 *             |                 |                 |              |
+	 *             |<--Data Setup -->|<--Delay Time -->|              |
+	 *             |                 |                 |              |
+	 *             |                 |                                |
+	 *             |                 |<--   Quantized Delay Time   -->|
+	 *             |                 |                                |
+	 *
+	 *
+	 * We have some issues we must now address:
+	 *
+	 * (1) The *ideal* sample delay time must not be negative. If it is, we
+	 *     jam it to zero.
+	 *
+	 * (2) The *ideal* sample delay time must not be greater than that
+	 *     allowed by the NFC. If it is, we can increase the data setup
+	 *     time, which will reduce the delay between the end of the data
+	 *     setup and the center of the eye. It will also make the eye
+	 *     larger, which might help with the next issue...
+	 *
+	 * (3) The *quantized* sample delay time must not fall either before the
+	 *     eye opens or after it closes (the latter is the problem
+	 *     illustrated in the above figure).
+	 */
+
+	/* Jam a negative ideal sample delay to zero. */
+	if (ideal_sample_delay_in_ns < 0)
+		ideal_sample_delay_in_ns = 0;
+
+	/*
+	 * Extend the data setup as needed to reduce the ideal sample delay
+	 * below the maximum permitted by the NFC.
+	 */
+	while ((ideal_sample_delay_in_ns > max_sample_delay_in_ns) &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+
+		/* Give a cycle to data setup. */
+		data_setup_in_cycles++;
+		/* Synchronize the data setup time with the cycles. */
+		data_setup_in_ns += clock_period_in_ns;
+		/* Adjust tEYE accordingly. */
+		tEYE += clock_period_in_ns;
+
+		/*
+		 * Decrease the ideal sample delay by one half cycle, to keep it
+		 * in the middle of the eye.
+		 */
+		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+		/* Jam a negative ideal sample delay to zero. */
+		if (ideal_sample_delay_in_ns < 0)
+			ideal_sample_delay_in_ns = 0;
+	}
+
+	/*
+	 * Compute the sample delay factor that corresponds to the ideal sample
+	 * delay. If the result is too large, then use the maximum allowed
+	 * value.
+	 *
+	 * Notice that we use the ns_to_cycles function to compute the sample
+	 * delay factor. We do this because the form of the computation is the
+	 * same as that for calculating cycles.
+	 */
+	sample_delay_factor =
+		ns_to_cycles(ideal_sample_delay_in_ns << dll_delay_shift,
+							clock_period_in_ns, 0);
+
+	if (sample_delay_factor > nfc->max_sample_delay_factor)
+		sample_delay_factor = nfc->max_sample_delay_factor;
+
+	/*
+	 * These macros conveniently encapsulate a computation we'll use to
+	 * continuously evaluate whether or not the data sample delay is inside
+	 * the eye.
+	 */
+	#define IDEAL_DELAY  ((int) ideal_sample_delay_in_ns)
+
+	#define QUANTIZED_DELAY  \
+		((int) ((sample_delay_factor * clock_period_in_ns) >> \
+							dll_delay_shift))
+
+	#define DELAY_ERROR  (abs(QUANTIZED_DELAY - IDEAL_DELAY))
+
+	#define SAMPLE_IS_NOT_WITHIN_THE_EYE  (DELAY_ERROR > (tEYE >> 1))
+
+	/*
+	 * While the quantized sample time falls outside the eye, reduce the
+	 * sample delay or extend the data setup to move the sampling point back
+	 * toward the eye. Do not allow the number of data setup cycles to
+	 * exceed the maximum allowed by the NFC.
+	 */
+	while (SAMPLE_IS_NOT_WITHIN_THE_EYE &&
+			(data_setup_in_cycles < nfc->max_data_setup_cycles)) {
+		/*
+		 * If control arrives here, the quantized sample delay falls
+		 * outside the eye. Check if it's before the eye opens, or after
+		 * the eye closes.
+		 */
+		if (QUANTIZED_DELAY > IDEAL_DELAY) {
+			/*
+			 * If control arrives here, the quantized sample delay
+			 * falls after the eye closes. Decrease the quantized
+			 * delay time and then go back to re-evaluate.
+			 */
+			if (sample_delay_factor != 0)
+				sample_delay_factor--;
+			continue;
+		}
+
+		/*
+		 * If control arrives here, the quantized sample delay falls
+		 * before the eye opens. Shift the sample point by increasing
+		 * data setup time. This will also make the eye larger.
+		 */
+
+		/* Give a cycle to data setup. */
+		data_setup_in_cycles++;
+		/* Synchronize the data setup time with the cycles. */
+		data_setup_in_ns += clock_period_in_ns;
+		/* Adjust tEYE accordingly. */
+		tEYE += clock_period_in_ns;
+
+		/*
+		 * Decrease the ideal sample delay by one half cycle, to keep it
+		 * in the middle of the eye.
+		 */
+		ideal_sample_delay_in_ns -= (clock_period_in_ns >> 1);
+
+		/* ...and one less period for the delay time. */
+		ideal_sample_delay_in_ns -= clock_period_in_ns;
+
+		/* Jam a negative ideal sample delay to zero. */
+		if (ideal_sample_delay_in_ns < 0)
+			ideal_sample_delay_in_ns = 0;
+
+		/*
+		 * We have a new ideal sample delay, so re-compute the quantized
+		 * delay.
+		 */
+		sample_delay_factor =
+			ns_to_cycles(
+				ideal_sample_delay_in_ns << dll_delay_shift,
+							clock_period_in_ns, 0);
+
+		if (sample_delay_factor > nfc->max_sample_delay_factor)
+			sample_delay_factor = nfc->max_sample_delay_factor;
+	}
+
+	/* Control arrives here when we're ready to return our results. */
+return_results:
+	hw->data_setup_in_cycles    = data_setup_in_cycles;
+	hw->data_hold_in_cycles     = data_hold_in_cycles;
+	hw->address_setup_in_cycles = address_setup_in_cycles;
+	hw->use_half_periods        = dll_use_half_periods;
+	hw->sample_delay_factor     = sample_delay_factor;
+	hw->device_busy_timeout     = GPMI_DEFAULT_BUSY_TIMEOUT;
+	hw->wrn_dly_sel             = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
+
+	/* Return success. */
+	return 0;
+}
+
+/*
+ * <1> First, we should know what the GPMI-clock is.
+ *     The GPMI-clock is the internal clock of the gpmi nand controller.
+ *     If you set the gpmi nand controller to 100MHz, the GPMI-clock's
+ *     period is 10ns. We call the GPMI-clock's period the GPMI-clock-period.
+ *
+ * <2> Second, we should know the frequency on the nand chip pins.
+ *     The frequency on the nand chip pins is derived from the GPMI-clock.
+ *     We can get it from the following equation:
+ *
+ *         F = G / (DS + DH)
+ *
+ *         F  : the frequency on the nand chip pins.
+ *         G  : the GPMI clock, such as 100MHz.
+ *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
+ *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
+ *
+ * <3> Third, when the frequency on the nand chip pins is above 33MHz,
+ *     the nand EDO (Extended Data Out) timing can be applied.
+ *     The GPMI implements a feedback read strobe to sample the read data.
+ *     The feedback read strobe can be delayed to support the nand EDO timing,
+ *     where the read strobe may deassert before the read data is valid, and
+ *     the read data stays valid for some time after the read strobe.
+ *
+ *     The following figure illustrates some aspects of a NAND Flash read:
+ *
+ *                   |<---tREA---->|
+ *                   |             |
+ *                   |         |   |
+ *                   |<--tRP-->|   |
+ *                   |         |   |
+ *                  __          ___|__________________________________
+ *     RDN            \________/   |
+ *                                 |
+ *                                 /---------\
+ *     Read Data    --------------<           >---------
+ *                                 \---------/
+ *                                |     |
+ *                                |<-D->|
+ *     FeedbackRDN  ________             ____________
+ *                          \___________/
+ *
+ *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
+ *
+ *
+ * <4> Now, we begin to describe how to compute the right RDN_DELAY.
+ *
+ *  4.1) From the aspect of the nand chip pins:
+ *        Delay = (tREA + C - tRP)               {1}
+ *
+ *        tREA : the maximum read access time. From the ONFI nand standards,
+ *               we know that tREA is 16ns in mode 5 and 20ns in mode 4.
+ *               Please check it at www.onfi.org
+ *        C    : a constant for adjusting the delay; the default is 4.
+ *        tRP  : the read pulse width.
+ *               Specified by the HW_GPMI_TIMING0:DATA_SETUP:
+ *                    tRP = (GPMI-clock-period) * DATA_SETUP
+ *
+ *  4.2) From the aspect of the GPMI nand controller:
+ *         Delay = RDN_DELAY * 0.125 * RP        {2}
+ *
+ *         RP   : the DLL reference period.
+ *            if (GPMI-clock-period > DLL_THRESHOLD)
+ *                   RP = GPMI-clock-period / 2;
+ *            else
+ *                   RP = GPMI-clock-period;
+ *
+ *            Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period is
+ *            greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
+ *            is 16ns, but on mx6q we use 12ns.
+ *
+ *  4.3) Since {1} equals {2}, we get:
+ *
+ *                    (tREA + 4 - tRP) * 8
+ *         RDN_DELAY = ---------------------     {3}
+ *                           RP
+ *
+ *  4.4) We only support the fastest asynchronous mode of ONFI nand.
+ *       For some ONFI nand chips mode 4 is the fastest mode, while for
+ *       others mode 5 is the fastest. So we only support mode 4 and
+ *       mode 5; there is no need to support the other modes.
+ */
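+
+/*
+ * A worked example with the assumed figures from above: in mode 5 the GPMI
+ * clock is set to 100MHz, so the GPMI-clock-period is 10ns and
+ * tRP = 10ns * 1 = 10ns, while tREA = 16ns. On mx6q the DLL threshold is
+ * 12ns, so RP = GPMI-clock-period = 10ns, and {3} gives
+ * RDN_DELAY = ((16 + 4 - 10) * 8) / 10 = 8.
+ */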
+static void gpmi_compute_edo_timing(struct gpmi_nand_data *this,
+			struct gpmi_nfc_hardware_timing *hw)
+{
+	struct resources *r = &this->resources;
+	unsigned long rate = clk_get_rate(r->clock[0]);
+	int mode = this->timing_mode;
+	int dll_threshold = this->devdata->max_chain_delay;
+	unsigned long delay;
+	unsigned long clk_period;
+	int t_rea;
+	int c = 4;
+	int t_rp;
+	int rp;
+
+	/*
+	 * [1] for GPMI_HW_GPMI_TIMING0:
+	 *     The async mode requires 40MHz for mode 4, 50MHz for mode 5.
+	 *     The GPMI can support 100MHz at most. So if we want to
+	 *     get the 40MHz or 50MHz, we have to set DS=1, DH=1.
+	 *     Set the ADDRESS_SETUP to 0 in mode 4.
+	 */
+	hw->data_setup_in_cycles = 1;
+	hw->data_hold_in_cycles = 1;
+	hw->address_setup_in_cycles = ((mode == 5) ? 1 : 0);
+
+	/* [2] for GPMI_HW_GPMI_TIMING1 */
+	hw->device_busy_timeout = 0x9000;
+
+	/* [3] for GPMI_HW_GPMI_CTRL1 */
+	hw->wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
+
+	/*
+	 * Scale both the numerator and the denominator in {3} by 10.
+	 * This gives us a more accurate result.
+	 */
+	clk_period = NSEC_PER_SEC / (rate / 10);
+	dll_threshold *= 10;
+	t_rea = ((mode == 5) ? 16 : 20) * 10;
+	c *= 10;
+
+	t_rp = clk_period * 1; /* DATA_SETUP is 1 */
+
+	if (clk_period > dll_threshold) {
+		hw->use_half_periods = 1;
+		rp = clk_period / 2;
+	} else {
+		hw->use_half_periods = 0;
+		rp = clk_period;
+	}
+
+	/*
+	 * Multiply the numerator by another 10 so that we can round to the
+	 * nearest integer: 7.8 rounds up to 8; 7.4 rounds down to 7.
+	 */
+	delay  = (((t_rea + c - t_rp) * 8) * 10) / rp;
+	delay = (delay + 5) / 10;
+
+	hw->sample_delay_factor = delay;
+}
+
+static int enable_edo_mode(struct gpmi_nand_data *this, int mode)
+{
+	struct resources  *r = &this->resources;
+	struct nand_chip *nand = &this->nand;
+	struct mtd_info	 *mtd = nand_to_mtd(nand);
+	uint8_t *feature;
+	unsigned long rate;
+	int ret;
+
+	feature = kzalloc(ONFI_SUBFEATURE_PARAM_LEN, GFP_KERNEL);
+	if (!feature)
+		return -ENOMEM;
+
+	nand->select_chip(mtd, 0);
+
+	/* [1] send the SET FEATURES command to the NAND */
+	feature[0] = mode;
+	ret = nand->onfi_set_features(mtd, nand,
+				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+	if (ret)
+		goto err_out;
+
+	/* [2] send the GET FEATURES command to double-check the timing mode */
+	memset(feature, 0, ONFI_SUBFEATURE_PARAM_LEN);
+	ret = nand->onfi_get_features(mtd, nand,
+				ONFI_FEATURE_ADDR_TIMING_MODE, feature);
+	if (ret || feature[0] != mode)
+		goto err_out;
+
+	nand->select_chip(mtd, -1);
+
+	/* [3] set the main IO clock, 100MHz for mode 5, 80MHz for mode 4. */
+	rate = (mode == 5) ? 100000000 : 80000000;
+	clk_set_rate(r->clock[0], rate);
+
+	/* Let gpmi_begin() recompute the timing. */
+	this->flags &= ~GPMI_TIMING_INIT_OK;
+
+	this->flags |= GPMI_ASYNC_EDO_ENABLED;
+	this->timing_mode = mode;
+	kfree(feature);
+	dev_info(this->dev, "enable the asynchronous EDO mode %d\n", mode);
+	return 0;
+
+err_out:
+	nand->select_chip(mtd, -1);
+	kfree(feature);
+	dev_err(this->dev, "mode:%d, failed to set the feature\n", mode);
+	return -EINVAL;
+}
+
+int gpmi_extra_init(struct gpmi_nand_data *this)
+{
+	struct nand_chip *chip = &this->nand;
+
+	/* Enable the asynchronous EDO feature. */
+	if (GPMI_IS_MX6(this) && chip->onfi_version) {
+		int mode = onfi_get_async_timing_mode(chip);
+
+		/* We only support the timing mode 4 and mode 5. */
+		if (mode & ONFI_TIMING_MODE_5)
+			mode = 5;
+		else if (mode & ONFI_TIMING_MODE_4)
+			mode = 4;
+		else
+			return 0;
+
+		return enable_edo_mode(this, mode);
+	}
+	return 0;
+}
+
+/* Begin the I/O */
+void gpmi_begin(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	void __iomem *gpmi_regs = r->gpmi_regs;
+	unsigned int   clock_period_in_ns;
+	uint32_t       reg;
+	unsigned int   dll_wait_time_in_us;
+	struct gpmi_nfc_hardware_timing  hw;
+	int ret;
+
+	/* Enable the clock. */
+	ret = gpmi_enable_clk(this);
+	if (ret) {
+		dev_err(this->dev, "Failed to enable the clock\n");
+		goto err_out;
+	}
+
+	/* Only initialize the timing once */
+	if (this->flags & GPMI_TIMING_INIT_OK)
+		return;
+	this->flags |= GPMI_TIMING_INIT_OK;
+
+	if (this->flags & GPMI_ASYNC_EDO_ENABLED)
+		gpmi_compute_edo_timing(this, &hw);
+	else
+		gpmi_nfc_compute_hardware_timing(this, &hw);
+
+	/* [1] Set HW_GPMI_TIMING0 */
+	reg = BF_GPMI_TIMING0_ADDRESS_SETUP(hw.address_setup_in_cycles) |
+		BF_GPMI_TIMING0_DATA_HOLD(hw.data_hold_in_cycles)         |
+		BF_GPMI_TIMING0_DATA_SETUP(hw.data_setup_in_cycles);
+
+	writel(reg, gpmi_regs + HW_GPMI_TIMING0);
+
+	/* [2] Set HW_GPMI_TIMING1 */
+	writel(BF_GPMI_TIMING1_BUSY_TIMEOUT(hw.device_busy_timeout),
+		gpmi_regs + HW_GPMI_TIMING1);
+
+	/* [3] The following code is to set the HW_GPMI_CTRL1. */
+
+	/* Set the WRN_DLY_SEL */
+	writel(BM_GPMI_CTRL1_WRN_DLY_SEL, gpmi_regs + HW_GPMI_CTRL1_CLR);
+	writel(BF_GPMI_CTRL1_WRN_DLY_SEL(hw.wrn_dly_sel),
+					gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* DLL_ENABLE must be set to 0 when setting RDN_DELAY or HALF_PERIOD. */
+	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+	/* Clear out the DLL control fields. */
+	reg = BM_GPMI_CTRL1_RDN_DELAY | BM_GPMI_CTRL1_HALF_PERIOD;
+	writel(reg, gpmi_regs + HW_GPMI_CTRL1_CLR);
+
+	/* If no sample delay is called for, return immediately. */
+	if (!hw.sample_delay_factor)
+		return;
+
+	/* Set RDN_DELAY or HALF_PERIOD. */
+	reg = ((hw.use_half_periods) ? BM_GPMI_CTRL1_HALF_PERIOD : 0)
+		| BF_GPMI_CTRL1_RDN_DELAY(hw.sample_delay_factor);
+
+	writel(reg, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/* At last, we enable the DLL. */
+	writel(BM_GPMI_CTRL1_DLL_ENABLE, gpmi_regs + HW_GPMI_CTRL1_SET);
+
+	/*
+	 * After we enable the GPMI DLL, we have to wait 64 clock cycles before
+	 * we can use the GPMI. Calculate the amount of time we need to wait,
+	 * in microseconds.
+	 */
+	clock_period_in_ns = NSEC_PER_SEC / clk_get_rate(r->clock[0]);
+	dll_wait_time_in_us = (clock_period_in_ns * 64) / 1000;
+
+	if (!dll_wait_time_in_us)
+		dll_wait_time_in_us = 1;
+
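+	/*
+	 * For example, at a 100MHz GPMI clock the period is 10ns, so 64 cycles
+	 * is 640ns; the integer division above yields 0us, which the check
+	 * above then bumps to 1us.
+	 */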
+	/* Wait for the DLL to settle. */
+	udelay(dll_wait_time_in_us);
+
+err_out:
+	return;
+}
+
+void gpmi_end(struct gpmi_nand_data *this)
+{
+	gpmi_disable_clk(this);
+}
+
+/* Clears a BCH interrupt. */
+void gpmi_clear_bch(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
+}
+
+/* Returns the Ready/Busy status of the given chip. */
+int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
+{
+	struct resources *r = &this->resources;
+	uint32_t mask = 0;
+	uint32_t reg = 0;
+
+	if (GPMI_IS_MX23(this)) {
+		mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
+		reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
+	} else if (GPMI_IS_MX28(this) || GPMI_IS_MX6(this)) {
+		/*
+		 * In the imx6, all the ready/busy pins are bound
+		 * together. So we only need to check chip 0.
+		 */
+		if (GPMI_IS_MX6(this))
+			chip = 0;
+
+		/* MX28 shares the same R/B register as MX6Q. */
+		mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
+		reg = readl(r->gpmi_regs + HW_GPMI_STAT);
+	} else
+		dev_err(this->dev, "unknown arch.\n");
+	return reg & mask;
+}
+
+static inline void set_dma_type(struct gpmi_nand_data *this,
+					enum dma_ops_type type)
+{
+	this->last_dma_type = this->dma_type;
+	this->dma_type = type;
+}
+
+int gpmi_send_command(struct gpmi_nand_data *this)
+{
+	struct dma_chan *channel = get_dma_chan(this);
+	struct dma_async_tx_descriptor *desc;
+	struct scatterlist *sgl;
+	int chip = this->current_chip;
+	u32 pio[3];
+
+	/* [1] send out the PIO words */
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
+		| BM_GPMI_CTRL0_ADDRESS_INCREMENT
+		| BF_GPMI_CTRL0_XFER_COUNT(this->command_length);
+	pio[1] = pio[2] = 0;
+	desc = dmaengine_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+	if (!desc)
+		return -EINVAL;
+
+	/* [2] send out the COMMAND + ADDRESS string stored in @buffer */
+	sgl = &this->cmd_sgl;
+
+	sg_init_one(sgl, this->cmd_buffer, this->command_length);
+	dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
+	desc = dmaengine_prep_slave_sg(channel,
+				sgl, 1, DMA_MEM_TO_DEV,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -EINVAL;
+
+	/* [3] submit the DMA */
+	set_dma_type(this, DMA_FOR_COMMAND);
+	return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_data(struct gpmi_nand_data *this)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	uint32_t command_mode;
+	uint32_t address;
+	u32 pio[2];
+
+	/* [1] PIO */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+	pio[1] = 0;
+	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+	if (!desc)
+		return -EINVAL;
+
+	/* [2] send DMA request */
+	prepare_data_dma(this, DMA_TO_DEVICE);
+	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+					1, DMA_MEM_TO_DEV,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -EINVAL;
+
+	/* [3] submit the DMA */
+	set_dma_type(this, DMA_FOR_WRITE_DATA);
+	return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_read_data(struct gpmi_nand_data *this)
+{
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	u32 pio[2];
+
+	/* [1] : send PIO */
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
+		| BF_GPMI_CTRL0_XFER_COUNT(this->upper_len);
+	pio[1] = 0;
+	desc = dmaengine_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
+	if (!desc)
+		return -EINVAL;
+
+	/* [2] : send DMA request */
+	prepare_data_dma(this, DMA_FROM_DEVICE);
+	desc = dmaengine_prep_slave_sg(channel, &this->data_sgl,
+					1, DMA_DEV_TO_MEM,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -EINVAL;
+
+	/* [3] : submit the DMA */
+	set_dma_type(this, DMA_FOR_READ_DATA);
+	return start_dma_without_bch_irq(this, desc);
+}
+
+int gpmi_send_page(struct gpmi_nand_data *this,
+			dma_addr_t payload, dma_addr_t auxiliary)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	uint32_t command_mode;
+	uint32_t address;
+	uint32_t ecc_command;
+	uint32_t buffer_mask;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	u32 pio[6];
+
+	/* A DMA descriptor that does an ECC page write. */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WRITE;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE;
+	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
+				BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(0);
+	pio[1] = 0;
+	pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
+		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+	pio[3] = geo->page_size;
+	pio[4] = payload;
+	pio[5] = auxiliary;
+
+	desc = dmaengine_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_TRANS_NONE,
+					DMA_CTRL_ACK);
+	if (!desc)
+		return -EINVAL;
+
+	set_dma_type(this, DMA_FOR_WRITE_ECC_PAGE);
+	return start_dma_with_bch_irq(this, desc);
+}
+
+int gpmi_read_page(struct gpmi_nand_data *this,
+				dma_addr_t payload, dma_addr_t auxiliary)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	uint32_t command_mode;
+	uint32_t address;
+	uint32_t ecc_command;
+	uint32_t buffer_mask;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *channel = get_dma_chan(this);
+	int chip = this->current_chip;
+	u32 pio[6];
+
+	/* [1] Wait for the chip to report ready. */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(0);
+	pio[1] = 0;
+	desc = dmaengine_prep_slave_sg(channel,
+				(struct scatterlist *)pio, 2,
+				DMA_TRANS_NONE, 0);
+	if (!desc)
+		return -EINVAL;
+
+	/* [2] Enable the BCH block and read. */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__READ;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+	ecc_command  = BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE;
+	buffer_mask  = BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
+			| BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY;
+
+	pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+
+	pio[1] = 0;
+	pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
+		| BF_GPMI_ECCCTRL_ECC_CMD(ecc_command)
+		| BF_GPMI_ECCCTRL_BUFFER_MASK(buffer_mask);
+	pio[3] = geo->page_size;
+	pio[4] = payload;
+	pio[5] = auxiliary;
+	desc = dmaengine_prep_slave_sg(channel,
+					(struct scatterlist *)pio,
+					ARRAY_SIZE(pio), DMA_TRANS_NONE,
+					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -EINVAL;
+
+	/* [3] Disable the BCH block */
+	command_mode = BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY;
+	address      = BV_GPMI_CTRL0_ADDRESS__NAND_DATA;
+
+	pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(command_mode)
+		| BM_GPMI_CTRL0_WORD_LENGTH
+		| BF_GPMI_CTRL0_CS(chip, this)
+		| BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
+		| BF_GPMI_CTRL0_ADDRESS(address)
+		| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
+	pio[1] = 0;
+	pio[2] = 0; /* clear GPMI_HW_GPMI_ECCCTRL, disable the BCH. */
+	desc = dmaengine_prep_slave_sg(channel,
+				(struct scatterlist *)pio, 3,
+				DMA_TRANS_NONE,
+				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!desc)
+		return -EINVAL;
+
+	/* [4] submit the DMA */
+	set_dma_type(this, DMA_FOR_READ_ECC_PAGE);
+	return start_dma_with_bch_irq(this, desc);
+}
+
+/**
+ * gpmi_copy_bits - copy bits from one memory region to another
+ * @dst: destination buffer
+ * @dst_bit_off: bit offset we're starting to write at
+ * @src: source buffer
+ * @src_bit_off: bit offset we're starting to read from
+ * @nbits: number of bits to copy
+ *
+ * This function copies bits from one memory region to another, and is used by
+ * the GPMI driver to copy ECC sections which are not guaranteed to be byte
+ * aligned.
+ *
+ * src and dst should not overlap.
+ *
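+ * A usage sketch with hypothetical buffers: gpmi_copy_bits(dst, 6, src, 3, 10)
+ * reads 10 bits starting at bit 3 of src and writes them starting at bit 6
+ * of dst, leaving the remaining bits of dst untouched.
+ *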
+ */
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+		    const u8 *src, size_t src_bit_off,
+		    size_t nbits)
+{
+	size_t i;
+	size_t nbytes;
+	u32 src_buffer = 0;
+	size_t bits_in_src_buffer = 0;
+
+	if (!nbits)
+		return;
+
+	/*
+	 * Move src and dst pointers to the closest byte pointer and store bit
+	 * offsets within a byte.
+	 */
+	src += src_bit_off / 8;
+	src_bit_off %= 8;
+
+	dst += dst_bit_off / 8;
+	dst_bit_off %= 8;
+
+	/*
+	 * Initialize the src_buffer value with bits available in the first
+	 * byte of data so that we end up with a byte aligned src pointer.
+	 */
+	if (src_bit_off) {
+		src_buffer = src[0] >> src_bit_off;
+		if (nbits >= (8 - src_bit_off)) {
+			bits_in_src_buffer += 8 - src_bit_off;
+		} else {
+			src_buffer &= GENMASK(nbits - 1, 0);
+			bits_in_src_buffer += nbits;
+		}
+		nbits -= bits_in_src_buffer;
+		src++;
+	}
+
+	/* Calculate the number of bytes that can be copied from src to dst. */
+	nbytes = nbits / 8;
+
+	/* Try to align dst to a byte boundary. */
+	if (dst_bit_off) {
+		if (bits_in_src_buffer < (8 - dst_bit_off) && nbytes) {
+			src_buffer |= src[0] << bits_in_src_buffer;
+			bits_in_src_buffer += 8;
+			src++;
+			nbytes--;
+		}
+
+		if (bits_in_src_buffer >= (8 - dst_bit_off)) {
+			dst[0] &= GENMASK(dst_bit_off - 1, 0);
+			dst[0] |= src_buffer << dst_bit_off;
+			src_buffer >>= (8 - dst_bit_off);
+			bits_in_src_buffer -= (8 - dst_bit_off);
+			dst_bit_off = 0;
+			dst++;
+			if (bits_in_src_buffer > 7) {
+				bits_in_src_buffer -= 8;
+				dst[0] = src_buffer;
+				dst++;
+				src_buffer >>= 8;
+			}
+		}
+	}
+
+	if (!bits_in_src_buffer && !dst_bit_off) {
+		/*
+		 * Both src and dst pointers are byte aligned, thus we can
+		 * just use the optimized memcpy function.
+		 */
+		if (nbytes)
+			memcpy(dst, src, nbytes);
+	} else {
+		/*
+		 * src buffer is not byte aligned, hence we have to copy each
+		 * src byte to the src_buffer variable before extracting a byte
+		 * to store in dst.
+		 */
+		for (i = 0; i < nbytes; i++) {
+			src_buffer |= src[i] << bits_in_src_buffer;
+			dst[i] = src_buffer;
+			src_buffer >>= 8;
+		}
+	}
+	/* Update dst and src pointers */
+	dst += nbytes;
+	src += nbytes;
+
+	/*
+	 * nbits is the number of remaining bits. It should not exceed 8 as
+	 * we've already copied as many bytes as possible.
+	 */
+	nbits %= 8;
+
+	/*
+	 * If there are no more bits to copy to the destination and the src buffer
+	 * was already byte aligned, then we're done.
+	 */
+	if (!nbits && !bits_in_src_buffer)
+		return;
+
+	/* Copy the remaining bits to src_buffer */
+	if (nbits)
+		src_buffer |= (*src & GENMASK(nbits - 1, 0)) <<
+			      bits_in_src_buffer;
+	bits_in_src_buffer += nbits;
+
+	/*
+	 * In case there were not enough bits to get a byte aligned dst buffer
+	 * prepare the src_buffer variable to match the dst organization (shift
+	 * src_buffer by dst_bit_off and retrieve the least significant bits
+	 * from dst).
+	 */
+	if (dst_bit_off)
+		src_buffer = (src_buffer << dst_bit_off) |
+			     (*dst & GENMASK(dst_bit_off - 1, 0));
+	bits_in_src_buffer += dst_bit_off;
+
+	/*
+	 * Keep most significant bits from dst if we end up with an unaligned
+	 * number of bits.
+	 */
+	nbytes = bits_in_src_buffer / 8;
+	if (bits_in_src_buffer % 8) {
+		src_buffer |= (dst[nbytes] &
+			       GENMASK(7, bits_in_src_buffer % 8)) <<
+			      (nbytes * 8);
+		nbytes++;
+	}
+
+	/* Copy the remaining bytes to dst */
+	for (i = 0; i < nbytes; i++) {
+		dst[i] = src_buffer;
+		src_buffer >>= 8;
+	}
+}
diff --git a/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.c
new file mode 100644
index 000000000000..6c062b8251d2
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.c
@@ -0,0 +1,2193 @@ 
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include "gpmi-nand.h"
+#include "bch-regs.h"
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
+
+/* add our own bbt descriptor */
+static uint8_t scan_ff_pattern[] = { 0xff };
+static struct nand_bbt_descr gpmi_bbt_descr = {
+	.options	= 0,
+	.offs		= 0,
+	.len		= 1,
+	.pattern	= scan_ff_pattern
+};
+
+/*
+ * We may change the layout if we can get the ECC info from the datasheet,
+ * else we will use all the (page + OOB).
+ */
+static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *geo = &this->bch_geometry;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = geo->page_size - mtd->writesize;
+
+	return 0;
+}
+
+static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *geo = &this->bch_geometry;
+
+	if (section)
+		return -ERANGE;
+
+	/* The available oob size we have. */
+	if (geo->page_size < mtd->writesize + mtd->oobsize) {
+		oobregion->offset = geo->page_size - mtd->writesize;
+		oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
+	.ecc = gpmi_ooblayout_ecc,
+	.free = gpmi_ooblayout_free,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx23 = {
+	.type = IS_MX23,
+	.bch_max_ecc_strength = 20,
+	.max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx28 = {
+	.type = IS_MX28,
+	.bch_max_ecc_strength = 20,
+	.max_chain_delay = 16,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6q = {
+	.type = IS_MX6Q,
+	.bch_max_ecc_strength = 40,
+	.max_chain_delay = 12,
+};
+
+static const struct gpmi_devdata gpmi_devdata_imx6sx = {
+	.type = IS_MX6SX,
+	.bch_max_ecc_strength = 62,
+	.max_chain_delay = 12,
+};
+
+static irqreturn_t bch_irq(int irq, void *cookie)
+{
+	struct gpmi_nand_data *this = cookie;
+
+	gpmi_clear_bch(this);
+	complete(&this->bch_done);
+	return IRQ_HANDLED;
+}
+
+/*
+ *  Calculate the ECC strength by hand:
+ *	E : The ECC strength.
+ *	G : the length of Galois Field.
+ *	N : The ECC chunk count per page.
+ *	O : the oobsize of the NAND chip.
+ *	M : the metadata size per page.
+ *
+ *	The formula is :
+ *		E * G * N
+ *	      ------------ <= (O - M)
+ *                  8
+ *
+ *      So, we get E by:
+ *                    (O - M) * 8
+ *              E <= -------------
+ *                       G * N
+ */
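+/*
+ * A worked example with an assumed geometry: for a NAND chip with a 224-byte
+ * OOB, 10 bytes of metadata, GF13 and eight 512-byte chunks,
+ * E <= (224 - 10) * 8 / (13 * 8) = 16.4..., which get_ecc_strength() rounds
+ * down to the even value 16.
+ */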
+static inline int get_ecc_strength(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct mtd_info	*mtd = nand_to_mtd(&this->nand);
+	int ecc_strength;
+
+	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
+			/ (geo->gf_len * geo->ecc_chunk_count);
+
+	/* Round the ECC strength down to the nearest even number. */
+	return round_down(ecc_strength, 2);
+}
+
+static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+
+	/* Do the sanity check. */
+	if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
+		/* The mx23/mx28 only support the GF13. */
+		if (geo->gf_len == 14)
+			return false;
+	}
+	return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
+}
+
+/*
+ * If we can get the ECC information from the nand chip, we do not
+ * need to calculate it ourselves.
+ *
+ * We may have available oob space in this case.
+ */
+static int set_geometry_by_ecc_info(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int block_mark_bit_offset;
+
+	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
+		return -EINVAL;
+
+	switch (chip->ecc_step_ds) {
+	case SZ_512:
+		geo->gf_len = 13;
+		break;
+	case SZ_1K:
+		geo->gf_len = 14;
+		break;
+	default:
+		dev_err(this->dev,
+			"unsupported nand chip. ecc bits : %d, ecc size : %d\n",
+			chip->ecc_strength_ds, chip->ecc_step_ds);
+		return -EINVAL;
+	}
+	geo->ecc_chunk_size = chip->ecc_step_ds;
+	geo->ecc_strength = round_up(chip->ecc_strength_ds, 2);
+	if (!gpmi_check_ecc(this))
+		return -EINVAL;
+
+	/* Keep the C >= O */
+	if (geo->ecc_chunk_size < mtd->oobsize) {
+		dev_err(this->dev,
+			"unsupported nand chip. ecc size: %d, oob size : %d\n",
+			chip->ecc_step_ds, mtd->oobsize);
+		return -EINVAL;
+	}
+
+	/* The default value, see comment in the legacy_set_geometry(). */
+	geo->metadata_size = 10;
+
+	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+	/*
+	 * A NAND chip with a 2K page (512-byte data chunks) is shown below:
+	 *
+	 *    |                          P                            |
+	 *    |<----------------------------------------------------->|
+	 *    |                                                       |
+	 *    |                                        (Block Mark)   |
+	 *    |                      P'                      |      | |     |
+	 *    |<-------------------------------------------->|  D   | |  O' |
+	 *    |                                              |<---->| |<--->|
+	 *    V                                              V      V V     V
+	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
+	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
+	 *    +---+----------+-+----------+-+----------+-+----------+-+-----+
+	 *                                                   ^              ^
+	 *                                                   |      O       |
+	 *                                                   |<------------>|
+	 *                                                   |              |
+	 *
+	 *	P : the page size for BCH module.
+	 *	E : The ECC strength.
+	 *	G : the length of Galois Field.
+	 *	N : The ECC chunk count per page.
+	 *	M : the metadata size per page.
+	 *	C : the ecc chunk size, aka the "data" above.
+	 *	P': the nand chip's page size.
+	 *	O : the nand chip's oob size.
+	 *	O': the free oob.
+	 *
+	 *	The formula for P is :
+	 *
+	 *	            E * G * N
+	 *	       P = ------------ + P' + M
+	 *                      8
+	 *
+	 * The position of block mark moves forward in the ECC-based view
+	 * of page, and the delta is:
+	 *
+	 *                   E * G * (N - 1)
+	 *             D = (---------------- + M)
+	 *                          8
+	 *
+	 * Please see the comment in legacy_set_geometry().
+	 * With the condition C >= O, we can still get the same result.
+	 * So the bit position of the physical block mark within the ECC-based
+	 * view of the page is :
+	 *             (P' - D) * 8
+	 */
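+	/*
+	 * For example (illustrative values only): a 2K page with four 512-byte
+	 * chunks, GF13, ECC strength 8 and 10 bytes of metadata gives
+	 * P = 2048 + 10 + (13 * 8 * 4) / 8 = 2110 bytes.
+	 */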
+	geo->page_size = mtd->writesize + geo->metadata_size +
+		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+
+	geo->payload_size = mtd->writesize;
+
+	geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
+	geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
+				+ ALIGN(geo->ecc_chunk_count, 4);
+
+	if (!this->swap_block_mark)
+		return 0;
+
+	/* For bit swap. */
+	block_mark_bit_offset = mtd->writesize * 8 -
+		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+				+ geo->metadata_size * 8);
+
+	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
+	return 0;
+}
+
+static int legacy_set_geometry(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct mtd_info *mtd = nand_to_mtd(&this->nand);
+	unsigned int metadata_size;
+	unsigned int status_size;
+	unsigned int block_mark_bit_offset;
+
+	/*
+	 * The size of the metadata can be changed, though we set it to 10
+	 * bytes now. But it can't be too large, because we have to save
+	 * enough space for BCH.
+	 */
+	geo->metadata_size = 10;
+
+	/* The default for the length of Galois Field. */
+	geo->gf_len = 13;
+
+	/* The default for chunk size. */
+	geo->ecc_chunk_size = 512;
+	while (geo->ecc_chunk_size < mtd->oobsize) {
+		geo->ecc_chunk_size *= 2; /* keep C >= O */
+		geo->gf_len = 14;
+	}
+
+	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
+
+	/* We use the same ECC strength for all chunks. */
+	geo->ecc_strength = get_ecc_strength(this);
+	if (!gpmi_check_ecc(this)) {
+		dev_err(this->dev,
+			"ecc strength: %d cannot be supported by the controller (%d)\n"
+			"try to use minimum ecc strength that NAND chip required\n",
+			geo->ecc_strength,
+			this->devdata->bch_max_ecc_strength);
+		return -EINVAL;
+	}
+
+	geo->page_size = mtd->writesize + geo->metadata_size +
+		(geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
+	geo->payload_size = mtd->writesize;
+
+	/*
+	 * The auxiliary buffer contains the metadata and the ECC status. The
+	 * metadata is padded to the nearest 32-bit boundary. The ECC status
+	 * contains one byte for every ECC chunk, and is also padded to the
+	 * nearest 32-bit boundary.
+	 */
+	metadata_size = ALIGN(geo->metadata_size, 4);
+	status_size   = ALIGN(geo->ecc_chunk_count, 4);
+
+	geo->auxiliary_size = metadata_size + status_size;
+	geo->auxiliary_status_offset = metadata_size;
+
+	if (!this->swap_block_mark)
+		return 0;
+
+	/*
+	 * We need to compute the byte and bit offsets of
+	 * the physical block mark within the ECC-based view of the page.
+	 *
+	 * A NAND chip with a 2K page is shown below:
+	 *                                             (Block Mark)
+	 *                                                   |      |
+	 *                                                   |  D   |
+	 *                                                   |<---->|
+	 *                                                   V      V
+	 *    +---+----------+-+----------+-+----------+-+----------+-+
+	 *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
+	 *    +---+----------+-+----------+-+----------+-+----------+-+
+	 *
+	 * The position of block mark moves forward in the ECC-based view
+	 * of page, and the delta is:
+	 *
+	 *                   E * G * (N - 1)
+	 *             D = (---------------- + M)
+	 *                          8
+	 *
+	 * With the formula to compute the ECC strength, and the condition
+	 *       : C >= O         (C is the ecc chunk size)
+	 *
+	 * It's easy to deduce the following result:
+	 *
+	 *         E * G       (O - M)      C - M         C - M
+	 *      ----------- <= ------- <=  --------  <  ---------
+	 *           8            N           N          (N - 1)
+	 *
+	 *  So, we get:
+	 *
+	 *                   E * G * (N - 1)
+	 *             D = (---------------- + M) < C
+	 *                          8
+	 *
+	 *  The above inequality means the position of block mark
+	 *  within the ECC-based view of the page is still in the data chunk,
+	 *  and it's NOT in the ECC bits of the chunk.
+	 *
+	 *  Use the following to compute the bit position of the
+	 *  physical block mark within the ECC-based view of the page:
+	 *          (page_size - D) * 8
+	 *
+	 *  --Huang Shijie
+	 */
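+	/*
+	 * Example with an assumed geometry: for a 2K page with four 512-byte
+	 * chunks, ECC strength 8, GF13 and 10 bytes of metadata,
+	 * block_mark_bit_offset = 2048 * 8 - (8 * 13 * 3 + 10 * 8) = 15992,
+	 * i.e. byte offset 1999, bit offset 0.
+	 */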
+	block_mark_bit_offset = mtd->writesize * 8 -
+		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
+				+ geo->metadata_size * 8);
+
+	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
+	geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
+	return 0;
+}
+
+int common_nfc_set_geometry(struct gpmi_nand_data *this)
+{
+	if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
+				|| legacy_set_geometry(this))
+		return set_geometry_by_ecc_info(this);
+
+	return 0;
+}
+
+struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
+{
+	/* We use the DMA channel 0 to access all the nand chips. */
+	return this->dma_chans[0];
+}
+
+/* Can we use the upper layer's buffer directly for DMA? */
+void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr)
+{
+	struct scatterlist *sgl = &this->data_sgl;
+	int ret;
+
+	/* first try to map the upper buffer directly */
+	if (virt_addr_valid(this->upper_buf) &&
+		!object_is_on_stack(this->upper_buf)) {
+		sg_init_one(sgl, this->upper_buf, this->upper_len);
+		ret = dma_map_sg(this->dev, sgl, 1, dr);
+		if (ret == 0)
+			goto map_fail;
+
+		this->direct_dma_map_ok = true;
+		return;
+	}
+
+map_fail:
+	/* We have to use our own DMA buffer. */
+	sg_init_one(sgl, this->data_buffer_dma, this->upper_len);
+
+	if (dr == DMA_TO_DEVICE)
+		memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len);
+
+	dma_map_sg(this->dev, sgl, 1, dr);
+
+	this->direct_dma_map_ok = false;
+}
+
+/* This will be called after the DMA operation is finished. */
+static void dma_irq_callback(void *param)
+{
+	struct gpmi_nand_data *this = param;
+	struct completion *dma_c = &this->dma_done;
+
+	switch (this->dma_type) {
+	case DMA_FOR_COMMAND:
+		dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE);
+		break;
+
+	case DMA_FOR_READ_DATA:
+		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE);
+		if (this->direct_dma_map_ok == false)
+			memcpy(this->upper_buf, this->data_buffer_dma,
+				this->upper_len);
+		break;
+
+	case DMA_FOR_WRITE_DATA:
+		dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE);
+		break;
+
+	case DMA_FOR_READ_ECC_PAGE:
+	case DMA_FOR_WRITE_ECC_PAGE:
+		/* We have to wait for the BCH interrupt to finish. */
+		break;
+
+	default:
+		dev_err(this->dev, "in wrong DMA operation.\n");
+	}
+
+	complete(dma_c);
+}
+
+int start_dma_without_bch_irq(struct gpmi_nand_data *this,
+				struct dma_async_tx_descriptor *desc)
+{
+	struct completion *dma_c = &this->dma_done;
+	unsigned long timeout;
+
+	init_completion(dma_c);
+
+	desc->callback		= dma_irq_callback;
+	desc->callback_param	= this;
+	dmaengine_submit(desc);
+	dma_async_issue_pending(get_dma_chan(this));
+
+	/* Wait for the interrupt from the DMA block. */
+	timeout = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000));
+	if (!timeout) {
+		dev_err(this->dev, "DMA timeout, last DMA :%d\n",
+			this->last_dma_type);
+		gpmi_dump_info(this);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+/*
+ * This function is used when reading or writing pages with BCH.
+ * It will wait for the BCH interrupt for at most ONE second.
+ * Actually, we must wait for two interrupts :
+ *	[1] firstly the DMA interrupt and
+ *	[2] secondly the BCH interrupt.
+ */
+int start_dma_with_bch_irq(struct gpmi_nand_data *this,
+			struct dma_async_tx_descriptor *desc)
+{
+	struct completion *bch_c = &this->bch_done;
+	unsigned long timeout;
+
+	/* Prepare to receive an interrupt from the BCH block. */
+	init_completion(bch_c);
+
+	/* start the DMA */
+	start_dma_without_bch_irq(this, desc);
+
+	/* Wait for the interrupt from the BCH block. */
+	timeout = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000));
+	if (!timeout) {
+		dev_err(this->dev, "BCH timeout, last DMA :%d\n",
+			this->last_dma_type);
+		gpmi_dump_info(this);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static int acquire_register_block(struct gpmi_nand_data *this,
+				  const char *res_name)
+{
+	struct platform_device *pdev = this->pdev;
+	struct resources *res = &this->resources;
+	struct resource *r;
+	void __iomem *p;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
+	p = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
+		res->gpmi_regs = p;
+	else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
+		res->bch_regs = p;
+	else
+		dev_err(this->dev, "unknown resource name : %s\n", res_name);
+
+	return 0;
+}
+
+static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
+{
+	struct platform_device *pdev = this->pdev;
+	const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
+	struct resource *r;
+	int err;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
+	if (!r) {
+		dev_err(this->dev, "Can't get resource for %s\n", res_name);
+		return -ENODEV;
+	}
+
+	err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
+	if (err)
+		dev_err(this->dev, "error requesting BCH IRQ\n");
+
+	return err;
+}
+
+static void release_dma_channels(struct gpmi_nand_data *this)
+{
+	unsigned int i;
+	for (i = 0; i < DMA_CHANS; i++)
+		if (this->dma_chans[i]) {
+			dma_release_channel(this->dma_chans[i]);
+			this->dma_chans[i] = NULL;
+		}
+}
+
+static int acquire_dma_channels(struct gpmi_nand_data *this)
+{
+	struct platform_device *pdev = this->pdev;
+	struct dma_chan *dma_chan;
+
+	/* request dma channel */
+	dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx");
+	if (!dma_chan) {
+		dev_err(this->dev, "Failed to request DMA channel.\n");
+		goto acquire_err;
+	}
+
+	this->dma_chans[0] = dma_chan;
+	return 0;
+
+acquire_err:
+	release_dma_channels(this);
+	return -EINVAL;
+}
+
+static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = {
+	"gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
+};
+
+static int gpmi_get_clks(struct gpmi_nand_data *this)
+{
+	struct resources *r = &this->resources;
+	char **extra_clks = NULL;
+	struct clk *clk;
+	int err, i;
+
+	/* The main clock is stored in the first slot. */
+	r->clock[0] = devm_clk_get(this->dev, "gpmi_io");
+	if (IS_ERR(r->clock[0])) {
+		err = PTR_ERR(r->clock[0]);
+		goto err_clock;
+	}
+
+	/* Get extra clocks */
+	if (GPMI_IS_MX6(this))
+		extra_clks = extra_clks_for_mx6q;
+	if (!extra_clks)
+		return 0;
+
+	for (i = 1; i < GPMI_CLK_MAX; i++) {
+		if (extra_clks[i - 1] == NULL)
+			break;
+
+		clk = devm_clk_get(this->dev, extra_clks[i - 1]);
+		if (IS_ERR(clk)) {
+			err = PTR_ERR(clk);
+			goto err_clock;
+		}
+
+		r->clock[i] = clk;
+	}
+
+	if (GPMI_IS_MX6(this))
+		/*
+		 * Set the default value for the gpmi clock.
+		 *
+		 * If you want to use an ONFI nand chip in Synchronous Mode,
+		 * you should change the clock as needed.
+		 */
+		clk_set_rate(r->clock[0], 22000000);
+
+	return 0;
+
+err_clock:
+	dev_dbg(this->dev, "failed in finding the clocks.\n");
+	return err;
+}
+
+static int acquire_resources(struct gpmi_nand_data *this)
+{
+	int ret;
+
+	ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
+	if (ret)
+		goto exit_regs;
+
+	ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
+	if (ret)
+		goto exit_regs;
+
+	ret = acquire_bch_irq(this, bch_irq);
+	if (ret)
+		goto exit_regs;
+
+	ret = acquire_dma_channels(this);
+	if (ret)
+		goto exit_regs;
+
+	ret = gpmi_get_clks(this);
+	if (ret)
+		goto exit_clock;
+	return 0;
+
+exit_clock:
+	release_dma_channels(this);
+exit_regs:
+	return ret;
+}
+
+static void release_resources(struct gpmi_nand_data *this)
+{
+	release_dma_channels(this);
+}
+
+static int init_hardware(struct gpmi_nand_data *this)
+{
+	int ret;
+
+	/*
+	 * This structure contains the "safe" GPMI timing that should succeed
+	 * with any NAND Flash device
+	 * (although with less-than-optimal performance).
+	 */
+	struct nand_timing  safe_timing = {
+		.data_setup_in_ns        = 80,
+		.data_hold_in_ns         = 60,
+		.address_setup_in_ns     = 25,
+		.gpmi_sample_delay_in_ns =  6,
+		.tREA_in_ns              = -1,
+		.tRLOH_in_ns             = -1,
+		.tRHOH_in_ns             = -1,
+	};
+
+	/* Initialize the hardware. */
+	ret = gpmi_init(this);
+	if (ret)
+		return ret;
+
+	this->timing = safe_timing;
+	return 0;
+}
+
+static int read_page_prepare(struct gpmi_nand_data *this,
+			void *destination, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			void **use_virt, dma_addr_t *use_phys)
+{
+	struct device *dev = this->dev;
+
+	if (virt_addr_valid(destination)) {
+		dma_addr_t dest_phys;
+
+		dest_phys = dma_map_single(dev, destination,
+						length, DMA_FROM_DEVICE);
+		if (dma_mapping_error(dev, dest_phys)) {
+			if (alt_size < length) {
+				dev_err(dev, "Alternate buffer is too small\n");
+				return -ENOMEM;
+			}
+			goto map_failed;
+		}
+		*use_virt = destination;
+		*use_phys = dest_phys;
+		this->direct_dma_map_ok = true;
+		return 0;
+	}
+
+map_failed:
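+	/*
+	 * Fall back to the preallocated DMA-safe buffer; the data is copied
+	 * into the caller's buffer afterwards, in read_page_swap_end().
+	 */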
+	*use_virt = alt_virt;
+	*use_phys = alt_phys;
+	this->direct_dma_map_ok = false;
+	return 0;
+}
+
+static inline void read_page_end(struct gpmi_nand_data *this,
+			void *destination, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			void *used_virt, dma_addr_t used_phys)
+{
+	if (this->direct_dma_map_ok)
+		dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE);
+}
+
+static inline void read_page_swap_end(struct gpmi_nand_data *this,
+			void *destination, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			void *used_virt, dma_addr_t used_phys)
+{
+	if (!this->direct_dma_map_ok)
+		memcpy(destination, alt_virt, length);
+}
+
+static int send_page_prepare(struct gpmi_nand_data *this,
+			const void *source, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			const void **use_virt, dma_addr_t *use_phys)
+{
+	struct device *dev = this->dev;
+
+	if (virt_addr_valid(source)) {
+		dma_addr_t source_phys;
+
+		source_phys = dma_map_single(dev, (void *)source, length,
+						DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, source_phys)) {
+			if (alt_size < length) {
+				dev_err(dev, "Alternate buffer is too small\n");
+				return -ENOMEM;
+			}
+			goto map_failed;
+		}
+		*use_virt = source;
+		*use_phys = source_phys;
+		return 0;
+	}
+map_failed:
+	/*
+	 * Copy the content of the source buffer into the alternate
+	 * buffer and set up the return values accordingly.
+	 */
+	memcpy(alt_virt, source, length);
+
+	*use_virt = alt_virt;
+	*use_phys = alt_phys;
+	return 0;
+}
+
+static void send_page_end(struct gpmi_nand_data *this,
+			const void *source, unsigned length,
+			void *alt_virt, dma_addr_t alt_phys, unsigned alt_size,
+			const void *used_virt, dma_addr_t used_phys)
+{
+	struct device *dev = this->dev;
+	if (used_virt == source)
+		dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE);
+}
+
+static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
+{
+	struct device *dev = this->dev;
+
+	if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt))
+		dma_free_coherent(dev, this->page_buffer_size,
+					this->page_buffer_virt,
+					this->page_buffer_phys);
+	kfree(this->cmd_buffer);
+	kfree(this->data_buffer_dma);
+	kfree(this->raw_buffer);
+
+	this->cmd_buffer	= NULL;
+	this->data_buffer_dma	= NULL;
+	this->raw_buffer	= NULL;
+	this->page_buffer_virt	= NULL;
+	this->page_buffer_size	=  0;
+}
+
+/* Allocate the DMA buffers */
+static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
+{
+	struct bch_geometry *geo = &this->bch_geometry;
+	struct device *dev = this->dev;
+	struct mtd_info *mtd = nand_to_mtd(&this->nand);
+
+	/* [1] Allocate a command buffer. PAGE_SIZE is enough. */
+	this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL);
+	if (this->cmd_buffer == NULL)
+		goto error_alloc;
+
+	/*
+	 * [2] Allocate a read/write data buffer.
+	 *     gpmi_alloc_dma_buffer() can be called twice: when it is called
+	 *     before nand_scan_ident() we allocate a PAGE_SIZE buffer, and
+	 *     when it is called again afterwards we allocate a buffer of the
+	 *     real NAND page size.
+	 */
+	this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
+					GFP_DMA | GFP_KERNEL);
+	if (this->data_buffer_dma == NULL)
+		goto error_alloc;
+
+	/*
+	 * [3] Allocate the page buffer.
+	 *
+	 * Both the payload buffer and the auxiliary buffer must appear on
+	 * 32-bit boundaries. We presume the size of the payload buffer is a
+	 * power of two and is much larger than four, which guarantees the
+	 * auxiliary buffer will appear on a 32-bit boundary.
+	 */
+	this->page_buffer_size = geo->payload_size + geo->auxiliary_size;
+	this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size,
+					&this->page_buffer_phys, GFP_DMA);
+	if (!this->page_buffer_virt)
+		goto error_alloc;
+
+	this->raw_buffer = kzalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+	if (!this->raw_buffer)
+		goto error_alloc;
+
+	/* Slice up the page buffer. */
+	this->payload_virt = this->page_buffer_virt;
+	this->payload_phys = this->page_buffer_phys;
+	this->auxiliary_virt = this->payload_virt + geo->payload_size;
+	this->auxiliary_phys = this->payload_phys + geo->payload_size;
+	return 0;
+
+error_alloc:
+	gpmi_free_dma_buffer(this);
+	return -ENOMEM;
+}
+
+static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	int ret;
+
+	/*
+	 * Every operation begins with a command byte and a series of zero or
+	 * more address bytes. These are distinguished by either the Address
+	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
+	 * asserted. When MTD is ready to execute the command, it will deassert
+	 * both latch enables.
+	 *
+	 * Rather than run a separate DMA operation for every single byte, we
+	 * queue them up and run a single DMA operation for the entire series
+	 * of command and data bytes. NAND_CMD_NONE means the END of the queue.
+	 */
+	if ((ctrl & (NAND_ALE | NAND_CLE))) {
+		if (data != NAND_CMD_NONE)
+			this->cmd_buffer[this->command_length++] = data;
+		return;
+	}
+
+	if (!this->command_length)
+		return;
+
+	ret = gpmi_send_command(this);
+	if (ret)
+		dev_err(this->dev, "Chip: %u, Error %d\n",
+			this->current_chip, ret);
+
+	this->command_length = 0;
+}
+
+static int gpmi_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+
+	return gpmi_is_ready(this, this->current_chip);
+}
+
+static void gpmi_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+
+	if ((this->current_chip < 0) && (chipnr >= 0))
+		gpmi_begin(this);
+	else if ((this->current_chip >= 0) && (chipnr < 0))
+		gpmi_end(this);
+
+	this->current_chip = chipnr;
+}
+
+static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+
+	dev_dbg(this->dev, "len is %d\n", len);
+	this->upper_buf	= buf;
+	this->upper_len	= len;
+
+	gpmi_read_data(this);
+}
+
+static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+
+	dev_dbg(this->dev, "len is %d\n", len);
+	this->upper_buf	= (uint8_t *)buf;
+	this->upper_len	= len;
+
+	gpmi_send_data(this);
+}
+
+static uint8_t gpmi_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	uint8_t *buf = this->data_buffer_dma;
+
+	gpmi_read_buf(mtd, buf, 1);
+	return buf[0];
+}
+
+/*
+ * Handles block mark swapping.
+ * It can be called either to swap the block mark or to swap it back,
+ * because the two operations are identical.
+ */
+static void block_mark_swapping(struct gpmi_nand_data *this,
+				void *payload, void *auxiliary)
+{
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	unsigned char *p;
+	unsigned char *a;
+	unsigned int  bit;
+	unsigned char mask;
+	unsigned char from_data;
+	unsigned char from_oob;
+
+	if (!this->swap_block_mark)
+		return;
+
+	/*
+	 * If control arrives here, we're swapping. Make some convenience
+	 * variables.
+	 */
+	bit = nfc_geo->block_mark_bit_offset;
+	p   = payload + nfc_geo->block_mark_byte_offset;
+	a   = auxiliary;
+
+	/*
+	 * Get the byte from the data area that overlays the block mark. Since
+	 * the ECC engine applies its own view to the bits in the page, the
+	 * physical block mark won't (in general) appear on a byte boundary in
+	 * the data.
+	 */
+	from_data = (p[0] >> bit) | (p[1] << (8 - bit));
+
+	/* Get the byte from the OOB. */
+	from_oob = a[0];
+
+	/* Swap them. */
+	a[0] = from_data;
+
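+	/*
+	 * Write the OOB byte back into the data area at the block mark
+	 * position; since that position is not byte aligned, the byte
+	 * spans p[0] and p[1].
+	 */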
+	mask = (0x1 << bit) - 1;
+	p[0] = (p[0] & mask) | (from_oob << bit);
+
+	mask = ~0 << bit;
+	p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
+}
+
+static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	void          *payload_virt;
+	dma_addr_t    payload_phys;
+	void          *auxiliary_virt;
+	dma_addr_t    auxiliary_phys;
+	unsigned int  i;
+	unsigned char *status;
+	unsigned int  max_bitflips = 0;
+	int           ret;
+
+	dev_dbg(this->dev, "page number is : %d\n", page);
+	ret = read_page_prepare(this, buf, nfc_geo->payload_size,
+					this->payload_virt, this->payload_phys,
+					nfc_geo->payload_size,
+					&payload_virt, &payload_phys);
+	if (ret) {
+		dev_err(this->dev, "Inadequate DMA buffer\n");
+		ret = -ENOMEM;
+		return ret;
+	}
+	auxiliary_virt = this->auxiliary_virt;
+	auxiliary_phys = this->auxiliary_phys;
+
+	/* go! */
+	ret = gpmi_read_page(this, payload_phys, auxiliary_phys);
+	read_page_end(this, buf, nfc_geo->payload_size,
+			this->payload_virt, this->payload_phys,
+			nfc_geo->payload_size,
+			payload_virt, payload_phys);
+	if (ret) {
+		dev_err(this->dev, "Error in ECC-based read: %d\n", ret);
+		return ret;
+	}
+
+	/* handle the block mark swapping */
+	block_mark_swapping(this, payload_virt, auxiliary_virt);
+
+	/* Loop over status bytes, accumulating ECC status. */
+	status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+
+	read_page_swap_end(this, buf, nfc_geo->payload_size,
+			   this->payload_virt, this->payload_phys,
+			   nfc_geo->payload_size,
+			   payload_virt, payload_phys);
+
+	for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
+		if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
+			continue;
+
+		if (*status == STATUS_UNCORRECTABLE) {
+			int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+			u8 *eccbuf = this->raw_buffer;
+			int offset, bitoffset;
+			int eccbytes;
+			int flips;
+
+			/* Read ECC bytes into our internal raw_buffer */
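+			/*
+			 * Bit offset where chunk i's ECC parity starts: the
+			 * metadata, plus i full (data + ECC) chunks, plus
+			 * chunk i's own data bits.
+			 */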
+			offset = nfc_geo->metadata_size * 8;
+			offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
+			offset -= eccbits;
+			bitoffset = offset % 8;
+			eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+			offset /= 8;
+			eccbytes -= offset;
+			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+			chip->read_buf(mtd, eccbuf, eccbytes);
+
+			/*
+			 * ECC data are not byte aligned and we may have
+			 * in-band data in the first and last bytes of
+			 * eccbuf. Set the non-ECC bits to one so that
+			 * nand_check_erased_ecc_chunk() does not count them
+			 * as bitflips.
+			 */
+			if (bitoffset)
+				eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+			bitoffset = (bitoffset + eccbits) % 8;
+			if (bitoffset)
+				eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+			/*
+			 * The ECC hardware reports an uncorrectable status
+			 * code when there are bitflips in an erased page. As
+			 * nothing was written into this subpage, the ECC is
+			 * obviously wrong and we cannot trust it. We assume at
+			 * this point that we are reading an erased page and
+			 * try to correct the bitflips in the buffer, up to
+			 * ecc_strength of them. If this is a page with random
+			 * data, we exceed this number of bitflips and report
+			 * an ECC failure. Otherwise we use the corrected buffer.
+			 */
+			if (i == 0) {
+				/* The first block includes metadata */
+				flips = nand_check_erased_ecc_chunk(
+						buf + i * nfc_geo->ecc_chunk_size,
+						nfc_geo->ecc_chunk_size,
+						eccbuf, eccbytes,
+						auxiliary_virt,
+						nfc_geo->metadata_size,
+						nfc_geo->ecc_strength);
+			} else {
+				flips = nand_check_erased_ecc_chunk(
+						buf + i * nfc_geo->ecc_chunk_size,
+						nfc_geo->ecc_chunk_size,
+						eccbuf, eccbytes,
+						NULL, 0,
+						nfc_geo->ecc_strength);
+			}
+
+			if (flips > 0) {
+				max_bitflips = max_t(unsigned int, max_bitflips,
+						     flips);
+				mtd->ecc_stats.corrected += flips;
+				continue;
+			}
+
+			mtd->ecc_stats.failed++;
+			continue;
+		}
+
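+		/*
+		 * For a correctable chunk, the status byte holds the number
+		 * of bitflips the BCH engine corrected.
+		 */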
+		mtd->ecc_stats.corrected += *status;
+		max_bitflips = max_t(unsigned int, max_bitflips, *status);
+	}
+
+	if (oob_required) {
+		/*
+		 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
+		 * for details about our policy for delivering the OOB.
+		 *
+		 * We fill the caller's buffer with set bits, and then copy the
+		 * block mark to the caller's buffer. Note that, if block mark
+		 * swapping was necessary, it has already been done, so we can
+		 * rely on the first byte of the auxiliary buffer to contain
+		 * the block mark.
+		 */
+		memset(chip->oob_poi, ~0, mtd->oobsize);
+		chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
+	}
+
+	return max_bitflips;
+}
+
+/* Fake a virtual small page for the subpage read */
+static int gpmi_ecc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+			uint32_t offs, uint32_t len, uint8_t *buf, int page)
+{
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	void __iomem *bch_regs = this->resources.bch_regs;
+	struct bch_geometry old_geo = this->bch_geometry;
+	struct bch_geometry *geo = &this->bch_geometry;
+	int size = chip->ecc.size; /* ECC chunk size */
+	int meta, n, page_size;
+	u32 r1_old, r2_old, r1_new, r2_new;
+	unsigned int max_bitflips;
+	int first, last, marker_pos;
+	int ecc_parity_size;
+	int col = 0;
+	int old_swap_block_mark = this->swap_block_mark;
+
+	/* The size of ECC parity */
+	ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
+
+	/* Compute the first and last ECC chunks covered by the requested range */
+	first = offs / size;
+	last = (offs + len - 1) / size;
+
+	if (this->swap_block_mark) {
+		/*
+		 * Find the chunk that contains the Block Marker.
+		 * If this chunk falls within the range [first, last],
+		 * we have to read out the whole page, because the data
+		 * at the Block Marker position was swapped into the
+		 * metadata, which belongs to chunk 0.
+		 */
+		marker_pos = geo->block_mark_byte_offset / size;
+		if (last >= marker_pos && first <= marker_pos) {
+			dev_dbg(this->dev,
+				"page:%d, first:%d, last:%d, marker at:%d\n",
+				page, first, last, marker_pos);
+			return gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+		}
+	}
+
+	meta = geo->metadata_size;
+	if (first) {
+		col = meta + (size + ecc_parity_size) * first;
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, col, -1);
+
+		meta = 0;
+		buf = buf + first * size;
+	}
+
+	/* Save the old environment */
+	r1_old = r1_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT0);
+	r2_old = r2_new = readl(bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+	/* change the BCH registers and bch_geometry{} */
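+	/*
+	 * The virtual page consists of n ECC chunks starting at chunk
+	 * 'first', with metadata included only when chunk 0 is part of
+	 * the read.
+	 */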
+	n = last - first + 1;
+	page_size = meta + (size + ecc_parity_size) * n;
+
+	r1_new &= ~(BM_BCH_FLASH0LAYOUT0_NBLOCKS |
+			BM_BCH_FLASH0LAYOUT0_META_SIZE);
+	r1_new |= BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1)
+			| BF_BCH_FLASH0LAYOUT0_META_SIZE(meta);
+	writel(r1_new, bch_regs + HW_BCH_FLASH0LAYOUT0);
+
+	r2_new &= ~BM_BCH_FLASH0LAYOUT1_PAGE_SIZE;
+	r2_new |= BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size);
+	writel(r2_new, bch_regs + HW_BCH_FLASH0LAYOUT1);
+
+	geo->ecc_chunk_count = n;
+	geo->payload_size = n * size;
+	geo->page_size = page_size;
+	geo->auxiliary_status_offset = ALIGN(meta, 4);
+
+	dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
+		page, offs, len, col, first, n, page_size);
+
+	/* Read the subpage now */
+	this->swap_block_mark = false;
+	max_bitflips = gpmi_ecc_read_page(mtd, chip, buf, 0, page);
+
+	/* Restore */
+	writel(r1_old, bch_regs + HW_BCH_FLASH0LAYOUT0);
+	writel(r2_old, bch_regs + HW_BCH_FLASH0LAYOUT1);
+	this->bch_geometry = old_geo;
+	this->swap_block_mark = old_swap_block_mark;
+
+	return max_bitflips;
+}
+
+static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const uint8_t *buf, int oob_required, int page)
+{
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	const void *payload_virt;
+	dma_addr_t payload_phys;
+	const void *auxiliary_virt;
+	dma_addr_t auxiliary_phys;
+	int        ret;
+
+	dev_dbg(this->dev, "ecc write page.\n");
+	if (this->swap_block_mark) {
+		/*
+		 * If control arrives here, we're doing block mark swapping.
+		 * Since we can't modify the caller's buffers, we must copy them
+		 * into our own.
+		 */
+		memcpy(this->payload_virt, buf, mtd->writesize);
+		payload_virt = this->payload_virt;
+		payload_phys = this->payload_phys;
+
+		memcpy(this->auxiliary_virt, chip->oob_poi,
+				nfc_geo->auxiliary_size);
+		auxiliary_virt = this->auxiliary_virt;
+		auxiliary_phys = this->auxiliary_phys;
+
+		/* Handle block mark swapping. */
+		block_mark_swapping(this,
+				(void *)payload_virt, (void *)auxiliary_virt);
+	} else {
+		/*
+		 * If control arrives here, we're not doing block mark swapping,
+		 * so we can try to use the caller's buffers.
+		 */
+		ret = send_page_prepare(this,
+				buf, mtd->writesize,
+				this->payload_virt, this->payload_phys,
+				nfc_geo->payload_size,
+				&payload_virt, &payload_phys);
+		if (ret) {
+			dev_err(this->dev, "Inadequate payload DMA buffer\n");
+			return 0;
+		}
+
+		ret = send_page_prepare(this,
+				chip->oob_poi, mtd->oobsize,
+				this->auxiliary_virt, this->auxiliary_phys,
+				nfc_geo->auxiliary_size,
+				&auxiliary_virt, &auxiliary_phys);
+		if (ret) {
+			dev_err(this->dev, "Inadequate auxiliary DMA buffer\n");
+			goto exit_auxiliary;
+		}
+	}
+
+	/* Ask the NFC. */
+	ret = gpmi_send_page(this, payload_phys, auxiliary_phys);
+	if (ret)
+		dev_err(this->dev, "Error in ECC-based write: %d\n", ret);
+
+	if (!this->swap_block_mark) {
+		send_page_end(this, chip->oob_poi, mtd->oobsize,
+				this->auxiliary_virt, this->auxiliary_phys,
+				nfc_geo->auxiliary_size,
+				auxiliary_virt, auxiliary_phys);
+exit_auxiliary:
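+		/*
+		 * Release the payload mapping; this point is also reached via
+		 * the goto above when preparing the auxiliary buffer fails.
+		 */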
+		send_page_end(this, buf, mtd->writesize,
+				this->payload_virt, this->payload_phys,
+				nfc_geo->payload_size,
+				payload_virt, payload_phys);
+	}
+
+	return 0;
+}
+
+/*
+ * There are several places in this driver where we have to handle the OOB and
+ * block marks. This is the function where things are the most complicated, so
+ * this is where we try to explain it all. All the other places refer back to
+ * here.
+ *
+ * These are the rules, in order of decreasing importance:
+ *
+ * 1) Nothing the caller does can be allowed to imperil the block mark.
+ *
+ * 2) In read operations, the first byte of the OOB we return must reflect the
+ *    true state of the block mark, no matter where that block mark appears in
+ *    the physical page.
+ *
+ * 3) ECC-based read operations return an OOB full of set bits (since we never
+ *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
+ *    return).
+ *
+ * 4) "Raw" read operations return a direct view of the physical bytes in the
+ *    page, using the conventional definition of which bytes are data and which
+ *    are OOB. This gives the caller a way to see the actual, physical bytes
+ *    in the page, without the distortions applied by our ECC engine.
+ *
+ *
+ * What we do for this specific read operation depends on two questions:
+ *
+ * 1) Are we doing a "raw" read, or an ECC-based read?
+ *
+ * 2) Are we using block mark swapping or transcription?
+ *
+ * There are four cases, illustrated by the following Karnaugh map:
+ *
+ *                    |           Raw           |         ECC-based       |
+ *       -------------+-------------------------+-------------------------+
+ *                    | Read the conventional   |                         |
+ *                    | OOB at the end of the   |                         |
+ *       Swapping     | page and return it. It  |                         |
+ *                    | contains exactly what   |                         |
+ *                    | we want.                | Read the block mark and |
+ *       -------------+-------------------------+ return it in a buffer   |
+ *                    | Read the conventional   | full of set bits.       |
+ *                    | OOB at the end of the   |                         |
+ *                    | page and also the block |                         |
+ *       Transcribing | mark in the metadata.   |                         |
+ *                    | Copy the block mark     |                         |
+ *                    | into the first byte of  |                         |
+ *                    | the OOB.                |                         |
+ *       -------------+-------------------------+-------------------------+
+ *
+ * Note that we break rule #4 in the Transcribing/Raw case because we're not
+ * giving an accurate view of the actual, physical bytes in the page (we're
+ * overwriting the block mark). That's OK because it's more important to follow
+ * rule #2.
+ *
+ * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
+ * easy. When reading a page, for example, the NAND Flash MTD code calls our
+ * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
+ * ECC-based or raw view of the page is implicit in which function it calls
+ * (there is a similar pair of ECC-based/raw functions for writing).
+ */
+static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+				int page)
+{
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+
+	dev_dbg(this->dev, "page number is %d\n", page);
+	/* clear the OOB buffer */
+	memset(chip->oob_poi, ~0, mtd->oobsize);
+
+	/* Read out the conventional OOB. */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/*
+	 * Now, we want to make sure the block mark is correct. In the
+	 * non-transcribing case (!GPMI_IS_MX23()), we already have it.
+	 * Otherwise, we need to explicitly read it.
+	 */
+	if (GPMI_IS_MX23(this)) {
+		/* Read the block mark into the first byte of the OOB buffer. */
+		chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+		chip->oob_poi[0] = chip->read_byte(mtd);
+	}
+
+	return 0;
+}
+
+static int
+gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+	struct mtd_oob_region of = { };
+	int status = 0;
+
+	/* Do we have an available OOB area? */
+	mtd_ooblayout_free(mtd, 0, &of);
+	if (!of.length)
+		return -EPERM;
+
+	if (!nand_is_slc(chip))
+		return -EPERM;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize + of.offset, page);
+	chip->write_buf(mtd, chip->oob_poi + of.offset, of.length);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * This function reads a NAND page without involving the ECC engine (no HW
+ * ECC correction).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with the payload data) and does not align data chunks
+ * on byte boundaries.
+ * We thus need to take care when moving the payload data and ECC bits stored
+ * in the page into the provided buffers, which is why we use gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_read_page_raw(struct mtd_info *mtd,
+				  struct nand_chip *chip, uint8_t *buf,
+				  int oob_required, int page)
+{
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	int eccsize = nfc_geo->ecc_chunk_size;
+	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+	u8 *tmp_buf = this->raw_buffer;
+	size_t src_bit_off;
+	size_t oob_bit_off;
+	size_t oob_byte_off;
+	uint8_t *oob = chip->oob_poi;
+	int step;
+
+	chip->read_buf(mtd, tmp_buf,
+		       mtd->writesize + mtd->oobsize);
+
+	/*
+	 * If required, swap the bad block marker and the data stored in the
+	 * metadata section, so that we don't wrongly consider a block as bad.
+	 *
+	 * See the layout description for a detailed explanation on why this
+	 * is needed.
+	 */
+	if (this->swap_block_mark) {
+		u8 swap = tmp_buf[0];
+
+		tmp_buf[0] = tmp_buf[mtd->writesize];
+		tmp_buf[mtd->writesize] = swap;
+	}
+
+	/*
+	 * Copy the metadata section into the oob buffer (this section is
+	 * guaranteed to be aligned on a byte boundary).
+	 */
+	if (oob_required)
+		memcpy(oob, tmp_buf, nfc_geo->metadata_size);
+
+	oob_bit_off = nfc_geo->metadata_size * 8;
+	src_bit_off = oob_bit_off;
+
+	/* Extract interleaved payload data and ECC bits */
+	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+		if (buf)
+			gpmi_copy_bits(buf, step * eccsize * 8,
+				       tmp_buf, src_bit_off,
+				       eccsize * 8);
+		src_bit_off += eccsize * 8;
+
+		/* Pad the last chunk's ECC so the trailing OOB starts byte-aligned */
+		if (step == nfc_geo->ecc_chunk_count - 1 &&
+		    (oob_bit_off + eccbits) % 8)
+			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+		if (oob_required)
+			gpmi_copy_bits(oob, oob_bit_off,
+				       tmp_buf, src_bit_off,
+				       eccbits);
+
+		src_bit_off += eccbits;
+		oob_bit_off += eccbits;
+	}
+
+	if (oob_required) {
+		oob_byte_off = oob_bit_off / 8;
+
+		if (oob_byte_off < mtd->oobsize)
+			memcpy(oob + oob_byte_off,
+			       tmp_buf + mtd->writesize + oob_byte_off,
+			       mtd->oobsize - oob_byte_off);
+	}
+
+	return 0;
+}
+
+/*
+ * This function writes a NAND page without involving the ECC engine (no HW
+ * ECC generation).
+ * The tricky part in the GPMI/BCH controller is that it stores ECC bits
+ * inline (interleaved with the payload data) and does not align data chunks
+ * on byte boundaries.
+ * We thus need to take care to place the OOB area correctly in the final
+ * page, which is why we use gpmi_copy_bits.
+ *
+ * See set_geometry_by_ecc_info inline comments to have a full description
+ * of the layout used by the GPMI controller.
+ */
+static int gpmi_ecc_write_page_raw(struct mtd_info *mtd,
+				   struct nand_chip *chip,
+				   const uint8_t *buf,
+				   int oob_required, int page)
+{
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	struct bch_geometry *nfc_geo = &this->bch_geometry;
+	int eccsize = nfc_geo->ecc_chunk_size;
+	int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+	u8 *tmp_buf = this->raw_buffer;
+	uint8_t *oob = chip->oob_poi;
+	size_t dst_bit_off;
+	size_t oob_bit_off;
+	size_t oob_byte_off;
+	int step;
+
+	/*
+	 * Initialize all bits to 1 so that, when no payload or OOB buffer is
+	 * provided, the unspecified bits keep their erased (0xff) state.
+	 */
+	if (!buf || !oob_required)
+		memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
+
+	/*
+	 * First copy the metadata section (stored in oob buffer) at the
+	 * beginning of the page, as imposed by the GPMI layout.
+	 */
+	memcpy(tmp_buf, oob, nfc_geo->metadata_size);
+	oob_bit_off = nfc_geo->metadata_size * 8;
+	dst_bit_off = oob_bit_off;
+
+	/* Interleave payload data and ECC bits */
+	for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
+		if (buf)
+			gpmi_copy_bits(tmp_buf, dst_bit_off,
+				       buf, step * eccsize * 8, eccsize * 8);
+		dst_bit_off += eccsize * 8;
+
+		/* Pad the last chunk's ECC so the trailing OOB starts byte-aligned */
+		if (step == nfc_geo->ecc_chunk_count - 1 &&
+		    (oob_bit_off + eccbits) % 8)
+			eccbits += 8 - ((oob_bit_off + eccbits) % 8);
+
+		if (oob_required)
+			gpmi_copy_bits(tmp_buf, dst_bit_off,
+				       oob, oob_bit_off, eccbits);
+
+		dst_bit_off += eccbits;
+		oob_bit_off += eccbits;
+	}
+
+	oob_byte_off = oob_bit_off / 8;
+
+	if (oob_required && oob_byte_off < mtd->oobsize)
+		memcpy(tmp_buf + mtd->writesize + oob_byte_off,
+		       oob + oob_byte_off, mtd->oobsize - oob_byte_off);
+
+	/*
+	 * If required, swap the bad block marker and the first byte of the
+	 * metadata section, so that we don't modify the bad block marker.
+	 *
+	 * See the layout description for a detailed explanation on why this
+	 * is needed.
+	 */
+	if (this->swap_block_mark) {
+		u8 swap = tmp_buf[0];
+
+		tmp_buf[0] = tmp_buf[mtd->writesize];
+		tmp_buf[mtd->writesize] = swap;
+	}
+
+	chip->write_buf(mtd, tmp_buf, mtd->writesize + mtd->oobsize);
+
+	return 0;
+}
+
+static int gpmi_ecc_read_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				 int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	return gpmi_ecc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static int gpmi_ecc_write_oob_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				 int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+	return gpmi_ecc_write_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct gpmi_nand_data *this = nand_get_controller_data(chip);
+	int ret = 0;
+	uint8_t *block_mark;
+	int column, page, status, chipnr;
+
+	chipnr = (int)(ofs >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+
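+	/*
+	 * On the i.MX23 the mark is transcribed into the metadata at column 0;
+	 * on the other SoCs it goes to the conventional OOB location.
+	 */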
+	column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
+
+	/* Write the block mark. */
+	block_mark = this->data_buffer_dma;
+	block_mark[0] = 0; /* bad block marker */
+
+	/* Shift to get page */
+	page = (int)(ofs >> chip->page_shift);
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page);
+	chip->write_buf(mtd, block_mark, 1);
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+	if (status & NAND_STATUS_FAIL)
+		ret = -EIO;
+
+	chip->select_chip(mtd, -1);
+
+	return ret;
+}
+
+static int nand_boot_set_geometry(struct gpmi_nand_data *this)
+{
+	struct boot_rom_geometry *geometry = &this->rom_geometry;
+
+	/*
+	 * Set the boot block stride size.
+	 *
+	 * In principle, we should be reading this from the OTP bits, since
+	 * that's where the ROM is going to get it. In fact, we don't have any
+	 * way to read the OTP bits, so we go with the default and hope for the
+	 * best.
+	 */
+	geometry->stride_size_in_pages = 64;
+
+	/*
+	 * Set the search area stride exponent.
+	 *
+	 * In principle, we should be reading this from the OTP bits, since
+	 * that's where the ROM is going to get it. In fact, we don't have any
+	 * way to read the OTP bits, so we go with the default and hope for the
+	 * best.
+	 */
+	geometry->search_area_stride_exponent = 2;
+	return 0;
+}
+
+static const char  *fingerprint = "STMP";
+static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
+{
+	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+	struct device *dev = this->dev;
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int search_area_size_in_strides;
+	unsigned int stride;
+	unsigned int page;
+	uint8_t *buffer = chip->buffers->databuf;
+	int saved_chip_number;
+	int found_an_ncb_fingerprint = false;
+
+	/* Compute the number of strides in a search area. */
+	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+
+	saved_chip_number = this->current_chip;
+	chip->select_chip(mtd, 0);
+
+	/*
+	 * Loop through the first search area, looking for the NCB fingerprint.
+	 */
+	dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
+
+	for (stride = 0; stride < search_area_size_in_strides; stride++) {
+		/* Compute the page addresses. */
+		page = stride * rom_geo->stride_size_in_pages;
+
+		dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
+
+		/*
+		 * Read the NCB fingerprint. The fingerprint is four bytes long
+		 * and starts in the 12th byte of the page.
+		 */
+		chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page);
+		chip->read_buf(mtd, buffer, strlen(fingerprint));
+
+		/* Look for the fingerprint. */
+		if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
+			found_an_ncb_fingerprint = true;
+			break;
+		}
+
+	}
+
+	chip->select_chip(mtd, saved_chip_number);
+
+	if (found_an_ncb_fingerprint)
+		dev_dbg(dev, "\tFound a fingerprint\n");
+	else
+		dev_dbg(dev, "\tNo fingerprint found\n");
+	return found_an_ncb_fingerprint;
+}
+
+/* Writes a transcription stamp. */
+static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
+{
+	struct device *dev = this->dev;
+	struct boot_rom_geometry *rom_geo = &this->rom_geometry;
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int block_size_in_pages;
+	unsigned int search_area_size_in_strides;
+	unsigned int search_area_size_in_pages;
+	unsigned int search_area_size_in_blocks;
+	unsigned int block;
+	unsigned int stride;
+	unsigned int page;
+	uint8_t      *buffer = chip->buffers->databuf;
+	int saved_chip_number;
+	int status;
+
+	/* Compute the search area geometry. */
+	block_size_in_pages = mtd->erasesize / mtd->writesize;
+	search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
+	search_area_size_in_pages = search_area_size_in_strides *
+					rom_geo->stride_size_in_pages;
+	search_area_size_in_blocks =
+		  (search_area_size_in_pages + (block_size_in_pages - 1)) /
+				    block_size_in_pages;
+
+	dev_dbg(dev, "Search Area Geometry :\n");
+	dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
+	dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
+	dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
+
+	/* Select chip 0. */
+	saved_chip_number = this->current_chip;
+	chip->select_chip(mtd, 0);
+
+	/* Loop over blocks in the first search area, erasing them. */
+	dev_dbg(dev, "Erasing the search area...\n");
+
+	for (block = 0; block < search_area_size_in_blocks; block++) {
+		/* Compute the page address. */
+		page = block * block_size_in_pages;
+
+		/* Erase this block. */
+		dev_dbg(dev, "\tErasing block 0x%x\n", block);
+		chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+		chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+		/* Wait for the erase to finish. */
+		status = chip->waitfunc(mtd, chip);
+		if (status & NAND_STATUS_FAIL)
+			dev_err(dev, "[%s] Erase failed.\n", __func__);
+	}
+
+	/* Write the NCB fingerprint into the page buffer. */
+	memset(buffer, ~0, mtd->writesize);
+	memcpy(buffer + 12, fingerprint, strlen(fingerprint));
+
+	/* Loop through the first search area, writing NCB fingerprints. */
+	dev_dbg(dev, "Writing NCB fingerprints...\n");
+	for (stride = 0; stride < search_area_size_in_strides; stride++) {
+		/* Compute the page addresses. */
+		page = stride * rom_geo->stride_size_in_pages;
+
+		/* Write the first page of the current stride. */
+		dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
+		chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+		chip->ecc.write_page_raw(mtd, chip, buffer, 0, page);
+		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+		/* Wait for the write to finish. */
+		status = chip->waitfunc(mtd, chip);
+		if (status & NAND_STATUS_FAIL)
+			dev_err(dev, "[%s] Write failed.\n", __func__);
+	}
+
+	/* Deselect chip 0. */
+	chip->select_chip(mtd, saved_chip_number);
+	return 0;
+}
+
+static int mx23_boot_init(struct gpmi_nand_data  *this)
+{
+	struct device *dev = this->dev;
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	unsigned int block_count;
+	unsigned int block;
+	int     chipnr;
+	int     page;
+	loff_t  byte;
+	uint8_t block_mark;
+	int     ret = 0;
+
+	/*
+	 * If control arrives here, we can't use block mark swapping, which
+	 * means we're forced to use transcription. First, scan for the
+	 * transcription stamp. If we find it, then we don't have to do
+	 * anything -- the block marks are already transcribed.
+	 */
+	if (mx23_check_transcription_stamp(this))
+		return 0;
+
+	/*
+	 * If control arrives here, we couldn't find a transcription stamp, so
+	 * we presume the block marks are in the conventional location.
+	 */
+	dev_dbg(dev, "Transcribing bad block marks...\n");
+
+	/* Compute the number of blocks in the entire medium. */
+	block_count = chip->chipsize >> chip->phys_erase_shift;
+
+	/*
+	 * Loop over all the blocks in the medium, transcribing block marks as
+	 * we go.
+	 */
+	for (block = 0; block < block_count; block++) {
+		/*
+		 * Compute the chip, page and byte addresses for this block's
+		 * conventional mark.
+		 */
+		chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
+		page = block << (chip->phys_erase_shift - chip->page_shift);
+		byte = block <<  chip->phys_erase_shift;
+
+		/* Send the command to read the conventional block mark. */
+		chip->select_chip(mtd, chipnr);
+		chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page);
+		block_mark = chip->read_byte(mtd);
+		chip->select_chip(mtd, -1);
+
+		/*
+		 * Check if the block is marked bad. If so, we need to mark it
+		 * again, but this time the result will be a mark in the
+		 * location where we transcribe block marks.
+		 */
+		if (block_mark != 0xff) {
+			dev_dbg(dev, "Transcribing mark in block %u\n", block);
+			ret = chip->block_markbad(mtd, byte);
+			if (ret)
+				dev_err(dev,
+					"Failed to mark block bad with ret %d\n",
+					ret);
+		}
+	}
+
+	/* Write the stamp that indicates we've transcribed the block marks. */
+	mx23_write_transcription_stamp(this);
+	return 0;
+}
+
+static int nand_boot_init(struct gpmi_nand_data  *this)
+{
+	nand_boot_set_geometry(this);
+
+	/* This is the ROM arch-specific initialization before the BBT scanning. */
+	if (GPMI_IS_MX23(this))
+		return mx23_boot_init(this);
+	return 0;
+}
+
+static int gpmi_set_geometry(struct gpmi_nand_data *this)
+{
+	int ret;
+
+	/* Free the temporary DMA memory for reading ID. */
+	gpmi_free_dma_buffer(this);
+
+	/* Set up the NFC geometry which is used by BCH. */
+	ret = bch_set_geometry(this);
+	if (ret) {
+		dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
+		return ret;
+	}
+
+	/* Allocate new DMA buffers according to the page size and OOB size */
+	return gpmi_alloc_dma_buffer(this);
+}
+
+static void gpmi_nand_exit(struct gpmi_nand_data *this)
+{
+	nand_release(nand_to_mtd(&this->nand));
+	gpmi_free_dma_buffer(this);
+}
+
+static int gpmi_init_last(struct gpmi_nand_data *this)
+{
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	struct bch_geometry *bch_geo = &this->bch_geometry;
+	int ret;
+
+	/* Set up the medium geometry */
+	ret = gpmi_set_geometry(this);
+	if (ret)
+		return ret;
+
+	/* Init the nand_ecc_ctrl{} */
+	ecc->read_page	= gpmi_ecc_read_page;
+	ecc->write_page	= gpmi_ecc_write_page;
+	ecc->read_oob	= gpmi_ecc_read_oob;
+	ecc->write_oob	= gpmi_ecc_write_oob;
+	ecc->read_page_raw = gpmi_ecc_read_page_raw;
+	ecc->write_page_raw = gpmi_ecc_write_page_raw;
+	ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
+	ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
+	ecc->mode	= NAND_ECC_HW;
+	ecc->size	= bch_geo->ecc_chunk_size;
+	ecc->strength	= bch_geo->ecc_strength;
+	mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
+
+	/*
+	 * We only enable the subpage read when:
+	 *  (1) the chip is imx6, and
+	 *  (2) the size of the ECC parity is byte aligned.
+	 */
+	if (GPMI_IS_MX6(this) &&
+		((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
+		ecc->read_subpage = gpmi_ecc_read_subpage;
+		chip->options |= NAND_SUBPAGE_READ;
+	}
+
+	/*
+	 * Try to enable extra features such as EDO or Sync mode.
+	 *
+	 * We do not check the return value here: if enabling the extra
+	 * features fails, the driver can still run in the normal way.
+	 */
+	gpmi_extra_init(this);
+
+	return 0;
+}
+
+static int gpmi_nand_init(struct gpmi_nand_data *this)
+{
+	struct nand_chip *chip = &this->nand;
+	struct mtd_info  *mtd = nand_to_mtd(chip);
+	int ret;
+
+	/* init current chip */
+	this->current_chip	= -1;
+
+	/* init the MTD data structures */
+	mtd->name		= "gpmi-nand";
+	mtd->dev.parent		= this->dev;
+
+	/* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */
+	nand_set_controller_data(chip, this);
+	nand_set_flash_node(chip, this->pdev->dev.of_node);
+	chip->select_chip	= gpmi_select_chip;
+	chip->cmd_ctrl		= gpmi_cmd_ctrl;
+	chip->dev_ready		= gpmi_dev_ready;
+	chip->read_byte		= gpmi_read_byte;
+	chip->read_buf		= gpmi_read_buf;
+	chip->write_buf		= gpmi_write_buf;
+	chip->badblock_pattern	= &gpmi_bbt_descr;
+	chip->block_markbad	= gpmi_block_markbad;
+	chip->options		|= NAND_NO_SUBPAGE_WRITE;
+
+	/* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
+	this->swap_block_mark = !GPMI_IS_MX23(this);
+
+	/*
+	 * Allocate a temporary DMA buffer for reading ID in the
+	 * nand_scan_ident().
+	 */
+	this->bch_geometry.payload_size = 1024;
+	this->bch_geometry.auxiliary_size = 128;
+	ret = gpmi_alloc_dma_buffer(this);
+	if (ret)
+		goto err_out;
+
+	ret = nand_scan_ident(mtd, GPMI_IS_MX6(this) ? 2 : 1, NULL);
+	if (ret)
+		goto err_out;
+
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+		chip->bbt_options |= NAND_BBT_NO_OOB;
+
+		if (of_property_read_bool(this->dev->of_node,
+						"fsl,no-blockmark-swap"))
+			this->swap_block_mark = false;
+	}
+	dev_dbg(this->dev, "Blockmark swapping %sabled\n",
+		this->swap_block_mark ? "en" : "dis");
+
+	ret = gpmi_init_last(this);
+	if (ret)
+		goto err_out;
+
+	chip->options |= NAND_SKIP_BBTSCAN;
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		goto err_out;
+
+	ret = nand_boot_init(this);
+	if (ret)
+		goto err_out;
+	ret = chip->scan_bbt(mtd);
+	if (ret)
+		goto err_out;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret)
+		goto err_out;
+	return 0;
+
+err_out:
+	gpmi_nand_exit(this);
+	return ret;
+}
+
+static const struct of_device_id gpmi_nand_id_table[] = {
+	{
+		.compatible = "fsl,imx23-gpmi-nand",
+		.data = &gpmi_devdata_imx23,
+	}, {
+		.compatible = "fsl,imx28-gpmi-nand",
+		.data = &gpmi_devdata_imx28,
+	}, {
+		.compatible = "fsl,imx6q-gpmi-nand",
+		.data = &gpmi_devdata_imx6q,
+	}, {
+		.compatible = "fsl,imx6sx-gpmi-nand",
+		.data = &gpmi_devdata_imx6sx,
+	}, {}
+};
+MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
+
+static int gpmi_nand_probe(struct platform_device *pdev)
+{
+	struct gpmi_nand_data *this;
+	const struct of_device_id *of_id;
+	int ret;
+
+	this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
+	if (!this)
+		return -ENOMEM;
+
+	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
+	if (of_id) {
+		this->devdata = of_id->data;
+	} else {
+		dev_err(&pdev->dev, "Failed to find the right device id.\n");
+		return -ENODEV;
+	}
+
+	platform_set_drvdata(pdev, this);
+	this->pdev  = pdev;
+	this->dev   = &pdev->dev;
+
+	ret = acquire_resources(this);
+	if (ret)
+		goto exit_acquire_resources;
+
+	ret = init_hardware(this);
+	if (ret)
+		goto exit_nfc_init;
+
+	ret = gpmi_nand_init(this);
+	if (ret)
+		goto exit_nfc_init;
+
+	dev_info(this->dev, "driver registered.\n");
+
+	return 0;
+
+exit_nfc_init:
+	release_resources(this);
+exit_acquire_resources:
+
+	return ret;
+}
+
+static int gpmi_nand_remove(struct platform_device *pdev)
+{
+	struct gpmi_nand_data *this = platform_get_drvdata(pdev);
+
+	gpmi_nand_exit(this);
+	release_resources(this);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gpmi_pm_suspend(struct device *dev)
+{
+	struct gpmi_nand_data *this = dev_get_drvdata(dev);
+
+	release_dma_channels(this);
+	return 0;
+}
+
+static int gpmi_pm_resume(struct device *dev)
+{
+	struct gpmi_nand_data *this = dev_get_drvdata(dev);
+	int ret;
+
+	ret = acquire_dma_channels(this);
+	if (ret < 0)
+		return ret;
+
+	/* re-init the GPMI registers */
+	this->flags &= ~GPMI_TIMING_INIT_OK;
+	ret = gpmi_init(this);
+	if (ret) {
+		dev_err(this->dev, "Error setting GPMI : %d\n", ret);
+		return ret;
+	}
+
+	/* re-init the BCH registers */
+	ret = bch_set_geometry(this);
+	if (ret) {
+		dev_err(this->dev, "Error setting BCH : %d\n", ret);
+		return ret;
+	}
+
+	/* re-init others */
+	gpmi_extra_init(this);
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops gpmi_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
+};
+
+static struct platform_driver gpmi_nand_driver = {
+	.driver = {
+		.name = "gpmi-nand",
+		.pm = &gpmi_pm_ops,
+		.of_match_table = gpmi_nand_id_table,
+	},
+	.probe   = gpmi_nand_probe,
+	.remove  = gpmi_nand_remove,
+};
+module_platform_driver(gpmi_nand_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.h
new file mode 100644
index 000000000000..d7625cad6493
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-nand.h
@@ -0,0 +1,310 @@ 
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ * Copyright (C) 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_MTD_NAND_GPMI_NAND_H
+#define __DRIVERS_MTD_NAND_GPMI_NAND_H
+
+#include <linux/mtd/rawnand.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+
+#define GPMI_CLK_MAX 5 /* MX6Q needs five clocks */
+struct resources {
+	void __iomem  *gpmi_regs;
+	void __iomem  *bch_regs;
+	unsigned int  dma_low_channel;
+	unsigned int  dma_high_channel;
+	struct clk    *clock[GPMI_CLK_MAX];
+};
+
+/**
+ * struct bch_geometry - BCH geometry description.
+ * @gf_len:                   The length of Galois Field. (e.g., 13 or 14)
+ * @ecc_strength:             A number that describes the strength of the ECC
+ *                            algorithm.
+ * @page_size:                The size, in bytes, of a physical page, including
+ *                            both data and OOB.
+ * @metadata_size:            The size, in bytes, of the metadata.
+ * @ecc_chunk_size:           The size, in bytes, of a single ECC chunk. Note
+ *                            the first chunk in the page includes both data and
+ *                            metadata, so it's a bit larger than this value.
+ * @ecc_chunk_count:          The number of ECC chunks in the page.
+ * @payload_size:             The size, in bytes, of the payload buffer.
+ * @auxiliary_size:           The size, in bytes, of the auxiliary buffer.
+ * @auxiliary_status_offset:  The offset into the auxiliary buffer at which
+ *                            the ECC status appears.
+ * @block_mark_byte_offset:   The byte offset in the ECC-based page view at
+ *                            which the underlying physical block mark appears.
+ * @block_mark_bit_offset:    The bit offset into the ECC-based page view at
+ *                            which the underlying physical block mark appears.
+ */
+struct bch_geometry {
+	unsigned int  gf_len;
+	unsigned int  ecc_strength;
+	unsigned int  page_size;
+	unsigned int  metadata_size;
+	unsigned int  ecc_chunk_size;
+	unsigned int  ecc_chunk_count;
+	unsigned int  payload_size;
+	unsigned int  auxiliary_size;
+	unsigned int  auxiliary_status_offset;
+	unsigned int  block_mark_byte_offset;
+	unsigned int  block_mark_bit_offset;
+};
+
+/**
+ * struct boot_rom_geometry - Boot ROM geometry description.
+ * @stride_size_in_pages:        The size of a boot block stride, in pages.
+ * @search_area_stride_exponent: The logarithm to base 2 of the size of a
+ *                               search area in boot block strides.
+ */
+struct boot_rom_geometry {
+	unsigned int  stride_size_in_pages;
+	unsigned int  search_area_stride_exponent;
+};
+
+/* DMA operations types */
+enum dma_ops_type {
+	DMA_FOR_COMMAND = 1,
+	DMA_FOR_READ_DATA,
+	DMA_FOR_WRITE_DATA,
+	DMA_FOR_READ_ECC_PAGE,
+	DMA_FOR_WRITE_ECC_PAGE
+};
+
+/**
+ * struct nand_timing - Fundamental timing attributes for NAND.
+ * @data_setup_in_ns:         The data setup time, in nanoseconds. Usually the
+ *                            maximum of tDS and tWP. A negative value
+ *                            indicates this characteristic isn't known.
+ * @data_hold_in_ns:          The data hold time, in nanoseconds. Usually the
+ *                            maximum of tDH, tWH and tREH. A negative value
+ *                            indicates this characteristic isn't known.
+ * @address_setup_in_ns:      The address setup time, in nanoseconds. Usually
+ *                            the maximum of tCLS, tCS and tALS. A negative
+ *                            value indicates this characteristic isn't known.
+ * @gpmi_sample_delay_in_ns:  A GPMI-specific timing parameter. A negative value
+ *                            indicates this characteristic isn't known.
+ * @tREA_in_ns:               tREA, in nanoseconds, from the data sheet. A
+ *                            negative value indicates this characteristic isn't
+ *                            known.
+ * @tRLOH_in_ns:              tRLOH, in nanoseconds, from the data sheet. A
+ *                            negative value indicates this characteristic isn't
+ *                            known.
+ * @tRHOH_in_ns:              tRHOH, in nanoseconds, from the data sheet. A
+ *                            negative value indicates this characteristic isn't
+ *                            known.
+ */
+struct nand_timing {
+	int8_t  data_setup_in_ns;
+	int8_t  data_hold_in_ns;
+	int8_t  address_setup_in_ns;
+	int8_t  gpmi_sample_delay_in_ns;
+	int8_t  tREA_in_ns;
+	int8_t  tRLOH_in_ns;
+	int8_t  tRHOH_in_ns;
+};
+
+enum gpmi_type {
+	IS_MX23,
+	IS_MX28,
+	IS_MX6Q,
+	IS_MX6SX
+};
+
+struct gpmi_devdata {
+	enum gpmi_type type;
+	int bch_max_ecc_strength;
+	int max_chain_delay; /* See the async EDO mode */
+};
+
+struct gpmi_nand_data {
+	/* flags */
+#define GPMI_ASYNC_EDO_ENABLED	(1 << 0)
+#define GPMI_TIMING_INIT_OK	(1 << 1)
+	int			flags;
+	const struct gpmi_devdata *devdata;
+
+	/* System Interface */
+	struct device		*dev;
+	struct platform_device	*pdev;
+
+	/* Resources */
+	struct resources	resources;
+
+	/* Flash Hardware */
+	struct nand_timing	timing;
+	int			timing_mode;
+
+	/* BCH */
+	struct bch_geometry	bch_geometry;
+	struct completion	bch_done;
+
+	/* NAND Boot issue */
+	bool			swap_block_mark;
+	struct boot_rom_geometry rom_geometry;
+
+	/* MTD / NAND */
+	struct nand_chip	nand;
+
+	/* General-use Variables */
+	int			current_chip;
+	unsigned int		command_length;
+
+	/* passed from upper layer */
+	uint8_t			*upper_buf;
+	int			upper_len;
+
+	/* for DMA operations */
+	bool			direct_dma_map_ok;
+
+	struct scatterlist	cmd_sgl;
+	char			*cmd_buffer;
+
+	struct scatterlist	data_sgl;
+	char			*data_buffer_dma;
+
+	void			*page_buffer_virt;
+	dma_addr_t		page_buffer_phys;
+	unsigned int		page_buffer_size;
+
+	void			*payload_virt;
+	dma_addr_t		payload_phys;
+
+	void			*auxiliary_virt;
+	dma_addr_t		auxiliary_phys;
+
+	void			*raw_buffer;
+
+	/* DMA channels */
+#define DMA_CHANS		8
+	struct dma_chan		*dma_chans[DMA_CHANS];
+	enum dma_ops_type	last_dma_type;
+	enum dma_ops_type	dma_type;
+	struct completion	dma_done;
+
+	/* private */
+	void			*private;
+};
+
+/**
+ * struct gpmi_nfc_hardware_timing - GPMI hardware timing parameters.
+ * @data_setup_in_cycles:      The data setup time, in cycles.
+ * @data_hold_in_cycles:       The data hold time, in cycles.
+ * @address_setup_in_cycles:   The address setup time, in cycles.
+ * @device_busy_timeout:       The timeout waiting for NAND Ready/Busy,
+ *                             this value is the number of cycles multiplied
+ *                             by 4096.
+ * @use_half_periods:          Indicates the clock is running slowly, so the
+ *                             NFC DLL should use half-periods.
+ * @sample_delay_factor:       The sample delay factor.
+ * @wrn_dly_sel:               The delay on the GPMI write strobe.
+ */
+struct gpmi_nfc_hardware_timing {
+	/* for HW_GPMI_TIMING0 */
+	uint8_t  data_setup_in_cycles;
+	uint8_t  data_hold_in_cycles;
+	uint8_t  address_setup_in_cycles;
+
+	/* for HW_GPMI_TIMING1 */
+	uint16_t device_busy_timeout;
+#define GPMI_DEFAULT_BUSY_TIMEOUT	0x500 /* default busy timeout value.*/
+
+	/* for HW_GPMI_CTRL1 */
+	bool     use_half_periods;
+	uint8_t  sample_delay_factor;
+	uint8_t  wrn_dly_sel;
+};
+
+/**
+ * struct timing_threshod - Timing threshold
+ * @max_data_setup_cycles:       The maximum number of data setup cycles that
+ *                               can be expressed in the hardware.
+ * @internal_data_setup_in_ns:   The time, in ns, that the NFC hardware requires
+ *                               for data read internal setup. In the Reference
+ *                               Manual, see the chapter "High-Speed NAND
+ *                               Timing" for more details.
+ * @max_sample_delay_factor:     The maximum sample delay factor that can be
+ *                               expressed in the hardware.
+ * @max_dll_clock_period_in_ns:  The maximum period of the GPMI clock that the
+ *                               sample delay DLL hardware can possibly work
+ *                               with (the DLL is unusable with longer periods).
+ *                               If the full-cycle period is greater than HALF
+ *                               this value, the DLL must be configured to use
+ *                               half-periods.
+ * @max_dll_delay_in_ns:         The maximum amount of delay, in ns, that the
+ *                               DLL can implement.
+ * @clock_frequency_in_hz:       The clock frequency, in Hz, during the current
+ *                               I/O transaction. If no I/O transaction is in
+ *                               progress, this is the clock frequency during
+ *                               the most recent I/O transaction.
+ */
+struct timing_threshod {
+	const unsigned int      max_chip_count;
+	const unsigned int      max_data_setup_cycles;
+	const unsigned int      internal_data_setup_in_ns;
+	const unsigned int      max_sample_delay_factor;
+	const unsigned int      max_dll_clock_period_in_ns;
+	const unsigned int      max_dll_delay_in_ns;
+	unsigned long           clock_frequency_in_hz;
+
+};
+
+/* Common Services */
+extern int common_nfc_set_geometry(struct gpmi_nand_data *);
+extern struct dma_chan *get_dma_chan(struct gpmi_nand_data *);
+extern void prepare_data_dma(struct gpmi_nand_data *,
+				enum dma_data_direction dr);
+extern int start_dma_without_bch_irq(struct gpmi_nand_data *,
+				struct dma_async_tx_descriptor *);
+extern int start_dma_with_bch_irq(struct gpmi_nand_data *,
+				struct dma_async_tx_descriptor *);
+
+/* GPMI-NAND helper function library */
+extern int gpmi_init(struct gpmi_nand_data *);
+extern int gpmi_extra_init(struct gpmi_nand_data *);
+extern void gpmi_clear_bch(struct gpmi_nand_data *);
+extern void gpmi_dump_info(struct gpmi_nand_data *);
+extern int bch_set_geometry(struct gpmi_nand_data *);
+extern int gpmi_is_ready(struct gpmi_nand_data *, unsigned chip);
+extern int gpmi_send_command(struct gpmi_nand_data *);
+extern void gpmi_begin(struct gpmi_nand_data *);
+extern void gpmi_end(struct gpmi_nand_data *);
+extern int gpmi_read_data(struct gpmi_nand_data *);
+extern int gpmi_send_data(struct gpmi_nand_data *);
+extern int gpmi_send_page(struct gpmi_nand_data *,
+			dma_addr_t payload, dma_addr_t auxiliary);
+extern int gpmi_read_page(struct gpmi_nand_data *,
+			dma_addr_t payload, dma_addr_t auxiliary);
+
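+/*
+ * Copy @nbits bits from @src, starting at bit offset @src_bit_off, into @dst
+ * at bit offset @dst_bit_off; neither offset needs to be byte-aligned.
+ */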
+void gpmi_copy_bits(u8 *dst, size_t dst_bit_off,
+		    const u8 *src, size_t src_bit_off,
+		    size_t nbits);
+
+/* BCH : Status Block Completion Codes */
+#define STATUS_GOOD		0x00
+#define STATUS_ERASED		0xff
+#define STATUS_UNCORRECTABLE	0xfe
+
+/* Use the devdata to distinguish between the different SoC variants. */
+#define GPMI_IS_MX23(x)		((x)->devdata->type == IS_MX23)
+#define GPMI_IS_MX28(x)		((x)->devdata->type == IS_MX28)
+#define GPMI_IS_MX6Q(x)		((x)->devdata->type == IS_MX6Q)
+#define GPMI_IS_MX6SX(x)	((x)->devdata->type == IS_MX6SX)
+
+#define GPMI_IS_MX6(x)		(GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x))
+#endif
diff --git a/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-regs.h b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-regs.h
new file mode 100644
index 000000000000..82114cdc8330
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/gpmi-nand/gpmi-regs.h
@@ -0,0 +1,187 @@ 
+/*
+ * Freescale GPMI NAND Flash Driver
+ *
+ * Copyright 2008-2011 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+#ifndef __GPMI_NAND_GPMI_REGS_H
+#define __GPMI_NAND_GPMI_REGS_H
+
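+/*
+ * Register field helper naming used throughout this header: BP_* is a
+ * field's bit position, BM_* its bit mask, BF_*(v) shifts a value into the
+ * field, and BV_* are the named values a field can take.
+ */
+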
+#define HW_GPMI_CTRL0					0x00000000
+#define HW_GPMI_CTRL0_SET				0x00000004
+#define HW_GPMI_CTRL0_CLR				0x00000008
+#define HW_GPMI_CTRL0_TOG				0x0000000c
+
+#define BP_GPMI_CTRL0_COMMAND_MODE			24
+#define BM_GPMI_CTRL0_COMMAND_MODE	(3 << BP_GPMI_CTRL0_COMMAND_MODE)
+#define BF_GPMI_CTRL0_COMMAND_MODE(v)	\
+	(((v) << BP_GPMI_CTRL0_COMMAND_MODE) & BM_GPMI_CTRL0_COMMAND_MODE)
+#define BV_GPMI_CTRL0_COMMAND_MODE__WRITE		0x0
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ		0x1
+#define BV_GPMI_CTRL0_COMMAND_MODE__READ_AND_COMPARE	0x2
+#define BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY	0x3
+
+#define BM_GPMI_CTRL0_WORD_LENGTH			(1 << 23)
+#define BV_GPMI_CTRL0_WORD_LENGTH__16_BIT		0x0
+#define BV_GPMI_CTRL0_WORD_LENGTH__8_BIT		0x1
+
+/*
+ * The LOCK_CS bit sits at a different position on imx23 and imx28.
+ * Setting it may affect power consumption, so some chips do not set it
+ * (BF_GPMI_CTRL0_LOCK_CS below is defined to 0 accordingly).
+ */
+#define MX23_BP_GPMI_CTRL0_LOCK_CS			22
+#define MX28_BP_GPMI_CTRL0_LOCK_CS			27
+#define LOCK_CS_ENABLE					0x1
+#define BF_GPMI_CTRL0_LOCK_CS(v, x)			0x0
+
+/* Difference in CS between imx23 and imx28 */
+#define BP_GPMI_CTRL0_CS				20
+#define MX23_BM_GPMI_CTRL0_CS		(3 << BP_GPMI_CTRL0_CS)
+#define MX28_BM_GPMI_CTRL0_CS		(7 << BP_GPMI_CTRL0_CS)
+#define BF_GPMI_CTRL0_CS(v, x)		(((v) << BP_GPMI_CTRL0_CS) & \
+						(GPMI_IS_MX23((x)) \
+						? MX23_BM_GPMI_CTRL0_CS	\
+						: MX28_BM_GPMI_CTRL0_CS))
+
+#define BP_GPMI_CTRL0_ADDRESS				17
+#define BM_GPMI_CTRL0_ADDRESS		(3 << BP_GPMI_CTRL0_ADDRESS)
+#define BF_GPMI_CTRL0_ADDRESS(v)	\
+		(((v) << BP_GPMI_CTRL0_ADDRESS) & BM_GPMI_CTRL0_ADDRESS)
+#define BV_GPMI_CTRL0_ADDRESS__NAND_DATA		0x0
+#define BV_GPMI_CTRL0_ADDRESS__NAND_CLE			0x1
+#define BV_GPMI_CTRL0_ADDRESS__NAND_ALE			0x2
+
+#define BM_GPMI_CTRL0_ADDRESS_INCREMENT			(1 << 16)
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__DISABLED	0x0
+#define BV_GPMI_CTRL0_ADDRESS_INCREMENT__ENABLED	0x1
+
+#define BP_GPMI_CTRL0_XFER_COUNT			0
+#define BM_GPMI_CTRL0_XFER_COUNT	(0xffff << BP_GPMI_CTRL0_XFER_COUNT)
+#define BF_GPMI_CTRL0_XFER_COUNT(v)	\
+		(((v) << BP_GPMI_CTRL0_XFER_COUNT) & BM_GPMI_CTRL0_XFER_COUNT)
+
+#define HW_GPMI_COMPARE					0x00000010
+
+#define HW_GPMI_ECCCTRL					0x00000020
+#define HW_GPMI_ECCCTRL_SET				0x00000024
+#define HW_GPMI_ECCCTRL_CLR				0x00000028
+#define HW_GPMI_ECCCTRL_TOG				0x0000002c
+
+#define BP_GPMI_ECCCTRL_ECC_CMD				13
+#define BM_GPMI_ECCCTRL_ECC_CMD		(3 << BP_GPMI_ECCCTRL_ECC_CMD)
+#define BF_GPMI_ECCCTRL_ECC_CMD(v)	\
+		(((v) << BP_GPMI_ECCCTRL_ECC_CMD) & BM_GPMI_ECCCTRL_ECC_CMD)
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE		0x0
+#define BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE		0x1
+
+#define BM_GPMI_ECCCTRL_ENABLE_ECC			(1 << 12)
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__ENABLE		0x1
+#define BV_GPMI_ECCCTRL_ENABLE_ECC__DISABLE		0x0
+
+#define BP_GPMI_ECCCTRL_BUFFER_MASK			0
+#define BM_GPMI_ECCCTRL_BUFFER_MASK	(0x1ff << BP_GPMI_ECCCTRL_BUFFER_MASK)
+#define BF_GPMI_ECCCTRL_BUFFER_MASK(v)	\
+	(((v) << BP_GPMI_ECCCTRL_BUFFER_MASK) & BM_GPMI_ECCCTRL_BUFFER_MASK)
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY	0x100
+#define BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE		0x1FF
+
+#define HW_GPMI_ECCCOUNT				0x00000030
+#define HW_GPMI_PAYLOAD					0x00000040
+#define HW_GPMI_AUXILIARY				0x00000050
+#define HW_GPMI_CTRL1					0x00000060
+#define HW_GPMI_CTRL1_SET				0x00000064
+#define HW_GPMI_CTRL1_CLR				0x00000068
+#define HW_GPMI_CTRL1_TOG				0x0000006c
+
+#define BP_GPMI_CTRL1_DECOUPLE_CS			24
+#define BM_GPMI_CTRL1_DECOUPLE_CS	(1 << BP_GPMI_CTRL1_DECOUPLE_CS)
+
+#define BP_GPMI_CTRL1_WRN_DLY_SEL			22
+#define BM_GPMI_CTRL1_WRN_DLY_SEL	(0x3 << BP_GPMI_CTRL1_WRN_DLY_SEL)
+#define BF_GPMI_CTRL1_WRN_DLY_SEL(v)  \
+	(((v) << BP_GPMI_CTRL1_WRN_DLY_SEL) & BM_GPMI_CTRL1_WRN_DLY_SEL)
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS		0x0
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_6_TO_10NS		0x1
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_7_TO_12NS		0x2
+#define BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY		0x3
+
+#define BM_GPMI_CTRL1_BCH_MODE				(1 << 18)
+
+#define BP_GPMI_CTRL1_DLL_ENABLE			17
+#define BM_GPMI_CTRL1_DLL_ENABLE	(1 << BP_GPMI_CTRL1_DLL_ENABLE)
+
+#define BP_GPMI_CTRL1_HALF_PERIOD			16
+#define BM_GPMI_CTRL1_HALF_PERIOD	(1 << BP_GPMI_CTRL1_HALF_PERIOD)
+
+#define BP_GPMI_CTRL1_RDN_DELAY				12
+#define BM_GPMI_CTRL1_RDN_DELAY		(0xf << BP_GPMI_CTRL1_RDN_DELAY)
+#define BF_GPMI_CTRL1_RDN_DELAY(v)	\
+		(((v) << BP_GPMI_CTRL1_RDN_DELAY) & BM_GPMI_CTRL1_RDN_DELAY)
+
+#define BM_GPMI_CTRL1_DEV_RESET				(1 << 3)
+#define BV_GPMI_CTRL1_DEV_RESET__ENABLED		0x0
+#define BV_GPMI_CTRL1_DEV_RESET__DISABLED		0x1
+
+#define BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY		(1 << 2)
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVELOW	0x0
+#define BV_GPMI_CTRL1_ATA_IRQRDY_POLARITY__ACTIVEHIGH	0x1
+
+#define BM_GPMI_CTRL1_CAMERA_MODE			(1 << 1)
+#define BV_GPMI_CTRL1_GPMI_MODE__NAND			0x0
+#define BV_GPMI_CTRL1_GPMI_MODE__ATA			0x1
+
+#define BM_GPMI_CTRL1_GPMI_MODE				(1 << 0)
+
+#define HW_GPMI_TIMING0					0x00000070
+
+#define BP_GPMI_TIMING0_ADDRESS_SETUP			16
+#define BM_GPMI_TIMING0_ADDRESS_SETUP	(0xff << BP_GPMI_TIMING0_ADDRESS_SETUP)
+#define BF_GPMI_TIMING0_ADDRESS_SETUP(v)	\
+	(((v) << BP_GPMI_TIMING0_ADDRESS_SETUP) & BM_GPMI_TIMING0_ADDRESS_SETUP)
+
+#define BP_GPMI_TIMING0_DATA_HOLD			8
+#define BM_GPMI_TIMING0_DATA_HOLD	(0xff << BP_GPMI_TIMING0_DATA_HOLD)
+#define BF_GPMI_TIMING0_DATA_HOLD(v)		\
+	(((v) << BP_GPMI_TIMING0_DATA_HOLD) & BM_GPMI_TIMING0_DATA_HOLD)
+
+#define BP_GPMI_TIMING0_DATA_SETUP			0
+#define BM_GPMI_TIMING0_DATA_SETUP	(0xff << BP_GPMI_TIMING0_DATA_SETUP)
+#define BF_GPMI_TIMING0_DATA_SETUP(v)		\
+	(((v) << BP_GPMI_TIMING0_DATA_SETUP) & BM_GPMI_TIMING0_DATA_SETUP)
+
+#define HW_GPMI_TIMING1					0x00000080
+#define BP_GPMI_TIMING1_BUSY_TIMEOUT			16
+#define BM_GPMI_TIMING1_BUSY_TIMEOUT	(0xffff << BP_GPMI_TIMING1_BUSY_TIMEOUT)
+#define BF_GPMI_TIMING1_BUSY_TIMEOUT(v)		\
+	(((v) << BP_GPMI_TIMING1_BUSY_TIMEOUT) & BM_GPMI_TIMING1_BUSY_TIMEOUT)
+
+#define HW_GPMI_TIMING2					0x00000090
+#define HW_GPMI_DATA					0x000000a0
+
+/* MX28 uses this to detect READY. */
+#define HW_GPMI_STAT					0x000000b0
+#define MX28_BP_GPMI_STAT_READY_BUSY			24
+#define MX28_BM_GPMI_STAT_READY_BUSY	(0xff << MX28_BP_GPMI_STAT_READY_BUSY)
+#define MX28_BF_GPMI_STAT_READY_BUSY(v)		\
+	(((v) << MX28_BP_GPMI_STAT_READY_BUSY) & MX28_BM_GPMI_STAT_READY_BUSY)
+
+/* MX23 uses this to detect READY. */
+#define HW_GPMI_DEBUG					0x000000c0
+#define MX23_BP_GPMI_DEBUG_READY0			28
+#define MX23_BM_GPMI_DEBUG_READY0	(1 << MX23_BP_GPMI_DEBUG_READY0)
+#endif
diff --git a/drivers/mtd/nand/rawnand/hisi504_nand.c b/drivers/mtd/nand/rawnand/hisi504_nand.c
new file mode 100644
index 000000000000..a287d73bb17e
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/hisi504_nand.c
@@ -0,0 +1,898 @@ 
+/*
+ * Hisilicon NAND Flash controller driver
+ *
+ * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd.
+ *              http://www.hisilicon.com
+ *
+ * Author: Zhou Wang <wangzhou.bry@gmail.com>
+ * The initial developer of the original code is Zhiyong Cai
+ * <caizhiyong@huawei.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/sizes.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/partitions.h>
+
+#define HINFC504_MAX_CHIP                               (4)
+#define HINFC504_W_LATCH                                (5)
+#define HINFC504_R_LATCH                                (7)
+#define HINFC504_RW_LATCH                               (3)
+
+#define HINFC504_NFC_TIMEOUT				(2 * HZ)
+#define HINFC504_NFC_PM_TIMEOUT				(1 * HZ)
+#define HINFC504_NFC_DMA_TIMEOUT			(5 * HZ)
+#define HINFC504_CHIP_DELAY				(25)
+
+#define HINFC504_REG_BASE_ADDRESS_LEN			(0x100)
+#define HINFC504_BUFFER_BASE_ADDRESS_LEN		(2048 + 128)
+
+#define HINFC504_ADDR_CYCLE_MASK			0x4
+
+#define HINFC504_CON					0x00
+#define HINFC504_CON_OP_MODE_NORMAL			BIT(0)
+#define HINFC504_CON_PAGEISZE_SHIFT			(1)
+#define HINFC504_CON_PAGESIZE_MASK			(0x07)
+#define HINFC504_CON_BUS_WIDTH				BIT(4)
+#define HINFC504_CON_READY_BUSY_SEL			BIT(8)
+#define HINFC504_CON_ECCTYPE_SHIFT			(9)
+#define HINFC504_CON_ECCTYPE_MASK			(0x07)
+
+#define HINFC504_PWIDTH					0x04
+#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \
+	((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8))
+
+#define HINFC504_CMD					0x0C
+#define HINFC504_ADDRL					0x10
+#define HINFC504_ADDRH					0x14
+#define HINFC504_DATA_NUM				0x18
+
+#define HINFC504_OP					0x1C
+#define HINFC504_OP_READ_DATA_EN			BIT(1)
+#define HINFC504_OP_WAIT_READY_EN			BIT(2)
+#define HINFC504_OP_CMD2_EN				BIT(3)
+#define HINFC504_OP_WRITE_DATA_EN			BIT(4)
+#define HINFC504_OP_ADDR_EN				BIT(5)
+#define HINFC504_OP_CMD1_EN				BIT(6)
+#define HINFC504_OP_NF_CS_SHIFT                         (7)
+#define HINFC504_OP_NF_CS_MASK				(3)
+#define HINFC504_OP_ADDR_CYCLE_SHIFT			(9)
+#define HINFC504_OP_ADDR_CYCLE_MASK			(7)
+
+#define HINFC504_STATUS                                 0x20
+#define HINFC504_READY					BIT(0)
+
+#define HINFC504_INTEN					0x24
+#define HINFC504_INTEN_DMA				BIT(9)
+#define HINFC504_INTEN_UE				BIT(6)
+#define HINFC504_INTEN_CE				BIT(5)
+
+#define HINFC504_INTS					0x28
+#define HINFC504_INTS_DMA				BIT(9)
+#define HINFC504_INTS_UE				BIT(6)
+#define HINFC504_INTS_CE				BIT(5)
+
+#define HINFC504_INTCLR                                 0x2C
+#define HINFC504_INTCLR_DMA				BIT(9)
+#define HINFC504_INTCLR_UE				BIT(6)
+#define HINFC504_INTCLR_CE				BIT(5)
+
+#define HINFC504_ECC_STATUS                             0x5C
+#define HINFC504_ECC_16_BIT_SHIFT                       12
+
+#define HINFC504_DMA_CTRL				0x60
+#define HINFC504_DMA_CTRL_DMA_START			BIT(0)
+#define HINFC504_DMA_CTRL_WE				BIT(1)
+#define HINFC504_DMA_CTRL_DATA_AREA_EN			BIT(2)
+#define HINFC504_DMA_CTRL_OOB_AREA_EN			BIT(3)
+#define HINFC504_DMA_CTRL_BURST4_EN			BIT(4)
+#define HINFC504_DMA_CTRL_BURST8_EN			BIT(5)
+#define HINFC504_DMA_CTRL_BURST16_EN			BIT(6)
+#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT		(7)
+#define HINFC504_DMA_CTRL_ADDR_NUM_MASK                 (1)
+#define HINFC504_DMA_CTRL_CS_SHIFT			(8)
+#define HINFC504_DMA_CTRL_CS_MASK			(0x03)
+
+#define HINFC504_DMA_ADDR_DATA				0x64
+#define HINFC504_DMA_ADDR_OOB				0x68
+
+#define HINFC504_DMA_LEN				0x6C
+#define HINFC504_DMA_LEN_OOB_SHIFT			(16)
+#define HINFC504_DMA_LEN_OOB_MASK			(0xFFF)
+
+#define HINFC504_DMA_PARA				0x70
+#define HINFC504_DMA_PARA_DATA_RW_EN			BIT(0)
+#define HINFC504_DMA_PARA_OOB_RW_EN			BIT(1)
+#define HINFC504_DMA_PARA_DATA_EDC_EN			BIT(2)
+#define HINFC504_DMA_PARA_OOB_EDC_EN			BIT(3)
+#define HINFC504_DMA_PARA_DATA_ECC_EN			BIT(4)
+#define HINFC504_DMA_PARA_OOB_ECC_EN			BIT(5)
+
+#define HINFC_VERSION                                   0x74
+#define HINFC504_LOG_READ_ADDR				0x7C
+#define HINFC504_LOG_READ_LEN				0x80
+
+#define HINFC504_NANDINFO_LEN				0x10
+
+struct hinfc_host {
+	struct nand_chip	chip;
+	struct device		*dev;
+	void __iomem		*iobase;
+	void __iomem		*mmio;
+	struct completion       cmd_complete;
+	unsigned int		offset;
+	unsigned int		command;
+	int			chipselect;
+	unsigned int		addr_cycle;
+	u32                     addr_value[2];
+	u32                     cache_addr_value[2];
+	char			*buffer;
+	dma_addr_t		dma_buffer;
+	dma_addr_t		dma_oob;
+	int			version;
+	unsigned int            irq_status; /* interrupt status */
+};
+
+static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg)
+{
+	return readl(host->iobase + reg);
+}
+
+static inline void hinfc_write(struct hinfc_host *host, unsigned int value,
+			       unsigned int reg)
+{
+	writel(value, host->iobase + reg);
+}
+
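+/*
+ * Busy-poll the STATUS register until the controller reports ready. Erase
+ * operations can take much longer, so for ERASE2 the loop sleeps between
+ * polls and waits until the controller is ready.
+ */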
+static void wait_controller_finished(struct hinfc_host *host)
+{
+	unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT;
+	int val;
+
+	while (time_before(jiffies, timeout)) {
+		val = hinfc_read(host, HINFC504_STATUS);
+		if (host->command == NAND_CMD_ERASE2) {
+			/* erase takes longer: keep polling until ready */
+			while (!(val & HINFC504_READY)) {
+				usleep_range(500, 1000);
+				val = hinfc_read(host, HINFC504_STATUS);
+			}
+			return;
+		}
+
+		if (val & HINFC504_READY)
+			return;
+	}
+
+	/* wait cmd timeout */
+	dev_err(host->dev, "Wait NAND controller exec cmd timeout.\n");
+}
+
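+/*
+ * Program the data/OOB DMA addresses and transfer parameters (ECC is
+ * bypassed when it is disabled), start the DMA engine and wait for the
+ * completion signalled by the DMA interrupt handler.
+ */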
+static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev)
+{
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info	*mtd = nand_to_mtd(chip);
+	unsigned long val;
+	int ret;
+
+	hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA);
+	hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB);
+
+	if (chip->ecc.mode == NAND_ECC_NONE) {
+		hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK)
+			<< HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN);
+
+		hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+			| HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA);
+	} else {
+		if (host->command == NAND_CMD_READOOB)
+			hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN
+			| HINFC504_DMA_PARA_OOB_EDC_EN
+			| HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+		else
+			hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN
+			| HINFC504_DMA_PARA_OOB_RW_EN
+			| HINFC504_DMA_PARA_DATA_EDC_EN
+			| HINFC504_DMA_PARA_OOB_EDC_EN
+			| HINFC504_DMA_PARA_DATA_ECC_EN
+			| HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA);
+
+	}
+
+	val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN
+		| HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN
+		| HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN
+		| ((host->addr_cycle == 4 ? 1 : 0)
+			<< HINFC504_DMA_CTRL_ADDR_NUM_SHIFT)
+		| ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK)
+			<< HINFC504_DMA_CTRL_CS_SHIFT));
+
+	if (todev)
+		val |= HINFC504_DMA_CTRL_WE;
+
+	init_completion(&host->cmd_complete);
+
+	hinfc_write(host, val, HINFC504_DMA_CTRL);
+	ret = wait_for_completion_timeout(&host->cmd_complete,
+			HINFC504_NFC_DMA_TIMEOUT);
+
+	if (!ret) {
+		dev_err(host->dev, "DMA operation(irq) timeout!\n");
+		/* sanity check */
+		val = hinfc_read(host, HINFC504_DMA_CTRL);
+		if (!(val & HINFC504_DMA_CTRL_DMA_START))
+			dev_err(host->dev, "DMA is already done but without irq ACK!\n");
+		else
+			dev_err(host->dev, "DMA is really timeout!\n");
+	}
+}
+
+static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host)
+{
+	host->addr_value[0] &= 0xffff0000;
+
+	hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+	hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+	hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN,
+		    HINFC504_CMD);
+
+	hisi_nfc_dma_transfer(host, 1);
+
+	return 0;
+}
+
+static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host)
+{
+	struct mtd_info	*mtd = nand_to_mtd(&host->chip);
+
+	if ((host->addr_value[0] == host->cache_addr_value[0]) &&
+	    (host->addr_value[1] == host->cache_addr_value[1]))
+		return 0;
+
+	host->addr_value[0] &= 0xffff0000;
+
+	hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+	hinfc_write(host, host->addr_value[1], HINFC504_ADDRH);
+	hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0,
+		    HINFC504_CMD);
+
+	hinfc_write(host, 0, HINFC504_LOG_READ_ADDR);
+	hinfc_write(host, mtd->writesize + mtd->oobsize,
+		    HINFC504_LOG_READ_LEN);
+
+	hisi_nfc_dma_transfer(host, 0);
+
+	host->cache_addr_value[0] = host->addr_value[0];
+	host->cache_addr_value[1] = host->addr_value[1];
+
+	return 0;
+}
+
+static int hisi_nfc_send_cmd_erase(struct hinfc_host *host)
+{
+	hinfc_write(host, host->addr_value[0], HINFC504_ADDRL);
+	hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1,
+		    HINFC504_CMD);
+
+	hinfc_write(host, HINFC504_OP_WAIT_READY_EN
+		| HINFC504_OP_CMD2_EN
+		| HINFC504_OP_CMD1_EN
+		| HINFC504_OP_ADDR_EN
+		| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+			<< HINFC504_OP_NF_CS_SHIFT)
+		| ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK)
+			<< HINFC504_OP_ADDR_CYCLE_SHIFT),
+		HINFC504_OP);
+
+	wait_controller_finished(host);
+
+	return 0;
+}
+
+static int hisi_nfc_send_cmd_readid(struct hinfc_host *host)
+{
+	hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+	hinfc_write(host, NAND_CMD_READID, HINFC504_CMD);
+	hinfc_write(host, 0, HINFC504_ADDRL);
+
+	hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN
+		| HINFC504_OP_READ_DATA_EN
+		| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+			<< HINFC504_OP_NF_CS_SHIFT)
+		| 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP);
+
+	wait_controller_finished(host);
+
+	return 0;
+}
+
+static int hisi_nfc_send_cmd_status(struct hinfc_host *host)
+{
+	hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM);
+	hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD);
+	hinfc_write(host, HINFC504_OP_CMD1_EN
+		| HINFC504_OP_READ_DATA_EN
+		| ((host->chipselect & HINFC504_OP_NF_CS_MASK)
+			<< HINFC504_OP_NF_CS_SHIFT),
+		HINFC504_OP);
+
+	wait_controller_finished(host);
+
+	return 0;
+}
+
+static int hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect)
+{
+	hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD);
+
+	hinfc_write(host, HINFC504_OP_CMD1_EN
+		| ((chipselect & HINFC504_OP_NF_CS_MASK)
+			<< HINFC504_OP_NF_CS_SHIFT)
+		| HINFC504_OP_WAIT_READY_EN,
+		HINFC504_OP);
+
+	wait_controller_finished(host);
+
+	return 0;
+}
+
+static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+
+	if (chipselect < 0)
+		return;
+
+	host->chipselect = chipselect;
+}
+
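+/*
+ * STATUS and READID results are read back from the controller's mmio
+ * buffer; regular page data comes from the DMA buffer at host->offset.
+ */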
+static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+
+	if (host->command == NAND_CMD_STATUS)
+		return *(uint8_t *)(host->mmio);
+
+	host->offset++;
+
+	if (host->command == NAND_CMD_READID)
+		return *(uint8_t *)(host->mmio + host->offset - 1);
+
+	return *(uint8_t *)(host->buffer + host->offset - 1);
+}
+
+static u16 hisi_nfc_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+
+	host->offset += 2;
+	return *(u16 *)(host->buffer + host->offset - 2);
+}
+
+static void
+hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+
+	memcpy(host->buffer + host->offset, buf, len);
+	host->offset += len;
+}
+
+static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+
+	memcpy(buf, host->buffer + host->offset, len);
+	host->offset += len;
+}
+
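+/*
+ * Pack the column and page address into addr_value[] and record how many
+ * address cycles the controller has to drive for the current command.
+ */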
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+	unsigned int command = host->command;
+
+	host->addr_cycle    = 0;
+	host->addr_value[0] = 0;
+	host->addr_value[1] = 0;
+
+	/* Serially input address */
+	if (column != -1) {
+		/* Adjust columns for 16 bit buswidth */
+		if (chip->options & NAND_BUSWIDTH_16 &&
+				!nand_opcode_8bits(command))
+			column >>= 1;
+
+		host->addr_value[0] = column & 0xffff;
+		host->addr_cycle    = 2;
+	}
+	if (page_addr != -1) {
+		host->addr_value[0] |= (page_addr & 0xffff)
+			<< (host->addr_cycle * 8);
+		host->addr_cycle    += 2;
+		/* One more address cycle for devices > 128MiB */
+		if (chip->chipsize > (128 << 20)) {
+			host->addr_cycle += 1;
+			if (host->command == NAND_CMD_ERASE1)
+				host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16;
+			else
+				host->addr_value[1] |= ((page_addr >> 16) & 0xff);
+		}
+	}
+}
+
+static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column,
+		int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct hinfc_host *host = nand_get_controller_data(chip);
+	int is_cache_invalid = 1;
+	unsigned int flag = 0;
+
+	host->command =  command;
+
+	switch (command) {
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		if (command == NAND_CMD_READ0)
+			host->offset = column;
+		else
+			host->offset = column + mtd->writesize;
+
+		is_cache_invalid = 0;
+		set_addr(mtd, column, page_addr);
+		hisi_nfc_send_cmd_readstart(host);
+		break;
+
+	case NAND_CMD_SEQIN:
+		host->offset = column;
+		set_addr(mtd, column, page_addr);
+		break;
+
+	case NAND_CMD_ERASE1:
+		set_addr(mtd, column, page_addr);
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		hisi_nfc_send_cmd_pageprog(host);
+		break;
+
+	case NAND_CMD_ERASE2:
+		hisi_nfc_send_cmd_erase(host);
+		break;
+
+	case NAND_CMD_READID:
+		host->offset = column;
+		memset(host->mmio, 0, 0x10);
+		hisi_nfc_send_cmd_readid(host);
+		break;
+
+	case NAND_CMD_STATUS:
+		flag = hinfc_read(host, HINFC504_CON);
+		if (chip->ecc.mode == NAND_ECC_HW)
+			hinfc_write(host,
+				    flag & ~(HINFC504_CON_ECCTYPE_MASK <<
+				    HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON);
+
+		host->offset = 0;
+		memset(host->mmio, 0, 0x10);
+		hisi_nfc_send_cmd_status(host);
+		hinfc_write(host, flag, HINFC504_CON);
+		break;
+
+	case NAND_CMD_RESET:
+		hisi_nfc_send_cmd_reset(host, host->chipselect);
+		break;
+
+	default:
+		dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n",
+			command, column, page_addr);
+	}
+
+	if (is_cache_invalid) {
+		host->cache_addr_value[0] = ~0;
+		host->cache_addr_value[1] = ~0;
+	}
+}
+
+static irqreturn_t hinfc_irq_handle(int irq, void *devid)
+{
+	struct hinfc_host *host = devid;
+	unsigned int flag;
+
+	flag = hinfc_read(host, HINFC504_INTS);
+	/* store interrupts state */
+	host->irq_status |= flag;
+
+	if (flag & HINFC504_INTS_DMA) {
+		hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR);
+		complete(&host->cmd_complete);
+	} else if (flag & HINFC504_INTS_CE) {
+		hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR);
+	} else if (flag & HINFC504_INTS_UE) {
+		hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int hisi_nand_read_page_hwecc(struct mtd_info *mtd,
+	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+	struct hinfc_host *host = nand_get_controller_data(chip);
+	int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc;
+	int stat_1, stat_2;
+
+	chip->read_buf(mtd, buf, mtd->writesize);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* errors which cannot be corrected by ECC */
+	if (host->irq_status & HINFC504_INTS_UE) {
+		mtd->ecc_stats.failed++;
+	} else if (host->irq_status & HINFC504_INTS_CE) {
+		/* TODO: add support for other ECC strengths */
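+		/*
+		 * For 16-bit ECC the status register packs two 6-bit
+		 * corrected-bitflip counts, one per 1024-byte ECC step.
+		 */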
+		switch (chip->ecc.strength) {
+		case 16:
+			status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >>
+					HINFC504_ECC_16_BIT_SHIFT & 0x0fff;
+			stat_2 = status_ecc & 0x3f;
+			stat_1 = status_ecc >> 6 & 0x3f;
+			stat = stat_1 + stat_2;
+			stat_max = max_t(int, stat_1, stat_2);
+		}
+		mtd->ecc_stats.corrected += stat;
+		max_bitflips = max_t(int, max_bitflips, stat_max);
+	}
+	host->irq_status = 0;
+
+	return max_bitflips;
+}
+
+static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+				int page)
+{
+	struct hinfc_host *host = nand_get_controller_data(chip);
+
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	if (host->irq_status & HINFC504_INTS_UE) {
+		host->irq_status = 0;
+		return -EBADMSG;
+	}
+
+	host->irq_status = 0;
+	return 0;
+}
+
+static int hisi_nand_write_page_hwecc(struct mtd_info *mtd,
+		struct nand_chip *chip, const uint8_t *buf, int oob_required,
+		int page)
+{
+	chip->write_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static void hisi_nfc_host_init(struct hinfc_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	unsigned int flag = 0;
+
+	host->version = hinfc_read(host, HINFC_VERSION);
+	host->addr_cycle		= 0;
+	host->addr_value[0]		= 0;
+	host->addr_value[1]		= 0;
+	host->cache_addr_value[0]	= ~0;
+	host->cache_addr_value[1]	= ~0;
+	host->chipselect		= 0;
+
+	/* Default configuration: 2K page size, no ECC; updated later in probe. */
+	flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL
+		| ((0x001 & HINFC504_CON_PAGESIZE_MASK)
+			<< HINFC504_CON_PAGEISZE_SHIFT)
+		| ((0x0 & HINFC504_CON_ECCTYPE_MASK)
+			<< HINFC504_CON_ECCTYPE_SHIFT)
+		| ((chip->options & NAND_BUSWIDTH_16) ?
+			HINFC504_CON_BUS_WIDTH : 0);
+	hinfc_write(host, flag, HINFC504_CON);
+
+	memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN);
+
+	hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+		    HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+	/* enable DMA irq */
+	hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN);
+}
+
+static int hisi_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	/* FIXME: add ECC bytes position */
+	return -ENOTSUPP;
+}
+
+static int hisi_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 2;
+	oobregion->length = 6;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops hisi_ooblayout_ops = {
+	.ecc = hisi_ooblayout_ecc,
+	.free = hisi_ooblayout_free,
+};
+
+static int hisi_nfc_ecc_probe(struct hinfc_host *host)
+{
+	unsigned int flag;
+	int size, strength, ecc_bits;
+	struct device *dev = host->dev;
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	size = chip->ecc.size;
+	strength = chip->ecc.strength;
+	if (size != 1024) {
+		dev_err(dev, "error ecc size: %d\n", size);
+		return -EINVAL;
+	}
+
+	if ((size == 1024) && ((strength != 8) && (strength != 16) &&
+				(strength != 24) && (strength != 40))) {
+		dev_err(dev, "ecc size and strength do not match\n");
+		return -EINVAL;
+	}
+
+	chip->ecc.size = size;
+	chip->ecc.strength = strength;
+
+	chip->ecc.read_page = hisi_nand_read_page_hwecc;
+	chip->ecc.read_oob = hisi_nand_read_oob;
+	chip->ecc.write_page = hisi_nand_write_page_hwecc;
+
+	switch (chip->ecc.strength) {
+	case 16:
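+		/* value programmed into the HINFC504_CON ECCTYPE field */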
+		ecc_bits = 6;
+		if (mtd->writesize == 2048)
+			mtd_set_ooblayout(mtd, &hisi_ooblayout_ops);
+
+		/* TODO: add more page size support */
+		break;
+
+	/* TODO: add more ecc strength support */
+	default:
+		dev_err(dev, "not support strength: %d\n", chip->ecc.strength);
+		return -EINVAL;
+	}
+
+	flag = hinfc_read(host, HINFC504_CON);
+	/* add ecc type configure */
+	flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK)
+						<< HINFC504_CON_ECCTYPE_SHIFT);
+	hinfc_write(host, flag, HINFC504_CON);
+
+	/* enable ecc irq */
+	flag = hinfc_read(host, HINFC504_INTEN) & 0xfff;
+	hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE,
+		    HINFC504_INTEN);
+
+	return 0;
+}
+
+static int hisi_nfc_probe(struct platform_device *pdev)
+{
+	int ret = 0, irq, flag, max_chips = HINFC504_MAX_CHIP;
+	struct device *dev = &pdev->dev;
+	struct hinfc_host *host;
+	struct nand_chip  *chip;
+	struct mtd_info   *mtd;
+	struct resource	  *res;
+	struct device_node *np = dev->of_node;
+
+	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+	host->dev = dev;
+
+	platform_set_drvdata(pdev, host);
+	chip = &host->chip;
+	mtd  = nand_to_mtd(chip);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no IRQ resource defined\n");
+		ret = -ENXIO;
+		goto err_res;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	host->iobase = devm_ioremap_resource(dev, res);
+	if (IS_ERR(host->iobase)) {
+		ret = PTR_ERR(host->iobase);
+		goto err_res;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	host->mmio = devm_ioremap_resource(dev, res);
+	if (IS_ERR(host->mmio)) {
+		ret = PTR_ERR(host->mmio);
+		dev_err(dev, "devm_ioremap_resource[1] fail\n");
+		goto err_res;
+	}
+
+	mtd->name		= "hisi_nand";
+	mtd->dev.parent         = &pdev->dev;
+
+	nand_set_controller_data(chip, host);
+	nand_set_flash_node(chip, np);
+	chip->cmdfunc		= hisi_nfc_cmdfunc;
+	chip->select_chip	= hisi_nfc_select_chip;
+	chip->read_byte		= hisi_nfc_read_byte;
+	chip->read_word		= hisi_nfc_read_word;
+	chip->write_buf		= hisi_nfc_write_buf;
+	chip->read_buf		= hisi_nfc_read_buf;
+	chip->chip_delay	= HINFC504_CHIP_DELAY;
+
+	hisi_nfc_host_init(host);
+
+	ret = devm_request_irq(dev, irq, hinfc_irq_handle, 0x0, "nandc", host);
+	if (ret) {
+		dev_err(dev, "failed to request IRQ\n");
+		goto err_res;
+	}
+
+	ret = nand_scan_ident(mtd, max_chips, NULL);
+	if (ret) {
+		ret = -ENODEV;
+		goto err_res;
+	}
+
+	host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize,
+		&host->dma_buffer, GFP_KERNEL);
+	if (!host->buffer) {
+		ret = -ENOMEM;
+		goto err_res;
+	}
+
+	host->dma_oob = host->dma_buffer + mtd->writesize;
+	memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize);
+
+	flag = hinfc_read(host, HINFC504_CON);
+	flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT);
+	switch (mtd->writesize) {
+	case 2048:
+		flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break;
+	/*
+	 * TODO: add support for more page sizes; the default page size has
+	 * already been set in hisi_nfc_host_init().
+	 */
+	default:
+		dev_err(dev, "NON-2KB page size nand flash\n");
+		ret = -EINVAL;
+		goto err_res;
+	}
+	hinfc_write(host, flag, HINFC504_CON);
+
+	if (chip->ecc.mode == NAND_ECC_HW)
+		hisi_nfc_ecc_probe(host);
+
+	ret = nand_scan_tail(mtd);
+	if (ret) {
+		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+		goto err_res;
+	}
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(dev, "Err MTD partition=%d\n", ret);
+		goto err_mtd;
+	}
+
+	return 0;
+
+err_mtd:
+	nand_release(mtd);
+err_res:
+	return ret;
+}
+
+static int hisi_nfc_remove(struct platform_device *pdev)
+{
+	struct hinfc_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+
+	nand_release(mtd);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int hisi_nfc_suspend(struct device *dev)
+{
+	struct hinfc_host *host = dev_get_drvdata(dev);
+	unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT;
+
+	while (time_before(jiffies, timeout)) {
+		if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) &&
+		    (hinfc_read(host, HINFC504_DMA_CTRL) &
+		     HINFC504_DMA_CTRL_DMA_START)) {
+			cond_resched();
+			return 0;
+		}
+	}
+
+	dev_err(host->dev, "nand controller suspend timeout.\n");
+
+	return -EAGAIN;
+}
+
+static int hisi_nfc_resume(struct device *dev)
+{
+	int cs;
+	struct hinfc_host *host = dev_get_drvdata(dev);
+	struct nand_chip *chip = &host->chip;
+
+	for (cs = 0; cs < chip->numchips; cs++)
+		hisi_nfc_send_cmd_reset(host, cs);
+	hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH,
+		    HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH);
+
+	return 0;
+}
+#endif
+static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume);
+
+static const struct of_device_id nfc_id_table[] = {
+	{ .compatible = "hisilicon,504-nfc" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, nfc_id_table);
+
+static struct platform_driver hisi_nfc_driver = {
+	.driver = {
+		.name  = "hisi_nand",
+		.of_match_table = nfc_id_table,
+		.pm = &hisi_nfc_pm_ops,
+	},
+	.probe		= hisi_nfc_probe,
+	.remove		= hisi_nfc_remove,
+};
+
+module_platform_driver(hisi_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Zhou Wang");
+MODULE_AUTHOR("Zhiyong Cai");
+MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/rawnand/jz4740_nand.c b/drivers/mtd/nand/rawnand/jz4740_nand.c
new file mode 100644
index 000000000000..e813ec11ee84
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/jz4740_nand.c
@@ -0,0 +1,557 @@ 
+/*
+ *  Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ *  JZ4740 SoC NAND controller driver
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under  the terms of the GNU General  Public License as published by the
+ *  Free Software Foundation;  either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/gpio.h>
+
+#include <asm/mach-jz4740/gpio.h>
+#include <asm/mach-jz4740/jz4740_nand.h>
+
+#define JZ_REG_NAND_CTRL	0x50
+#define JZ_REG_NAND_ECC_CTRL	0x100
+#define JZ_REG_NAND_DATA	0x104
+#define JZ_REG_NAND_PAR0	0x108
+#define JZ_REG_NAND_PAR1	0x10C
+#define JZ_REG_NAND_PAR2	0x110
+#define JZ_REG_NAND_IRQ_STAT	0x114
+#define JZ_REG_NAND_IRQ_CTRL	0x118
+#define JZ_REG_NAND_ERR(x)	(0x11C + ((x) << 2))
+
+#define JZ_NAND_ECC_CTRL_PAR_READY	BIT(4)
+#define JZ_NAND_ECC_CTRL_ENCODING	BIT(3)
+#define JZ_NAND_ECC_CTRL_RS		BIT(2)
+#define JZ_NAND_ECC_CTRL_RESET		BIT(1)
+#define JZ_NAND_ECC_CTRL_ENABLE		BIT(0)
+
+#define JZ_NAND_STATUS_ERR_COUNT	(BIT(31) | BIT(30) | BIT(29))
+#define JZ_NAND_STATUS_PAD_FINISH	BIT(4)
+#define JZ_NAND_STATUS_DEC_FINISH	BIT(3)
+#define JZ_NAND_STATUS_ENC_FINISH	BIT(2)
+#define JZ_NAND_STATUS_UNCOR_ERROR	BIT(1)
+#define JZ_NAND_STATUS_ERROR		BIT(0)
+
+#define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1)
+#define JZ_NAND_CTRL_ASSERT_CHIP_MASK 0xaa
+
+#define JZ_NAND_MEM_CMD_OFFSET 0x08000
+#define JZ_NAND_MEM_ADDR_OFFSET 0x10000
+
+struct jz_nand {
+	struct nand_chip chip;
+	void __iomem *base;
+	struct resource *mem;
+
+	unsigned char banks[JZ_NAND_NUM_BANKS];
+	void __iomem *bank_base[JZ_NAND_NUM_BANKS];
+	struct resource *bank_mem[JZ_NAND_NUM_BANKS];
+
+	int selected_bank;
+
+	struct gpio_desc *busy_gpio;
+	bool is_reading;
+};
+
+static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct jz_nand, chip);
+}
+
+static void jz_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct jz_nand *nand = mtd_to_jz_nand(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint32_t ctrl;
+	int banknr;
+
+	ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+	ctrl &= ~JZ_NAND_CTRL_ASSERT_CHIP_MASK;
+
+	if (chipnr == -1) {
+		banknr = -1;
+	} else {
+		banknr = nand->banks[chipnr] - 1;
+		chip->IO_ADDR_R = nand->bank_base[banknr];
+		chip->IO_ADDR_W = nand->bank_base[banknr];
+	}
+	writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+	nand->selected_bank = banknr;
+}
+
+static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+	struct jz_nand *nand = mtd_to_jz_nand(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint32_t reg;
+	void __iomem *bank_base = nand->bank_base[nand->selected_bank];
+
+	BUG_ON(nand->selected_bank < 0);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE));
+		if (ctrl & NAND_ALE)
+			bank_base += JZ_NAND_MEM_ADDR_OFFSET;
+		else if (ctrl & NAND_CLE)
+			bank_base += JZ_NAND_MEM_CMD_OFFSET;
+		chip->IO_ADDR_W = bank_base;
+
+		reg = readl(nand->base + JZ_REG_NAND_CTRL);
+		if (ctrl & NAND_NCE)
+			reg |= JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
+		else
+			reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(nand->selected_bank);
+		writel(reg, nand->base + JZ_REG_NAND_CTRL);
+	}
+	if (dat != NAND_CMD_NONE)
+		writeb(dat, chip->IO_ADDR_W);
+}
+
+static int jz_nand_dev_ready(struct mtd_info *mtd)
+{
+	struct jz_nand *nand = mtd_to_jz_nand(mtd);
+	return gpiod_get_value_cansleep(nand->busy_gpio);
+}
+
+static void jz_nand_hwctl(struct mtd_info *mtd, int mode)
+{
+	struct jz_nand *nand = mtd_to_jz_nand(mtd);
+	uint32_t reg;
+
+	writel(0, nand->base + JZ_REG_NAND_IRQ_STAT);
+	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+
+	reg |= JZ_NAND_ECC_CTRL_RESET;
+	reg |= JZ_NAND_ECC_CTRL_ENABLE;
+	reg |= JZ_NAND_ECC_CTRL_RS;
+
+	switch (mode) {
+	case NAND_ECC_READ:
+		reg &= ~JZ_NAND_ECC_CTRL_ENCODING;
+		nand->is_reading = true;
+		break;
+	case NAND_ECC_WRITE:
+		reg |= JZ_NAND_ECC_CTRL_ENCODING;
+		nand->is_reading = false;
+		break;
+	default:
+		break;
+	}
+
+	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+}
+
+static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat,
+	uint8_t *ecc_code)
+{
+	struct jz_nand *nand = mtd_to_jz_nand(mtd);
+	uint32_t reg, status;
+	int i;
+	unsigned int timeout = 1000;
+	static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4,
+						0x8b, 0xff, 0xb7, 0x6f};
+
+	if (nand->is_reading)
+		return 0;
+
+	do {
+		status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+	} while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);
+
+	if (timeout == 0)
+		return -1;
+
+	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+	for (i = 0; i < 9; ++i)
+		ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i);
+
+	/* If the written data is completely 0xff, we also want to write 0xff as
+	 * ecc, otherwise we will get in trouble when doing subpage writes. */
+	if (memcmp(ecc_code, empty_block_ecc, 9) == 0)
+		memset(ecc_code, 0xff, 9);
+
+	return 0;
+}
+
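+/*
+ * The Reed-Solomon code uses 9-bit symbols, so the error index reported by
+ * the controller is first converted into a byte index plus a bit offset
+ * before the error mask is XORed into the data.
+ */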
+static void jz_nand_correct_data(uint8_t *dat, int index, int mask)
+{
+	int offset = index & 0x7;
+	uint16_t data;
+
+	index += (index >> 3);
+
+	data = dat[index];
+	data |= dat[index+1] << 8;
+
+	mask ^= (data >> offset) & 0x1ff;
+	data &= ~(0x1ff << offset);
+	data |= (mask << offset);
+
+	dat[index] = data & 0xff;
+	dat[index+1] = (data >> 8) & 0xff;
+}
+
+static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat,
+	uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+	struct jz_nand *nand = mtd_to_jz_nand(mtd);
+	int i, error_count, index;
+	uint32_t reg, status, error;
+	unsigned int timeout = 1000;
+
+	for (i = 0; i < 9; ++i)
+		writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i);
+
+	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+	reg |= JZ_NAND_ECC_CTRL_PAR_READY;
+	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+	do {
+		status = readl(nand->base + JZ_REG_NAND_IRQ_STAT);
+	} while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);
+
+	if (timeout == 0)
+		return -ETIMEDOUT;
+
+	reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL);
+	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
+	writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL);
+
+	if (status & JZ_NAND_STATUS_ERROR) {
+		if (status & JZ_NAND_STATUS_UNCOR_ERROR)
+			return -EBADMSG;
+
+		error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;
+
+		for (i = 0; i < error_count; ++i) {
+			error = readl(nand->base + JZ_REG_NAND_ERR(i));
+			index = ((error >> 16) & 0x1ff) - 1;
+			if (index >= 0 && index < 512)
+				jz_nand_correct_data(dat, index, error & 0x1ff);
+		}
+
+		return error_count;
+	}
+
+	return 0;
+}
+
+static int jz_nand_ioremap_resource(struct platform_device *pdev,
+	const char *name, struct resource **res, void *__iomem *base)
+{
+	int ret;
+
+	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	if (!*res) {
+		dev_err(&pdev->dev, "Failed to get platform %s memory\n", name);
+		ret = -ENXIO;
+		goto err;
+	}
+
+	*res = request_mem_region((*res)->start, resource_size(*res),
+				pdev->name);
+	if (!*res) {
+		dev_err(&pdev->dev, "Failed to request %s memory region\n", name);
+		ret = -EBUSY;
+		goto err;
+	}
+
+	*base = ioremap((*res)->start, resource_size(*res));
+	if (!*base) {
+		dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name);
+		ret = -EBUSY;
+		goto err_release_mem;
+	}
+
+	return 0;
+
+err_release_mem:
+	release_mem_region((*res)->start, resource_size(*res));
+err:
+	*res = NULL;
+	*base = NULL;
+	return ret;
+}
+
+static inline void jz_nand_iounmap_resource(struct resource *res,
+					    void __iomem *base)
+{
+	iounmap(base);
+	release_mem_region(res->start, resource_size(res));
+}
+
+static int jz_nand_detect_bank(struct platform_device *pdev,
+			       struct jz_nand *nand, unsigned char bank,
+			       size_t chipnr, uint8_t *nand_maf_id,
+			       uint8_t *nand_dev_id)
+{
+	int ret;
+	int gpio;
+	char gpio_name[9];
+	char res_name[6];
+	uint32_t ctrl;
+	struct nand_chip *chip = &nand->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	/* Request GPIO port. */
+	gpio = JZ_GPIO_MEM_CS0 + bank - 1;
+	sprintf(gpio_name, "NAND CS%d", bank);
+	ret = gpio_request(gpio, gpio_name);
+	if (ret) {
+		dev_warn(&pdev->dev,
+			"Failed to request %s gpio %d: %d\n",
+			gpio_name, gpio, ret);
+		goto notfound_gpio;
+	}
+
+	/* Request I/O resource. */
+	sprintf(res_name, "bank%d", bank);
+	ret = jz_nand_ioremap_resource(pdev, res_name,
+					&nand->bank_mem[bank - 1],
+					&nand->bank_base[bank - 1]);
+	if (ret)
+		goto notfound_resource;
+
+	/* Enable chip in bank. */
+	jz_gpio_set_function(gpio, JZ_GPIO_FUNC_MEM_CS0);
+	ctrl = readl(nand->base + JZ_REG_NAND_CTRL);
+	ctrl |= JZ_NAND_CTRL_ENABLE_CHIP(bank - 1);
+	writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+
+	if (chipnr == 0) {
+		/* Detect first chip. */
+		ret = nand_scan_ident(mtd, 1, NULL);
+		if (ret)
+			goto notfound_id;
+
+		/* Retrieve the IDs from the first chip. */
+		chip->select_chip(mtd, 0);
+		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+		*nand_maf_id = chip->read_byte(mtd);
+		*nand_dev_id = chip->read_byte(mtd);
+	} else {
+		/* Detect additional chip. */
+		chip->select_chip(mtd, chipnr);
+		chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+		if (*nand_maf_id != chip->read_byte(mtd)
+		 || *nand_dev_id != chip->read_byte(mtd)) {
+			ret = -ENODEV;
+			goto notfound_id;
+		}
+
+		/* Update size of the MTD. */
+		chip->numchips++;
+		mtd->size += chip->chipsize;
+	}
+
+	dev_info(&pdev->dev, "Found chip %i on bank %i\n", chipnr, bank);
+	return 0;
+
+notfound_id:
+	dev_info(&pdev->dev, "No chip found on bank %i\n", bank);
+	ctrl &= ~(JZ_NAND_CTRL_ENABLE_CHIP(bank - 1));
+	writel(ctrl, nand->base + JZ_REG_NAND_CTRL);
+	jz_gpio_set_function(gpio, JZ_GPIO_FUNC_NONE);
+	jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+				 nand->bank_base[bank - 1]);
+notfound_resource:
+	gpio_free(gpio);
+notfound_gpio:
+	return ret;
+}
+
+static int jz_nand_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct jz_nand *nand;
+	struct nand_chip *chip;
+	struct mtd_info *mtd;
+	struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	size_t chipnr, bank_idx;
+	uint8_t nand_maf_id = 0, nand_dev_id = 0;
+
+	nand = kzalloc(sizeof(*nand), GFP_KERNEL);
+	if (!nand)
+		return -ENOMEM;
+
+	ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base);
+	if (ret)
+		goto err_free;
+
+	nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN);
+	if (IS_ERR(nand->busy_gpio)) {
+		ret = PTR_ERR(nand->busy_gpio);
+		dev_err(&pdev->dev, "Failed to request busy gpio %d\n",
+		    ret);
+		goto err_iounmap_mmio;
+	}
+
+	chip		= &nand->chip;
+	mtd		= nand_to_mtd(chip);
+	mtd->dev.parent = &pdev->dev;
+	mtd->name	= "jz4740-nand";
+
+	chip->ecc.hwctl		= jz_nand_hwctl;
+	chip->ecc.calculate	= jz_nand_calculate_ecc_rs;
+	chip->ecc.correct	= jz_nand_correct_ecc_rs;
+	chip->ecc.mode		= NAND_ECC_HW_OOB_FIRST;
+	chip->ecc.size		= 512;
+	chip->ecc.bytes		= 9;
+	chip->ecc.strength	= 4;
+	chip->ecc.options	= NAND_ECC_GENERIC_ERASED_CHECK;
+
+	chip->chip_delay = 50;
+	chip->cmd_ctrl = jz_nand_cmd_ctrl;
+	chip->select_chip = jz_nand_select_chip;
+
+	if (nand->busy_gpio)
+		chip->dev_ready = jz_nand_dev_ready;
+
+	platform_set_drvdata(pdev, nand);
+
+	/* We are going to autodetect NAND chips in the banks specified in the
+	 * platform data. Although nand_scan_ident() can detect multiple chips,
+	 * it requires those chips to be numbered consecutively, which is not
+	 * always the case for external memory banks. A fixed chip-to-bank
+	 * mapping is not practical either, since for example Dingoo units
+	 * produced at different times have NAND chips in different banks.
+	 */
+	chipnr = 0;
+	for (bank_idx = 0; bank_idx < JZ_NAND_NUM_BANKS; bank_idx++) {
+		unsigned char bank;
+
+		/* If there is no platform data, look for NAND in bank 1,
+		 * which is the most likely bank since it is the only one
+		 * that can be booted from.
+		 */
+		bank = pdata ? pdata->banks[bank_idx] : bank_idx ^ 1;
+		if (bank == 0)
+			break;
+		if (bank > JZ_NAND_NUM_BANKS) {
+			dev_warn(&pdev->dev,
+				"Skipping non-existing bank: %d\n", bank);
+			continue;
+		}
+		/* The detection routine will directly or indirectly call
+		 * jz_nand_select_chip(), so nand->banks has to contain the
+		 * bank we're checking.
+		 */
+		nand->banks[chipnr] = bank;
+		if (jz_nand_detect_bank(pdev, nand, bank, chipnr,
+					&nand_maf_id, &nand_dev_id) == 0)
+			chipnr++;
+		else
+			nand->banks[chipnr] = 0;
+	}
+	if (chipnr == 0) {
+		dev_err(&pdev->dev, "No NAND chips found\n");
+		goto err_iounmap_mmio;
+	}
+
+	if (pdata && pdata->ident_callback) {
+		pdata->ident_callback(pdev, mtd, &pdata->partitions,
+					&pdata->num_partitions);
+	}
+
+	ret = nand_scan_tail(mtd);
+	if (ret) {
+		dev_err(&pdev->dev,  "Failed to scan NAND\n");
+		goto err_unclaim_banks;
+	}
+
+	ret = mtd_device_parse_register(mtd, NULL, NULL,
+					pdata ? pdata->partitions : NULL,
+					pdata ? pdata->num_partitions : 0);
+
+	if (ret) {
+		dev_err(&pdev->dev, "Failed to add mtd device\n");
+		goto err_nand_release;
+	}
+
+	dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n");
+
+	return 0;
+
+err_nand_release:
+	nand_release(mtd);
+err_unclaim_banks:
+	while (chipnr--) {
+		unsigned char bank = nand->banks[chipnr];
+		gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+		jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+					 nand->bank_base[bank - 1]);
+	}
+	writel(0, nand->base + JZ_REG_NAND_CTRL);
+err_iounmap_mmio:
+	jz_nand_iounmap_resource(nand->mem, nand->base);
+err_free:
+	kfree(nand);
+	return ret;
+}
+
+static int jz_nand_remove(struct platform_device *pdev)
+{
+	struct jz_nand *nand = platform_get_drvdata(pdev);
+	size_t i;
+
+	nand_release(nand_to_mtd(&nand->chip));
+
+	/* Deassert and disable all chips */
+	writel(0, nand->base + JZ_REG_NAND_CTRL);
+
+	for (i = 0; i < JZ_NAND_NUM_BANKS; ++i) {
+		unsigned char bank = nand->banks[i];
+		if (bank != 0) {
+			jz_nand_iounmap_resource(nand->bank_mem[bank - 1],
+						 nand->bank_base[bank - 1]);
+			gpio_free(JZ_GPIO_MEM_CS0 + bank - 1);
+		}
+	}
+
+	jz_nand_iounmap_resource(nand->mem, nand->base);
+
+	kfree(nand);
+
+	return 0;
+}
+
+static struct platform_driver jz_nand_driver = {
+	.probe = jz_nand_probe,
+	.remove = jz_nand_remove,
+	.driver = {
+		.name = "jz4740-nand",
+	},
+};
+
+module_platform_driver(jz_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC");
+MODULE_ALIAS("platform:jz4740-nand");
diff --git a/drivers/mtd/nand/rawnand/jz4780_bch.c b/drivers/mtd/nand/rawnand/jz4780_bch.c
new file mode 100644
index 000000000000..731c6051d91e
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/jz4780_bch.c
@@ -0,0 +1,380 @@ 
+/*
+ * JZ4780 BCH controller
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "jz4780_bch.h"
+
+#define BCH_BHCR			0x0
+#define BCH_BHCCR			0x8
+#define BCH_BHCNT			0xc
+#define BCH_BHDR			0x10
+#define BCH_BHPAR0			0x14
+#define BCH_BHERR0			0x84
+#define BCH_BHINT			0x184
+#define BCH_BHINTES			0x188
+#define BCH_BHINTEC			0x18c
+#define BCH_BHINTE			0x190
+
+#define BCH_BHCR_BSEL_SHIFT		4
+#define BCH_BHCR_BSEL_MASK		(0x7f << BCH_BHCR_BSEL_SHIFT)
+#define BCH_BHCR_ENCE			BIT(2)
+#define BCH_BHCR_INIT			BIT(1)
+#define BCH_BHCR_BCHE			BIT(0)
+
+#define BCH_BHCNT_PARITYSIZE_SHIFT	16
+#define BCH_BHCNT_PARITYSIZE_MASK	(0x7f << BCH_BHCNT_PARITYSIZE_SHIFT)
+#define BCH_BHCNT_BLOCKSIZE_SHIFT	0
+#define BCH_BHCNT_BLOCKSIZE_MASK	(0x7ff << BCH_BHCNT_BLOCKSIZE_SHIFT)
+
+#define BCH_BHERR_MASK_SHIFT		16
+#define BCH_BHERR_MASK_MASK		(0xffff << BCH_BHERR_MASK_SHIFT)
+#define BCH_BHERR_INDEX_SHIFT		0
+#define BCH_BHERR_INDEX_MASK		(0x7ff << BCH_BHERR_INDEX_SHIFT)
+
+#define BCH_BHINT_ERRC_SHIFT		24
+#define BCH_BHINT_ERRC_MASK		(0x7f << BCH_BHINT_ERRC_SHIFT)
+#define BCH_BHINT_TERRC_SHIFT		16
+#define BCH_BHINT_TERRC_MASK		(0x7f << BCH_BHINT_TERRC_SHIFT)
+#define BCH_BHINT_DECF			BIT(3)
+#define BCH_BHINT_ENCF			BIT(2)
+#define BCH_BHINT_UNCOR			BIT(1)
+#define BCH_BHINT_ERR			BIT(0)
+
+#define BCH_CLK_RATE			(200 * 1000 * 1000)
+
+/* Timeout for BCH calculation/correction. */
+#define BCH_TIMEOUT_US			100000
+
+struct jz4780_bch {
+	struct device *dev;
+	void __iomem *base;
+	struct clk *clk;
+	struct mutex lock;
+};
+
+static void jz4780_bch_init(struct jz4780_bch *bch,
+			    struct jz4780_bch_params *params, bool encode)
+{
+	u32 reg;
+
+	/* Clear interrupt status. */
+	writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+
+	/* Set up BCH count register. */
+	reg = params->size << BCH_BHCNT_BLOCKSIZE_SHIFT;
+	reg |= params->bytes << BCH_BHCNT_PARITYSIZE_SHIFT;
+	writel(reg, bch->base + BCH_BHCNT);
+
+	/* Initialise and enable BCH. */
+	reg = BCH_BHCR_BCHE | BCH_BHCR_INIT;
+	reg |= params->strength << BCH_BHCR_BSEL_SHIFT;
+	if (encode)
+		reg |= BCH_BHCR_ENCE;
+	writel(reg, bch->base + BCH_BHCR);
+}
+
+static void jz4780_bch_disable(struct jz4780_bch *bch)
+{
+	writel(readl(bch->base + BCH_BHINT), bch->base + BCH_BHINT);
+	writel(BCH_BHCR_BCHE, bch->base + BCH_BHCCR);
+}
+
+static void jz4780_bch_write_data(struct jz4780_bch *bch, const void *buf,
+				  size_t size)
+{
+	size_t size32 = size / sizeof(u32);
+	size_t size8 = size % sizeof(u32);
+	const u32 *src32;
+	const u8 *src8;
+
+	src32 = (const u32 *)buf;
+	while (size32--)
+		writel(*src32++, bch->base + BCH_BHDR);
+
+	src8 = (const u8 *)src32;
+	while (size8--)
+		writeb(*src8++, bch->base + BCH_BHDR);
+}
+
+static void jz4780_bch_read_parity(struct jz4780_bch *bch, void *buf,
+				   size_t size)
+{
+	size_t size32 = size / sizeof(u32);
+	size_t size8 = size % sizeof(u32);
+	u32 *dest32;
+	u8 *dest8;
+	u32 val, offset = 0;
+
+	dest32 = (u32 *)buf;
+	while (size32--) {
+		*dest32++ = readl(bch->base + BCH_BHPAR0 + offset);
+		offset += sizeof(u32);
+	}
+
+	dest8 = (u8 *)dest32;
+	val = readl(bch->base + BCH_BHPAR0 + offset);
+	switch (size8) {
+	case 3:
+		dest8[2] = (val >> 16) & 0xff;
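+		/* fall through */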
+	case 2:
+		dest8[1] = (val >> 8) & 0xff;
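+		/* fall through */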
+	case 1:
+		dest8[0] = val & 0xff;
+		break;
+	}
+}
+
+static bool jz4780_bch_wait_complete(struct jz4780_bch *bch, unsigned int irq,
+				     u32 *status)
+{
+	u32 reg;
+	int ret;
+
+	/*
+	 * While we could use interrupts here and sleep until the operation
+	 * completes, the controller works fairly quickly (usually a few
+	 * microseconds) and so the overhead of sleeping until we get an
+	 * interrupt quite noticeably decreases performance.
+	 */
+	ret = readl_poll_timeout(bch->base + BCH_BHINT, reg,
+				 (reg & irq) == irq, 0, BCH_TIMEOUT_US);
+	if (ret)
+		return false;
+
+	if (status)
+		*status = reg;
+
+	writel(reg, bch->base + BCH_BHINT);
+	return true;
+}
+
+/**
+ * jz4780_bch_calculate() - calculate ECC for a data buffer
+ * @bch: BCH device.
+ * @params: BCH parameters.
+ * @buf: input buffer with raw data.
+ * @ecc_code: output buffer with ECC.
+ *
+ * Return: 0 on success, -ETIMEDOUT if timed out while waiting for BCH
+ * controller.
+ */
+int jz4780_bch_calculate(struct jz4780_bch *bch, struct jz4780_bch_params *params,
+			 const u8 *buf, u8 *ecc_code)
+{
+	int ret = 0;
+
+	mutex_lock(&bch->lock);
+	jz4780_bch_init(bch, params, true);
+	jz4780_bch_write_data(bch, buf, params->size);
+
+	if (jz4780_bch_wait_complete(bch, BCH_BHINT_ENCF, NULL)) {
+		jz4780_bch_read_parity(bch, ecc_code, params->bytes);
+	} else {
+		dev_err(bch->dev, "timed out while calculating ECC\n");
+		ret = -ETIMEDOUT;
+	}
+
+	jz4780_bch_disable(bch);
+	mutex_unlock(&bch->lock);
+	return ret;
+}
+EXPORT_SYMBOL(jz4780_bch_calculate);
+
+/**
+ * jz4780_bch_correct() - detect and correct bit errors
+ * @bch: BCH device.
+ * @params: BCH parameters.
+ * @buf: raw data read from the chip.
+ * @ecc_code: ECC read from the chip.
+ *
+ * Given the raw data and the ECC read from the NAND device, detects and
+ * corrects errors in the data.
+ *
+ * Return: the number of bit errors corrected, -EBADMSG if there are too many
+ * errors to correct or -ETIMEDOUT if we timed out waiting for the controller.
+ */
+int jz4780_bch_correct(struct jz4780_bch *bch, struct jz4780_bch_params *params,
+		       u8 *buf, u8 *ecc_code)
+{
+	u32 reg, mask, index;
+	int i, ret, count;
+
+	mutex_lock(&bch->lock);
+
+	jz4780_bch_init(bch, params, false);
+	jz4780_bch_write_data(bch, buf, params->size);
+	jz4780_bch_write_data(bch, ecc_code, params->bytes);
+
+	if (!jz4780_bch_wait_complete(bch, BCH_BHINT_DECF, &reg)) {
+		dev_err(bch->dev, "timed out while correcting data\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	if (reg & BCH_BHINT_UNCOR) {
+		dev_warn(bch->dev, "uncorrectable ECC error\n");
+		ret = -EBADMSG;
+		goto out;
+	}
+
+	/* Correct any detected errors. */
+	if (reg & BCH_BHINT_ERR) {
+		count = (reg & BCH_BHINT_ERRC_MASK) >> BCH_BHINT_ERRC_SHIFT;
+		ret = (reg & BCH_BHINT_TERRC_MASK) >> BCH_BHINT_TERRC_SHIFT;
+
+		for (i = 0; i < count; i++) {
+			reg = readl(bch->base + BCH_BHERR0 + (i * 4));
+			mask = (reg & BCH_BHERR_MASK_MASK) >>
+						BCH_BHERR_MASK_SHIFT;
+			index = (reg & BCH_BHERR_INDEX_MASK) >>
+						BCH_BHERR_INDEX_SHIFT;
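+			/*
+			 * Each BCH_BHERRn register gives the index of a 16-bit
+			 * word in the buffer plus a mask of bits to flip in
+			 * it, e.g. (illustrative values) index = 5 with
+			 * mask = 0x0041 flips bits 0 and 6 of buf[10]/buf[11].
+			 */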
+			buf[(index * 2) + 0] ^= mask;
+			buf[(index * 2) + 1] ^= mask >> 8;
+		}
+	} else {
+		ret = 0;
+	}
+
+out:
+	jz4780_bch_disable(bch);
+	mutex_unlock(&bch->lock);
+	return ret;
+}
+EXPORT_SYMBOL(jz4780_bch_correct);
+
+/**
+ * jz4780_bch_get() - get the BCH controller device
+ * @np: BCH device tree node.
+ *
+ * Gets the BCH controller device from the specified device tree node. The
+ * device must be released with jz4780_bch_release() when it is no longer being
+ * used.
+ *
+ * Return: a pointer to jz4780_bch; errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) is returned if the device hasn't been initialised yet.
+ */
+static struct jz4780_bch *jz4780_bch_get(struct device_node *np)
+{
+	struct platform_device *pdev;
+	struct jz4780_bch *bch;
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev || !platform_get_drvdata(pdev))
+		return ERR_PTR(-EPROBE_DEFER);
+
+	get_device(&pdev->dev);
+
+	bch = platform_get_drvdata(pdev);
+	clk_prepare_enable(bch->clk);
+
+	return bch;
+}
+
+/**
+ * of_jz4780_bch_get() - get the BCH controller from a DT node
+ * @of_node: the node that contains an "ingenic,bch-controller" property.
+ *
+ * Get the "ingenic,bch-controller" phandle from the given device tree node
+ * and pass it to jz4780_bch_get() to do the work.
+ *
+ * Return: a pointer to jz4780_bch; errors are encoded into the pointer.
+ * ERR_PTR(-EPROBE_DEFER) is returned if the device hasn't been initialised yet.
+ */
+struct jz4780_bch *of_jz4780_bch_get(struct device_node *of_node)
+{
+	struct jz4780_bch *bch = NULL;
+	struct device_node *np;
+
+	np = of_parse_phandle(of_node, "ingenic,bch-controller", 0);
+
+	if (np) {
+		bch = jz4780_bch_get(np);
+		of_node_put(np);
+	}
+	return bch;
+}
+EXPORT_SYMBOL(of_jz4780_bch_get);
+
+/**
+ * jz4780_bch_release() - release the BCH controller device
+ * @bch: BCH device.
+ */
+void jz4780_bch_release(struct jz4780_bch *bch)
+{
+	clk_disable_unprepare(bch->clk);
+	put_device(bch->dev);
+}
+EXPORT_SYMBOL(jz4780_bch_release);
+
+static int jz4780_bch_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct jz4780_bch *bch;
+	struct resource *res;
+
+	bch = devm_kzalloc(dev, sizeof(*bch), GFP_KERNEL);
+	if (!bch)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	bch->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(bch->base))
+		return PTR_ERR(bch->base);
+
+	jz4780_bch_disable(bch);
+
+	bch->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(bch->clk)) {
+		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(bch->clk));
+		return PTR_ERR(bch->clk);
+	}
+
+	clk_set_rate(bch->clk, BCH_CLK_RATE);
+
+	mutex_init(&bch->lock);
+
+	bch->dev = dev;
+	platform_set_drvdata(pdev, bch);
+
+	return 0;
+}
+
+static const struct of_device_id jz4780_bch_dt_match[] = {
+	{ .compatible = "ingenic,jz4780-bch" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, jz4780_bch_dt_match);
+
+static struct platform_driver jz4780_bch_driver = {
+	.probe		= jz4780_bch_probe,
+	.driver	= {
+		.name	= "jz4780-bch",
+		.of_match_table = of_match_ptr(jz4780_bch_dt_match),
+	},
+};
+module_platform_driver(jz4780_bch_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ4780 BCH error correction driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/rawnand/jz4780_bch.h b/drivers/mtd/nand/rawnand/jz4780_bch.h
new file mode 100644
index 000000000000..bf4718088a3a
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/jz4780_bch.h
@@ -0,0 +1,43 @@ 
+/*
+ * JZ4780 BCH controller
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_MTD_NAND_JZ4780_BCH_H__
+#define __DRIVERS_MTD_NAND_JZ4780_BCH_H__
+
+#include <linux/types.h>
+
+struct device;
+struct device_node;
+struct jz4780_bch;
+
+/**
+ * struct jz4780_bch_params - BCH parameters
+ * @size: data bytes per ECC step.
+ * @bytes: ECC bytes per step.
+ * @strength: number of correctable bits per ECC step.
+ */
+struct jz4780_bch_params {
+	int size;
+	int bytes;
+	int strength;
+};
+
+int jz4780_bch_calculate(struct jz4780_bch *bch,
+				struct jz4780_bch_params *params,
+				const u8 *buf, u8 *ecc_code);
+int jz4780_bch_correct(struct jz4780_bch *bch,
+			      struct jz4780_bch_params *params, u8 *buf,
+			      u8 *ecc_code);
+
+void jz4780_bch_release(struct jz4780_bch *bch);
+struct jz4780_bch *of_jz4780_bch_get(struct device_node *np);
+
+#endif /* __DRIVERS_MTD_NAND_JZ4780_BCH_H__ */
diff --git a/drivers/mtd/nand/rawnand/jz4780_nand.c b/drivers/mtd/nand/rawnand/jz4780_nand.c
new file mode 100644
index 000000000000..2f725bd83de8
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/jz4780_nand.c
@@ -0,0 +1,416 @@ 
+/*
+ * JZ4780 NAND driver
+ *
+ * Copyright (c) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/jz4780-nemc.h>
+
+#include "jz4780_bch.h"
+
+#define DRV_NAME	"jz4780-nand"
+
+#define OFFSET_DATA	0x00000000
+#define OFFSET_CMD	0x00400000
+#define OFFSET_ADDR	0x00800000
+
+/* Command delay when there is no R/B pin. */
+#define RB_DELAY_US	100
+
+struct jz4780_nand_cs {
+	unsigned int bank;
+	void __iomem *base;
+};
+
+struct jz4780_nand_controller {
+	struct device *dev;
+	struct jz4780_bch *bch;
+	struct nand_hw_control controller;
+	unsigned int num_banks;
+	struct list_head chips;
+	int selected;
+	struct jz4780_nand_cs cs[];
+};
+
+struct jz4780_nand_chip {
+	struct nand_chip chip;
+	struct list_head chip_list;
+
+	struct gpio_desc *busy_gpio;
+	struct gpio_desc *wp_gpio;
+	unsigned int reading: 1;
+};
+
+static inline struct jz4780_nand_chip *to_jz4780_nand_chip(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct jz4780_nand_chip, chip);
+}
+
+static inline struct jz4780_nand_controller *to_jz4780_nand_controller(struct nand_hw_control *ctrl)
+{
+	return container_of(ctrl, struct jz4780_nand_controller, controller);
+}
+
+static void jz4780_nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
+	struct jz4780_nand_cs *cs;
+
+	/* Ensure the CE line of the currently selected chip is deasserted. */
+	if (chipnr == -1 && nfc->selected >= 0) {
+		cs = &nfc->cs[nfc->selected];
+		jz4780_nemc_assert(nfc->dev, cs->bank, false);
+	}
+
+	nfc->selected = chipnr;
+}
+
+static void jz4780_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+				 unsigned int ctrl)
+{
+	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
+	struct jz4780_nand_cs *cs;
+
+	if (WARN_ON(nfc->selected < 0))
+		return;
+
+	cs = &nfc->cs[nfc->selected];
+
+	jz4780_nemc_assert(nfc->dev, cs->bank, ctrl & NAND_NCE);
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_ALE)
+		writeb(cmd, cs->base + OFFSET_ADDR);
+	else if (ctrl & NAND_CLE)
+		writeb(cmd, cs->base + OFFSET_CMD);
+}
+
+static int jz4780_nand_dev_ready(struct mtd_info *mtd)
+{
+	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+
+	return !gpiod_get_value_cansleep(nand->busy_gpio);
+}
+
+static void jz4780_nand_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+
+	nand->reading = (mode == NAND_ECC_READ);
+}
+
+static int jz4780_nand_ecc_calculate(struct mtd_info *mtd, const u8 *dat,
+				     u8 *ecc_code)
+{
+	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
+	struct jz4780_bch_params params;
+
+	/*
+	 * There is no need to generate ECC when reading; the BCH controller
+	 * does it for us as part of decoding/correction.
+	 */
+	if (nand->reading)
+		return 0;
+
+	params.size = nand->chip.ecc.size;
+	params.bytes = nand->chip.ecc.bytes;
+	params.strength = nand->chip.ecc.strength;
+
+	return jz4780_bch_calculate(nfc->bch, &params, dat, ecc_code);
+}
+
+static int jz4780_nand_ecc_correct(struct mtd_info *mtd, u8 *dat,
+				   u8 *read_ecc, u8 *calc_ecc)
+{
+	struct jz4780_nand_chip *nand = to_jz4780_nand_chip(mtd);
+	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(nand->chip.controller);
+	struct jz4780_bch_params params;
+
+	params.size = nand->chip.ecc.size;
+	params.bytes = nand->chip.ecc.bytes;
+	params.strength = nand->chip.ecc.strength;
+
+	return jz4780_bch_correct(nfc->bch, &params, dat, read_ecc);
+}
+
+static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *dev)
+{
+	struct nand_chip *chip = &nand->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct jz4780_nand_controller *nfc = to_jz4780_nand_controller(chip->controller);
+	int eccbytes;
+
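+	/*
+	 * Each correctable error costs m parity bits, where m is the Galois
+	 * field degree needed to cover the codeword (8 * size data bits plus
+	 * parity, approximated as 9 * size here). For example (illustrative
+	 * values), a 1024-byte ECC step with strength 24 gives m = fls(9216)
+	 * = 14, i.e. 14 * 24 = 336 parity bits = 42 ECC bytes.
+	 */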
+	chip->ecc.bytes = fls((1 + 8) * chip->ecc.size) *
+				(chip->ecc.strength / 8);
+
+	switch (chip->ecc.mode) {
+	case NAND_ECC_HW:
+		if (!nfc->bch) {
+			dev_err(dev, "HW BCH selected, but BCH controller not found\n");
+			return -ENODEV;
+		}
+
+		chip->ecc.hwctl = jz4780_nand_ecc_hwctl;
+		chip->ecc.calculate = jz4780_nand_ecc_calculate;
+		chip->ecc.correct = jz4780_nand_ecc_correct;
+		/* fall through */
+	case NAND_ECC_SOFT:
+		dev_info(dev, "using %s (strength %d, size %d, bytes %d)\n",
+			(nfc->bch) ? "hardware BCH" : "software ECC",
+			chip->ecc.strength, chip->ecc.size, chip->ecc.bytes);
+		break;
+	case NAND_ECC_NONE:
+		dev_info(dev, "not using ECC\n");
+		break;
+	default:
+		dev_err(dev, "ECC mode %d not supported\n", chip->ecc.mode);
+		return -EINVAL;
+	}
+
+	/* The NAND core will generate the ECC layout for SW ECC */
+	if (chip->ecc.mode != NAND_ECC_HW)
+		return 0;
+
+	/* Generate ECC layout. ECC codes are right aligned in the OOB area. */
+	eccbytes = mtd->writesize / chip->ecc.size * chip->ecc.bytes;
+
+	if (eccbytes > mtd->oobsize - 2) {
+		dev_err(dev,
+			"invalid ECC config: required %d ECC bytes, but only %d are available",
+			eccbytes, mtd->oobsize - 2);
+		return -EINVAL;
+	}
+
+	mtd->ooblayout = &nand_ooblayout_lp_ops;
+
+	return 0;
+}
+
+static int jz4780_nand_init_chip(struct platform_device *pdev,
+				struct jz4780_nand_controller *nfc,
+				struct device_node *np,
+				unsigned int chipnr)
+{
+	struct device *dev = &pdev->dev;
+	struct jz4780_nand_chip *nand;
+	struct jz4780_nand_cs *cs;
+	struct resource *res;
+	struct nand_chip *chip;
+	struct mtd_info *mtd;
+	const __be32 *reg;
+	int ret = 0;
+
+	cs = &nfc->cs[chipnr];
+
+	reg = of_get_property(np, "reg", NULL);
+	if (!reg)
+		return -EINVAL;
+
+	cs->bank = be32_to_cpu(*reg);
+
+	jz4780_nemc_set_type(nfc->dev, cs->bank, JZ4780_NEMC_BANK_NAND);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, chipnr);
+	cs->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(cs->base))
+		return PTR_ERR(cs->base);
+
+	nand = devm_kzalloc(dev, sizeof(*nand), GFP_KERNEL);
+	if (!nand)
+		return -ENOMEM;
+
+	nand->busy_gpio = devm_gpiod_get_optional(dev, "rb", GPIOD_IN);
+
+	if (IS_ERR(nand->busy_gpio)) {
+		ret = PTR_ERR(nand->busy_gpio);
+		dev_err(dev, "failed to request busy GPIO: %d\n", ret);
+		return ret;
+	} else if (nand->busy_gpio) {
+		nand->chip.dev_ready = jz4780_nand_dev_ready;
+	}
+
+	nand->wp_gpio = devm_gpiod_get_optional(dev, "wp", GPIOD_OUT_LOW);
+
+	if (IS_ERR(nand->wp_gpio)) {
+		ret = PTR_ERR(nand->wp_gpio);
+		dev_err(dev, "failed to request WP GPIO: %d\n", ret);
+		return ret;
+	}
+
+	chip = &nand->chip;
+	mtd = nand_to_mtd(chip);
+	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%s.%d", dev_name(dev),
+				   cs->bank);
+	if (!mtd->name)
+		return -ENOMEM;
+	mtd->dev.parent = dev;
+
+	chip->IO_ADDR_R = cs->base + OFFSET_DATA;
+	chip->IO_ADDR_W = cs->base + OFFSET_DATA;
+	chip->chip_delay = RB_DELAY_US;
+	chip->options = NAND_NO_SUBPAGE_WRITE;
+	chip->select_chip = jz4780_nand_select_chip;
+	chip->cmd_ctrl = jz4780_nand_cmd_ctrl;
+	chip->ecc.mode = NAND_ECC_HW;
+	chip->controller = &nfc->controller;
+	nand_set_flash_node(chip, np);
+
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		return ret;
+
+	ret = jz4780_nand_init_ecc(nand, dev);
+	if (ret)
+		return ret;
+
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		return ret;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		nand_release(mtd);
+		return ret;
+	}
+
+	list_add_tail(&nand->chip_list, &nfc->chips);
+
+	return 0;
+}
+
+static void jz4780_nand_cleanup_chips(struct jz4780_nand_controller *nfc)
+{
+	struct jz4780_nand_chip *chip;
+
+	while (!list_empty(&nfc->chips)) {
+		chip = list_first_entry(&nfc->chips, struct jz4780_nand_chip, chip_list);
+		nand_release(nand_to_mtd(&chip->chip));
+		list_del(&chip->chip_list);
+	}
+}
+
+static int jz4780_nand_init_chips(struct jz4780_nand_controller *nfc,
+				  struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np;
+	int i = 0;
+	int ret;
+	int num_chips = of_get_child_count(dev->of_node);
+
+	if (num_chips > nfc->num_banks) {
+		dev_err(dev, "found %d chips but only %d banks\n", num_chips, nfc->num_banks);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(dev->of_node, np) {
+		ret = jz4780_nand_init_chip(pdev, nfc, np, i);
+		if (ret) {
+			jz4780_nand_cleanup_chips(nfc);
+			return ret;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
+static int jz4780_nand_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	unsigned int num_banks;
+	struct jz4780_nand_controller *nfc;
+	int ret;
+
+	num_banks = jz4780_nemc_num_banks(dev);
+	if (num_banks == 0) {
+		dev_err(dev, "no banks found\n");
+		return -ENODEV;
+	}
+
+	nfc = devm_kzalloc(dev, sizeof(*nfc) + (sizeof(nfc->cs[0]) * num_banks), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	/*
+	 * Check for BCH HW before we call nand_scan_ident, to prevent us from
+	 * having to call it again if the BCH driver returns -EPROBE_DEFER.
+	 */
+	nfc->bch = of_jz4780_bch_get(dev->of_node);
+	if (IS_ERR(nfc->bch))
+		return PTR_ERR(nfc->bch);
+
+	nfc->dev = dev;
+	nfc->num_banks = num_banks;
+
+	nand_hw_control_init(&nfc->controller);
+	INIT_LIST_HEAD(&nfc->chips);
+
+	ret = jz4780_nand_init_chips(nfc, pdev);
+	if (ret) {
+		if (nfc->bch)
+			jz4780_bch_release(nfc->bch);
+		return ret;
+	}
+
+	platform_set_drvdata(pdev, nfc);
+	return 0;
+}
+
+static int jz4780_nand_remove(struct platform_device *pdev)
+{
+	struct jz4780_nand_controller *nfc = platform_get_drvdata(pdev);
+
+	if (nfc->bch)
+		jz4780_bch_release(nfc->bch);
+
+	jz4780_nand_cleanup_chips(nfc);
+
+	return 0;
+}
+
+static const struct of_device_id jz4780_nand_dt_match[] = {
+	{ .compatible = "ingenic,jz4780-nand" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, jz4780_nand_dt_match);
+
+static struct platform_driver jz4780_nand_driver = {
+	.probe		= jz4780_nand_probe,
+	.remove		= jz4780_nand_remove,
+	.driver	= {
+		.name	= DRV_NAME,
+		.of_match_table = of_match_ptr(jz4780_nand_dt_match),
+	},
+};
+module_platform_driver(jz4780_nand_driver);
+
+MODULE_AUTHOR("Alex Smith <alex@alex-smith.me.uk>");
+MODULE_AUTHOR("Harvey Hunt <harveyhuntnexus@gmail.com>");
+MODULE_DESCRIPTION("Ingenic JZ4780 NAND driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/rawnand/lpc32xx_mlc.c b/drivers/mtd/nand/rawnand/lpc32xx_mlc.c
new file mode 100644
index 000000000000..b212bb0fd902
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/lpc32xx_mlc.c
@@ -0,0 +1,902 @@ 
+/*
+ * Driver for NAND MLC Controller in LPC32xx
+ *
+ * Author: Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 WORK Microwave GmbH
+ * Copyright © 2011, 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * NAND Flash Controller Operation:
+ * - Read: Auto Decode
+ * - Write: Auto Encode
+ * - Tested Page Sizes: 2048, 4096
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_mlc.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+
+#define DRV_NAME "lpc32xx_mlc"
+
+/**********************************************************************
+* MLC NAND controller register offsets
+**********************************************************************/
+
+#define MLC_BUFF(x)			(x + 0x00000)
+#define MLC_DATA(x)			(x + 0x08000)
+#define MLC_CMD(x)			(x + 0x10000)
+#define MLC_ADDR(x)			(x + 0x10004)
+#define MLC_ECC_ENC_REG(x)		(x + 0x10008)
+#define MLC_ECC_DEC_REG(x)		(x + 0x1000C)
+#define MLC_ECC_AUTO_ENC_REG(x)		(x + 0x10010)
+#define MLC_ECC_AUTO_DEC_REG(x)		(x + 0x10014)
+#define MLC_RPR(x)			(x + 0x10018)
+#define MLC_WPR(x)			(x + 0x1001C)
+#define MLC_RUBP(x)			(x + 0x10020)
+#define MLC_ROBP(x)			(x + 0x10024)
+#define MLC_SW_WP_ADD_LOW(x)		(x + 0x10028)
+#define MLC_SW_WP_ADD_HIG(x)		(x + 0x1002C)
+#define MLC_ICR(x)			(x + 0x10030)
+#define MLC_TIME_REG(x)			(x + 0x10034)
+#define MLC_IRQ_MR(x)			(x + 0x10038)
+#define MLC_IRQ_SR(x)			(x + 0x1003C)
+#define MLC_LOCK_PR(x)			(x + 0x10044)
+#define MLC_ISR(x)			(x + 0x10048)
+#define MLC_CEH(x)			(x + 0x1004C)
+
+/**********************************************************************
+* MLC_CMD bit definitions
+**********************************************************************/
+#define MLCCMD_RESET			0xFF
+
+/**********************************************************************
+* MLC_ICR bit definitions
+**********************************************************************/
+#define MLCICR_WPROT			(1 << 3)
+#define MLCICR_LARGEBLOCK		(1 << 2)
+#define MLCICR_LONGADDR			(1 << 1)
+#define MLCICR_16BIT			(1 << 0)  /* unsupported by LPC32x0! */
+
+/**********************************************************************
+* MLC_TIME_REG bit definitions
+**********************************************************************/
+#define MLCTIMEREG_TCEA_DELAY(n)	(((n) & 0x03) << 24)
+#define MLCTIMEREG_BUSY_DELAY(n)	(((n) & 0x1F) << 19)
+#define MLCTIMEREG_NAND_TA(n)		(((n) & 0x07) << 16)
+#define MLCTIMEREG_RD_HIGH(n)		(((n) & 0x0F) << 12)
+#define MLCTIMEREG_RD_LOW(n)		(((n) & 0x0F) << 8)
+#define MLCTIMEREG_WR_HIGH(n)		(((n) & 0x0F) << 4)
+#define MLCTIMEREG_WR_LOW(n)		(((n) & 0x0F) << 0)
+
+/**********************************************************************
+* MLC_IRQ_MR and MLC_IRQ_SR bit definitions
+**********************************************************************/
+#define MLCIRQ_NAND_READY		(1 << 5)
+#define MLCIRQ_CONTROLLER_READY		(1 << 4)
+#define MLCIRQ_DECODE_FAILURE		(1 << 3)
+#define MLCIRQ_DECODE_ERROR		(1 << 2)
+#define MLCIRQ_ECC_READY		(1 << 1)
+#define MLCIRQ_WRPROT_FAULT		(1 << 0)
+
+/**********************************************************************
+* MLC_LOCK_PR bit definitions
+**********************************************************************/
+#define MLCLOCKPR_MAGIC			0xA25E
+
+/**********************************************************************
+* MLC_ISR bit definitions
+**********************************************************************/
+#define MLCISR_DECODER_FAILURE		(1 << 6)
+#define MLCISR_ERRORS			((1 << 4) | (1 << 5))
+#define MLCISR_ERRORS_DETECTED		(1 << 3)
+#define MLCISR_ECC_READY		(1 << 2)
+#define MLCISR_CONTROLLER_READY		(1 << 1)
+#define MLCISR_NAND_READY		(1 << 0)
+
+/**********************************************************************
+* MLC_CEH bit definitions
+**********************************************************************/
+#define MLCCEH_NORMAL			(1 << 0)
+
+struct lpc32xx_nand_cfg_mlc {
+	uint32_t tcea_delay;
+	uint32_t busy_delay;
+	uint32_t nand_ta;
+	uint32_t rd_high;
+	uint32_t rd_low;
+	uint32_t wr_high;
+	uint32_t wr_low;
+	int wp_gpio;
+	struct mtd_partition *parts;
+	unsigned num_parts;
+};
+
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = ((section + 1) * 16) - nand_chip->ecc.bytes;
+	oobregion->length = nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = 16 * section;
+	oobregion->length = 16 - nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+	.ecc = lpc32xx_ooblayout_ecc,
+	.free = lpc32xx_ooblayout_free,
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt = {
+	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+		   NAND_BBT_WRITE,
+	.pages = { 524224, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+static struct nand_bbt_descr lpc32xx_nand_bbt_mirror = {
+	.options = NAND_BBT_ABSPAGE | NAND_BBT_2BIT | NAND_BBT_NO_OOB |
+		   NAND_BBT_WRITE,
+	.pages = { 524160, 0, 0, 0, 0, 0, 0, 0 },
+};
+
+struct lpc32xx_nand_host {
+	struct nand_chip	nand_chip;
+	struct lpc32xx_mlc_platform_data *pdata;
+	struct clk		*clk;
+	void __iomem		*io_base;
+	int			irq;
+	struct lpc32xx_nand_cfg_mlc	*ncfg;
+	struct completion       comp_nand;
+	struct completion       comp_controller;
+	uint32_t llptr;
+	/*
+	 * Physical addresses of ECC buffer, DMA data buffers, OOB data buffer
+	 */
+	dma_addr_t		oob_buf_phy;
+	/*
+	 * Virtual addresses of ECC buffer, DMA data buffers, OOB data buffer
+	 */
+	uint8_t			*oob_buf;
+	/* Physical address of DMA base address */
+	dma_addr_t		io_base_phy;
+
+	struct completion	comp_dma;
+	struct dma_chan		*dma_chan;
+	struct dma_slave_config	dma_slave_config;
+	struct scatterlist	sgl;
+	uint8_t			*dma_buf;
+	uint8_t			*dummy_buf;
+	int			mlcsubpages; /* number of 512bytes-subpages */
+};
+
+/*
+ * Activate/Deactivate DMA Operation:
+ *
+ * Using the PL080 DMA controller for transferring the 512-byte subpages
+ * instead of doing readl() / writel() in a loop slows things down
+ * significantly. Measurements via getnstimeofday() of 512-byte subpage reads
+ * show:
+ *
+ * - readl() of 128 x 32 bits in a loop: ~20us
+ * - DMA read of 512 bytes (32 bit, 4...128 word bursts): ~60us
+ * - DMA read of 512 bytes (32 bit, no bursts): ~100us
+ *
+ * These numbers cover the transfer itself; in the DMA case only the
+ * wait_for_completion() is measured (DMA setup is _not_ included).
+ *
+ * Note that the 512-byte subpage transfer is done directly from/to a
+ * FIFO/buffer inside the NAND controller. Most of the time (~400-800us for a
+ * 2048-byte page) is spent waiting for the NAND IRQ anyway, i.e. for the
+ * controller to transfer data between its internal buffer and the NAND chip.
+ *
+ * Therefore, using the PL080 DMA is disabled by default, for now.
+ */
+static int use_dma;
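+/*
+ * A minimal sketch of how this choice could be exposed at module load time
+ * (hypothetical, not wired up in this driver; "use_dma" refers to the
+ * variable above):
+ *
+ *	module_param(use_dma, int, 0444);
+ *	MODULE_PARM_DESC(use_dma, "Use PL080 DMA for subpage transfers");
+ */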
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+	uint32_t clkrate, tmp;
+
+	/* Reset MLC controller */
+	writel(MLCCMD_RESET, MLC_CMD(host->io_base));
+	udelay(1000);
+
+	/* Get base clock for MLC block */
+	clkrate = clk_get_rate(host->clk);
+	if (clkrate == 0)
+		clkrate = 104000000;
+
+	/*
+	 * Unlock MLC_ICR (among other registers; it will be locked again
+	 * automatically).
+	 */
+	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+	/* Configure MLC Controller: Large Block, 5 Byte Address */
+	tmp = MLCICR_LARGEBLOCK | MLCICR_LONGADDR;
+	writel(tmp, MLC_ICR(host->io_base));
+
+	/*
+	 * Unlock MLC_TIME_REG (among other registers; it will be locked again
+	 * automatically).
+	 */
+	writew(MLCLOCKPR_MAGIC, MLC_LOCK_PR(host->io_base));
+
+	/* Compute clock setup values, see LPC and NAND manual */
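+	/*
+	 * For example (illustrative numbers): with the 104 MHz fallback clock
+	 * and a requested 40 MHz write-high rate, 104000000 / 40000000 + 1 = 3
+	 * is written into the WR_HIGH field.
+	 */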
+	tmp = 0;
+	tmp |= MLCTIMEREG_TCEA_DELAY(clkrate / host->ncfg->tcea_delay + 1);
+	tmp |= MLCTIMEREG_BUSY_DELAY(clkrate / host->ncfg->busy_delay + 1);
+	tmp |= MLCTIMEREG_NAND_TA(clkrate / host->ncfg->nand_ta + 1);
+	tmp |= MLCTIMEREG_RD_HIGH(clkrate / host->ncfg->rd_high + 1);
+	tmp |= MLCTIMEREG_RD_LOW(clkrate / host->ncfg->rd_low);
+	tmp |= MLCTIMEREG_WR_HIGH(clkrate / host->ncfg->wr_high + 1);
+	tmp |= MLCTIMEREG_WR_LOW(clkrate / host->ncfg->wr_low);
+	writel(tmp, MLC_TIME_REG(host->io_base));
+
+	/* Enable IRQ for CONTROLLER_READY and NAND_READY */
+	writeb(MLCIRQ_CONTROLLER_READY | MLCIRQ_NAND_READY,
+			MLC_IRQ_MR(host->io_base));
+
+	/* Normal nCE operation: nCE controlled by controller */
+	writel(MLCCEH_NORMAL, MLC_CEH(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+				  unsigned int ctrl)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (cmd != NAND_CMD_NONE) {
+		if (ctrl & NAND_CLE)
+			writel(cmd, MLC_CMD(host->io_base));
+		else
+			writel(cmd, MLC_ADDR(host->io_base));
+	}
+}
+
+/*
+ * Read Device Ready (NAND device _and_ controller ready)
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if ((readb(MLC_ISR(host->io_base)) &
+	     (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY)) ==
+	    (MLCISR_CONTROLLER_READY | MLCISR_NAND_READY))
+		return  1;
+
+	return 0;
+}
+
+static irqreturn_t lpc3xxx_nand_irq(int irq, struct lpc32xx_nand_host *host)
+{
+	uint8_t sr;
+
+	/* Clear interrupt flag by reading status */
+	sr = readb(MLC_IRQ_SR(host->io_base));
+	if (sr & MLCIRQ_NAND_READY)
+		complete(&host->comp_nand);
+	if (sr & MLCIRQ_CONTROLLER_READY)
+		complete(&host->comp_controller);
+
+	return IRQ_HANDLED;
+}
+
+static int lpc32xx_waitfunc_nand(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	if (readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)
+		goto exit;
+
+	wait_for_completion(&host->comp_nand);
+
+	while (!(readb(MLC_ISR(host->io_base)) & MLCISR_NAND_READY)) {
+		/* Seems to be delayed sometimes by controller */
+		dev_dbg(&mtd->dev, "Warning: NAND not ready.\n");
+		cpu_relax();
+	}
+
+exit:
+	return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc_controller(struct mtd_info *mtd,
+				       struct nand_chip *chip)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	if (readb(MLC_ISR(host->io_base)) & MLCISR_CONTROLLER_READY)
+		goto exit;
+
+	wait_for_completion(&host->comp_controller);
+
+	while (!(readb(MLC_ISR(host->io_base)) &
+		 MLCISR_CONTROLLER_READY)) {
+		dev_dbg(&mtd->dev, "Warning: Controller not ready.\n");
+		cpu_relax();
+	}
+
+exit:
+	return NAND_STATUS_READY;
+}
+
+static int lpc32xx_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	lpc32xx_waitfunc_nand(mtd, chip);
+	lpc32xx_waitfunc_controller(mtd, chip);
+
+	return NAND_STATUS_READY;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+	complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, void *mem, int len,
+			    enum dma_transfer_direction dir)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	struct dma_async_tx_descriptor *desc;
+	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	int res;
+
+	sg_init_one(&host->sgl, mem, len);
+
+	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+			 DMA_BIDIRECTIONAL);
+	if (res != 1) {
+		dev_err(mtd->dev.parent, "Failed to map sg list\n");
+		return -ENXIO;
+	}
+	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+				       flags);
+	if (!desc) {
+		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+		goto out1;
+	}
+
+	init_completion(&host->comp_dma);
+	desc->callback = lpc32xx_dma_complete_func;
+	desc->callback_param = &host->comp_dma;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dma_chan);
+
+	wait_for_completion_timeout(&host->comp_dma, msecs_to_jiffies(1000));
+
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+	return 0;
+out1:
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+	return -ENXIO;
+}
+
+static int lpc32xx_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			     uint8_t *buf, int oob_required, int page)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	int i, j;
+	uint8_t *oobbuf = chip->oob_poi;
+	uint32_t mlc_isr;
+	int res;
+	uint8_t *dma_buf;
+	bool dma_mapped;
+
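+	/*
+	 * Buffers in lowmem can be handed to the DMA engine directly;
+	 * otherwise fall back to the driver's bounce buffer and copy.
+	 */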
+	if ((void *)buf <= high_memory) {
+		dma_buf = buf;
+		dma_mapped = true;
+	} else {
+		dma_buf = host->dma_buf;
+		dma_mapped = false;
+	}
+
+	/* Writing Command and Address */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* For all sub-pages */
+	for (i = 0; i < host->mlcsubpages; i++) {
+		/* Start Auto Decode Command */
+		writeb(0x00, MLC_ECC_AUTO_DEC_REG(host->io_base));
+
+		/* Wait for Controller Ready */
+		lpc32xx_waitfunc_controller(mtd, chip);
+
+		/* Check ECC Error status */
+		mlc_isr = readl(MLC_ISR(host->io_base));
+		if (mlc_isr & MLCISR_DECODER_FAILURE) {
+			mtd->ecc_stats.failed++;
+			dev_warn(&mtd->dev, "%s: DECODER_FAILURE\n", __func__);
+		} else if (mlc_isr & MLCISR_ERRORS_DETECTED) {
+			mtd->ecc_stats.corrected += ((mlc_isr >> 4) & 0x3) + 1;
+		}
+
+		/* Read 512 + 16 Bytes */
+		if (use_dma) {
+			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+					       DMA_DEV_TO_MEM);
+			if (res)
+				return res;
+		} else {
+			for (j = 0; j < (512 >> 2); j++) {
+				*((uint32_t *)(buf)) =
+					readl(MLC_BUFF(host->io_base));
+				buf += 4;
+			}
+		}
+		for (j = 0; j < (16 >> 2); j++) {
+			*((uint32_t *)(oobbuf)) =
+				readl(MLC_BUFF(host->io_base));
+			oobbuf += 4;
+		}
+	}
+
+	if (use_dma && !dma_mapped)
+		memcpy(buf, dma_buf, mtd->writesize);
+
+	return 0;
+}
+
+static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       const uint8_t *buf, int oob_required,
+				       int page)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	const uint8_t *oobbuf = chip->oob_poi;
+	uint8_t *dma_buf = (uint8_t *)buf;
+	int res;
+	int i, j;
+
+	if (use_dma && (void *)buf >= high_memory) {
+		dma_buf = host->dma_buf;
+		memcpy(dma_buf, buf, mtd->writesize);
+	}
+
+	for (i = 0; i < host->mlcsubpages; i++) {
+		/* Start Encode */
+		writeb(0x00, MLC_ECC_ENC_REG(host->io_base));
+
+		/* Write 512 + 6 Bytes to Buffer */
+		if (use_dma) {
+			res = lpc32xx_xmit_dma(mtd, dma_buf + i * 512, 512,
+					       DMA_MEM_TO_DEV);
+			if (res)
+				return res;
+		} else {
+			for (j = 0; j < (512 >> 2); j++) {
+				writel(*((uint32_t *)(buf)),
+				       MLC_BUFF(host->io_base));
+				buf += 4;
+			}
+		}
+		writel(*((uint32_t *)(oobbuf)), MLC_BUFF(host->io_base));
+		oobbuf += 4;
+		writew(*((uint16_t *)(oobbuf)), MLC_BUFF(host->io_base));
+		oobbuf += 12;
+
+		/* Auto Encode w/ Bit 8 = 0 (see LPC MLC Controller manual) */
+		writeb(0x00, MLC_ECC_AUTO_ENC_REG(host->io_base));
+
+		/* Wait for Controller Ready */
+		lpc32xx_waitfunc_controller(mtd, chip);
+	}
+	return 0;
+}
+
+static int lpc32xx_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	/* Read whole page - necessary with MLC controller! */
+	lpc32xx_read_page(mtd, chip, host->dummy_buf, 1, page);
+
+	return 0;
+}
+
+static int lpc32xx_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			      int page)
+{
+	/* None, write_oob conflicts with the automatic LPC MLC ECC decoder! */
+	return 0;
+}
+
+/* Prepares MLC for transfers with H/W ECC enabled: always enabled anyway */
+static void lpc32xx_ecc_enable(struct mtd_info *mtd, int mode)
+{
+	/* Always enabled! */
+}
+
+static int lpc32xx_dma_setup(struct lpc32xx_nand_host *host)
+{
+	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+	dma_cap_mask_t mask;
+
+	if (!host->pdata || !host->pdata->dma_filter) {
+		dev_err(mtd->dev.parent, "no DMA platform data\n");
+		return -ENOENT;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+					     "nand-mlc");
+	if (!host->dma_chan) {
+		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+		return -EBUSY;
+	}
+
+	/*
+	 * Set direction to a sensible value even if the dmaengine driver
+	 * should ignore it. With the default (DMA_MEM_TO_MEM), the amba-pl08x
+	 * driver complains about an "alien transfer direction".
+	 */
+	host->dma_slave_config.direction = DMA_DEV_TO_MEM;
+	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.src_maxburst = 128;
+	host->dma_slave_config.dst_maxburst = 128;
+	/* DMA controller does flow control: */
+	host->dma_slave_config.device_fc = false;
+	host->dma_slave_config.src_addr = MLC_BUFF(host->io_base_phy);
+	host->dma_slave_config.dst_addr = MLC_BUFF(host->io_base_phy);
+	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+		goto out1;
+	}
+
+	return 0;
+out1:
+	dma_release_channel(host->dma_chan);
+	return -ENXIO;
+}
+
+static struct lpc32xx_nand_cfg_mlc *lpc32xx_parse_dt(struct device *dev)
+{
+	struct lpc32xx_nand_cfg_mlc *ncfg;
+	struct device_node *np = dev->of_node;
+
+	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+	if (!ncfg)
+		return NULL;
+
+	of_property_read_u32(np, "nxp,tcea-delay", &ncfg->tcea_delay);
+	of_property_read_u32(np, "nxp,busy-delay", &ncfg->busy_delay);
+	of_property_read_u32(np, "nxp,nand-ta", &ncfg->nand_ta);
+	of_property_read_u32(np, "nxp,rd-high", &ncfg->rd_high);
+	of_property_read_u32(np, "nxp,rd-low", &ncfg->rd_low);
+	of_property_read_u32(np, "nxp,wr-high", &ncfg->wr_high);
+	of_property_read_u32(np, "nxp,wr-low", &ncfg->wr_low);
+
+	if (!ncfg->tcea_delay || !ncfg->busy_delay || !ncfg->nand_ta ||
+	    !ncfg->rd_high || !ncfg->rd_low || !ncfg->wr_high ||
+	    !ncfg->wr_low) {
+		dev_err(dev, "chip parameters not specified correctly\n");
+		return NULL;
+	}
+
+	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+	return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *nand_chip;
+	struct resource *rc;
+	int res;
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+	if (IS_ERR(host->io_base))
+		return PTR_ERR(host->io_base);
+
+	host->io_base_phy = rc->start;
+
+	nand_chip = &host->nand_chip;
+	mtd = nand_to_mtd(nand_chip);
+	if (pdev->dev.of_node)
+		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+	if (!host->ncfg) {
+		dev_err(&pdev->dev,
+			"Missing or bad NAND config from device tree\n");
+		return -ENOENT;
+	}
+	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	if (gpio_is_valid(host->ncfg->wp_gpio) &&
+			gpio_request(host->ncfg->wp_gpio, "NAND WP")) {
+		dev_err(&pdev->dev, "GPIO not available\n");
+		return -EBUSY;
+	}
+	lpc32xx_wp_disable(host);
+
+	host->pdata = dev_get_platdata(&pdev->dev);
+
+	/* link the private data structures */
+	nand_set_controller_data(nand_chip, host);
+	nand_set_flash_node(nand_chip, pdev->dev.of_node);
+	mtd->dev.parent = &pdev->dev;
+
+	/* Get NAND clock */
+	host->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		dev_err(&pdev->dev, "Clock initialization failure\n");
+		res = -ENOENT;
+		goto err_exit1;
+	}
+	clk_prepare_enable(host->clk);
+
+	nand_chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+	nand_chip->dev_ready = lpc32xx_nand_device_ready;
+	nand_chip->chip_delay = 25; /* us */
+	nand_chip->IO_ADDR_R = MLC_DATA(host->io_base);
+	nand_chip->IO_ADDR_W = MLC_DATA(host->io_base);
+
+	/* Init NAND controller */
+	lpc32xx_nand_setup(host);
+
+	platform_set_drvdata(pdev, host);
+
+	/* Initialize function pointers */
+	nand_chip->ecc.hwctl = lpc32xx_ecc_enable;
+	nand_chip->ecc.read_page_raw = lpc32xx_read_page;
+	nand_chip->ecc.read_page = lpc32xx_read_page;
+	nand_chip->ecc.write_page_raw = lpc32xx_write_page_lowlevel;
+	nand_chip->ecc.write_page = lpc32xx_write_page_lowlevel;
+	nand_chip->ecc.write_oob = lpc32xx_write_oob;
+	nand_chip->ecc.read_oob = lpc32xx_read_oob;
+	nand_chip->ecc.strength = 4;
+	nand_chip->ecc.bytes = 10;
+	nand_chip->waitfunc = lpc32xx_waitfunc;
+
+	nand_chip->options = NAND_NO_SUBPAGE_WRITE;
+	nand_chip->bbt_options = NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB;
+	nand_chip->bbt_td = &lpc32xx_nand_bbt;
+	nand_chip->bbt_md = &lpc32xx_nand_bbt_mirror;
+
+	if (use_dma) {
+		res = lpc32xx_dma_setup(host);
+		if (res) {
+			res = -EIO;
+			goto err_exit2;
+		}
+	}
+
+	/*
+	 * Scan to find the existence of the device and
+	 * get the type of NAND device: SMALL block or LARGE block.
+	 */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	host->dma_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+	if (!host->dma_buf) {
+		res = -ENOMEM;
+		goto err_exit3;
+	}
+
+	host->dummy_buf = devm_kzalloc(&pdev->dev, mtd->writesize, GFP_KERNEL);
+	if (!host->dummy_buf) {
+		res = -ENOMEM;
+		goto err_exit3;
+	}
+
+	nand_chip->ecc.mode = NAND_ECC_HW;
+	nand_chip->ecc.size = 512;
+	mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+	host->mlcsubpages = mtd->writesize / 512;
+
+	/* initially clear interrupt status */
+	readb(MLC_IRQ_SR(host->io_base));
+
+	init_completion(&host->comp_nand);
+	init_completion(&host->comp_controller);
+
+	host->irq = platform_get_irq(pdev, 0);
+	if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+		dev_err(&pdev->dev, "failed to get platform irq\n");
+		res = -EINVAL;
+		goto err_exit3;
+	}
+
+	if (request_irq(host->irq, (irq_handler_t)&lpc3xxx_nand_irq,
+			IRQF_TRIGGER_HIGH, DRV_NAME, host)) {
+		dev_err(&pdev->dev, "Error requesting NAND IRQ\n");
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	/*
+	 * Fills out all the uninitialized function pointers with the defaults
+	 * and scans for a bad block table if appropriate.
+	 */
+	if (nand_scan_tail(mtd)) {
+		res = -ENXIO;
+		goto err_exit4;
+	}
+
+	mtd->name = DRV_NAME;
+
+	res = mtd_device_register(mtd, host->ncfg->parts,
+				  host->ncfg->num_parts);
+	if (!res)
+		return res;
+
+	nand_release(mtd);
+
+err_exit4:
+	free_irq(host->irq, host);
+err_exit3:
+	if (use_dma)
+		dma_release_channel(host->dma_chan);
+err_exit2:
+	clk_disable_unprepare(host->clk);
+	clk_put(host->clk);
+err_exit1:
+	lpc32xx_wp_enable(host);
+	gpio_free(host->ncfg->wp_gpio);
+
+	return res;
+}
+
+/*
+ * Remove NAND device
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+
+	nand_release(mtd);
+	free_irq(host->irq, host);
+	if (use_dma)
+		dma_release_channel(host->dma_chan);
+
+	clk_disable_unprepare(host->clk);
+	clk_put(host->clk);
+
+	lpc32xx_wp_enable(host);
+	gpio_free(host->ncfg->wp_gpio);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Re-enable NAND clock */
+	clk_prepare_enable(host->clk);
+
+	/* Fresh init of NAND controller */
+	lpc32xx_nand_setup(host);
+
+	/* Disable write protect */
+	lpc32xx_wp_disable(host);
+
+	return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Enable write protect for safety */
+	lpc32xx_wp_enable(host);
+
+	/* Disable clock */
+	clk_disable_unprepare(host->clk);
+	return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+	{ .compatible = "nxp,lpc3220-mlc" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+	.probe		= lpc32xx_nand_probe,
+	.remove		= lpc32xx_nand_remove,
+	.resume		= lpc32xx_nand_resume,
+	.suspend	= lpc32xx_nand_suspend,
+	.driver		= {
+		.name	= DRV_NAME,
+		.of_match_table = lpc32xx_nand_match,
+	},
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX MLC controller");
diff --git a/drivers/mtd/nand/rawnand/lpc32xx_slc.c b/drivers/mtd/nand/rawnand/lpc32xx_slc.c
new file mode 100644
index 000000000000..018d783d37cd
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/lpc32xx_slc.c
@@ -0,0 +1,1041 @@ 
+/*
+ * NXP LPC32XX NAND SLC driver
+ *
+ * Authors:
+ *    Kevin Wells <kevin.wells@nxp.com>
+ *    Roland Stigge <stigge@antcom.de>
+ *
+ * Copyright © 2011 NXP Semiconductors
+ * Copyright © 2012 Roland Stigge
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/lpc32xx_slc.h>
+
+#define LPC32XX_MODNAME		"lpc32xx-nand"
+
+/**********************************************************************
+* SLC NAND controller register offsets
+**********************************************************************/
+
+#define SLC_DATA(x)		(x + 0x000)
+#define SLC_ADDR(x)		(x + 0x004)
+#define SLC_CMD(x)		(x + 0x008)
+#define SLC_STOP(x)		(x + 0x00C)
+#define SLC_CTRL(x)		(x + 0x010)
+#define SLC_CFG(x)		(x + 0x014)
+#define SLC_STAT(x)		(x + 0x018)
+#define SLC_INT_STAT(x)		(x + 0x01C)
+#define SLC_IEN(x)		(x + 0x020)
+#define SLC_ISR(x)		(x + 0x024)
+#define SLC_ICR(x)		(x + 0x028)
+#define SLC_TAC(x)		(x + 0x02C)
+#define SLC_TC(x)		(x + 0x030)
+#define SLC_ECC(x)		(x + 0x034)
+#define SLC_DMA_DATA(x)		(x + 0x038)
+
+/**********************************************************************
+* slc_ctrl register definitions
+**********************************************************************/
+#define SLCCTRL_SW_RESET	(1 << 2) /* Reset the NAND controller bit */
+#define SLCCTRL_ECC_CLEAR	(1 << 1) /* Reset ECC bit */
+#define SLCCTRL_DMA_START	(1 << 0) /* Start DMA channel bit */
+
+/**********************************************************************
+* slc_cfg register definitions
+**********************************************************************/
+#define SLCCFG_CE_LOW		(1 << 5) /* Force CE low bit */
+#define SLCCFG_DMA_ECC		(1 << 4) /* Enable DMA ECC bit */
+#define SLCCFG_ECC_EN		(1 << 3) /* ECC enable bit */
+#define SLCCFG_DMA_BURST	(1 << 2) /* DMA burst bit */
+#define SLCCFG_DMA_DIR		(1 << 1) /* DMA write(0)/read(1) bit */
+#define SLCCFG_WIDTH		(1 << 0) /* External device width, 0=8bit */
+
+/**********************************************************************
+* slc_stat register definitions
+**********************************************************************/
+#define SLCSTAT_DMA_FIFO	(1 << 2) /* DMA FIFO has data bit */
+#define SLCSTAT_SLC_FIFO	(1 << 1) /* SLC FIFO has data bit */
+#define SLCSTAT_NAND_READY	(1 << 0) /* NAND device is ready bit */
+
+/**********************************************************************
+* slc_int_stat, slc_ien, slc_isr, and slc_icr register definitions
+**********************************************************************/
+#define SLCSTAT_INT_TC		(1 << 1) /* Transfer count bit */
+#define SLCSTAT_INT_RDY_EN	(1 << 0) /* Ready interrupt bit */
+
+/**********************************************************************
+* slc_tac register definitions
+**********************************************************************/
+/* Computation of clock cycles on basis of controller and device clock rates */
+#define SLCTAC_CLOCKS(c, n, s)	(min_t(u32, DIV_ROUND_UP(c, n) - 1, 0xF) << s)
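+/*
+ * For example (illustrative numbers): with a 133.25 MHz controller clock and
+ * a requested 40 MHz write pulse rate, DIV_ROUND_UP(133250000, 40000000) - 1
+ * = 3 ends up in the WWIDTH field, i.e. a 4-clock write pulse.
+ */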
+
+/* Clock setting for RDY write sample wait time in 2*n clocks */
+#define SLCTAC_WDR(n)		(((n) & 0xF) << 28)
+/* Write pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_WWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 24))
+/* Write hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 20))
+/* Write setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_WSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 16))
+/* Clock setting for RDY read sample wait time in 2*n clocks */
+#define SLCTAC_RDR(n)		(((n) & 0xF) << 12)
+/* Read pulse width in clock cycles, 1 to 16 clocks */
+#define SLCTAC_RWIDTH(c, n)	(SLCTAC_CLOCKS(c, n, 8))
+/* Read hold time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RHOLD(c, n)	(SLCTAC_CLOCKS(c, n, 4))
+/* Read setup time of control and data signals, 1 to 16 clocks */
+#define SLCTAC_RSETUP(c, n)	(SLCTAC_CLOCKS(c, n, 0))
+
+/**********************************************************************
+* slc_ecc register definitions
+**********************************************************************/
+/* ECC line parity fetch macros */
+#define SLCECC_TO_LINEPAR(n)	(((n) >> 6) & 0x7FFF)
+#define SLCECC_TO_COLPAR(n)	((n) & 0x3F)
+
+/*
+ * DMA requires storage space for the DMA local buffer and the hardware ECC
+ * storage area. The DMA local buffer is only used if DMA mapping fails
+ * during runtime.
+ */
+#define LPC32XX_DMA_DATA_SIZE		4096
+#define LPC32XX_ECC_SAVE_SIZE		((4096 / 256) * 4)
+
+/* Number of bytes used for ECC stored in NAND per 256 bytes */
+#define LPC32XX_SLC_DEV_ECC_BYTES	3
+
+/*
+ * If the NAND base clock frequency can't be fetched, this frequency will be
+ * used instead as the base. This rate is used to setup the timing registers
+ * used for NAND accesses.
+ */
+#define LPC32XX_DEF_BUS_RATE		133250000
+
+/* Milliseconds for DMA FIFO timeout (unlikely anyway) */
+#define LPC32XX_DMA_TIMEOUT		100
+
+/*
+ * NAND ECC Layout for small page NAND devices
+ * Note: For large and huge page devices, the default layouts are used
+ */
+static int lpc32xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 6;
+	oobregion->offset = 10;
+
+	return 0;
+}
+
+static int lpc32xx_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 4;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = 4;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops lpc32xx_ooblayout_ops = {
+	.ecc = lpc32xx_ooblayout_ecc,
+	.free = lpc32xx_ooblayout_free,
+};
+
+static u8 bbt_pattern[] = {'B', 'b', 't', '0' };
+static u8 mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+/*
+ * Small page FLASH BBT descriptors, marker at offset 0, version at offset 6
+ * Note: Large page devices use the default layout
+ */
+static struct nand_bbt_descr bbt_smallpage_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	0,
+	.len = 4,
+	.veroffs = 6,
+	.maxblocks = 4,
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_smallpage_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	0,
+	.len = 4,
+	.veroffs = 6,
+	.maxblocks = 4,
+	.pattern = mirror_pattern
+};
+
+/*
+ * NAND platform configuration structure
+ */
+struct lpc32xx_nand_cfg_slc {
+	uint32_t wdr_clks;
+	uint32_t wwidth;
+	uint32_t whold;
+	uint32_t wsetup;
+	uint32_t rdr_clks;
+	uint32_t rwidth;
+	uint32_t rhold;
+	uint32_t rsetup;
+	int wp_gpio;
+	struct mtd_partition *parts;
+	unsigned num_parts;
+};
+
+struct lpc32xx_nand_host {
+	struct nand_chip	nand_chip;
+	struct lpc32xx_slc_platform_data *pdata;
+	struct clk		*clk;
+	void __iomem		*io_base;
+	struct lpc32xx_nand_cfg_slc *ncfg;
+
+	struct completion	comp;
+	struct dma_chan		*dma_chan;
+	uint32_t		dma_buf_len;
+	struct dma_slave_config	dma_slave_config;
+	struct scatterlist	sgl;
+
+	/*
+	 * DMA and CPU addresses of ECC work area and data buffer
+	 */
+	uint32_t		*ecc_buf;
+	uint8_t			*data_buf;
+	dma_addr_t		io_base_dma;
+};
+
+static void lpc32xx_nand_setup(struct lpc32xx_nand_host *host)
+{
+	uint32_t clkrate, tmp;
+
+	/* Reset SLC controller */
+	writel(SLCCTRL_SW_RESET, SLC_CTRL(host->io_base));
+	udelay(1000);
+
+	/* Basic setup */
+	writel(0, SLC_CFG(host->io_base));
+	writel(0, SLC_IEN(host->io_base));
+	writel((SLCSTAT_INT_TC | SLCSTAT_INT_RDY_EN),
+		SLC_ICR(host->io_base));
+
+	/* Get base clock for SLC block */
+	clkrate = clk_get_rate(host->clk);
+	if (clkrate == 0)
+		clkrate = LPC32XX_DEF_BUS_RATE;
+
+	/* Compute clock setup values */
+	tmp = SLCTAC_WDR(host->ncfg->wdr_clks) |
+		SLCTAC_WWIDTH(clkrate, host->ncfg->wwidth) |
+		SLCTAC_WHOLD(clkrate, host->ncfg->whold) |
+		SLCTAC_WSETUP(clkrate, host->ncfg->wsetup) |
+		SLCTAC_RDR(host->ncfg->rdr_clks) |
+		SLCTAC_RWIDTH(clkrate, host->ncfg->rwidth) |
+		SLCTAC_RHOLD(clkrate, host->ncfg->rhold) |
+		SLCTAC_RSETUP(clkrate, host->ncfg->rsetup);
+	writel(tmp, SLC_TAC(host->io_base));
+}
+
+/*
+ * Hardware specific access to control lines
+ */
+static void lpc32xx_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+	unsigned int ctrl)
+{
+	uint32_t tmp;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	/* Does CE state need to be changed? */
+	tmp = readl(SLC_CFG(host->io_base));
+	if (ctrl & NAND_NCE)
+		tmp |= SLCCFG_CE_LOW;
+	else
+		tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CFG(host->io_base));
+
+	if (cmd != NAND_CMD_NONE) {
+		if (ctrl & NAND_CLE)
+			writel(cmd, SLC_CMD(host->io_base));
+		else
+			writel(cmd, SLC_ADDR(host->io_base));
+	}
+}
+
+/*
+ * Read the Device Ready pin
+ */
+static int lpc32xx_nand_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	int rdy = 0;
+
+	if ((readl(SLC_STAT(host->io_base)) & SLCSTAT_NAND_READY) != 0)
+		rdy = 1;
+
+	return rdy;
+}
+
+/*
+ * Enable NAND write protect
+ */
+static void lpc32xx_wp_enable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 0);
+}
+
+/*
+ * Disable NAND write protect
+ */
+static void lpc32xx_wp_disable(struct lpc32xx_nand_host *host)
+{
+	if (gpio_is_valid(host->ncfg->wp_gpio))
+		gpio_set_value(host->ncfg->wp_gpio, 1);
+}
+
+/*
+ * Prepares SLC for transfers with H/W ECC enabled
+ */
+static void lpc32xx_nand_ecc_enable(struct mtd_info *mtd, int mode)
+{
+	/* Hardware ECC is enabled automatically in hardware as needed */
+}
+
+/*
+ * Calculates the ECC for the data
+ */
+static int lpc32xx_nand_ecc_calculate(struct mtd_info *mtd,
+				      const unsigned char *buf,
+				      unsigned char *code)
+{
+	/*
+	 * ECC is calculated automatically in hardware during syndrome read
+	 * and write operations, so it doesn't need to be calculated here.
+	 */
+	return 0;
+}
+
+/*
+ * Read a single byte from NAND device
+ */
+static uint8_t lpc32xx_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	return (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device read without ECC
+ */
+static void lpc32xx_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	/* Direct device read with no ECC */
+	while (len-- > 0)
+		*buf++ = (uint8_t)readl(SLC_DATA(host->io_base));
+}
+
+/*
+ * Simple device write without ECC
+ */
+static void lpc32xx_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+
+	/* Direct device write with no ECC */
+	while (len-- > 0)
+		writel((uint32_t)*buf++, SLC_DATA(host->io_base));
+}
+
+/*
+ * Read the OOB data from the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_read_oob_syndrome(struct mtd_info *mtd,
+					  struct nand_chip *chip, int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/*
+ * Write the OOB data to the device without ECC using FIFO method
+ */
+static int lpc32xx_nand_write_oob_syndrome(struct mtd_info *mtd,
+	struct nand_chip *chip, int page)
+{
+	int status;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * Fills in the ECC fields in the OOB buffer with the hardware generated ECC
+ */
+static void lpc32xx_slc_ecc_copy(uint8_t *spare, const uint32_t *ecc, int count)
+{
+	int i;
+
+	for (i = 0; i < (count * 3); i += 3) {
+		uint32_t ce = ecc[i / 3];
+		ce = ~(ce << 2) & 0xFFFFFF;
+		spare[i + 2] = (uint8_t)(ce & 0xFF);
+		ce >>= 8;
+		spare[i + 1] = (uint8_t)(ce & 0xFF);
+		ce >>= 8;
+		spare[i] = (uint8_t)(ce & 0xFF);
+	}
+}
+
+static void lpc32xx_dma_complete_func(void *completion)
+{
+	complete(completion);
+}
+
+static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
+			    void *mem, int len, enum dma_transfer_direction dir)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	struct dma_async_tx_descriptor *desc;
+	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
+	int res;
+
+	host->dma_slave_config.direction = dir;
+	host->dma_slave_config.src_addr = dma;
+	host->dma_slave_config.dst_addr = dma;
+	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	host->dma_slave_config.src_maxburst = 4;
+	host->dma_slave_config.dst_maxburst = 4;
+	/* DMA controller does flow control: */
+	host->dma_slave_config.device_fc = false;
+	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
+		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
+		return -ENXIO;
+	}
+
+	sg_init_one(&host->sgl, mem, len);
+
+	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
+			 DMA_BIDIRECTIONAL);
+	if (res != 1) {
+		dev_err(mtd->dev.parent, "Failed to map sg list\n");
+		return -ENXIO;
+	}
+	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
+				       flags);
+	if (!desc) {
+		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
+		goto out1;
+	}
+
+	init_completion(&host->comp);
+	desc->callback = lpc32xx_dma_complete_func;
+	desc->callback_param = &host->comp;
+
+	dmaengine_submit(desc);
+	dma_async_issue_pending(host->dma_chan);
+
+	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));
+
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+
+	return 0;
+out1:
+	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
+		     DMA_BIDIRECTIONAL);
+	return -ENXIO;
+}
+
+/*
+ * DMA read/write transfers with ECC support
+ */
+static int lpc32xx_xfer(struct mtd_info *mtd, uint8_t *buf, int eccsubpages,
+			int read)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	int i, status = 0;
+	unsigned long timeout;
+	int res;
+	enum dma_transfer_direction dir =
+		read ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
+	uint8_t *dma_buf;
+	bool dma_mapped;
+
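+	/*
+	 * Buffers outside the kernel direct mapping (above high_memory,
+	 * e.g. vmalloc'ed) cannot be handed to the DMA engine directly,
+	 * so such transfers are bounced through the preallocated data_buf.
+	 */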
+	if ((void *)buf <= high_memory) {
+		dma_buf = buf;
+		dma_mapped = true;
+	} else {
+		dma_buf = host->data_buf;
+		dma_mapped = false;
+		if (!read)
+			memcpy(host->data_buf, buf, mtd->writesize);
+	}
+
+	if (read) {
+		writel(readl(SLC_CFG(host->io_base)) |
+		       SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+		       SLCCFG_DMA_BURST, SLC_CFG(host->io_base));
+	} else {
+		writel((readl(SLC_CFG(host->io_base)) |
+			SLCCFG_ECC_EN | SLCCFG_DMA_ECC | SLCCFG_DMA_BURST) &
+		       ~SLCCFG_DMA_DIR,
+			SLC_CFG(host->io_base));
+	}
+
+	/* Clear initial ECC */
+	writel(SLCCTRL_ECC_CLEAR, SLC_CTRL(host->io_base));
+
+	/* Transfer size is data area only */
+	writel(mtd->writesize, SLC_TC(host->io_base));
+
+	/* Start transfer in the NAND controller */
+	writel(readl(SLC_CTRL(host->io_base)) | SLCCTRL_DMA_START,
+	       SLC_CTRL(host->io_base));
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		/* Data */
+		res = lpc32xx_xmit_dma(mtd, SLC_DMA_DATA(host->io_base_dma),
+				       dma_buf + i * chip->ecc.size,
+				       mtd->writesize / chip->ecc.steps, dir);
+		if (res)
+			return res;
+
+		/* Always _read_ ECC */
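+		/* (the ECC word of the last step is read out after the loop) */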
+		if (i == chip->ecc.steps - 1)
+			break;
+		if (!read) /* ECC availability delayed on write */
+			udelay(10);
+		res = lpc32xx_xmit_dma(mtd, SLC_ECC(host->io_base_dma),
+				       &host->ecc_buf[i], 4, DMA_DEV_TO_MEM);
+		if (res)
+			return res;
+	}
+
+	/*
+	 * According to NXP, the DMA can be finished here, but the NAND
+	 * controller may still have buffered data. After porting to using the
+	 * dmaengine DMA driver (amba-pl080), the condition (DMA_FIFO empty)
+	 * appears to be always true, according to tests. Keeping the check for
+	 * safety reasons for now.
+	 */
+	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) {
+		dev_warn(mtd->dev.parent, "FIFO not empty!\n");
+		timeout = jiffies + msecs_to_jiffies(LPC32XX_DMA_TIMEOUT);
+		while ((readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO) &&
+		       time_before(jiffies, timeout))
+			cpu_relax();
+		if (!time_before(jiffies, timeout)) {
+			dev_err(mtd->dev.parent, "FIFO held data too long\n");
+			status = -EIO;
+		}
+	}
+
+	/* Read last calculated ECC value */
+	if (!read)
+		udelay(10);
+	host->ecc_buf[chip->ecc.steps - 1] =
+		readl(SLC_ECC(host->io_base));
+
+	/* Flush DMA */
+	dmaengine_terminate_all(host->dma_chan);
+
+	if (readl(SLC_STAT(host->io_base)) & SLCSTAT_DMA_FIFO ||
+	    readl(SLC_TC(host->io_base))) {
+		/* Something is left in the FIFO, something is wrong */
+		dev_err(mtd->dev.parent, "DMA FIFO failure\n");
+		status = -EIO;
+	}
+
+	/* Stop DMA & HW ECC */
+	writel(readl(SLC_CTRL(host->io_base)) & ~SLCCTRL_DMA_START,
+	       SLC_CTRL(host->io_base));
+	writel(readl(SLC_CFG(host->io_base)) &
+	       ~(SLCCFG_DMA_DIR | SLCCFG_ECC_EN | SLCCFG_DMA_ECC |
+		 SLCCFG_DMA_BURST), SLC_CFG(host->io_base));
+
+	if (!dma_mapped && read)
+		memcpy(buf, host->data_buf, mtd->writesize);
+
+	return status;
+}
+
+/*
+ * Read the data and OOB data from the device, use ECC correction with the
+ * data, disable ECC for the OOB data
+ */
+static int lpc32xx_nand_read_page_syndrome(struct mtd_info *mtd,
+					   struct nand_chip *chip, uint8_t *buf,
+					   int oob_required, int page)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	struct mtd_oob_region oobregion = { };
+	int stat, i, status, error;
+	uint8_t *oobecc, tmpecc[LPC32XX_ECC_SAVE_SIZE];
+
+	/* Issue read command */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* Read data and oob, calculate ECC */
+	status = lpc32xx_xfer(mtd, buf, chip->ecc.steps, 1);
+
+	/* Get OOB data */
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* Convert to stored ECC format */
+	lpc32xx_slc_ecc_copy(tmpecc, (uint32_t *) host->ecc_buf, chip->ecc.steps);
+
+	/* Pointer to ECC data retrieved from NAND spare area */
+	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	if (error)
+		return error;
+
+	oobecc = chip->oob_poi + oobregion.offset;
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		stat = chip->ecc.correct(mtd, buf, oobecc,
+					 &tmpecc[i * chip->ecc.bytes]);
+		if (stat < 0)
+			mtd->ecc_stats.failed++;
+		else
+			mtd->ecc_stats.corrected += stat;
+
+		buf += chip->ecc.size;
+		oobecc += chip->ecc.bytes;
+	}
+
+	return status;
+}
+
+/*
+ * Read the data and OOB data from the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_read_page_raw_syndrome(struct mtd_info *mtd,
+					       struct nand_chip *chip,
+					       uint8_t *buf, int oob_required,
+					       int page)
+{
+	/* Issue read command */
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	/* Raw reads can just use the FIFO interface */
+	chip->read_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, use ECC with the data,
+ * disable ECC for the OOB data
+ */
+static int lpc32xx_nand_write_page_syndrome(struct mtd_info *mtd,
+					    struct nand_chip *chip,
+					    const uint8_t *buf,
+					    int oob_required, int page)
+{
+	struct lpc32xx_nand_host *host = nand_get_controller_data(chip);
+	struct mtd_oob_region oobregion = { };
+	uint8_t *pb;
+	int error;
+
+	/* Write data, calculate ECC on outbound data */
+	error = lpc32xx_xfer(mtd, (uint8_t *)buf, chip->ecc.steps, 0);
+	if (error)
+		return error;
+
+	/*
+	 * The calculated ECC needs some manual work done to it before
+	 * committing it to NAND. Process the calculated ECC and place
+	 * the resultant values directly into the OOB buffer.
+	 */
+	error = mtd_ooblayout_ecc(mtd, 0, &oobregion);
+	if (error)
+		return error;
+
+	pb = chip->oob_poi + oobregion.offset;
+	lpc32xx_slc_ecc_copy(pb, (uint32_t *)host->ecc_buf, chip->ecc.steps);
+
+	/* Write ECC data to device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/*
+ * Write the data and OOB data to the device, no ECC correction with the
+ * data or OOB data
+ */
+static int lpc32xx_nand_write_page_raw_syndrome(struct mtd_info *mtd,
+						struct nand_chip *chip,
+						const uint8_t *buf,
+						int oob_required, int page)
+{
+	/* Raw writes can just use the FIFO interface */
+	chip->write_buf(mtd, buf, chip->ecc.size * chip->ecc.steps);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+static int lpc32xx_nand_dma_setup(struct lpc32xx_nand_host *host)
+{
+	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+	dma_cap_mask_t mask;
+
+	if (!host->pdata || !host->pdata->dma_filter) {
+		dev_err(mtd->dev.parent, "no DMA platform data\n");
+		return -ENOENT;
+	}
+
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	host->dma_chan = dma_request_channel(mask, host->pdata->dma_filter,
+					     "nand-slc");
+	if (!host->dma_chan) {
+		dev_err(mtd->dev.parent, "Failed to request DMA channel\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static struct lpc32xx_nand_cfg_slc *lpc32xx_parse_dt(struct device *dev)
+{
+	struct lpc32xx_nand_cfg_slc *ncfg;
+	struct device_node *np = dev->of_node;
+
+	ncfg = devm_kzalloc(dev, sizeof(*ncfg), GFP_KERNEL);
+	if (!ncfg)
+		return NULL;
+
+	of_property_read_u32(np, "nxp,wdr-clks", &ncfg->wdr_clks);
+	of_property_read_u32(np, "nxp,wwidth", &ncfg->wwidth);
+	of_property_read_u32(np, "nxp,whold", &ncfg->whold);
+	of_property_read_u32(np, "nxp,wsetup", &ncfg->wsetup);
+	of_property_read_u32(np, "nxp,rdr-clks", &ncfg->rdr_clks);
+	of_property_read_u32(np, "nxp,rwidth", &ncfg->rwidth);
+	of_property_read_u32(np, "nxp,rhold", &ncfg->rhold);
+	of_property_read_u32(np, "nxp,rsetup", &ncfg->rsetup);
+
+	if (!ncfg->wdr_clks || !ncfg->wwidth || !ncfg->whold ||
+	    !ncfg->wsetup || !ncfg->rdr_clks || !ncfg->rwidth ||
+	    !ncfg->rhold || !ncfg->rsetup) {
+		dev_err(dev, "chip parameters not specified correctly\n");
+		return NULL;
+	}
+
+	ncfg->wp_gpio = of_get_named_gpio(np, "gpios", 0);
+
+	return ncfg;
+}
+
+/*
+ * Probe for NAND controller
+ */
+static int lpc32xx_nand_probe(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	struct resource *rc;
+	int res;
+
+	rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (rc == NULL) {
+		dev_err(&pdev->dev, "No memory resource found for device\n");
+		return -EBUSY;
+	}
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+	host->io_base_dma = rc->start;
+
+	host->io_base = devm_ioremap_resource(&pdev->dev, rc);
+	if (IS_ERR(host->io_base))
+		return PTR_ERR(host->io_base);
+
+	if (pdev->dev.of_node)
+		host->ncfg = lpc32xx_parse_dt(&pdev->dev);
+	if (!host->ncfg) {
+		dev_err(&pdev->dev,
+			"Missing or bad NAND config from device tree\n");
+		return -ENOENT;
+	}
+	if (host->ncfg->wp_gpio == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	if (gpio_is_valid(host->ncfg->wp_gpio) && devm_gpio_request(&pdev->dev,
+			host->ncfg->wp_gpio, "NAND WP")) {
+		dev_err(&pdev->dev, "GPIO not available\n");
+		return -EBUSY;
+	}
+	lpc32xx_wp_disable(host);
+
+	host->pdata = dev_get_platdata(&pdev->dev);
+
+	chip = &host->nand_chip;
+	mtd = nand_to_mtd(chip);
+	nand_set_controller_data(chip, host);
+	nand_set_flash_node(chip, pdev->dev.of_node);
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = &pdev->dev;
+
+	/* Get NAND clock */
+	host->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk)) {
+		dev_err(&pdev->dev, "Clock failure\n");
+		res = -ENOENT;
+		goto err_exit1;
+	}
+	clk_prepare_enable(host->clk);
+
+	/* Set NAND IO addresses and command/ready functions */
+	chip->IO_ADDR_R = SLC_DATA(host->io_base);
+	chip->IO_ADDR_W = SLC_DATA(host->io_base);
+	chip->cmd_ctrl = lpc32xx_nand_cmd_ctrl;
+	chip->dev_ready = lpc32xx_nand_device_ready;
+	chip->chip_delay = 20; /* 20us command delay time */
+
+	/* Init NAND controller */
+	lpc32xx_nand_setup(host);
+
+	platform_set_drvdata(pdev, host);
+
+	/* NAND callbacks for LPC32xx SLC hardware */
+	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+	chip->read_byte = lpc32xx_nand_read_byte;
+	chip->read_buf = lpc32xx_nand_read_buf;
+	chip->write_buf = lpc32xx_nand_write_buf;
+	chip->ecc.read_page_raw = lpc32xx_nand_read_page_raw_syndrome;
+	chip->ecc.read_page = lpc32xx_nand_read_page_syndrome;
+	chip->ecc.write_page_raw = lpc32xx_nand_write_page_raw_syndrome;
+	chip->ecc.write_page = lpc32xx_nand_write_page_syndrome;
+	chip->ecc.write_oob = lpc32xx_nand_write_oob_syndrome;
+	chip->ecc.read_oob = lpc32xx_nand_read_oob_syndrome;
+	chip->ecc.calculate = lpc32xx_nand_ecc_calculate;
+	chip->ecc.correct = nand_correct_data;
+	chip->ecc.strength = 1;
+	chip->ecc.hwctl = lpc32xx_nand_ecc_enable;
+
+	/*
+	 * Allocate a large enough buffer for a single huge page plus
+	 * extra space for the spare area and ECC storage area
+	 */
+	host->dma_buf_len = LPC32XX_DMA_DATA_SIZE + LPC32XX_ECC_SAVE_SIZE;
+	host->data_buf = devm_kzalloc(&pdev->dev, host->dma_buf_len,
+				      GFP_KERNEL);
+	if (host->data_buf == NULL) {
+		res = -ENOMEM;
+		goto err_exit2;
+	}
+
+	res = lpc32xx_nand_dma_setup(host);
+	if (res) {
+		res = -EIO;
+		goto err_exit2;
+	}
+
+	/* Find NAND device */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	/* OOB and ECC CPU and DMA work areas */
+	host->ecc_buf = (uint32_t *)(host->data_buf + LPC32XX_DMA_DATA_SIZE);
+
+	/*
+	 * Small page FLASH has a unique OOB layout, but large and huge
+	 * page FLASH use the standard layout. Small page FLASH uses a
+	 * custom BBT marker layout.
+	 */
+	if (mtd->writesize <= 512)
+		mtd_set_ooblayout(mtd, &lpc32xx_ooblayout_ops);
+
+	/* These sizes remain the same regardless of page size */
+	chip->ecc.size = 256;
+	chip->ecc.bytes = LPC32XX_SLC_DEV_ECC_BYTES;
+	chip->ecc.prepad = chip->ecc.postpad = 0;
+
+	/*
+	 * Use a custom BBT marker setup for small page FLASH that
+	 * won't interfere with the ECC layout. Large and huge page
+	 * FLASH use the standard layout.
+	 */
+	if ((chip->bbt_options & NAND_BBT_USE_FLASH) &&
+	    mtd->writesize <= 512) {
+		chip->bbt_td = &bbt_smallpage_main_descr;
+		chip->bbt_md = &bbt_smallpage_mirror_descr;
+	}
+
+	/*
+	 * Fills out all the uninitialized function pointers with the defaults
+	 */
+	if (nand_scan_tail(mtd)) {
+		res = -ENXIO;
+		goto err_exit3;
+	}
+
+	mtd->name = "nxp_lpc3220_slc";
+	res = mtd_device_register(mtd, host->ncfg->parts,
+				  host->ncfg->num_parts);
+	if (!res)
+		return res;
+
+	nand_release(mtd);
+
+err_exit3:
+	dma_release_channel(host->dma_chan);
+err_exit2:
+	clk_disable_unprepare(host->clk);
+err_exit1:
+	lpc32xx_wp_enable(host);
+
+	return res;
+}
+
+/*
+ * Remove NAND device.
+ */
+static int lpc32xx_nand_remove(struct platform_device *pdev)
+{
+	uint32_t tmp;
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+
+	nand_release(mtd);
+	dma_release_channel(host->dma_chan);
+
+	/* Force CE high */
+	tmp = readl(SLC_CTRL(host->io_base));
+	tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CTRL(host->io_base));
+
+	clk_disable_unprepare(host->clk);
+	lpc32xx_wp_enable(host);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int lpc32xx_nand_resume(struct platform_device *pdev)
+{
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Re-enable NAND clock */
+	clk_prepare_enable(host->clk);
+
+	/* Fresh init of NAND controller */
+	lpc32xx_nand_setup(host);
+
+	/* Disable write protect */
+	lpc32xx_wp_disable(host);
+
+	return 0;
+}
+
+static int lpc32xx_nand_suspend(struct platform_device *pdev, pm_message_t pm)
+{
+	uint32_t tmp;
+	struct lpc32xx_nand_host *host = platform_get_drvdata(pdev);
+
+	/* Force CE high */
+	tmp = readl(SLC_CTRL(host->io_base));
+	tmp &= ~SLCCFG_CE_LOW;
+	writel(tmp, SLC_CTRL(host->io_base));
+
+	/* Enable write protect for safety */
+	lpc32xx_wp_enable(host);
+
+	/* Disable clock */
+	clk_disable_unprepare(host->clk);
+
+	return 0;
+}
+
+#else
+#define lpc32xx_nand_resume NULL
+#define lpc32xx_nand_suspend NULL
+#endif
+
+static const struct of_device_id lpc32xx_nand_match[] = {
+	{ .compatible = "nxp,lpc3220-slc" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, lpc32xx_nand_match);
+
+static struct platform_driver lpc32xx_nand_driver = {
+	.probe		= lpc32xx_nand_probe,
+	.remove		= lpc32xx_nand_remove,
+	.resume		= lpc32xx_nand_resume,
+	.suspend	= lpc32xx_nand_suspend,
+	.driver		= {
+		.name	= LPC32XX_MODNAME,
+		.of_match_table = lpc32xx_nand_match,
+	},
+};
+
+module_platform_driver(lpc32xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com>");
+MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
+MODULE_DESCRIPTION("NAND driver for the NXP LPC32XX SLC controller");
diff --git a/drivers/mtd/nand/rawnand/mpc5121_nfc.c b/drivers/mtd/nand/rawnand/mpc5121_nfc.c
new file mode 100644
index 000000000000..2a1fa86fd123
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/mpc5121_nfc.c
@@ -0,0 +1,855 @@ 
+/*
+ * Copyright 2004-2008 Freescale Semiconductor, Inc.
+ * Copyright 2009 Semihalf.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009;  for details see www.osadl.org.
+ *
+ * Based on original driver from Freescale Semiconductor
+ * written by John Rigby <jrigby@freescale.com> on basis
+ * of drivers/mtd/nand/mxc_nand.c. Reworked and extended
+ * Piotr Ziecik <kosmo@semihalf.com>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include <asm/mpc5121.h>
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n)	((n) *  0x200)
+
+/* Addresses for NFC SPARE BUFFER areas */
+#define NFC_SPARE_BUFFERS	8
+#define NFC_SPARE_LEN		0x40
+#define NFC_SPARE_AREA(n)	(0x1000 + ((n) * NFC_SPARE_LEN))
+
+/* MPC5121 NFC registers */
+#define NFC_BUF_ADDR		0x1E04
+#define NFC_FLASH_ADDR		0x1E06
+#define NFC_FLASH_CMD		0x1E08
+#define NFC_CONFIG		0x1E0A
+#define NFC_ECC_STATUS1		0x1E0C
+#define NFC_ECC_STATUS2		0x1E0E
+#define NFC_SPAS		0x1E10
+#define NFC_WRPROT		0x1E12
+#define NFC_NF_WRPRST		0x1E18
+#define NFC_CONFIG1		0x1E1A
+#define NFC_CONFIG2		0x1E1C
+#define NFC_UNLOCKSTART_BLK0	0x1E20
+#define NFC_UNLOCKEND_BLK0	0x1E22
+#define NFC_UNLOCKSTART_BLK1	0x1E24
+#define NFC_UNLOCKEND_BLK1	0x1E26
+#define NFC_UNLOCKSTART_BLK2	0x1E28
+#define NFC_UNLOCKEND_BLK2	0x1E2A
+#define NFC_UNLOCKSTART_BLK3	0x1E2C
+#define NFC_UNLOCKEND_BLK3	0x1E2E
+
+/* Bit Definitions: NFC_BUF_ADDR */
+#define NFC_RBA_MASK		(7 << 0)
+#define NFC_ACTIVE_CS_SHIFT	5
+#define NFC_ACTIVE_CS_MASK	(3 << NFC_ACTIVE_CS_SHIFT)
+
+/* Bit Definitions: NFC_CONFIG */
+#define NFC_BLS_UNLOCKED	(1 << 1)
+
+/* Bit Definitions: NFC_CONFIG1 */
+#define NFC_ECC_4BIT		(1 << 0)
+#define NFC_FULL_PAGE_DMA	(1 << 1)
+#define NFC_SPARE_ONLY		(1 << 2)
+#define NFC_ECC_ENABLE		(1 << 3)
+#define NFC_INT_MASK		(1 << 4)
+#define NFC_BIG_ENDIAN		(1 << 5)
+#define NFC_RESET		(1 << 6)
+#define NFC_CE			(1 << 7)
+#define NFC_ONE_CYCLE		(1 << 8)
+#define NFC_PPB_32		(0 << 9)
+#define NFC_PPB_64		(1 << 9)
+#define NFC_PPB_128		(2 << 9)
+#define NFC_PPB_256		(3 << 9)
+#define NFC_PPB_MASK		(3 << 9)
+#define NFC_FULL_PAGE_INT	(1 << 11)
+
+/* Bit Definitions: NFC_CONFIG2 */
+#define NFC_COMMAND		(1 << 0)
+#define NFC_ADDRESS		(1 << 1)
+#define NFC_INPUT		(1 << 2)
+#define NFC_OUTPUT		(1 << 3)
+#define NFC_ID			(1 << 4)
+#define NFC_STATUS		(1 << 5)
+#define NFC_CMD_FAIL		(1 << 15)
+#define NFC_INT			(1 << 15)
+
+/* Bit Definitions: NFC_WRPROT */
+#define NFC_WPC_LOCK_TIGHT	(1 << 0)
+#define NFC_WPC_LOCK		(1 << 1)
+#define NFC_WPC_UNLOCK		(1 << 2)
+
+#define	DRV_NAME		"mpc5121_nfc"
+
+/* Timeouts */
+#define NFC_RESET_TIMEOUT	1000		/* 1 ms */
+#define NFC_TIMEOUT		(HZ / 10)	/* 1/10 s */
+
+struct mpc5121_nfc_prv {
+	struct nand_chip	chip;
+	int			irq;
+	void __iomem		*regs;
+	struct clk		*clk;
+	wait_queue_head_t	irq_waitq;
+	uint			column;
+	int			spareonly;
+	void __iomem		*csreg;
+	struct device		*dev;
+};
+
+static void mpc5121_nfc_done(struct mtd_info *mtd);
+
+/* Read NFC register */
+static inline u16 nfc_read(struct mtd_info *mtd, uint reg)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+	return in_be16(prv->regs + reg);
+}
+
+/* Write NFC register */
+static inline void nfc_write(struct mtd_info *mtd, uint reg, u16 val)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+	out_be16(prv->regs + reg, val);
+}
+
+/* Set bits in NFC register */
+static inline void nfc_set(struct mtd_info *mtd, uint reg, u16 bits)
+{
+	nfc_write(mtd, reg, nfc_read(mtd, reg) | bits);
+}
+
+/* Clear bits in NFC register */
+static inline void nfc_clear(struct mtd_info *mtd, uint reg, u16 bits)
+{
+	nfc_write(mtd, reg, nfc_read(mtd, reg) & ~bits);
+}
+
+/* Invoke address cycle */
+static inline void mpc5121_nfc_send_addr(struct mtd_info *mtd, u16 addr)
+{
+	nfc_write(mtd, NFC_FLASH_ADDR, addr);
+	nfc_write(mtd, NFC_CONFIG2, NFC_ADDRESS);
+	mpc5121_nfc_done(mtd);
+}
+
+/* Invoke command cycle */
+static inline void mpc5121_nfc_send_cmd(struct mtd_info *mtd, u16 cmd)
+{
+	nfc_write(mtd, NFC_FLASH_CMD, cmd);
+	nfc_write(mtd, NFC_CONFIG2, NFC_COMMAND);
+	mpc5121_nfc_done(mtd);
+}
+
+/* Send data from NFC buffers to NAND flash */
+static inline void mpc5121_nfc_send_prog_page(struct mtd_info *mtd)
+{
+	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+	nfc_write(mtd, NFC_CONFIG2, NFC_INPUT);
+	mpc5121_nfc_done(mtd);
+}
+
+/* Receive data from NAND flash */
+static inline void mpc5121_nfc_send_read_page(struct mtd_info *mtd)
+{
+	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+	nfc_write(mtd, NFC_CONFIG2, NFC_OUTPUT);
+	mpc5121_nfc_done(mtd);
+}
+
+/* Receive ID from NAND flash */
+static inline void mpc5121_nfc_send_read_id(struct mtd_info *mtd)
+{
+	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+	nfc_write(mtd, NFC_CONFIG2, NFC_ID);
+	mpc5121_nfc_done(mtd);
+}
+
+/* Receive status from NAND flash */
+static inline void mpc5121_nfc_send_read_status(struct mtd_info *mtd)
+{
+	nfc_clear(mtd, NFC_BUF_ADDR, NFC_RBA_MASK);
+	nfc_write(mtd, NFC_CONFIG2, NFC_STATUS);
+	mpc5121_nfc_done(mtd);
+}
+
+/* NFC interrupt handler */
+static irqreturn_t mpc5121_nfc_irq(int irq, void *data)
+{
+	struct mtd_info *mtd = data;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+	nfc_set(mtd, NFC_CONFIG1, NFC_INT_MASK);
+	wake_up(&prv->irq_waitq);
+
+	return IRQ_HANDLED;
+}
+
+/* Wait for operation complete */
+static void mpc5121_nfc_done(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+	int rv;
+
+	if ((nfc_read(mtd, NFC_CONFIG2) & NFC_INT) == 0) {
+		nfc_clear(mtd, NFC_CONFIG1, NFC_INT_MASK);
+		rv = wait_event_timeout(prv->irq_waitq,
+			(nfc_read(mtd, NFC_CONFIG2) & NFC_INT), NFC_TIMEOUT);
+
+		if (!rv)
+			dev_warn(prv->dev,
+				"Timeout while waiting for interrupt.\n");
+	}
+
+	nfc_clear(mtd, NFC_CONFIG2, NFC_INT);
+}
+
+/* Do address cycle(s) */
+static void mpc5121_nfc_addr_cycle(struct mtd_info *mtd, int column, int page)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	u32 pagemask = chip->pagemask;
+
+	if (column != -1) {
+		mpc5121_nfc_send_addr(mtd, column);
+		if (mtd->writesize > 512)
+			mpc5121_nfc_send_addr(mtd, column >> 8);
+	}
+
+	if (page != -1) {
+		do {
+			mpc5121_nfc_send_addr(mtd, page & 0xFF);
+			page >>= 8;
+			pagemask >>= 8;
+		} while (pagemask);
+	}
+}
+
+/* Control chip select signals */
+static void mpc5121_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	if (chip < 0) {
+		nfc_clear(mtd, NFC_CONFIG1, NFC_CE);
+		return;
+	}
+
+	nfc_clear(mtd, NFC_BUF_ADDR, NFC_ACTIVE_CS_MASK);
+	nfc_set(mtd, NFC_BUF_ADDR, (chip << NFC_ACTIVE_CS_SHIFT) &
+							NFC_ACTIVE_CS_MASK);
+	nfc_set(mtd, NFC_CONFIG1, NFC_CE);
+}
+
+/* Init external chip select logic on ADS5121 board */
+static int ads5121_chipselect_init(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+	struct device_node *dn;
+
+	dn = of_find_compatible_node(NULL, NULL, "fsl,mpc5121ads-cpld");
+	if (dn) {
+		prv->csreg = of_iomap(dn, 0);
+		of_node_put(dn);
+		if (!prv->csreg)
+			return -ENOMEM;
+
+		/* CPLD Register 9 controls NAND /CE Lines */
+		prv->csreg += 9;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/* Control chips select signal on ADS5121 board */
+static void ads5121_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+	u8 v;
+
+	v = in_8(prv->csreg);
+	v |= 0x0F;
+
+	if (chip >= 0) {
+		mpc5121_nfc_select_chip(mtd, 0);
+		v &= ~(1 << chip);
+	} else
+		mpc5121_nfc_select_chip(mtd, -1);
+
+	out_8(prv->csreg, v);
+}
+
+/* Read NAND Ready/Busy signal */
+static int mpc5121_nfc_dev_ready(struct mtd_info *mtd)
+{
+	/*
+	 * NFC handles ready/busy signal internally. Therefore, this function
+	 * always returns status as ready.
+	 */
+	return 1;
+}
+
+/* Write command to NAND flash */
+static void mpc5121_nfc_command(struct mtd_info *mtd, unsigned command,
+							int column, int page)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+	prv->column = (column >= 0) ? column : 0;
+	prv->spareonly = 0;
+
+	switch (command) {
+	case NAND_CMD_PAGEPROG:
+		mpc5121_nfc_send_prog_page(mtd);
+		break;
+	/*
+	 * NFC does not support sub-page reads and writes,
+	 * so emulate them using full page transfers.
+	 */
+	case NAND_CMD_READ0:
+		column = 0;
+		break;
+
+	case NAND_CMD_READ1:
+		prv->column += 256;
+		command = NAND_CMD_READ0;
+		column = 0;
+		break;
+
+	case NAND_CMD_READOOB:
+		prv->spareonly = 1;
+		command = NAND_CMD_READ0;
+		column = 0;
+		break;
+
+	case NAND_CMD_SEQIN:
+		mpc5121_nfc_command(mtd, NAND_CMD_READ0, column, page);
+		column = 0;
+		break;
+
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_READID:
+	case NAND_CMD_STATUS:
+		break;
+
+	default:
+		return;
+	}
+
+	mpc5121_nfc_send_cmd(mtd, command);
+	mpc5121_nfc_addr_cycle(mtd, column, page);
+
+	switch (command) {
+	case NAND_CMD_READ0:
+		if (mtd->writesize > 512)
+			mpc5121_nfc_send_cmd(mtd, NAND_CMD_READSTART);
+		mpc5121_nfc_send_read_page(mtd);
+		break;
+
+	case NAND_CMD_READID:
+		mpc5121_nfc_send_read_id(mtd);
+		break;
+
+	case NAND_CMD_STATUS:
+		mpc5121_nfc_send_read_status(mtd);
+		if (chip->options & NAND_BUSWIDTH_16)
+			prv->column = 1;
+		else
+			prv->column = 0;
+		break;
+	}
+}
+
+/* Copy data from/to NFC spare buffers. */
+static void mpc5121_nfc_copy_spare(struct mtd_info *mtd, uint offset,
+						u8 *buffer, uint size, int wr)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
+	uint o, s, sbsize, blksize;
+
+	/*
+	 * The NAND spare area is available through the NFC spare buffers.
+	 * The NFC divides the spare area into (page_size / 512) chunks.
+	 * Each chunk is placed into a separate spare memory area, using
+	 * the first (spare_size / num_of_chunks) bytes of that buffer.
+	 *
+	 * For NAND devices whose spare area is not evenly divided by the
+	 * number of chunks, the number of used bytes in each spare buffer
+	 * is rounded down to the nearest even number of bytes, and all
+	 * remaining bytes are added to the last used spare area.
+	 *
+	 * For more information read section 26.6.10 of MPC5121e
+	 * Microcontroller Reference Manual, Rev. 3.
+	 */
+
+	/* Calculate number of valid bytes in each spare buffer */
+	sbsize = (mtd->oobsize / (mtd->writesize / 512)) & ~1;
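+	/*
+	 * Example: a 2048 byte page with 64 bytes of OOB is split into
+	 * four 512 byte chunks, so each spare buffer holds
+	 * (64 / 4) & ~1 = 16 valid bytes.
+	 */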
+
+	while (size) {
+		/* Calculate spare buffer number */
+		s = offset / sbsize;
+		if (s > NFC_SPARE_BUFFERS - 1)
+			s = NFC_SPARE_BUFFERS - 1;
+
+		/*
+		 * Calculate offset to requested data block in selected spare
+		 * buffer and its size.
+		 */
+		o = offset - (s * sbsize);
+		blksize = min(sbsize - o, size);
+
+		if (wr)
+			memcpy_toio(prv->regs + NFC_SPARE_AREA(s) + o,
+							buffer, blksize);
+		else
+			memcpy_fromio(buffer,
+				prv->regs + NFC_SPARE_AREA(s) + o, blksize);
+
+		buffer += blksize;
+		offset += blksize;
+		size -= blksize;
+	}
+}
+
+/* Copy data from/to NFC main and spare buffers */
+static void mpc5121_nfc_buf_copy(struct mtd_info *mtd, u_char *buf, int len,
+									int wr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+	uint c = prv->column;
+	uint l;
+
+	/* Handle spare area access */
+	if (prv->spareonly || c >= mtd->writesize) {
+		/* Calculate offset from beginning of spare area */
+		if (c >= mtd->writesize)
+			c -= mtd->writesize;
+
+		prv->column += len;
+		mpc5121_nfc_copy_spare(mtd, c, buf, len, wr);
+		return;
+	}
+
+	/*
+	 * Handle main area access - limit copy length to prevent
+	 * crossing main/spare boundary.
+	 */
+	l = min((uint)len, mtd->writesize - c);
+	prv->column += l;
+
+	if (wr)
+		memcpy_toio(prv->regs + NFC_MAIN_AREA(0) + c, buf, l);
+	else
+		memcpy_fromio(buf, prv->regs + NFC_MAIN_AREA(0) + c, l);
+
+	/* Handle crossing main/spare boundary */
+	if (l != len) {
+		buf += l;
+		len -= l;
+		mpc5121_nfc_buf_copy(mtd, buf, len, wr);
+	}
+}
+
+/* Read data from NFC buffers */
+static void mpc5121_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	mpc5121_nfc_buf_copy(mtd, buf, len, 0);
+}
+
+/* Write data to NFC buffers */
+static void mpc5121_nfc_write_buf(struct mtd_info *mtd,
+						const u_char *buf, int len)
+{
+	mpc5121_nfc_buf_copy(mtd, (u_char *)buf, len, 1);
+}
+
+/* Read byte from NFC buffers */
+static u8 mpc5121_nfc_read_byte(struct mtd_info *mtd)
+{
+	u8 tmp;
+
+	mpc5121_nfc_read_buf(mtd, &tmp, sizeof(tmp));
+
+	return tmp;
+}
+
+/* Read word from NFC buffers */
+static u16 mpc5121_nfc_read_word(struct mtd_info *mtd)
+{
+	u16 tmp;
+
+	mpc5121_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+
+	return tmp;
+}
+
+/*
+ * Read NFC configuration from Reset Config Word
+ *
+ * The NFC is configured during reset on the basis of information stored
+ * in the Reset Config Word. There is no other way to set the NAND page
+ * size, spare size and bus width.
+ */
+static int mpc5121_nfc_read_hw_config(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+	struct mpc512x_reset_module *rm;
+	struct device_node *rmnode;
+	uint rcw_pagesize = 0;
+	uint rcw_sparesize = 0;
+	uint rcw_width;
+	uint rcwh;
+	uint romloc, ps;
+	int ret = 0;
+
+	rmnode = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-reset");
+	if (!rmnode) {
+		dev_err(prv->dev, "Missing 'fsl,mpc5121-reset' "
+					"node in device tree!\n");
+		return -ENODEV;
+	}
+
+	rm = of_iomap(rmnode, 0);
+	if (!rm) {
+		dev_err(prv->dev, "Error mapping reset module node!\n");
+		ret = -EBUSY;
+		goto out;
+	}
+
+	rcwh = in_be32(&rm->rcwhr);
+
+	/* Bit 6: NFC bus width */
+	rcw_width = ((rcwh >> 6) & 0x1) ? 2 : 1;
+
+	/* Bit 7: NFC Page/Spare size */
+	ps = (rcwh >> 7) & 0x1;
+
+	/* Bits [22:21]: ROM Location */
+	romloc = (rcwh >> 21) & 0x3;
+
+	/* Decode RCW bits */
+	switch ((ps << 2) | romloc) {
+	case 0x00:
+	case 0x01:
+		rcw_pagesize = 512;
+		rcw_sparesize = 16;
+		break;
+	case 0x02:
+	case 0x03:
+		rcw_pagesize = 4096;
+		rcw_sparesize = 128;
+		break;
+	case 0x04:
+	case 0x05:
+		rcw_pagesize = 2048;
+		rcw_sparesize = 64;
+		break;
+	case 0x06:
+	case 0x07:
+		rcw_pagesize = 4096;
+		rcw_sparesize = 218;
+		break;
+	}
+
+	mtd->writesize = rcw_pagesize;
+	mtd->oobsize = rcw_sparesize;
+	if (rcw_width == 2)
+		chip->options |= NAND_BUSWIDTH_16;
+
+	dev_notice(prv->dev, "Configured for "
+				"%u-bit NAND, page size %u "
+				"with %u spare.\n",
+				rcw_width * 8, rcw_pagesize,
+				rcw_sparesize);
+	iounmap(rm);
+out:
+	of_node_put(rmnode);
+	return ret;
+}
+
+/* Free driver resources */
+static void mpc5121_nfc_free(struct device *dev, struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mpc5121_nfc_prv *prv = nand_get_controller_data(chip);
+
+	if (prv->clk)
+		clk_disable_unprepare(prv->clk);
+
+	if (prv->csreg)
+		iounmap(prv->csreg);
+}
+
+static int mpc5121_nfc_probe(struct platform_device *op)
+{
+	struct device_node *dn = op->dev.of_node;
+	struct clk *clk;
+	struct device *dev = &op->dev;
+	struct mpc5121_nfc_prv *prv;
+	struct resource res;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	unsigned long regs_paddr, regs_size;
+	const __be32 *chips_no;
+	int resettime = 0;
+	int retval = 0;
+	int rev, len;
+
+	/*
+	 * Check SoC revision. This driver supports only NFC
+	 * in MPC5121 revision 2 and MPC5123 revision 3.
+	 */
+	rev = (mfspr(SPRN_SVR) >> 4) & 0xF;
+	if ((rev != 2) && (rev != 3)) {
+		dev_err(dev, "SoC revision %u is not supported!\n", rev);
+		return -ENXIO;
+	}
+
+	prv = devm_kzalloc(dev, sizeof(*prv), GFP_KERNEL);
+	if (!prv)
+		return -ENOMEM;
+
+	chip = &prv->chip;
+	mtd = nand_to_mtd(chip);
+
+	mtd->dev.parent = dev;
+	nand_set_controller_data(chip, prv);
+	nand_set_flash_node(chip, dn);
+	prv->dev = dev;
+
+	/* Read NFC configuration from Reset Config Word */
+	retval = mpc5121_nfc_read_hw_config(mtd);
+	if (retval) {
+		dev_err(dev, "Unable to read NFC config!\n");
+		return retval;
+	}
+
+	prv->irq = irq_of_parse_and_map(dn, 0);
+	if (prv->irq == NO_IRQ) {
+		dev_err(dev, "Error mapping IRQ!\n");
+		return -EINVAL;
+	}
+
+	retval = of_address_to_resource(dn, 0, &res);
+	if (retval) {
+		dev_err(dev, "Error parsing memory region!\n");
+		return retval;
+	}
+
+	chips_no = of_get_property(dn, "chips", &len);
+	if (!chips_no || len != sizeof(*chips_no)) {
+		dev_err(dev, "Invalid/missing 'chips' property!\n");
+		return -EINVAL;
+	}
+
+	regs_paddr = res.start;
+	regs_size = resource_size(&res);
+
+	if (!devm_request_mem_region(dev, regs_paddr, regs_size, DRV_NAME)) {
+		dev_err(dev, "Error requesting memory region!\n");
+		return -EBUSY;
+	}
+
+	prv->regs = devm_ioremap(dev, regs_paddr, regs_size);
+	if (!prv->regs) {
+		dev_err(dev, "Error mapping memory region!\n");
+		return -ENOMEM;
+	}
+
+	mtd->name = "MPC5121 NAND";
+	chip->dev_ready = mpc5121_nfc_dev_ready;
+	chip->cmdfunc = mpc5121_nfc_command;
+	chip->read_byte = mpc5121_nfc_read_byte;
+	chip->read_word = mpc5121_nfc_read_word;
+	chip->read_buf = mpc5121_nfc_read_buf;
+	chip->write_buf = mpc5121_nfc_write_buf;
+	chip->select_chip = mpc5121_nfc_select_chip;
+	chip->bbt_options = NAND_BBT_USE_FLASH;
+	chip->ecc.mode = NAND_ECC_SOFT;
+	chip->ecc.algo = NAND_ECC_HAMMING;
+
+	/* Support external chip-select logic on ADS5121 board */
+	if (of_machine_is_compatible("fsl,mpc5121ads")) {
+		retval = ads5121_chipselect_init(mtd);
+		if (retval) {
+			dev_err(dev, "Chipselect init error!\n");
+			return retval;
+		}
+
+		chip->select_chip = ads5121_select_chip;
+	}
+
+	/* Enable NFC clock */
+	clk = devm_clk_get(dev, "ipg");
+	if (IS_ERR(clk)) {
+		dev_err(dev, "Unable to acquire NFC clock!\n");
+		retval = PTR_ERR(clk);
+		goto error;
+	}
+	retval = clk_prepare_enable(clk);
+	if (retval) {
+		dev_err(dev, "Unable to enable NFC clock!\n");
+		goto error;
+	}
+	prv->clk = clk;
+
+	/* Reset NAND Flash controller */
+	nfc_set(mtd, NFC_CONFIG1, NFC_RESET);
+	while (nfc_read(mtd, NFC_CONFIG1) & NFC_RESET) {
+		if (resettime++ >= NFC_RESET_TIMEOUT) {
+			dev_err(dev, "Timeout while resetting NFC!\n");
+			retval = -EINVAL;
+			goto error;
+		}
+
+		udelay(1);
+	}
+
+	/* Enable write to NFC memory */
+	nfc_write(mtd, NFC_CONFIG, NFC_BLS_UNLOCKED);
+
+	/* Enable write to all NAND pages */
+	nfc_write(mtd, NFC_UNLOCKSTART_BLK0, 0x0000);
+	nfc_write(mtd, NFC_UNLOCKEND_BLK0, 0xFFFF);
+	nfc_write(mtd, NFC_WRPROT, NFC_WPC_UNLOCK);
+
+	/*
+	 * Setup NFC:
+	 *	- Big Endian transfers,
+	 *	- Interrupt after full page read/write.
+	 */
+	nfc_write(mtd, NFC_CONFIG1, NFC_BIG_ENDIAN | NFC_INT_MASK |
+							NFC_FULL_PAGE_INT);
+
+	/* Set spare area size */
+	nfc_write(mtd, NFC_SPAS, mtd->oobsize >> 1);
+
+	init_waitqueue_head(&prv->irq_waitq);
+	retval = devm_request_irq(dev, prv->irq, &mpc5121_nfc_irq, 0, DRV_NAME,
+									mtd);
+	if (retval) {
+		dev_err(dev, "Error requesting IRQ!\n");
+		goto error;
+	}
+
+	/* Detect NAND chips */
+	if (nand_scan(mtd, be32_to_cpup(chips_no))) {
+		dev_err(dev, "NAND Flash not found !\n");
+		retval = -ENXIO;
+		goto error;
+	}
+
+	/* Set erase block size */
+	switch (mtd->erasesize / mtd->writesize) {
+	case 32:
+		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_32);
+		break;
+
+	case 64:
+		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_64);
+		break;
+
+	case 128:
+		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_128);
+		break;
+
+	case 256:
+		nfc_set(mtd, NFC_CONFIG1, NFC_PPB_256);
+		break;
+
+	default:
+		dev_err(dev, "Unsupported NAND flash!\n");
+		retval = -ENXIO;
+		goto error;
+	}
+
+	dev_set_drvdata(dev, mtd);
+
+	/* Register device in MTD */
+	retval = mtd_device_register(mtd, NULL, 0);
+	if (retval) {
+		dev_err(dev, "Error adding MTD device!\n");
+		goto error;
+	}
+
+	return 0;
+error:
+	mpc5121_nfc_free(dev, mtd);
+	return retval;
+}
+
+static int mpc5121_nfc_remove(struct platform_device *op)
+{
+	struct device *dev = &op->dev;
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	nand_release(mtd);
+	mpc5121_nfc_free(dev, mtd);
+
+	return 0;
+}
+
+static const struct of_device_id mpc5121_nfc_match[] = {
+	{ .compatible = "fsl,mpc5121-nfc", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mpc5121_nfc_match);
+
+static struct platform_driver mpc5121_nfc_driver = {
+	.probe		= mpc5121_nfc_probe,
+	.remove		= mpc5121_nfc_remove,
+	.driver		= {
+		.name = DRV_NAME,
+		.of_match_table = mpc5121_nfc_match,
+	},
+};
+
+module_platform_driver(mpc5121_nfc_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/rawnand/mtk_ecc.c b/drivers/mtd/nand/rawnand/mtk_ecc.c
new file mode 100644
index 000000000000..25a4fbd4d24a
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/mtk_ecc.c
@@ -0,0 +1,530 @@ 
+/*
+ * MTK ECC controller driver.
+ * Copyright (C) 2016  MediaTek Inc.
+ * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
+ *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/mutex.h>
+
+#include "mtk_ecc.h"
+
+#define ECC_IDLE_MASK		BIT(0)
+#define ECC_IRQ_EN		BIT(0)
+#define ECC_OP_ENABLE		(1)
+#define ECC_OP_DISABLE		(0)
+
+#define ECC_ENCCON		(0x00)
+#define ECC_ENCCNFG		(0x04)
+#define		ECC_CNFG_4BIT		(0)
+#define		ECC_CNFG_6BIT		(1)
+#define		ECC_CNFG_8BIT		(2)
+#define		ECC_CNFG_10BIT		(3)
+#define		ECC_CNFG_12BIT		(4)
+#define		ECC_CNFG_14BIT		(5)
+#define		ECC_CNFG_16BIT		(6)
+#define		ECC_CNFG_18BIT		(7)
+#define		ECC_CNFG_20BIT		(8)
+#define		ECC_CNFG_22BIT		(9)
+#define		ECC_CNFG_24BIT		(0xa)
+#define		ECC_CNFG_28BIT		(0xb)
+#define		ECC_CNFG_32BIT		(0xc)
+#define		ECC_CNFG_36BIT		(0xd)
+#define		ECC_CNFG_40BIT		(0xe)
+#define		ECC_CNFG_44BIT		(0xf)
+#define		ECC_CNFG_48BIT		(0x10)
+#define		ECC_CNFG_52BIT		(0x11)
+#define		ECC_CNFG_56BIT		(0x12)
+#define		ECC_CNFG_60BIT		(0x13)
+#define		ECC_MODE_SHIFT		(5)
+#define		ECC_MS_SHIFT		(16)
+#define ECC_ENCDIADDR		(0x08)
+#define ECC_ENCIDLE		(0x0C)
+#define ECC_ENCPAR(x)		(0x10 + (x) * sizeof(u32))
+#define ECC_ENCIRQ_EN		(0x80)
+#define ECC_ENCIRQ_STA		(0x84)
+#define ECC_DECCON		(0x100)
+#define ECC_DECCNFG		(0x104)
+#define		DEC_EMPTY_EN		BIT(31)
+#define		DEC_CNFG_CORRECT	(0x3 << 12)
+#define ECC_DECIDLE		(0x10C)
+#define ECC_DECENUM0		(0x114)
+#define		ERR_MASK		(0x3f)
+#define ECC_DECDONE		(0x124)
+#define ECC_DECIRQ_EN		(0x200)
+#define ECC_DECIRQ_STA		(0x204)
+
+#define ECC_TIMEOUT		(500000)
+
+#define ECC_IDLE_REG(op)	((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE)
+#define ECC_CTL_REG(op)		((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON)
+#define ECC_IRQ_REG(op)		((op) == ECC_ENCODE ? \
+					ECC_ENCIRQ_EN : ECC_DECIRQ_EN)
+
+struct mtk_ecc {
+	struct device *dev;
+	void __iomem *regs;
+	struct clk *clk;
+
+	struct completion done;
+	struct mutex lock;
+	u32 sectors;
+};
+
+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc,
+				     enum mtk_ecc_operation op)
+{
+	struct device *dev = ecc->dev;
+	u32 val;
+	int ret;
+
+	ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val,
+					val & ECC_IDLE_MASK,
+					10, ECC_TIMEOUT);
+	if (ret)
+		dev_warn(dev, "%s NOT idle\n",
+			 op == ECC_ENCODE ? "encoder" : "decoder");
+}
+
+static irqreturn_t mtk_ecc_irq(int irq, void *id)
+{
+	struct mtk_ecc *ecc = id;
+	enum mtk_ecc_operation op;
+	u32 dec, enc;
+
+	dec = readw(ecc->regs + ECC_DECIRQ_STA) & ECC_IRQ_EN;
+	if (dec) {
+		op = ECC_DECODE;
+		dec = readw(ecc->regs + ECC_DECDONE);
+		if (dec & ecc->sectors) {
+			ecc->sectors = 0;
+			complete(&ecc->done);
+		} else {
+			return IRQ_HANDLED;
+		}
+	} else {
+		enc = readl(ecc->regs + ECC_ENCIRQ_STA) & ECC_IRQ_EN;
+		if (enc) {
+			op = ECC_ENCODE;
+			complete(&ecc->done);
+		} else {
+			return IRQ_NONE;
+		}
+	}
+
+	writel(0, ecc->regs + ECC_IRQ_REG(op));
+
+	return IRQ_HANDLED;
+}
+
+static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+	u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz;
+	u32 reg;
+
+	switch (config->strength) {
+	case 4:
+		ecc_bit = ECC_CNFG_4BIT;
+		break;
+	case 6:
+		ecc_bit = ECC_CNFG_6BIT;
+		break;
+	case 8:
+		ecc_bit = ECC_CNFG_8BIT;
+		break;
+	case 10:
+		ecc_bit = ECC_CNFG_10BIT;
+		break;
+	case 12:
+		ecc_bit = ECC_CNFG_12BIT;
+		break;
+	case 14:
+		ecc_bit = ECC_CNFG_14BIT;
+		break;
+	case 16:
+		ecc_bit = ECC_CNFG_16BIT;
+		break;
+	case 18:
+		ecc_bit = ECC_CNFG_18BIT;
+		break;
+	case 20:
+		ecc_bit = ECC_CNFG_20BIT;
+		break;
+	case 22:
+		ecc_bit = ECC_CNFG_22BIT;
+		break;
+	case 24:
+		ecc_bit = ECC_CNFG_24BIT;
+		break;
+	case 28:
+		ecc_bit = ECC_CNFG_28BIT;
+		break;
+	case 32:
+		ecc_bit = ECC_CNFG_32BIT;
+		break;
+	case 36:
+		ecc_bit = ECC_CNFG_36BIT;
+		break;
+	case 40:
+		ecc_bit = ECC_CNFG_40BIT;
+		break;
+	case 44:
+		ecc_bit = ECC_CNFG_44BIT;
+		break;
+	case 48:
+		ecc_bit = ECC_CNFG_48BIT;
+		break;
+	case 52:
+		ecc_bit = ECC_CNFG_52BIT;
+		break;
+	case 56:
+		ecc_bit = ECC_CNFG_56BIT;
+		break;
+	case 60:
+		ecc_bit = ECC_CNFG_60BIT;
+		break;
+	default:
+		dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n",
+			config->strength);
+	}
+
+	if (config->op == ECC_ENCODE) {
+		/* configure ECC encoder (in bits) */
+		enc_sz = config->len << 3;
+
+		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+		reg |= (enc_sz << ECC_MS_SHIFT);
+		writel(reg, ecc->regs + ECC_ENCCNFG);
+
+		if (config->mode != ECC_NFI_MODE)
+			writel(lower_32_bits(config->addr),
+			       ecc->regs + ECC_ENCDIADDR);
+
+	} else {
+		/* configure ECC decoder (in bits) */
+		dec_sz = (config->len << 3) +
+					config->strength * ECC_PARITY_BITS;
+
+		reg = ecc_bit | (config->mode << ECC_MODE_SHIFT);
+		reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT;
+		reg |= DEC_EMPTY_EN;
+		writel(reg, ecc->regs + ECC_DECCNFG);
+
+		if (config->sectors)
+			ecc->sectors = 1 << (config->sectors - 1);
+	}
+}
+
+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats,
+		       int sectors)
+{
+	u32 offset, i, err;
+	u32 bitflips = 0;
+
+	stats->corrected = 0;
+	stats->failed = 0;
+
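+	/*
+	 * Each ECC_DECENUM register packs the error counts of four sectors,
+	 * eight bits per sector; pick the register and byte lane matching
+	 * sector i.
+	 */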
+	for (i = 0; i < sectors; i++) {
+		offset = (i >> 2) << 2;
+		err = readl(ecc->regs + ECC_DECENUM0 + offset);
+		err = err >> ((i % 4) * 8);
+		err &= ERR_MASK;
+		if (err == ERR_MASK) {
+			/* uncorrectable errors */
+			stats->failed++;
+			continue;
+		}
+
+		stats->corrected += err;
+		bitflips = max_t(u32, bitflips, err);
+	}
+
+	stats->bitflips = bitflips;
+}
+EXPORT_SYMBOL(mtk_ecc_get_stats);
+
+void mtk_ecc_release(struct mtk_ecc *ecc)
+{
+	clk_disable_unprepare(ecc->clk);
+	put_device(ecc->dev);
+}
+EXPORT_SYMBOL(mtk_ecc_release);
+
+static void mtk_ecc_hw_init(struct mtk_ecc *ecc)
+{
+	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+	writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON);
+
+	mtk_ecc_wait_idle(ecc, ECC_DECODE);
+	writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON);
+}
+
+static struct mtk_ecc *mtk_ecc_get(struct device_node *np)
+{
+	struct platform_device *pdev;
+	struct mtk_ecc *ecc;
+
+	pdev = of_find_device_by_node(np);
+	if (!pdev || !platform_get_drvdata(pdev))
+		return ERR_PTR(-EPROBE_DEFER);
+
+	get_device(&pdev->dev);
+	ecc = platform_get_drvdata(pdev);
+	clk_prepare_enable(ecc->clk);
+	mtk_ecc_hw_init(ecc);
+
+	return ecc;
+}
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node)
+{
+	struct mtk_ecc *ecc = NULL;
+	struct device_node *np;
+
+	np = of_parse_phandle(of_node, "ecc-engine", 0);
+	if (np) {
+		ecc = mtk_ecc_get(np);
+		of_node_put(np);
+	}
+
+	return ecc;
+}
+EXPORT_SYMBOL(of_mtk_ecc_get);
+
+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config)
+{
+	enum mtk_ecc_operation op = config->op;
+	int ret;
+
+	ret = mutex_lock_interruptible(&ecc->lock);
+	if (ret) {
+		dev_err(ecc->dev, "interrupted when attempting to lock\n");
+		return ret;
+	}
+
+	mtk_ecc_wait_idle(ecc, op);
+	mtk_ecc_config(ecc, config);
+	writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op));
+
+	init_completion(&ecc->done);
+	writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op));
+
+	return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_enable);
+
+void mtk_ecc_disable(struct mtk_ecc *ecc)
+{
+	enum mtk_ecc_operation op = ECC_ENCODE;
+
+	/* find out the running operation */
+	if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE)
+		op = ECC_DECODE;
+
+	/* disable it */
+	mtk_ecc_wait_idle(ecc, op);
+	writew(0, ecc->regs + ECC_IRQ_REG(op));
+	writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
+
+	mutex_unlock(&ecc->lock);
+}
+EXPORT_SYMBOL(mtk_ecc_disable);
+
+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op)
+{
+	int ret;
+
+	ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n",
+			(op == ECC_ENCODE) ? "encoder" : "decoder");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(mtk_ecc_wait_done);
+
+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
+		   u8 *data, u32 bytes)
+{
+	dma_addr_t addr;
+	u32 *p, len, i;
+	int ret = 0;
+
+	addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
+	ret = dma_mapping_error(ecc->dev, addr);
+	if (ret) {
+		dev_err(ecc->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	config->op = ECC_ENCODE;
+	config->addr = addr;
+	ret = mtk_ecc_enable(ecc, config);
+	if (ret) {
+		dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+		return ret;
+	}
+
+	ret = mtk_ecc_wait_done(ecc, ECC_ENCODE);
+	if (ret)
+		goto timeout;
+
+	mtk_ecc_wait_idle(ecc, ECC_ENCODE);
+
+	/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
+	len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
+	p = (u32 *)(data + bytes);
+
+	/* write the parity bytes generated by the ECC back to the OOB region */
+	for (i = 0; i < len; i++)
+		p[i] = readl(ecc->regs + ECC_ENCPAR(i));
+timeout:
+
+	dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+	mtk_ecc_disable(ecc);
+
+	return ret;
+}
+EXPORT_SYMBOL(mtk_ecc_encode);
+
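+/*
+ * Round a requested strength to a value supported by the engine: e.g. a
+ * request for 30 bits becomes 28, a request below 4 becomes 4, and anything
+ * above 60 is capped at 60.
+ */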
+void mtk_ecc_adjust_strength(u32 *p)
+{
+	u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36,
+			40, 44, 48, 52, 56, 60};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ecc); i++) {
+		if (*p <= ecc[i]) {
+			if (!i)
+				*p = ecc[i];
+			else if (*p != ecc[i])
+				*p = ecc[i - 1];
+			return;
+		}
+	}
+
+	*p = ecc[ARRAY_SIZE(ecc) - 1];
+}
+EXPORT_SYMBOL(mtk_ecc_adjust_strength);
+
+static int mtk_ecc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct mtk_ecc *ecc;
+	struct resource *res;
+	int irq, ret;
+
+	ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
+	if (!ecc)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	ecc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(ecc->regs)) {
+		dev_err(dev, "failed to map regs: %ld\n", PTR_ERR(ecc->regs));
+		return PTR_ERR(ecc->regs);
+	}
+
+	ecc->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(ecc->clk)) {
+		dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk));
+		return PTR_ERR(ecc->clk);
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "failed to get irq\n");
+		return -EINVAL;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "failed to set DMA mask\n");
+		return ret;
+	}
+
+	ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc);
+	if (ret) {
+		dev_err(dev, "failed to request irq\n");
+		return -EINVAL;
+	}
+
+	ecc->dev = dev;
+	mutex_init(&ecc->lock);
+	platform_set_drvdata(pdev, ecc);
+	dev_info(dev, "probed\n");
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_ecc_suspend(struct device *dev)
+{
+	struct mtk_ecc *ecc = dev_get_drvdata(dev);
+
+	clk_disable_unprepare(ecc->clk);
+
+	return 0;
+}
+
+static int mtk_ecc_resume(struct device *dev)
+{
+	struct mtk_ecc *ecc = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_prepare_enable(ecc->clk);
+	if (ret) {
+		dev_err(dev, "failed to enable clk\n");
+		return ret;
+	}
+
+	mtk_ecc_hw_init(ecc);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume);
+#endif
+
+static const struct of_device_id mtk_ecc_dt_match[] = {
+	{ .compatible = "mediatek,mt2701-ecc" },
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match);
+
+static struct platform_driver mtk_ecc_driver = {
+	.probe  = mtk_ecc_probe,
+	.driver = {
+		.name  = "mtk-ecc",
+		.of_match_table = of_match_ptr(mtk_ecc_dt_match),
+#ifdef CONFIG_PM_SLEEP
+		.pm = &mtk_ecc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(mtk_ecc_driver);
+
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand ECC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/rawnand/mtk_ecc.h b/drivers/mtd/nand/rawnand/mtk_ecc.h
new file mode 100644
index 000000000000..cbeba5cd1c13
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/mtk_ecc.h
@@ -0,0 +1,50 @@ 
+/*
+ * MTK SDG1 ECC controller
+ *
+ * Copyright (c) 2016 Mediatek
+ * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
+ *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__
+#define __DRIVERS_MTD_NAND_MTK_ECC_H__
+
+#include <linux/types.h>
+
+#define ECC_PARITY_BITS		(14)
+
+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1};
+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE};
+
+struct device_node;
+struct mtk_ecc;
+
+struct mtk_ecc_stats {
+	u32 corrected;
+	u32 bitflips;
+	u32 failed;
+};
+
+struct mtk_ecc_config {
+	enum mtk_ecc_operation op;
+	enum mtk_ecc_mode mode;
+	dma_addr_t addr;
+	u32 strength;
+	u32 sectors;
+	u32 len;
+};
+
+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32);
+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int);
+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation);
+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *);
+void mtk_ecc_disable(struct mtk_ecc *);
+void mtk_ecc_adjust_strength(u32 *);
+
+struct mtk_ecc *of_mtk_ecc_get(struct device_node *);
+void mtk_ecc_release(struct mtk_ecc *);
+
+#endif
diff --git a/drivers/mtd/nand/rawnand/mtk_nand.c b/drivers/mtd/nand/rawnand/mtk_nand.c
new file mode 100644
index 000000000000..65156b8fe839
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/mtk_nand.c
@@ -0,0 +1,1526 @@ 
+/*
+ * MTK NAND Flash controller driver.
+ * Copyright (C) 2016 MediaTek Inc.
+ * Authors:	Xiaolei Li		<xiaolei.li@mediatek.com>
+ *		Jorge Ramirez-Ortiz	<jorge.ramirez-ortiz@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/mtd.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include "mtk_ecc.h"
+
+/* NAND controller register definition */
+#define NFI_CNFG		(0x00)
+#define		CNFG_AHB		BIT(0)
+#define		CNFG_READ_EN		BIT(1)
+#define		CNFG_DMA_BURST_EN	BIT(2)
+#define		CNFG_BYTE_RW		BIT(6)
+#define		CNFG_HW_ECC_EN		BIT(8)
+#define		CNFG_AUTO_FMT_EN	BIT(9)
+#define		CNFG_OP_CUST		(6 << 12)
+#define NFI_PAGEFMT		(0x04)
+#define		PAGEFMT_FDM_ECC_SHIFT	(12)
+#define		PAGEFMT_FDM_SHIFT	(8)
+#define		PAGEFMT_SPARE_16	(0)
+#define		PAGEFMT_SPARE_26	(1)
+#define		PAGEFMT_SPARE_27	(2)
+#define		PAGEFMT_SPARE_28	(3)
+#define		PAGEFMT_SPARE_32	(4)
+#define		PAGEFMT_SPARE_36	(5)
+#define		PAGEFMT_SPARE_40	(6)
+#define		PAGEFMT_SPARE_44	(7)
+#define		PAGEFMT_SPARE_48	(8)
+#define		PAGEFMT_SPARE_49	(9)
+#define		PAGEFMT_SPARE_50	(0xa)
+#define		PAGEFMT_SPARE_51	(0xb)
+#define		PAGEFMT_SPARE_52	(0xc)
+#define		PAGEFMT_SPARE_62	(0xd)
+#define		PAGEFMT_SPARE_63	(0xe)
+#define		PAGEFMT_SPARE_64	(0xf)
+#define		PAGEFMT_SPARE_SHIFT	(4)
+#define		PAGEFMT_SEC_SEL_512	BIT(2)
+#define		PAGEFMT_512_2K		(0)
+#define		PAGEFMT_2K_4K		(1)
+#define		PAGEFMT_4K_8K		(2)
+#define		PAGEFMT_8K_16K		(3)
+/* NFI control */
+#define NFI_CON			(0x08)
+#define		CON_FIFO_FLUSH		BIT(0)
+#define		CON_NFI_RST		BIT(1)
+#define		CON_BRD			BIT(8)  /* burst  read */
+#define		CON_BWR			BIT(9)	/* burst  write */
+#define		CON_SEC_SHIFT		(12)
+/* Timing control register */
+#define NFI_ACCCON		(0x0C)
+#define NFI_INTR_EN		(0x10)
+#define		INTR_AHB_DONE_EN	BIT(6)
+#define NFI_INTR_STA		(0x14)
+#define NFI_CMD			(0x20)
+#define NFI_ADDRNOB		(0x30)
+#define NFI_COLADDR		(0x34)
+#define NFI_ROWADDR		(0x38)
+#define NFI_STRDATA		(0x40)
+#define		STAR_EN			(1)
+#define		STAR_DE			(0)
+#define NFI_CNRNB		(0x44)
+#define NFI_DATAW		(0x50)
+#define NFI_DATAR		(0x54)
+#define NFI_PIO_DIRDY		(0x58)
+#define		PIO_DI_RDY		(0x01)
+#define NFI_STA			(0x60)
+#define		STA_CMD			BIT(0)
+#define		STA_ADDR		BIT(1)
+#define		STA_BUSY		BIT(8)
+#define		STA_EMP_PAGE		BIT(12)
+#define		NFI_FSM_CUSTDATA	(0xe << 16)
+#define		NFI_FSM_MASK		(0xf << 16)
+#define NFI_ADDRCNTR		(0x70)
+#define		CNTR_MASK		GENMASK(16, 12)
+#define NFI_STRADDR		(0x80)
+#define NFI_BYTELEN		(0x84)
+#define NFI_CSEL		(0x90)
+#define NFI_FDML(x)		(0xA0 + (x) * sizeof(u32) * 2)
+#define NFI_FDMM(x)		(0xA4 + (x) * sizeof(u32) * 2)
+#define NFI_FDM_MAX_SIZE	(8)
+#define NFI_FDM_MIN_SIZE	(1)
+#define NFI_MASTER_STA		(0x224)
+#define		MASTER_STA_MASK		(0x0FFF)
+#define NFI_EMPTY_THRESH	(0x23C)
+
+#define MTK_NAME		"mtk-nand"
+#define KB(x)			((x) * 1024UL)
+#define MB(x)			(KB(x) * 1024UL)
+
+#define MTK_TIMEOUT		(500000)
+#define MTK_RESET_TIMEOUT	(1000000)
+#define MTK_MAX_SECTOR		(16)
+#define MTK_NAND_MAX_NSELS	(2)
+
+struct mtk_nfc_bad_mark_ctl {
+	void (*bm_swap)(struct mtd_info *, u8 *buf, int raw);
+	u32 sec;
+	u32 pos;
+};
+
+/*
+ * FDM: region used to store free OOB data
+ */
+struct mtk_nfc_fdm {
+	u32 reg_size;
+	u32 ecc_size;
+};
+
+struct mtk_nfc_nand_chip {
+	struct list_head node;
+	struct nand_chip nand;
+
+	struct mtk_nfc_bad_mark_ctl bad_mark;
+	struct mtk_nfc_fdm fdm;
+	u32 spare_per_sector;
+
+	int nsels;
+	u8 sels[0];
+	/* nothing after this field */
+};
+
+struct mtk_nfc_clk {
+	struct clk *nfi_clk;
+	struct clk *pad_clk;
+};
+
+struct mtk_nfc {
+	struct nand_hw_control controller;
+	struct mtk_ecc_config ecc_cfg;
+	struct mtk_nfc_clk clk;
+	struct mtk_ecc *ecc;
+
+	struct device *dev;
+	void __iomem *regs;
+
+	struct completion done;
+	struct list_head chips;
+
+	u8 *buffer;
+};
+
+static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand)
+{
+	return container_of(nand, struct mtk_nfc_nand_chip, nand);
+}
+
+static inline u8 *data_ptr(struct nand_chip *chip, const u8 *p, int i)
+{
+	return (u8 *)p + i * chip->ecc.size;
+}
+
+static inline u8 *oob_ptr(struct nand_chip *chip, int i)
+{
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	u8 *poi;
+
+	/* map the sector's FDM data to free oob:
+	 * the beginning of the oob area stores the FDM data of bad mark sectors
+	 */
+
+	if (i < mtk_nand->bad_mark.sec)
+		poi = chip->oob_poi + (i + 1) * mtk_nand->fdm.reg_size;
+	else if (i == mtk_nand->bad_mark.sec)
+		poi = chip->oob_poi;
+	else
+		poi = chip->oob_poi + i * mtk_nand->fdm.reg_size;
+
+	return poi;
+}
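+
+/*
+ * Illustrative mapping (example geometry only): with fdm.reg_size = 8 and
+ * bad_mark.sec = 3 (a 2KB page split into four 512-byte sectors), sectors
+ * 0..2 map to oob_poi[8..31], while sector 3, whose raw region contains the
+ * factory bad block marker position, maps to oob_poi[0..7].
+ */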
+
+static inline int mtk_data_len(struct nand_chip *chip)
+{
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+
+	return chip->ecc.size + mtk_nand->spare_per_sector;
+}
+
+static inline u8 *mtk_data_ptr(struct nand_chip *chip,  int i)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+	return nfc->buffer + i * mtk_data_len(chip);
+}
+
+static inline u8 *mtk_oob_ptr(struct nand_chip *chip, int i)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+	return nfc->buffer + i * mtk_data_len(chip) + chip->ecc.size;
+}
+
+static inline void nfi_writel(struct mtk_nfc *nfc, u32 val, u32 reg)
+{
+	writel(val, nfc->regs + reg);
+}
+
+static inline void nfi_writew(struct mtk_nfc *nfc, u16 val, u32 reg)
+{
+	writew(val, nfc->regs + reg);
+}
+
+static inline void nfi_writeb(struct mtk_nfc *nfc, u8 val, u32 reg)
+{
+	writeb(val, nfc->regs + reg);
+}
+
+static inline u32 nfi_readl(struct mtk_nfc *nfc, u32 reg)
+{
+	return readl_relaxed(nfc->regs + reg);
+}
+
+static inline u16 nfi_readw(struct mtk_nfc *nfc, u32 reg)
+{
+	return readw_relaxed(nfc->regs + reg);
+}
+
+static inline u8 nfi_readb(struct mtk_nfc *nfc, u32 reg)
+{
+	return readb_relaxed(nfc->regs + reg);
+}
+
+static void mtk_nfc_hw_reset(struct mtk_nfc *nfc)
+{
+	struct device *dev = nfc->dev;
+	u32 val;
+	int ret;
+
+	/* reset all registers and force the NFI master to terminate */
+	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+
+	/* wait for the master to finish the last transaction */
+	ret = readl_poll_timeout(nfc->regs + NFI_MASTER_STA, val,
+				 !(val & MASTER_STA_MASK), 50,
+				 MTK_RESET_TIMEOUT);
+	if (ret)
+		dev_warn(dev, "master active in reset [0x%x] = 0x%x\n",
+			 NFI_MASTER_STA, val);
+
+	/* ensure any status register affected by the NFI master is reset */
+	nfi_writel(nfc, CON_FIFO_FLUSH | CON_NFI_RST, NFI_CON);
+	nfi_writew(nfc, STAR_DE, NFI_STRDATA);
+}
+
+static int mtk_nfc_send_command(struct mtk_nfc *nfc, u8 command)
+{
+	struct device *dev = nfc->dev;
+	u32 val;
+	int ret;
+
+	nfi_writel(nfc, command, NFI_CMD);
+
+	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+					!(val & STA_CMD), 10,  MTK_TIMEOUT);
+	if (ret) {
+		dev_warn(dev, "nfi core timed out entering command mode\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int mtk_nfc_send_address(struct mtk_nfc *nfc, int addr)
+{
+	struct device *dev = nfc->dev;
+	u32 val;
+	int ret;
+
+	nfi_writel(nfc, addr, NFI_COLADDR);
+	nfi_writel(nfc, 0, NFI_ROWADDR);
+	nfi_writew(nfc, 1, NFI_ADDRNOB);
+
+	ret = readl_poll_timeout_atomic(nfc->regs + NFI_STA, val,
+					!(val & STA_ADDR), 10, MTK_TIMEOUT);
+	if (ret) {
+		dev_warn(dev, "nfi core timed out entering address mode\n");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	u32 fmt, spare;
+
+	if (!mtd->writesize)
+		return 0;
+
+	spare = mtk_nand->spare_per_sector;
+
+	switch (mtd->writesize) {
+	case 512:
+		fmt = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
+		break;
+	case KB(2):
+		if (chip->ecc.size == 512)
+			fmt = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
+		else
+			fmt = PAGEFMT_512_2K;
+		break;
+	case KB(4):
+		if (chip->ecc.size == 512)
+			fmt = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
+		else
+			fmt = PAGEFMT_2K_4K;
+		break;
+	case KB(8):
+		if (chip->ecc.size == 512)
+			fmt = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
+		else
+			fmt = PAGEFMT_4K_8K;
+		break;
+	case KB(16):
+		fmt = PAGEFMT_8K_16K;
+		break;
+	default:
+		dev_err(nfc->dev, "invalid page len: %d\n", mtd->writesize);
+		return -EINVAL;
+	}
+
+	/*
+	 * the hardware will double the value for this eccsize, so we need to
+	 * halve it
+	 */
+	if (chip->ecc.size == 1024)
+		spare >>= 1;
+
+	switch (spare) {
+	case 16:
+		fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 26:
+		fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 27:
+		fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 28:
+		fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 32:
+		fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 36:
+		fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 40:
+		fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 44:
+		fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 48:
+		fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 49:
+		fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 50:
+		fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 51:
+		fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 52:
+		fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 62:
+		fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 63:
+		fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT);
+		break;
+	case 64:
+		fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT);
+		break;
+	default:
+		dev_err(nfc->dev, "invalid spare per sector %d\n", spare);
+		return -EINVAL;
+	}
+
+	fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT;
+	fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT;
+	nfi_writew(nfc, fmt, NFI_PAGEFMT);
+
+	nfc->ecc_cfg.strength = chip->ecc.strength;
+	nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size;
+
+	return 0;
+}
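+
+/*
+ * Example of the halving above (example geometry only): a 4KB page with
+ * 128 bytes of OOB and 1KB ECC sectors has spare_per_sector = 32, but the
+ * PAGEFMT spare field is programmed with PAGEFMT_SPARE_16 because the
+ * controller internally doubles the spare size for 1KB sectors.
+ */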
+
+static void mtk_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct mtk_nfc *nfc = nand_get_controller_data(nand);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand);
+
+	if (chip < 0)
+		return;
+
+	mtk_nfc_hw_runtime_config(mtd);
+
+	nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL);
+}
+
+static int mtk_nfc_dev_ready(struct mtd_info *mtd)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+	if (nfi_readl(nfc, NFI_STA) & STA_BUSY)
+		return 0;
+
+	return 1;
+}
+
+static void mtk_nfc_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+
+	if (ctrl & NAND_ALE) {
+		mtk_nfc_send_address(nfc, dat);
+	} else if (ctrl & NAND_CLE) {
+		mtk_nfc_hw_reset(nfc);
+
+		nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG);
+		mtk_nfc_send_command(nfc, dat);
+	}
+}
+
+static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc)
+{
+	int rc;
+	u8 val;
+
+	rc = readb_poll_timeout_atomic(nfc->regs + NFI_PIO_DIRDY, val,
+				       val & PIO_DI_RDY, 10, MTK_TIMEOUT);
+	if (rc < 0)
+		dev_err(nfc->dev, "data not ready\n");
+}
+
+static inline u8 mtk_nfc_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	u32 reg;
+
+	/* after each byte read, the NFI_STA reg is reset by the hardware */
+	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+	if (reg != NFI_FSM_CUSTDATA) {
+		reg = nfi_readw(nfc, NFI_CNFG);
+		reg |= CNFG_BYTE_RW | CNFG_READ_EN;
+		nfi_writew(nfc, reg, NFI_CNFG);
+
+		/*
+		 * set to max sector to allow the HW to continue reading over
+		 * unaligned accesses
+		 */
+		reg = (MTK_MAX_SECTOR << CON_SEC_SHIFT) | CON_BRD;
+		nfi_writel(nfc, reg, NFI_CON);
+
+		/* trigger to fetch data */
+		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+	}
+
+	mtk_nfc_wait_ioready(nfc);
+
+	return nfi_readb(nfc, NFI_DATAR);
+}
+
+static void mtk_nfc_read_buf(struct mtd_info *mtd, u8 *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = mtk_nfc_read_byte(mtd);
+}
+
+static void mtk_nfc_write_byte(struct mtd_info *mtd, u8 byte)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
+	u32 reg;
+
+	reg = nfi_readl(nfc, NFI_STA) & NFI_FSM_MASK;
+
+	if (reg != NFI_FSM_CUSTDATA) {
+		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_BYTE_RW;
+		nfi_writew(nfc, reg, NFI_CNFG);
+
+		reg = MTK_MAX_SECTOR << CON_SEC_SHIFT | CON_BWR;
+		nfi_writel(nfc, reg, NFI_CON);
+
+		nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+	}
+
+	mtk_nfc_wait_ioready(nfc);
+	nfi_writeb(nfc, byte, NFI_DATAW);
+}
+
+static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		mtk_nfc_write_byte(mtd, buf[i]);
+}
+
+static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	int size = chip->ecc.size + mtk_nand->fdm.reg_size;
+
+	nfc->ecc_cfg.mode = ECC_DMA_MODE;
+	nfc->ecc_cfg.op = ECC_ENCODE;
+
+	return mtk_ecc_encode(nfc->ecc, &nfc->ecc_cfg, data, size);
+}
+
+static void mtk_nfc_no_bad_mark_swap(struct mtd_info *a, u8 *b, int c)
+{
+	/* nop */
+}
+
+static void mtk_nfc_bad_mark_swap(struct mtd_info *mtd, u8 *buf, int raw)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *nand = to_mtk_nand(chip);
+	u32 bad_pos = nand->bad_mark.pos;
+
+	if (raw)
+		bad_pos += nand->bad_mark.sec * mtk_data_len(chip);
+	else
+		bad_pos += nand->bad_mark.sec * chip->ecc.size;
+
+	swap(chip->oob_poi[0], buf[bad_pos]);
+}
+
+static int mtk_nfc_format_subpage(struct mtd_info *mtd, u32 offset,
+				  u32 len, const u8 *buf)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	u32 start, end;
+	int i, ret;
+
+	start = offset / chip->ecc.size;
+	end = DIV_ROUND_UP(offset + len, chip->ecc.size);
+
+	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+	for (i = 0; i < chip->ecc.steps; i++) {
+		memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+		       chip->ecc.size);
+
+		if (start > i || i >= end)
+			continue;
+
+		if (i == mtk_nand->bad_mark.sec)
+			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+
+		/* program the ECC parity data back to the OOB */
+		ret = mtk_nfc_sector_encode(chip, mtk_data_ptr(chip, i));
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_nfc_format_page(struct mtd_info *mtd, const u8 *buf)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	u32 i;
+
+	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+	for (i = 0; i < chip->ecc.steps; i++) {
+		if (buf)
+			memcpy(mtk_data_ptr(chip, i), data_ptr(chip, buf, i),
+			       chip->ecc.size);
+
+		if (i == mtk_nand->bad_mark.sec)
+			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+		memcpy(mtk_oob_ptr(chip, i), oob_ptr(chip, i), fdm->reg_size);
+	}
+}
+
+static inline void mtk_nfc_read_fdm(struct nand_chip *chip, u32 start,
+				    u32 sectors)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	u32 vall, valm;
+	u8 *oobptr;
+	int i, j;
+
+	for (i = 0; i < sectors; i++) {
+		oobptr = oob_ptr(chip, start + i);
+		vall = nfi_readl(nfc, NFI_FDML(i));
+		valm = nfi_readl(nfc, NFI_FDMM(i));
+
+		for (j = 0; j < fdm->reg_size; j++)
+			oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
+	}
+}
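+
+/*
+ * FDM packing example: vall holds FDM bytes 0..3 and valm bytes 4..7, least
+ * significant byte first. For instance, vall = 0x44332211 unpacks to
+ * oobptr[0] = 0x11, oobptr[1] = 0x22, oobptr[2] = 0x33, oobptr[3] = 0x44.
+ */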
+
+static inline void mtk_nfc_write_fdm(struct nand_chip *chip)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	u32 vall, valm;
+	u8 *oobptr;
+	int i, j;
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		oobptr = oob_ptr(chip, i);
+		vall = 0;
+		valm = 0;
+		for (j = 0; j < 8; j++) {
+			if (j < 4)
+				vall |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+						<< (j * 8);
+			else
+				valm |= (j < fdm->reg_size ? oobptr[j] : 0xff)
+						<< ((j - 4) * 8);
+		}
+		nfi_writel(nfc, vall, NFI_FDML(i));
+		nfi_writel(nfc, valm, NFI_FDMM(i));
+	}
+}
+
+static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				 const u8 *buf, int page, int len)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct device *dev = nfc->dev;
+	dma_addr_t addr;
+	u32 reg;
+	int ret;
+
+	addr = dma_map_single(dev, (void *)buf, len, DMA_TO_DEVICE);
+	ret = dma_mapping_error(nfc->dev, addr);
+	if (ret) {
+		dev_err(nfc->dev, "dma mapping error\n");
+		return -EINVAL;
+	}
+
+	reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AHB | CNFG_DMA_BURST_EN;
+	nfi_writew(nfc, reg, NFI_CNFG);
+
+	nfi_writel(nfc, chip->ecc.steps << CON_SEC_SHIFT, NFI_CON);
+	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+
+	init_completion(&nfc->done);
+
+	reg = nfi_readl(nfc, NFI_CON) | CON_BWR;
+	nfi_writel(nfc, reg, NFI_CON);
+	nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+	ret = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+	if (!ret) {
+		dev_err(dev, "program ahb done timeout\n");
+		nfi_writew(nfc, 0, NFI_INTR_EN);
+		ret = -ETIMEDOUT;
+		goto timeout;
+	}
+
+	ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
+					(reg & CNTR_MASK) >= chip->ecc.steps,
+					10, MTK_TIMEOUT);
+	if (ret)
+		dev_err(dev, "hwecc write timeout\n");
+
+timeout:
+
+	dma_unmap_single(nfc->dev, addr, len, DMA_TO_DEVICE);
+	nfi_writel(nfc, 0, NFI_CON);
+
+	return ret;
+}
+
+static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+			      const u8 *buf, int page, int raw)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	size_t len;
+	const u8 *bufpoi;
+	u32 reg;
+	int ret;
+
+	if (!raw) {
+		/* OOB => FDM: from register,  ECC: from HW */
+		reg = nfi_readw(nfc, NFI_CNFG) | CNFG_AUTO_FMT_EN;
+		nfi_writew(nfc, reg | CNFG_HW_ECC_EN, NFI_CNFG);
+
+		nfc->ecc_cfg.op = ECC_ENCODE;
+		nfc->ecc_cfg.mode = ECC_NFI_MODE;
+		ret = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+		if (ret) {
+			/* clear NFI config */
+			reg = nfi_readw(nfc, NFI_CNFG);
+			reg &= ~(CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			nfi_writew(nfc, reg, NFI_CNFG);
+
+			return ret;
+		}
+
+		memcpy(nfc->buffer, buf, mtd->writesize);
+		mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, raw);
+		bufpoi = nfc->buffer;
+
+		/* write OOB into the FDM registers (OOB area in MTK NAND) */
+		mtk_nfc_write_fdm(chip);
+	} else {
+		bufpoi = buf;
+	}
+
+	len = mtd->writesize + (raw ? mtd->oobsize : 0);
+	ret = mtk_nfc_do_write_page(mtd, chip, bufpoi, page, len);
+
+	if (!raw)
+		mtk_ecc_disable(nfc->ecc);
+
+	return ret;
+}
+
+static int mtk_nfc_write_page_hwecc(struct mtd_info *mtd,
+				    struct nand_chip *chip, const u8 *buf,
+				    int oob_on, int page)
+{
+	return mtk_nfc_write_page(mtd, chip, buf, page, 0);
+}
+
+static int mtk_nfc_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				  const u8 *buf, int oob_on, int pg)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+
+	mtk_nfc_format_page(mtd, buf);
+	return mtk_nfc_write_page(mtd, chip, nfc->buffer, pg, 1);
+}
+
+static int mtk_nfc_write_subpage_hwecc(struct mtd_info *mtd,
+				       struct nand_chip *chip, u32 offset,
+				       u32 data_len, const u8 *buf,
+				       int oob_on, int page)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	int ret;
+
+	ret = mtk_nfc_format_subpage(mtd, offset, data_len, buf);
+	if (ret < 0)
+		return ret;
+
+	/* use the data in the private buffer (now with FDM and ECC) */
+	return mtk_nfc_write_page(mtd, chip, nfc->buffer, page, 1);
+}
+
+static int mtk_nfc_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+				 int page)
+{
+	int ret;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+	ret = mtk_nfc_write_page_raw(mtd, chip, NULL, 1, page);
+	if (ret < 0)
+		return -EIO;
+
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	ret = chip->waitfunc(mtd, chip);
+
+	return ret & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int mtk_nfc_update_ecc_stats(struct mtd_info *mtd, u8 *buf, u32 sectors)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_ecc_stats stats;
+	int rc, i;
+
+	rc = nfi_readl(nfc, NFI_STA) & STA_EMP_PAGE;
+	if (rc) {
+		memset(buf, 0xff, sectors * chip->ecc.size);
+		for (i = 0; i < sectors; i++)
+			memset(oob_ptr(chip, i), 0xff, mtk_nand->fdm.reg_size);
+		return 0;
+	}
+
+	mtk_ecc_get_stats(nfc->ecc, &stats, sectors);
+	mtd->ecc_stats.corrected += stats.corrected;
+	mtd->ecc_stats.failed += stats.failed;
+
+	return stats.bitflips;
+}
+
+static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+				u32 data_offs, u32 readlen,
+				u8 *bufpoi, int page, int raw)
+{
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	u32 spare = mtk_nand->spare_per_sector;
+	u32 column, sectors, start, end, reg;
+	dma_addr_t addr;
+	int bitflips;
+	size_t len;
+	u8 *buf;
+	int rc;
+
+	start = data_offs / chip->ecc.size;
+	end = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+
+	sectors = end - start;
+	column = start * (chip->ecc.size + spare);
+
+	len = sectors * chip->ecc.size + (raw ? sectors * spare : 0);
+	buf = bufpoi + start * chip->ecc.size;
+
+	if (column != 0)
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, column, -1);
+
+	addr = dma_map_single(nfc->dev, buf, len, DMA_FROM_DEVICE);
+	rc = dma_mapping_error(nfc->dev, addr);
+	if (rc) {
+		dev_err(nfc->dev, "dma mapping error\n");
+
+		return -EINVAL;
+	}
+
+	reg = nfi_readw(nfc, NFI_CNFG);
+	reg |= CNFG_READ_EN | CNFG_DMA_BURST_EN | CNFG_AHB;
+	if (!raw) {
+		reg |= CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN;
+		nfi_writew(nfc, reg, NFI_CNFG);
+
+		nfc->ecc_cfg.mode = ECC_NFI_MODE;
+		nfc->ecc_cfg.sectors = sectors;
+		nfc->ecc_cfg.op = ECC_DECODE;
+		rc = mtk_ecc_enable(nfc->ecc, &nfc->ecc_cfg);
+		if (rc) {
+			dev_err(nfc->dev, "ecc enable\n");
+			/* clear NFI_CNFG */
+			reg &= ~(CNFG_DMA_BURST_EN | CNFG_AHB | CNFG_READ_EN |
+				CNFG_AUTO_FMT_EN | CNFG_HW_ECC_EN);
+			nfi_writew(nfc, reg, NFI_CNFG);
+			dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+			return rc;
+		}
+	} else {
+		nfi_writew(nfc, reg, NFI_CNFG);
+	}
+
+	nfi_writel(nfc, sectors << CON_SEC_SHIFT, NFI_CON);
+	nfi_writew(nfc, INTR_AHB_DONE_EN, NFI_INTR_EN);
+	nfi_writel(nfc, lower_32_bits(addr), NFI_STRADDR);
+
+	init_completion(&nfc->done);
+	reg = nfi_readl(nfc, NFI_CON) | CON_BRD;
+	nfi_writel(nfc, reg, NFI_CON);
+	nfi_writew(nfc, STAR_EN, NFI_STRDATA);
+
+	rc = wait_for_completion_timeout(&nfc->done, msecs_to_jiffies(500));
+	if (!rc)
+		dev_warn(nfc->dev, "read ahb/dma done timeout\n");
+
+	rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
+				       (reg & CNTR_MASK) >= sectors, 10,
+				       MTK_TIMEOUT);
+	if (rc < 0) {
+		dev_err(nfc->dev, "subpage done timeout\n");
+		bitflips = -EIO;
+	} else {
+		bitflips = 0;
+		if (!raw) {
+			rc = mtk_ecc_wait_done(nfc->ecc, ECC_DECODE);
+			bitflips = rc < 0 ? -ETIMEDOUT :
+				mtk_nfc_update_ecc_stats(mtd, buf, sectors);
+			mtk_nfc_read_fdm(chip, start, sectors);
+		}
+	}
+
+	dma_unmap_single(nfc->dev, addr, len, DMA_FROM_DEVICE);
+
+	if (raw)
+		goto done;
+
+	mtk_ecc_disable(nfc->ecc);
+
+	if (clamp(mtk_nand->bad_mark.sec, start, end) == mtk_nand->bad_mark.sec)
+		mtk_nand->bad_mark.bm_swap(mtd, bufpoi, raw);
+done:
+	nfi_writel(nfc, 0, NFI_CON);
+
+	return bitflips;
+}
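+
+/*
+ * Subpage geometry example (hypothetical numbers): with 512-byte ECC sectors,
+ * 16 bytes of spare per sector, data_offs = 1024 and readlen = 512, the code
+ * above yields start = 2, end = 3, sectors = 1 and column = 2 * 528 = 1056,
+ * i.e. the read starts at the raw offset of sector 2 (data + spare).
+ */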
+
+static int mtk_nfc_read_subpage_hwecc(struct mtd_info *mtd,
+				      struct nand_chip *chip, u32 off,
+				      u32 len, u8 *p, int pg)
+{
+	return mtk_nfc_read_subpage(mtd, chip, off, len, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_hwecc(struct mtd_info *mtd,
+				   struct nand_chip *chip, u8 *p,
+				   int oob_on, int pg)
+{
+	return mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, p, pg, 0);
+}
+
+static int mtk_nfc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+				 u8 *buf, int oob_on, int page)
+{
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc *nfc = nand_get_controller_data(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	int i, ret;
+
+	memset(nfc->buffer, 0xff, mtd->writesize + mtd->oobsize);
+	ret = mtk_nfc_read_subpage(mtd, chip, 0, mtd->writesize, nfc->buffer,
+				   page, 1);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		memcpy(oob_ptr(chip, i), mtk_oob_ptr(chip, i), fdm->reg_size);
+
+		if (i == mtk_nand->bad_mark.sec)
+			mtk_nand->bad_mark.bm_swap(mtd, nfc->buffer, 1);
+
+		if (buf)
+			memcpy(data_ptr(chip, buf, i), mtk_data_ptr(chip, i),
+			       chip->ecc.size);
+	}
+
+	return ret;
+}
+
+static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
+				int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	return mtk_nfc_read_page_raw(mtd, chip, NULL, 1, page);
+}
+
+static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc)
+{
+	/*
+	 * ACCON: access timing control register
+	 * -------------------------------------
+	 * 31:28: minimum required time for CS post pulling down after accessing
+	 *	the device
+	 * 27:22: minimum required time for CS pre pulling down before accessing
+	 *	the device
+	 * 21:16: minimum required time from NCEB low to NREB low
+	 * 15:12: minimum required time from NWEB high to NREB low.
+	 * 11:08: write enable hold time
+	 * 07:04: write wait states
+	 * 03:00: read wait states
+	 */
+	nfi_writel(nfc, 0x10804211, NFI_ACCCON);
+
+	/*
+	 * CNRNB: nand ready/busy register
+	 * -------------------------------
+	 * 7:4: timeout register for polling the NAND busy/ready signal
+	 * 0  : poll the status of the busy/ready signal after [7:4]*16 cycles.
+	 */
+	nfi_writew(nfc, 0xf1, NFI_CNRNB);
+	nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT);
+
+	mtk_nfc_hw_reset(nfc);
+
+	nfi_readl(nfc, NFI_INTR_STA);
+	nfi_writel(nfc, 0, NFI_INTR_EN);
+}
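+
+/*
+ * Decoded against the ACCCON field layout documented in mtk_nfc_hw_init(),
+ * the value 0x10804211 programmed above corresponds to [31:28] = 1,
+ * [27:22] = 2, [21:16] = 0, [15:12] = 4, [11:8] = 2, [7:4] = 1 and
+ * [3:0] = 1, presumably expressed in NFI clock cycles.
+ */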
+
+static irqreturn_t mtk_nfc_irq(int irq, void *id)
+{
+	struct mtk_nfc *nfc = id;
+	u16 sta, ien;
+
+	sta = nfi_readw(nfc, NFI_INTR_STA);
+	ien = nfi_readw(nfc, NFI_INTR_EN);
+
+	if (!(sta & ien))
+		return IRQ_NONE;
+
+	nfi_writew(nfc, ~sta & ien, NFI_INTR_EN);
+	complete(&nfc->done);
+
+	return IRQ_HANDLED;
+}
+
+static int mtk_nfc_enable_clk(struct device *dev, struct mtk_nfc_clk *clk)
+{
+	int ret;
+
+	ret = clk_prepare_enable(clk->nfi_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable nfi clk\n");
+		return ret;
+	}
+
+	ret = clk_prepare_enable(clk->pad_clk);
+	if (ret) {
+		dev_err(dev, "failed to enable pad clk\n");
+		clk_disable_unprepare(clk->nfi_clk);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void mtk_nfc_disable_clk(struct mtk_nfc_clk *clk)
+{
+	clk_disable_unprepare(clk->nfi_clk);
+	clk_disable_unprepare(clk->pad_clk);
+}
+
+static int mtk_nfc_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oob_region)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	struct mtk_nfc_fdm *fdm = &mtk_nand->fdm;
+	u32 eccsteps;
+
+	eccsteps = mtd->writesize / chip->ecc.size;
+
+	if (section >= eccsteps)
+		return -ERANGE;
+
+	oob_region->length = fdm->reg_size - fdm->ecc_size;
+	oob_region->offset = section * fdm->reg_size + fdm->ecc_size;
+
+	return 0;
+}
+
+static int mtk_nfc_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oob_region)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip);
+	u32 eccsteps;
+
+	if (section)
+		return -ERANGE;
+
+	eccsteps = mtd->writesize / chip->ecc.size;
+	oob_region->offset = mtk_nand->fdm.reg_size * eccsteps;
+	oob_region->length = mtd->oobsize - oob_region->offset;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mtk_nfc_ooblayout_ops = {
+	.free = mtk_nfc_ooblayout_free,
+	.ecc = mtk_nfc_ooblayout_ecc,
+};
+
+static void mtk_nfc_set_fdm(struct mtk_nfc_fdm *fdm, struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct mtk_nfc_nand_chip *chip = to_mtk_nand(nand);
+	u32 ecc_bytes;
+
+	ecc_bytes = DIV_ROUND_UP(nand->ecc.strength * ECC_PARITY_BITS, 8);
+
+	fdm->reg_size = chip->spare_per_sector - ecc_bytes;
+	if (fdm->reg_size > NFI_FDM_MAX_SIZE)
+		fdm->reg_size = NFI_FDM_MAX_SIZE;
+
+	/* bad block mark storage */
+	fdm->ecc_size = 1;
+}
+
+static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl,
+				     struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+
+	if (mtd->writesize == 512) {
+		bm_ctl->bm_swap = mtk_nfc_no_bad_mark_swap;
+	} else {
+		bm_ctl->bm_swap = mtk_nfc_bad_mark_swap;
+		bm_ctl->sec = mtd->writesize / mtk_data_len(nand);
+		bm_ctl->pos = mtd->writesize % mtk_data_len(nand);
+	}
+}
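+
+/*
+ * Worked example (hypothetical geometry): for a 2KB page with 512-byte ECC
+ * sectors and 16 bytes of spare per sector, mtk_data_len() = 528, so
+ * bad_mark.sec = 2048 / 528 = 3 and bad_mark.pos = 2048 % 528 = 464: the
+ * factory bad block marker position falls 464 bytes into sector 3's raw
+ * data + spare region.
+ */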
+
+static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44,
+			48, 49, 50, 51, 52, 62, 63, 64};
+	u32 eccsteps, i;
+
+	eccsteps = mtd->writesize / nand->ecc.size;
+	*sps = mtd->oobsize / eccsteps;
+
+	if (nand->ecc.size == 1024)
+		*sps >>= 1;
+
+	for (i = 0; i < ARRAY_SIZE(spare); i++) {
+		if (*sps <= spare[i]) {
+			if (!i)
+				*sps = spare[i];
+			else if (*sps != spare[i])
+				*sps = spare[i - 1];
+			break;
+		}
+	}
+
+	if (i >= ARRAY_SIZE(spare))
+		*sps = spare[ARRAY_SIZE(spare) - 1];
+
+	if (nand->ecc.size == 1024)
+		*sps <<= 1;
+}
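+
+/*
+ * Rounding example (hypothetical OOB size): a 2KB page with 512-byte sectors
+ * and 120 bytes of OOB gives 120 / 4 = 30 spare bytes per sector, which is
+ * not in the table above, so it is rounded down to the next supported
+ * value, 28.
+ */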
+
+static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	u32 spare;
+	int free;
+
+	/* support only ecc hw mode */
+	if (nand->ecc.mode != NAND_ECC_HW) {
+		dev_err(dev, "ecc.mode not supported\n");
+		return -EINVAL;
+	}
+
+	/* if optional dt settings not present */
+	if (!nand->ecc.size || !nand->ecc.strength) {
+		/* use datasheet requirements */
+		nand->ecc.strength = nand->ecc_strength_ds;
+		nand->ecc.size = nand->ecc_step_ds;
+
+		/*
+		 * align eccstrength and eccsize
+		 * this controller only supports 512 and 1024 sizes
+		 */
+		if (nand->ecc.size < 1024) {
+			if (mtd->writesize > 512) {
+				nand->ecc.size = 1024;
+				nand->ecc.strength <<= 1;
+			} else {
+				nand->ecc.size = 512;
+			}
+		} else {
+			nand->ecc.size = 1024;
+		}
+
+		mtk_nfc_set_spare_per_sector(&spare, mtd);
+
+		/* calculate oob bytes except ecc parity data */
+		free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3;
+		free = spare - free;
+
+		/*
+		 * increase the ecc strength if the remaining oob is bigger
+		 * than the max FDM size, or reduce it if the oob is too small
+		 * to hold the ecc parity data.
+		 */
+		if (free > NFI_FDM_MAX_SIZE) {
+			spare -= NFI_FDM_MAX_SIZE;
+			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+		} else if (free < 0) {
+			spare -= NFI_FDM_MIN_SIZE;
+			nand->ecc.strength = (spare << 3) / ECC_PARITY_BITS;
+		}
+	}
+
+	mtk_ecc_adjust_strength(&nand->ecc.strength);
+
+	dev_info(dev, "eccsize %d eccstrength %d\n",
+		 nand->ecc.size, nand->ecc.strength);
+
+	return 0;
+}
+
+static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc,
+				  struct device_node *np)
+{
+	struct mtk_nfc_nand_chip *chip;
+	struct nand_chip *nand;
+	struct mtd_info *mtd;
+	int nsels, len;
+	u32 tmp;
+	int ret;
+	int i;
+
+	if (!of_get_property(np, "reg", &nsels))
+		return -ENODEV;
+
+	nsels /= sizeof(u32);
+	if (!nsels || nsels > MTK_NAND_MAX_NSELS) {
+		dev_err(dev, "invalid reg property size %d\n", nsels);
+		return -EINVAL;
+	}
+
+	chip = devm_kzalloc(dev, sizeof(*chip) + nsels * sizeof(u8),
+			    GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	chip->nsels = nsels;
+	for (i = 0; i < nsels; i++) {
+		ret = of_property_read_u32_index(np, "reg", i, &tmp);
+		if (ret) {
+			dev_err(dev, "reg property failure : %d\n", ret);
+			return ret;
+		}
+		chip->sels[i] = tmp;
+	}
+
+	nand = &chip->nand;
+	nand->controller = &nfc->controller;
+
+	nand_set_flash_node(nand, np);
+	nand_set_controller_data(nand, nfc);
+
+	nand->options |= NAND_USE_BOUNCE_BUFFER | NAND_SUBPAGE_READ;
+	nand->dev_ready = mtk_nfc_dev_ready;
+	nand->select_chip = mtk_nfc_select_chip;
+	nand->write_byte = mtk_nfc_write_byte;
+	nand->write_buf = mtk_nfc_write_buf;
+	nand->read_byte = mtk_nfc_read_byte;
+	nand->read_buf = mtk_nfc_read_buf;
+	nand->cmd_ctrl = mtk_nfc_cmd_ctrl;
+
+	/* set default mode in case dt entry is missing */
+	nand->ecc.mode = NAND_ECC_HW;
+
+	nand->ecc.write_subpage = mtk_nfc_write_subpage_hwecc;
+	nand->ecc.write_page_raw = mtk_nfc_write_page_raw;
+	nand->ecc.write_page = mtk_nfc_write_page_hwecc;
+	nand->ecc.write_oob_raw = mtk_nfc_write_oob_std;
+	nand->ecc.write_oob = mtk_nfc_write_oob_std;
+
+	nand->ecc.read_subpage = mtk_nfc_read_subpage_hwecc;
+	nand->ecc.read_page_raw = mtk_nfc_read_page_raw;
+	nand->ecc.read_page = mtk_nfc_read_page_hwecc;
+	nand->ecc.read_oob_raw = mtk_nfc_read_oob_std;
+	nand->ecc.read_oob = mtk_nfc_read_oob_std;
+
+	mtd = nand_to_mtd(nand);
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = dev;
+	mtd->name = MTK_NAME;
+	mtd_set_ooblayout(mtd, &mtk_nfc_ooblayout_ops);
+
+	mtk_nfc_hw_init(nfc);
+
+	ret = nand_scan_ident(mtd, nsels, NULL);
+	if (ret)
+		return -ENODEV;
+
+	/* store bbt magic in page, because the OOB is not protected */
+	if (nand->bbt_options & NAND_BBT_USE_FLASH)
+		nand->bbt_options |= NAND_BBT_NO_OOB;
+
+	ret = mtk_nfc_ecc_init(dev, mtd);
+	if (ret)
+		return -EINVAL;
+
+	if (nand->options & NAND_BUSWIDTH_16) {
+		dev_err(dev, "16bits buswidth not supported");
+		return -EINVAL;
+	}
+
+	mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd);
+	mtk_nfc_set_fdm(&chip->fdm, mtd);
+	mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd);
+
+	len = mtd->writesize + mtd->oobsize;
+	nfc->buffer = devm_kzalloc(dev, len, GFP_KERNEL);
+	if (!nfc->buffer)
+		return  -ENOMEM;
+
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		return -ENODEV;
+
+	ret = mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+	if (ret) {
+		dev_err(dev, "mtd parse partition error\n");
+		nand_release(mtd);
+		return ret;
+	}
+
+	list_add_tail(&chip->node, &nfc->chips);
+
+	return 0;
+}
+
+static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *nand_np;
+	int ret;
+
+	for_each_child_of_node(np, nand_np) {
+		ret = mtk_nfc_nand_chip_init(dev, nfc, nand_np);
+		if (ret) {
+			of_node_put(nand_np);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int mtk_nfc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	struct mtk_nfc *nfc;
+	struct resource *res;
+	int ret, irq;
+
+	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	spin_lock_init(&nfc->controller.lock);
+	init_waitqueue_head(&nfc->controller.wq);
+	INIT_LIST_HEAD(&nfc->chips);
+
+	/* probe defer if not ready */
+	nfc->ecc = of_mtk_ecc_get(np);
+	if (IS_ERR(nfc->ecc))
+		return PTR_ERR(nfc->ecc);
+	else if (!nfc->ecc)
+		return -ENODEV;
+
+	nfc->dev = dev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->regs = devm_ioremap_resource(dev, res);
+	if (IS_ERR(nfc->regs)) {
+		ret = PTR_ERR(nfc->regs);
+		dev_err(dev, "no nfi base\n");
+		goto release_ecc;
+	}
+
+	nfc->clk.nfi_clk = devm_clk_get(dev, "nfi_clk");
+	if (IS_ERR(nfc->clk.nfi_clk)) {
+		dev_err(dev, "no clk\n");
+		ret = PTR_ERR(nfc->clk.nfi_clk);
+		goto release_ecc;
+	}
+
+	nfc->clk.pad_clk = devm_clk_get(dev, "pad_clk");
+	if (IS_ERR(nfc->clk.pad_clk)) {
+		dev_err(dev, "no pad clk\n");
+		ret = PTR_ERR(nfc->clk.pad_clk);
+		goto release_ecc;
+	}
+
+	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+	if (ret)
+		goto release_ecc;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no nfi irq resource\n");
+		ret = -EINVAL;
+		goto clk_disable;
+	}
+
+	ret = devm_request_irq(dev, irq, mtk_nfc_irq, 0x0, "mtk-nand", nfc);
+	if (ret) {
+		dev_err(dev, "failed to request nfi irq\n");
+		goto clk_disable;
+	}
+
+	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(dev, "failed to set dma mask\n");
+		goto clk_disable;
+	}
+
+	platform_set_drvdata(pdev, nfc);
+
+	ret = mtk_nfc_nand_chips_init(dev, nfc);
+	if (ret) {
+		dev_err(dev, "failed to init nand chips\n");
+		goto clk_disable;
+	}
+
+	return 0;
+
+clk_disable:
+	mtk_nfc_disable_clk(&nfc->clk);
+
+release_ecc:
+	mtk_ecc_release(nfc->ecc);
+
+	return ret;
+}
+
+static int mtk_nfc_remove(struct platform_device *pdev)
+{
+	struct mtk_nfc *nfc = platform_get_drvdata(pdev);
+	struct mtk_nfc_nand_chip *chip;
+
+	while (!list_empty(&nfc->chips)) {
+		chip = list_first_entry(&nfc->chips, struct mtk_nfc_nand_chip,
+					node);
+		nand_release(nand_to_mtd(&chip->nand));
+		list_del(&chip->node);
+	}
+
+	mtk_ecc_release(nfc->ecc);
+	mtk_nfc_disable_clk(&nfc->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mtk_nfc_suspend(struct device *dev)
+{
+	struct mtk_nfc *nfc = dev_get_drvdata(dev);
+
+	mtk_nfc_disable_clk(&nfc->clk);
+
+	return 0;
+}
+
+static int mtk_nfc_resume(struct device *dev)
+{
+	struct mtk_nfc *nfc = dev_get_drvdata(dev);
+	struct mtk_nfc_nand_chip *chip;
+	struct nand_chip *nand;
+	struct mtd_info *mtd;
+	int ret;
+	u32 i;
+
+	udelay(200);
+
+	ret = mtk_nfc_enable_clk(dev, &nfc->clk);
+	if (ret)
+		return ret;
+
+	mtk_nfc_hw_init(nfc);
+
+	/* reset NAND chip if VCC was powered off */
+	list_for_each_entry(chip, &nfc->chips, node) {
+		nand = &chip->nand;
+		mtd = nand_to_mtd(nand);
+		for (i = 0; i < chip->nsels; i++) {
+			nand->select_chip(mtd, i);
+			nand->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+		}
+	}
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume);
+#endif
+
+static const struct of_device_id mtk_nfc_id_table[] = {
+	{ .compatible = "mediatek,mt2701-nfc" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, mtk_nfc_id_table);
+
+static struct platform_driver mtk_nfc_driver = {
+	.probe  = mtk_nfc_probe,
+	.remove = mtk_nfc_remove,
+	.driver = {
+		.name  = MTK_NAME,
+		.of_match_table = mtk_nfc_id_table,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &mtk_nfc_pm_ops,
+#endif
+	},
+};
+
+module_platform_driver(mtk_nfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Xiaolei Li <xiaolei.li@mediatek.com>");
+MODULE_DESCRIPTION("MTK Nand Flash Controller Driver");
diff --git a/drivers/mtd/nand/rawnand/mxc_nand.c b/drivers/mtd/nand/rawnand/mxc_nand.c
new file mode 100644
index 000000000000..379e11be6e0b
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/mxc_nand.c
@@ -0,0 +1,1857 @@ 
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2008 Sascha Hauer, kernel@pengutronix.de
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
+ * MA 02110-1301, USA.
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/mach/flash.h>
+#include <linux/platform_data/mtd-mxc_nand.h>
+
+#define DRIVER_NAME "mxc_nand"
+
+/* Addresses for NFC registers */
+#define NFC_V1_V2_BUF_SIZE		(host->regs + 0x00)
+#define NFC_V1_V2_BUF_ADDR		(host->regs + 0x04)
+#define NFC_V1_V2_FLASH_ADDR		(host->regs + 0x06)
+#define NFC_V1_V2_FLASH_CMD		(host->regs + 0x08)
+#define NFC_V1_V2_CONFIG		(host->regs + 0x0a)
+#define NFC_V1_V2_ECC_STATUS_RESULT	(host->regs + 0x0c)
+#define NFC_V1_V2_RSLTMAIN_AREA		(host->regs + 0x0e)
+#define NFC_V1_V2_RSLTSPARE_AREA	(host->regs + 0x10)
+#define NFC_V1_V2_WRPROT		(host->regs + 0x12)
+#define NFC_V1_UNLOCKSTART_BLKADDR	(host->regs + 0x14)
+#define NFC_V1_UNLOCKEND_BLKADDR	(host->regs + 0x16)
+#define NFC_V21_UNLOCKSTART_BLKADDR0	(host->regs + 0x20)
+#define NFC_V21_UNLOCKSTART_BLKADDR1	(host->regs + 0x24)
+#define NFC_V21_UNLOCKSTART_BLKADDR2	(host->regs + 0x28)
+#define NFC_V21_UNLOCKSTART_BLKADDR3	(host->regs + 0x2c)
+#define NFC_V21_UNLOCKEND_BLKADDR0	(host->regs + 0x22)
+#define NFC_V21_UNLOCKEND_BLKADDR1	(host->regs + 0x26)
+#define NFC_V21_UNLOCKEND_BLKADDR2	(host->regs + 0x2a)
+#define NFC_V21_UNLOCKEND_BLKADDR3	(host->regs + 0x2e)
+#define NFC_V1_V2_NF_WRPRST		(host->regs + 0x18)
+#define NFC_V1_V2_CONFIG1		(host->regs + 0x1a)
+#define NFC_V1_V2_CONFIG2		(host->regs + 0x1c)
+
+#define NFC_V2_CONFIG1_ECC_MODE_4	(1 << 0)
+#define NFC_V1_V2_CONFIG1_SP_EN		(1 << 2)
+#define NFC_V1_V2_CONFIG1_ECC_EN	(1 << 3)
+#define NFC_V1_V2_CONFIG1_INT_MSK	(1 << 4)
+#define NFC_V1_V2_CONFIG1_BIG		(1 << 5)
+#define NFC_V1_V2_CONFIG1_RST		(1 << 6)
+#define NFC_V1_V2_CONFIG1_CE		(1 << 7)
+#define NFC_V2_CONFIG1_ONE_CYCLE	(1 << 8)
+#define NFC_V2_CONFIG1_PPB(x)		(((x) & 0x3) << 9)
+#define NFC_V2_CONFIG1_FP_INT		(1 << 11)
+
+#define NFC_V1_V2_CONFIG2_INT		(1 << 15)
+
+/*
+ * Operation modes for the NFC. Valid for v1, v2 and v3
+ * type controllers.
+ */
+#define NFC_CMD				(1 << 0)
+#define NFC_ADDR			(1 << 1)
+#define NFC_INPUT			(1 << 2)
+#define NFC_OUTPUT			(1 << 3)
+#define NFC_ID				(1 << 4)
+#define NFC_STATUS			(1 << 5)
+
+#define NFC_V3_FLASH_CMD		(host->regs_axi + 0x00)
+#define NFC_V3_FLASH_ADDR0		(host->regs_axi + 0x04)
+
+#define NFC_V3_CONFIG1			(host->regs_axi + 0x34)
+#define NFC_V3_CONFIG1_SP_EN		(1 << 0)
+#define NFC_V3_CONFIG1_RBA(x)		(((x) & 0x7) << 4)
+
+#define NFC_V3_ECC_STATUS_RESULT	(host->regs_axi + 0x38)
+
+#define NFC_V3_LAUNCH			(host->regs_axi + 0x40)
+
+#define NFC_V3_WRPROT			(host->regs_ip + 0x0)
+#define NFC_V3_WRPROT_LOCK_TIGHT	(1 << 0)
+#define NFC_V3_WRPROT_LOCK		(1 << 1)
+#define NFC_V3_WRPROT_UNLOCK		(1 << 2)
+#define NFC_V3_WRPROT_BLS_UNLOCK	(2 << 6)
+
+#define NFC_V3_WRPROT_UNLOCK_BLK_ADD0   (host->regs_ip + 0x04)
+
+#define NFC_V3_CONFIG2			(host->regs_ip + 0x24)
+#define NFC_V3_CONFIG2_PS_512			(0 << 0)
+#define NFC_V3_CONFIG2_PS_2048			(1 << 0)
+#define NFC_V3_CONFIG2_PS_4096			(2 << 0)
+#define NFC_V3_CONFIG2_ONE_CYCLE		(1 << 2)
+#define NFC_V3_CONFIG2_ECC_EN			(1 << 3)
+#define NFC_V3_CONFIG2_2CMD_PHASES		(1 << 4)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE0		(1 << 5)
+#define NFC_V3_CONFIG2_ECC_MODE_8		(1 << 6)
+#define NFC_V3_CONFIG2_PPB(x, shift)		(((x) & 0x3) << shift)
+#define NFC_V3_CONFIG2_NUM_ADDR_PHASE1(x)	(((x) & 0x3) << 12)
+#define NFC_V3_CONFIG2_INT_MSK			(1 << 15)
+#define NFC_V3_CONFIG2_ST_CMD(x)		(((x) & 0xff) << 24)
+#define NFC_V3_CONFIG2_SPAS(x)			(((x) & 0xff) << 16)
+
+#define NFC_V3_CONFIG3				(host->regs_ip + 0x28)
+#define NFC_V3_CONFIG3_ADD_OP(x)		(((x) & 0x3) << 0)
+#define NFC_V3_CONFIG3_FW8			(1 << 3)
+#define NFC_V3_CONFIG3_SBB(x)			(((x) & 0x7) << 8)
+#define NFC_V3_CONFIG3_NUM_OF_DEVICES(x)	(((x) & 0x7) << 12)
+#define NFC_V3_CONFIG3_RBB_MODE			(1 << 15)
+#define NFC_V3_CONFIG3_NO_SDMA			(1 << 20)
+
+#define NFC_V3_IPC			(host->regs_ip + 0x2C)
+#define NFC_V3_IPC_CREQ			(1 << 0)
+#define NFC_V3_IPC_INT			(1 << 31)
+
+#define NFC_V3_DELAY_LINE		(host->regs_ip + 0x34)
+
+struct mxc_nand_host;
+
+struct mxc_nand_devtype_data {
+	void (*preset)(struct mtd_info *);
+	void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
+	void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
+	void (*send_page)(struct mtd_info *, unsigned int);
+	void (*send_read_id)(struct mxc_nand_host *);
+	uint16_t (*get_dev_status)(struct mxc_nand_host *);
+	int (*check_int)(struct mxc_nand_host *);
+	void (*irq_control)(struct mxc_nand_host *, int);
+	u32 (*get_ecc_status)(struct mxc_nand_host *);
+	const struct mtd_ooblayout_ops *ooblayout;
+	void (*select_chip)(struct mtd_info *mtd, int chip);
+	int (*correct_data)(struct mtd_info *mtd, u_char *dat,
+			u_char *read_ecc, u_char *calc_ecc);
+	int (*setup_data_interface)(struct mtd_info *mtd,
+				    const struct nand_data_interface *conf,
+				    bool check_only);
+
+	/*
+	 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
+	 * (CONFIG1:INT_MSK is set). To handle this the driver uses
+	 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
+	 */
+	int irqpending_quirk;
+	int needs_ip;
+
+	size_t regs_offset;
+	size_t spare0_offset;
+	size_t axi_offset;
+
+	int spare_len;
+	int eccbytes;
+	int eccsize;
+	int ppb_shift;
+};
+
+struct mxc_nand_host {
+	struct nand_chip	nand;
+	struct device		*dev;
+
+	void __iomem		*spare0;
+	void __iomem		*main_area0;
+
+	void __iomem		*base;
+	void __iomem		*regs;
+	void __iomem		*regs_axi;
+	void __iomem		*regs_ip;
+	int			status_request;
+	struct clk		*clk;
+	int			clk_act;
+	int			irq;
+	int			eccsize;
+	int			used_oobsize;
+	int			active_cs;
+
+	struct completion	op_completion;
+
+	uint8_t			*data_buf;
+	unsigned int		buf_start;
+
+	const struct mxc_nand_devtype_data *devtype_data;
+	struct mxc_nand_platform_data pdata;
+};
+
+static const char * const part_probes[] = {
+	"cmdlinepart", "RedBoot", "ofpart", NULL };
+
+static void memcpy32_fromio(void *trg, const void __iomem  *src, size_t size)
+{
+	int i;
+	u32 *t = trg;
+	const __iomem u32 *s = src;
+
+	for (i = 0; i < (size >> 2); i++)
+		*t++ = __raw_readl(s++);
+}
+
+static void memcpy16_fromio(void *trg, const void __iomem  *src, size_t size)
+{
+	int i;
+	u16 *t = trg;
+	const __iomem u16 *s = src;
+
+	/* We assume that src (IO) is always 32bit aligned */
+	if (PTR_ALIGN(trg, 4) == trg && IS_ALIGNED(size, 4)) {
+		memcpy32_fromio(trg, src, size);
+		return;
+	}
+
+	for (i = 0; i < (size >> 1); i++)
+		*t++ = __raw_readw(s++);
+}
+
+static inline void memcpy32_toio(void __iomem *trg, const void *src, int size)
+{
+	/* __iowrite32_copy() uses a 32-bit word count, so divide by 4 */
+	__iowrite32_copy(trg, src, size / 4);
+}
+
+static void memcpy16_toio(void __iomem *trg, const void *src, int size)
+{
+	int i;
+	__iomem u16 *t = trg;
+	const u16 *s = src;
+
+	/* We assume that trg (IO) is always 32bit aligned */
+	if (PTR_ALIGN(src, 4) == src && IS_ALIGNED(size, 4)) {
+		memcpy32_toio(trg, src, size);
+		return;
+	}
+
+	for (i = 0; i < (size >> 1); i++)
+		__raw_writew(*s++, t++);
+}
+
+static int check_int_v3(struct mxc_nand_host *host)
+{
+	uint32_t tmp;
+
+	tmp = readl(NFC_V3_IPC);
+	if (!(tmp & NFC_V3_IPC_INT))
+		return 0;
+
+	tmp &= ~NFC_V3_IPC_INT;
+	writel(tmp, NFC_V3_IPC);
+
+	return 1;
+}
+
+static int check_int_v1_v2(struct mxc_nand_host *host)
+{
+	uint32_t tmp;
+
+	tmp = readw(NFC_V1_V2_CONFIG2);
+	if (!(tmp & NFC_V1_V2_CONFIG2_INT))
+		return 0;
+
+	if (!host->devtype_data->irqpending_quirk)
+		writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
+
+	return 1;
+}
+
+static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
+{
+	uint16_t tmp;
+
+	tmp = readw(NFC_V1_V2_CONFIG1);
+
+	if (activate)
+		tmp &= ~NFC_V1_V2_CONFIG1_INT_MSK;
+	else
+		tmp |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+	writew(tmp, NFC_V1_V2_CONFIG1);
+}
+
+static void irq_control_v3(struct mxc_nand_host *host, int activate)
+{
+	uint32_t tmp;
+
+	tmp = readl(NFC_V3_CONFIG2);
+
+	if (activate)
+		tmp &= ~NFC_V3_CONFIG2_INT_MSK;
+	else
+		tmp |= NFC_V3_CONFIG2_INT_MSK;
+
+	writel(tmp, NFC_V3_CONFIG2);
+}
+
+static void irq_control(struct mxc_nand_host *host, int activate)
+{
+	if (host->devtype_data->irqpending_quirk) {
+		if (activate)
+			enable_irq(host->irq);
+		else
+			disable_irq_nosync(host->irq);
+	} else {
+		host->devtype_data->irq_control(host, activate);
+	}
+}
+
+static u32 get_ecc_status_v1(struct mxc_nand_host *host)
+{
+	return readw(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v2(struct mxc_nand_host *host)
+{
+	return readl(NFC_V1_V2_ECC_STATUS_RESULT);
+}
+
+static u32 get_ecc_status_v3(struct mxc_nand_host *host)
+{
+	return readl(NFC_V3_ECC_STATUS_RESULT);
+}
+
+static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
+{
+	struct mxc_nand_host *host = dev_id;
+
+	if (!host->devtype_data->check_int(host))
+		return IRQ_NONE;
+
+	irq_control(host, 0);
+
+	complete(&host->op_completion);
+
+	return IRQ_HANDLED;
+}
+
+/* This function polls the NANDFC to wait for the basic operation to
+ * complete by checking the INT bit of the config2 register.
+ */
+static int wait_op_done(struct mxc_nand_host *host, int useirq)
+{
+	int ret = 0;
+
+	/*
+	 * If operation is already complete, don't bother to setup an irq or a
+	 * loop.
+	 */
+	if (host->devtype_data->check_int(host))
+		return 0;
+
+	if (useirq) {
+		unsigned long timeout;
+
+		reinit_completion(&host->op_completion);
+
+		irq_control(host, 1);
+
+		timeout = wait_for_completion_timeout(&host->op_completion, HZ);
+		if (!timeout && !host->devtype_data->check_int(host)) {
+			dev_dbg(host->dev, "timeout waiting for irq\n");
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		int max_retries = 8000;
+		int done;
+
+		do {
+			udelay(1);
+
+			done = host->devtype_data->check_int(host);
+			if (done)
+				break;
+
+		} while (--max_retries);
+
+		if (!done) {
+			dev_dbg(host->dev, "timeout polling for completion\n");
+			ret = -ETIMEDOUT;
+		}
+	}
+
+	WARN_ONCE(ret < 0, "timeout! useirq=%d\n", useirq);
+
+	return ret;
+}
+
+static void send_cmd_v3(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+	/* fill command */
+	writel(cmd, NFC_V3_FLASH_CMD);
+
+	/* send out command */
+	writel(NFC_CMD, NFC_V3_LAUNCH);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, useirq);
+}
+
+/* This function issues the specified command to the NAND device and
+ * waits for completion. */
+static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
+{
+	pr_debug("send_cmd(host, 0x%x, %d)\n", cmd, useirq);
+
+	writew(cmd, NFC_V1_V2_FLASH_CMD);
+	writew(NFC_CMD, NFC_V1_V2_CONFIG2);
+
+	if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
+		int max_retries = 100;
+		/* Reset completion is indicated by NFC_CONFIG2 being set to 0 */
+		while (max_retries-- > 0) {
+			if (readw(NFC_V1_V2_CONFIG2) == 0) {
+				break;
+			}
+			udelay(1);
+		}
+		if (max_retries < 0)
+			pr_debug("%s: RESET failed\n", __func__);
+	} else {
+		/* Wait for operation to complete */
+		wait_op_done(host, useirq);
+	}
+}
+
+static void send_addr_v3(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+	/* fill address */
+	writel(addr, NFC_V3_FLASH_ADDR0);
+
+	/* send out address */
+	writel(NFC_ADDR, NFC_V3_LAUNCH);
+
+	wait_op_done(host, 0);
+}
+
+/* This function sends an address (or partial address) to the
+ * NAND device. The address is used to select the source/destination for
+ * a NAND command. */
+static void send_addr_v1_v2(struct mxc_nand_host *host, uint16_t addr, int islast)
+{
+	pr_debug("send_addr(host, 0x%x %d)\n", addr, islast);
+
+	writew(addr, NFC_V1_V2_FLASH_ADDR);
+	writew(NFC_ADDR, NFC_V1_V2_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, islast);
+}
+
+static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	uint32_t tmp;
+
+	tmp = readl(NFC_V3_CONFIG1);
+	tmp &= ~(7 << 4);
+	writel(tmp, NFC_V3_CONFIG1);
+
+	/* transfer data from NFC ram to nand */
+	writel(ops, NFC_V3_LAUNCH);
+
+	wait_op_done(host, false);
+}
+
+static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+	/* NANDFC buffer 0 is used for page read/write */
+	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+	writew(ops, NFC_V1_V2_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, true);
+}
+
+static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int bufs, i;
+
+	if (mtd->writesize > 512)
+		bufs = 4;
+	else
+		bufs = 1;
+
+	for (i = 0; i < bufs; i++) {
+
+		/* NANDFC buffer 0 is used for page read/write */
+		writew((host->active_cs << 4) | i, NFC_V1_V2_BUF_ADDR);
+
+		writew(ops, NFC_V1_V2_CONFIG2);
+
+		/* Wait for operation to complete */
+		wait_op_done(host, true);
+	}
+}
+
+static void send_read_id_v3(struct mxc_nand_host *host)
+{
+	/* Read ID into main buffer */
+	writel(NFC_ID, NFC_V3_LAUNCH);
+
+	wait_op_done(host, true);
+
+	memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+/* Request the NANDFC to perform a read of the NAND device ID. */
+static void send_read_id_v1_v2(struct mxc_nand_host *host)
+{
+	/* NANDFC buffer 0 is used for device ID output */
+	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+	writew(NFC_ID, NFC_V1_V2_CONFIG2);
+
+	/* Wait for operation to complete */
+	wait_op_done(host, true);
+
+	memcpy32_fromio(host->data_buf, host->main_area0, 16);
+}
+
+static uint16_t get_dev_status_v3(struct mxc_nand_host *host)
+{
+	writew(NFC_STATUS, NFC_V3_LAUNCH);
+	wait_op_done(host, true);
+
+	return readl(NFC_V3_CONFIG1) >> 16;
+}
+
+/* This function requests the NANDFC to perform a read of the
+ * NAND device status and returns the current status. */
+static uint16_t get_dev_status_v1_v2(struct mxc_nand_host *host)
+{
+	void __iomem *main_buf = host->main_area0;
+	uint32_t store;
+	uint16_t ret;
+
+	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+
+	/*
+	 * The device status is stored in main_area0. To
+	 * prevent corruption of the buffer save the value
+	 * and restore it afterwards.
+	 */
+	store = readl(main_buf);
+
+	writew(NFC_STATUS, NFC_V1_V2_CONFIG2);
+	wait_op_done(host, true);
+
+	ret = readw(main_buf);
+
+	writel(store, main_buf);
+
+	return ret;
+}
+
+/* This function is used by the upper layer to check if the device is ready */
+static int mxc_nand_dev_ready(struct mtd_info *mtd)
+{
+	/*
+	 * NFC handles R/B internally. Therefore, this function
+	 * always returns status as ready.
+	 */
+	return 1;
+}
+
+static void mxc_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	/*
+	 * If HW ECC is enabled, we turn it on during init. There is
+	 * no need to enable again here.
+	 */
+}
+
+static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
+				 u_char *read_ecc, u_char *calc_ecc)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+	/*
+	 * 1-Bit errors are automatically corrected in HW.  No need for
+	 * additional correction.  2-Bit errors cannot be corrected by
+	 * HW ECC, so we need to return failure
+	 */
+	uint16_t ecc_status = get_ecc_status_v1(host);
+
+	if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
+		pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
+		return -EBADMSG;
+	}
+
+	return 0;
+}
+
+static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
+				 u_char *read_ecc, u_char *calc_ecc)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	u32 ecc_stat, err;
+	int no_subpages = 1;
+	int ret = 0;
+	u8 ecc_bit_mask, err_limit;
+
+	ecc_bit_mask = (host->eccsize == 4) ? 0x7 : 0xf;
+	err_limit = (host->eccsize == 4) ? 0x4 : 0x8;
+
+	no_subpages = mtd->writesize >> 9;
+
+	ecc_stat = host->devtype_data->get_ecc_status(host);
+
+	do {
+		err = ecc_stat & ecc_bit_mask;
+		if (err > err_limit) {
+			printk(KERN_WARNING "UnCorrectable RS-ECC Error\n");
+			return -EBADMSG;
+		} else {
+			ret += err;
+		}
+		ecc_stat >>= 4;
+	} while (--no_subpages);
+
+	pr_debug("%d Symbol Correctable RS-ECC Error\n", ret);
+
+	return ret;
+}
+
+static int mxc_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+				  u_char *ecc_code)
+{
+	return 0;
+}
+
+static u_char mxc_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	uint8_t ret;
+
+	/* Check for status request */
+	if (host->status_request)
+		return host->devtype_data->get_dev_status(host) & 0xFF;
+
+	if (nand_chip->options & NAND_BUSWIDTH_16) {
+		/* only take the lower byte of each word */
+		ret = *(uint16_t *)(host->data_buf + host->buf_start);
+
+		host->buf_start += 2;
+	} else {
+		ret = *(uint8_t *)(host->data_buf + host->buf_start);
+		host->buf_start++;
+	}
+
+	pr_debug("%s: ret=0x%hhx (start=%u)\n", __func__, ret, host->buf_start);
+	return ret;
+}
+
+static uint16_t mxc_nand_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	uint16_t ret;
+
+	ret = *(uint16_t *)(host->data_buf + host->buf_start);
+	host->buf_start += 2;
+
+	return ret;
+}
+
+/* Write data of length len from buffer buf. The data to be
+ * written to the NAND Flash is first copied to the RAM buffer. After the Data
+ * Input Operation by the NFC, the data is written to the NAND Flash */
+static void mxc_nand_write_buf(struct mtd_info *mtd,
+				const u_char *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	u16 col = host->buf_start;
+	int n = mtd->oobsize + mtd->writesize - col;
+
+	n = min(n, len);
+
+	memcpy(host->data_buf + col, buf, n);
+
+	host->buf_start += n;
+}
+
+/* Read the data buffer from the NAND Flash. To read data from the NAND
+ * Flash, the data output cycle is first initiated by the NFC, which copies
+ * the data to the RAM buffer. This data of length len is then copied to
+ * buffer buf.
+ */
+static void mxc_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	u16 col = host->buf_start;
+	int n = mtd->oobsize + mtd->writesize - col;
+
+	n = min(n, len);
+
+	memcpy(buf, host->data_buf + col, n);
+
+	host->buf_start += n;
+}
+
+/* This function is used by the upper layer to select and
+ * deselect the NAND chip */
+static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (chip == -1) {
+		/* Disable the NFC clock */
+		if (host->clk_act) {
+			clk_disable_unprepare(host->clk);
+			host->clk_act = 0;
+		}
+		return;
+	}
+
+	if (!host->clk_act) {
+		/* Enable the NFC clock */
+		clk_prepare_enable(host->clk);
+		host->clk_act = 1;
+	}
+}
+
+static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (chip == -1) {
+		/* Disable the NFC clock */
+		if (host->clk_act) {
+			clk_disable_unprepare(host->clk);
+			host->clk_act = 0;
+		}
+		return;
+	}
+
+	if (!host->clk_act) {
+		/* Enable the NFC clock */
+		clk_prepare_enable(host->clk);
+		host->clk_act = 1;
+	}
+
+	host->active_cs = chip;
+	writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
+}
+
+/*
+ * The controller splits a page into data chunks of 512 bytes + partial oob.
+ * There are writesize / 512 such chunks, the size of the partial oob parts is
+ * oobsize / #chunks rounded down to a multiple of 2. The last oob chunk then
+ * contains additionally the byte lost by rounding (if any).
+ * This function handles the needed shuffling between host->data_buf (which
+ * holds a page in natural order, i.e. writesize bytes data + oobsize bytes
+ * spare) and the NFC buffer.
+ */
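+/*
+ * Illustrative example (not part of the original comment): with a 2048 byte
+ * page and 64 bytes of used OOB on a controller with spare_len = 64,
+ * num_chunks = 2048 / 512 = 4 and oob_chunk_size = (64 / 4) & ~1 = 16, so
+ * the first three chunks copy 16 bytes each and the last chunk copies
+ * 64 - 3 * 16 = 16 bytes. With 218 bytes of used OOB on a 4096 byte page,
+ * num_chunks = 8, oob_chunk_size = (218 / 8) & ~1 = 26 and the last chunk
+ * copies 218 - 7 * 26 = 36 bytes.
+ */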
+static void copy_spare(struct mtd_info *mtd, bool bfrom)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(this);
+	u16 i, oob_chunk_size;
+	u16 num_chunks = mtd->writesize / 512;
+
+	u8 *d = host->data_buf + mtd->writesize;
+	u8 __iomem *s = host->spare0;
+	u16 sparebuf_size = host->devtype_data->spare_len;
+
+	/* size of oob chunk for all but possibly the last one */
+	oob_chunk_size = (host->used_oobsize / num_chunks) & ~1;
+
+	if (bfrom) {
+		for (i = 0; i < num_chunks - 1; i++)
+			memcpy16_fromio(d + i * oob_chunk_size,
+					s + i * sparebuf_size,
+					oob_chunk_size);
+
+		/* the last chunk */
+		memcpy16_fromio(d + i * oob_chunk_size,
+				s + i * sparebuf_size,
+				host->used_oobsize - i * oob_chunk_size);
+	} else {
+		for (i = 0; i < num_chunks - 1; i++)
+			memcpy16_toio(&s[i * sparebuf_size],
+				      &d[i * oob_chunk_size],
+				      oob_chunk_size);
+
+		/* the last chunk */
+		memcpy16_toio(&s[i * sparebuf_size],
+			      &d[i * oob_chunk_size],
+			      host->used_oobsize - i * oob_chunk_size);
+	}
+}
+
+/*
+ * MXC NANDFC can only perform full page+spare or spare-only read/write.  When
+ * the upper layers perform a read/write buf operation, the saved column address
+ * is used to index into the full page. So usually this function is called with
+ * column == 0 (unless no column cycle is needed, indicated by column == -1).
+ */
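+/*
+ * Illustrative example: on a 2 KiB page device larger than 256 MiB, a read
+ * of page 0x12345 issues two column cycles (0x00, 0x00) followed by three
+ * row cycles 0x45, 0x23 and 0x01, the last one flagged as the final address
+ * cycle.
+ */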
+static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+	/* Write out column address, if necessary */
+	if (column != -1) {
+		host->devtype_data->send_addr(host, column & 0xff,
+					      page_addr == -1);
+		if (mtd->writesize > 512)
+			/* another col addr cycle for 2k page */
+			host->devtype_data->send_addr(host,
+						      (column >> 8) & 0xff,
+						      false);
+	}
+
+	/* Write out page address, if necessary */
+	if (page_addr != -1) {
+		/* paddr_0 - p_addr_7 */
+		host->devtype_data->send_addr(host, (page_addr & 0xff), false);
+
+		if (mtd->writesize > 512) {
+			if (mtd->size >= 0x10000000) {
+				/* paddr_8 - paddr_15 */
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff,
+						false);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 16) & 0xff,
+						true);
+			} else
+				/* paddr_8 - paddr_15 */
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff, true);
+		} else {
+			/* One more address cycle for higher density devices */
+			if (mtd->size >= 0x4000000) {
+				/* paddr_8 - paddr_15 */
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff,
+						false);
+				host->devtype_data->send_addr(host,
+						(page_addr >> 16) & 0xff,
+						true);
+			} else
+				/* paddr_8 - paddr_15 */
+				host->devtype_data->send_addr(host,
+						(page_addr >> 8) & 0xff, true);
+		}
+	}
+}
+
+static int mxc_v1_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 6;
+	oobregion->length = nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static int mxc_v1_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+
+	if (section > nand_chip->ecc.steps)
+		return -ERANGE;
+
+	if (!section) {
+		if (mtd->writesize <= 512) {
+			oobregion->offset = 0;
+			oobregion->length = 5;
+		} else {
+			oobregion->offset = 2;
+			oobregion->length = 4;
+		}
+	} else {
+		oobregion->offset = ((section - 1) * 16) +
+				    nand_chip->ecc.bytes + 6;
+		if (section < nand_chip->ecc.steps)
+			oobregion->length = (section * 16) + 6 -
+					    oobregion->offset;
+		else
+			oobregion->length = mtd->oobsize - oobregion->offset;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v1_ooblayout_ops = {
+	.ecc = mxc_v1_ooblayout_ecc,
+	.free = mxc_v1_ooblayout_free,
+};
+
+static int mxc_v2_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
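+	/*
+	 * Illustrative note: ecc.bytes == 9 corresponds to 4-bit ECC with
+	 * 16 byte spare chunks per 512 byte step, ecc.bytes == 18 to 8-bit
+	 * ECC with 26 byte spare chunks (see the ecc.bytes assignment in
+	 * mxcnd_probe()).
+	 */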
+
+	if (section >= nand_chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * stepsize) + 7;
+	oobregion->length = nand_chip->ecc.bytes;
+
+	return 0;
+}
+
+static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
+
+	if (section > nand_chip->ecc.steps)
+		return -ERANGE;
+
+	if (!section) {
+		if (mtd->writesize <= 512) {
+			oobregion->offset = 0;
+			oobregion->length = 5;
+		} else {
+			oobregion->offset = 2;
+			oobregion->length = 4;
+		}
+	} else {
+		oobregion->offset = section * stepsize;
+		oobregion->length = 7;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops mxc_v2_ooblayout_ops = {
+	.ecc = mxc_v2_ooblayout_ecc,
+	.free = mxc_v2_ooblayout_free,
+};
+
+/*
+ * v2 and v3 type controllers can do 4bit or 8bit ecc depending
+ * on how much oob the nand chip has. For 8bit ecc we need at least
+ * 26 bytes of oob data per 512 byte block.
+ */
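+/*
+ * For illustration: a 2048 byte page with 64 bytes of OOB gives
+ * 64 * 512 / 2048 = 16 OOB bytes per 512 byte block and thus 4-bit ECC,
+ * while a 4096 byte page with 224 bytes of OOB gives 224 * 512 / 4096 = 28
+ * and thus 8-bit ECC.
+ */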
+static int get_eccsize(struct mtd_info *mtd)
+{
+	int oobbytes_per_512 = 0;
+
+	oobbytes_per_512 = mtd->oobsize * 512 / mtd->writesize;
+
+	if (oobbytes_per_512 < 26)
+		return 4;
+	else
+		return 8;
+}
+
+static void preset_v1(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	uint16_t config1 = 0;
+
+	if (nand_chip->ecc.mode == NAND_ECC_HW && mtd->writesize)
+		config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+	if (!host->devtype_data->irqpending_quirk)
+		config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+	host->eccsize = 1;
+
+	writew(config1, NFC_V1_V2_CONFIG1);
+	/* preset operation */
+
+	/* Unlock the internal RAM Buffer */
+	writew(0x2, NFC_V1_V2_CONFIG);
+
+	/* Blocks to be unlocked */
+	writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
+	writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
+
+	/* Unlock Block Command for given address range */
+	writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd,
+					const struct nand_data_interface *conf,
+					bool check_only)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int tRC_min_ns, tRC_ps, ret;
+	unsigned long rate, rate_round;
+	const struct nand_sdr_timings *timings;
+	u16 config1;
+
+	timings = nand_get_sdr_timings(conf);
+	if (IS_ERR(timings))
+		return -ENOTSUPP;
+
+	config1 = readw(NFC_V1_V2_CONFIG1);
+
+	tRC_min_ns = timings->tRC_min / 1000;
+	rate = 1000000000 / tRC_min_ns;
+
+	/*
+	 * For tRC < 30ns we have to use EDO mode. In this case the controller
+	 * does one access per clock cycle. Otherwise the controller does one
+	 * access in two clock cycles, thus we have to double the rate to the
+	 * controller.
+	 */
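+	/*
+	 * Illustrative example, using ONFI SDR timing data: timing mode 0 has
+	 * tRC_min = 100 ns, so the target rate of 10 MHz is doubled to
+	 * 20 MHz; timing mode 5 has tRC_min = 20 ns (< 30 ns), so EDO mode is
+	 * selected and the clock is requested at 50 MHz.
+	 */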
+	if (tRC_min_ns < 30) {
+		rate_round = clk_round_rate(host->clk, rate);
+		config1 |= NFC_V2_CONFIG1_ONE_CYCLE;
+		tRC_ps = 1000000000 / (rate_round / 1000);
+	} else {
+		rate *= 2;
+		rate_round = clk_round_rate(host->clk, rate);
+		config1 &= ~NFC_V2_CONFIG1_ONE_CYCLE;
+		tRC_ps = 1000000000 / (rate_round / 1000 / 2);
+	}
+
+	/*
+	 * The timing values compared against are from the i.MX25 Automotive
+	 * datasheet, Table 50. NFC Timing Parameters
+	 */
+	if (timings->tCLS_min > tRC_ps - 1000 ||
+	    timings->tCLH_min > tRC_ps - 2000 ||
+	    timings->tCS_min > tRC_ps - 1000 ||
+	    timings->tCH_min > tRC_ps - 2000 ||
+	    timings->tWP_min > tRC_ps - 1500 ||
+	    timings->tALS_min > tRC_ps ||
+	    timings->tALH_min > tRC_ps - 3000 ||
+	    timings->tDS_min > tRC_ps ||
+	    timings->tDH_min > tRC_ps - 5000 ||
+	    timings->tWC_min > 2 * tRC_ps ||
+	    timings->tWH_min > tRC_ps - 2500 ||
+	    timings->tRR_min > 6 * tRC_ps ||
+	    timings->tRP_min > 3 * tRC_ps / 2 ||
+	    timings->tRC_min > 2 * tRC_ps ||
+	    timings->tREH_min > (tRC_ps / 2) - 2500) {
+		dev_dbg(host->dev, "Timing out of bounds\n");
+		return -EINVAL;
+	}
+
+	if (check_only)
+		return 0;
+
+	ret = clk_set_rate(host->clk, rate);
+	if (ret)
+		return ret;
+
+	writew(config1, NFC_V1_V2_CONFIG1);
+
+	dev_dbg(host->dev, "Setting rate to %ldHz, %s mode\n", rate_round,
+		config1 & NFC_V2_CONFIG1_ONE_CYCLE ? "One cycle (EDO)" :
+		"normal");
+
+	return 0;
+}
+
+static void preset_v2(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	uint16_t config1 = 0;
+
+	config1 |= NFC_V2_CONFIG1_FP_INT;
+
+	if (!host->devtype_data->irqpending_quirk)
+		config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
+
+	if (mtd->writesize) {
+		uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
+
+		if (nand_chip->ecc.mode == NAND_ECC_HW)
+			config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
+
+		host->eccsize = get_eccsize(mtd);
+		if (host->eccsize == 4)
+			config1 |= NFC_V2_CONFIG1_ECC_MODE_4;
+
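+		/*
+		 * Illustrative note: the PPB field encodes pages per block as
+		 * ffs(ppb) - 6, i.e. 32 -> 0, 64 -> 1, 128 -> 2, 256 -> 3.
+		 */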
+		config1 |= NFC_V2_CONFIG1_PPB(ffs(pages_per_block) - 6);
+	} else {
+		host->eccsize = 1;
+	}
+
+	writew(config1, NFC_V1_V2_CONFIG1);
+	/* preset operation */
+
+	/* Unlock the internal RAM Buffer */
+	writew(0x2, NFC_V1_V2_CONFIG);
+
+	/* Blocks to be unlocked */
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
+	writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
+	writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
+
+	/* Unlock Block Command for given address range */
+	writew(0x4, NFC_V1_V2_WRPROT);
+}
+
+static void preset_v3(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(chip);
+	uint32_t config2, config3;
+	int i, addr_phases;
+
+	writel(NFC_V3_CONFIG1_RBA(0), NFC_V3_CONFIG1);
+	writel(NFC_V3_IPC_CREQ, NFC_V3_IPC);
+
+	/* Unlock the internal RAM Buffer */
+	writel(NFC_V3_WRPROT_BLS_UNLOCK | NFC_V3_WRPROT_UNLOCK,
+			NFC_V3_WRPROT);
+
+	/* Blocks to be unlocked */
+	for (i = 0; i < NAND_MAX_CHIPS; i++)
+		writel(0xffff << 16, NFC_V3_WRPROT_UNLOCK_BLK_ADD0 + (i << 2));
+
+	writel(0, NFC_V3_IPC);
+
+	config2 = NFC_V3_CONFIG2_ONE_CYCLE |
+		NFC_V3_CONFIG2_2CMD_PHASES |
+		NFC_V3_CONFIG2_SPAS(mtd->oobsize >> 1) |
+		NFC_V3_CONFIG2_ST_CMD(0x70) |
+		NFC_V3_CONFIG2_INT_MSK |
+		NFC_V3_CONFIG2_NUM_ADDR_PHASE0;
+
+	addr_phases = fls(chip->pagemask) >> 3;
+
+	if (mtd->writesize == 2048) {
+		config2 |= NFC_V3_CONFIG2_PS_2048;
+		config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+	} else if (mtd->writesize == 4096) {
+		config2 |= NFC_V3_CONFIG2_PS_4096;
+		config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases);
+	} else {
+		config2 |= NFC_V3_CONFIG2_PS_512;
+		config2 |= NFC_V3_CONFIG2_NUM_ADDR_PHASE1(addr_phases - 1);
+	}
+
+	if (mtd->writesize) {
+		if (chip->ecc.mode == NAND_ECC_HW)
+			config2 |= NFC_V3_CONFIG2_ECC_EN;
+
+		config2 |= NFC_V3_CONFIG2_PPB(
+				ffs(mtd->erasesize / mtd->writesize) - 6,
+				host->devtype_data->ppb_shift);
+		host->eccsize = get_eccsize(mtd);
+		if (host->eccsize == 8)
+			config2 |= NFC_V3_CONFIG2_ECC_MODE_8;
+	}
+
+	writel(config2, NFC_V3_CONFIG2);
+
+	config3 = NFC_V3_CONFIG3_NUM_OF_DEVICES(0) |
+			NFC_V3_CONFIG3_NO_SDMA |
+			NFC_V3_CONFIG3_RBB_MODE |
+			NFC_V3_CONFIG3_SBB(6) | /* Reset default */
+			NFC_V3_CONFIG3_ADD_OP(0);
+
+	if (!(chip->options & NAND_BUSWIDTH_16))
+		config3 |= NFC_V3_CONFIG3_FW8;
+
+	writel(config3, NFC_V3_CONFIG3);
+
+	writel(0, NFC_V3_DELAY_LINE);
+}
+
+/* Used by the upper layer to write a command to the NAND Flash for the
+ * different operations to be carried out on the NAND Flash */
+static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
+				int column, int page_addr)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+
+	pr_debug("mxc_nand_command (cmd = 0x%x, col = 0x%x, page = 0x%x)\n",
+	      command, column, page_addr);
+
+	/* Reset command state information */
+	host->status_request = false;
+
+	/* Command pre-processing step */
+	switch (command) {
+	case NAND_CMD_RESET:
+		host->devtype_data->preset(mtd);
+		host->devtype_data->send_cmd(host, command, false);
+		break;
+
+	case NAND_CMD_STATUS:
+		host->buf_start = 0;
+		host->status_request = true;
+
+		host->devtype_data->send_cmd(host, command, true);
+		WARN_ONCE(column != -1 || page_addr != -1,
+			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+			  command, column, page_addr);
+		mxc_do_addr_cycle(mtd, column, page_addr);
+		break;
+
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		if (command == NAND_CMD_READ0)
+			host->buf_start = column;
+		else
+			host->buf_start = column + mtd->writesize;
+
+		command = NAND_CMD_READ0; /* only READ0 is valid */
+
+		host->devtype_data->send_cmd(host, command, false);
+		WARN_ONCE(column < 0,
+			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+			  command, column, page_addr);
+		mxc_do_addr_cycle(mtd, 0, page_addr);
+
+		if (mtd->writesize > 512)
+			host->devtype_data->send_cmd(host,
+					NAND_CMD_READSTART, true);
+
+		host->devtype_data->send_page(mtd, NFC_OUTPUT);
+
+		memcpy32_fromio(host->data_buf, host->main_area0,
+				mtd->writesize);
+		copy_spare(mtd, true);
+		break;
+
+	case NAND_CMD_SEQIN:
+		if (column >= mtd->writesize)
+			/* call ourself to read a page */
+			mxc_nand_command(mtd, NAND_CMD_READ0, 0, page_addr);
+
+		host->buf_start = column;
+
+		host->devtype_data->send_cmd(host, command, false);
+		WARN_ONCE(column < -1,
+			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+			  command, column, page_addr);
+		mxc_do_addr_cycle(mtd, 0, page_addr);
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+		copy_spare(mtd, false);
+		host->devtype_data->send_page(mtd, NFC_INPUT);
+		host->devtype_data->send_cmd(host, command, true);
+		WARN_ONCE(column != -1 || page_addr != -1,
+			  "Unexpected column/row value (cmd=%u, col=%d, row=%d)\n",
+			  command, column, page_addr);
+		mxc_do_addr_cycle(mtd, column, page_addr);
+		break;
+
+	case NAND_CMD_READID:
+		host->devtype_data->send_cmd(host, command, true);
+		mxc_do_addr_cycle(mtd, column, page_addr);
+		host->devtype_data->send_read_id(host);
+		host->buf_start = 0;
+		break;
+
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+		host->devtype_data->send_cmd(host, command, false);
+		WARN_ONCE(column != -1,
+			  "Unexpected column value (cmd=%u, col=%d)\n",
+			  command, column);
+		mxc_do_addr_cycle(mtd, column, page_addr);
+
+		break;
+	case NAND_CMD_PARAM:
+		host->devtype_data->send_cmd(host, command, false);
+		mxc_do_addr_cycle(mtd, column, page_addr);
+		host->devtype_data->send_page(mtd, NFC_OUTPUT);
+		memcpy32_fromio(host->data_buf, host->main_area0, 512);
+		host->buf_start = 0;
+		break;
+	default:
+		WARN_ONCE(1, "Unimplemented command (cmd=%u)\n",
+			  command);
+		break;
+	}
+}
+
+static int mxc_nand_onfi_set_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int addr,
+				      u8 *subfeature_param)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int i;
+
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
+		return -EINVAL;
+
+	host->buf_start = 0;
+
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		chip->write_byte(mtd, subfeature_param[i]);
+
+	memcpy32_toio(host->main_area0, host->data_buf, mtd->writesize);
+	host->devtype_data->send_cmd(host, NAND_CMD_SET_FEATURES, false);
+	mxc_do_addr_cycle(mtd, addr, -1);
+	host->devtype_data->send_page(mtd, NFC_INPUT);
+
+	return 0;
+}
+
+static int mxc_nand_onfi_get_features(struct mtd_info *mtd,
+				      struct nand_chip *chip, int addr,
+				      u8 *subfeature_param)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct mxc_nand_host *host = nand_get_controller_data(nand_chip);
+	int i;
+
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
+		return -EINVAL;
+
+	host->devtype_data->send_cmd(host, NAND_CMD_GET_FEATURES, false);
+	mxc_do_addr_cycle(mtd, addr, -1);
+	host->devtype_data->send_page(mtd, NFC_OUTPUT);
+	memcpy32_fromio(host->data_buf, host->main_area0, 512);
+	host->buf_start = 0;
+
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		*subfeature_param++ = chip->read_byte(mtd);
+
+	return 0;
+}
+
+/*
+ * The generic flash bbt descriptors overlap with our ecc
+ * hardware, so define some i.MX specific ones.
+ */
+static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+	    | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 0,
+	.len = 4,
+	.veroffs = 4,
+	.maxblocks = 4,
+	.pattern = bbt_pattern,
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+	    | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs = 0,
+	.len = 4,
+	.veroffs = 4,
+	.maxblocks = 4,
+	.pattern = mirror_pattern,
+};
+
+/* v1 + irqpending_quirk: i.MX21 */
+static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
+	.preset = preset_v1,
+	.send_cmd = send_cmd_v1_v2,
+	.send_addr = send_addr_v1_v2,
+	.send_page = send_page_v1,
+	.send_read_id = send_read_id_v1_v2,
+	.get_dev_status = get_dev_status_v1_v2,
+	.check_int = check_int_v1_v2,
+	.irq_control = irq_control_v1_v2,
+	.get_ecc_status = get_ecc_status_v1,
+	.ooblayout = &mxc_v1_ooblayout_ops,
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v1,
+	.irqpending_quirk = 1,
+	.needs_ip = 0,
+	.regs_offset = 0xe00,
+	.spare0_offset = 0x800,
+	.spare_len = 16,
+	.eccbytes = 3,
+	.eccsize = 1,
+};
+
+/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
+static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
+	.preset = preset_v1,
+	.send_cmd = send_cmd_v1_v2,
+	.send_addr = send_addr_v1_v2,
+	.send_page = send_page_v1,
+	.send_read_id = send_read_id_v1_v2,
+	.get_dev_status = get_dev_status_v1_v2,
+	.check_int = check_int_v1_v2,
+	.irq_control = irq_control_v1_v2,
+	.get_ecc_status = get_ecc_status_v1,
+	.ooblayout = &mxc_v1_ooblayout_ops,
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v1,
+	.irqpending_quirk = 0,
+	.needs_ip = 0,
+	.regs_offset = 0xe00,
+	.spare0_offset = 0x800,
+	.axi_offset = 0,
+	.spare_len = 16,
+	.eccbytes = 3,
+	.eccsize = 1,
+};
+
+/* v21: i.MX25, i.MX35 */
+static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
+	.preset = preset_v2,
+	.send_cmd = send_cmd_v1_v2,
+	.send_addr = send_addr_v1_v2,
+	.send_page = send_page_v2,
+	.send_read_id = send_read_id_v1_v2,
+	.get_dev_status = get_dev_status_v1_v2,
+	.check_int = check_int_v1_v2,
+	.irq_control = irq_control_v1_v2,
+	.get_ecc_status = get_ecc_status_v2,
+	.ooblayout = &mxc_v2_ooblayout_ops,
+	.select_chip = mxc_nand_select_chip_v2,
+	.correct_data = mxc_nand_correct_data_v2_v3,
+	.setup_data_interface = mxc_nand_v2_setup_data_interface,
+	.irqpending_quirk = 0,
+	.needs_ip = 0,
+	.regs_offset = 0x1e00,
+	.spare0_offset = 0x1000,
+	.axi_offset = 0,
+	.spare_len = 64,
+	.eccbytes = 9,
+	.eccsize = 0,
+};
+
+/* v3.2a: i.MX51 */
+static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
+	.preset = preset_v3,
+	.send_cmd = send_cmd_v3,
+	.send_addr = send_addr_v3,
+	.send_page = send_page_v3,
+	.send_read_id = send_read_id_v3,
+	.get_dev_status = get_dev_status_v3,
+	.check_int = check_int_v3,
+	.irq_control = irq_control_v3,
+	.get_ecc_status = get_ecc_status_v3,
+	.ooblayout = &mxc_v2_ooblayout_ops,
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v2_v3,
+	.irqpending_quirk = 0,
+	.needs_ip = 1,
+	.regs_offset = 0,
+	.spare0_offset = 0x1000,
+	.axi_offset = 0x1e00,
+	.spare_len = 64,
+	.eccbytes = 0,
+	.eccsize = 0,
+	.ppb_shift = 7,
+};
+
+/* v3.2b: i.MX53 */
+static const struct mxc_nand_devtype_data imx53_nand_devtype_data = {
+	.preset = preset_v3,
+	.send_cmd = send_cmd_v3,
+	.send_addr = send_addr_v3,
+	.send_page = send_page_v3,
+	.send_read_id = send_read_id_v3,
+	.get_dev_status = get_dev_status_v3,
+	.check_int = check_int_v3,
+	.irq_control = irq_control_v3,
+	.get_ecc_status = get_ecc_status_v3,
+	.ooblayout = &mxc_v2_ooblayout_ops,
+	.select_chip = mxc_nand_select_chip_v1_v3,
+	.correct_data = mxc_nand_correct_data_v2_v3,
+	.irqpending_quirk = 0,
+	.needs_ip = 1,
+	.regs_offset = 0,
+	.spare0_offset = 0x1000,
+	.axi_offset = 0x1e00,
+	.spare_len = 64,
+	.eccbytes = 0,
+	.eccsize = 0,
+	.ppb_shift = 8,
+};
+
+static inline int is_imx21_nfc(struct mxc_nand_host *host)
+{
+	return host->devtype_data == &imx21_nand_devtype_data;
+}
+
+static inline int is_imx27_nfc(struct mxc_nand_host *host)
+{
+	return host->devtype_data == &imx27_nand_devtype_data;
+}
+
+static inline int is_imx25_nfc(struct mxc_nand_host *host)
+{
+	return host->devtype_data == &imx25_nand_devtype_data;
+}
+
+static inline int is_imx51_nfc(struct mxc_nand_host *host)
+{
+	return host->devtype_data == &imx51_nand_devtype_data;
+}
+
+static inline int is_imx53_nfc(struct mxc_nand_host *host)
+{
+	return host->devtype_data == &imx53_nand_devtype_data;
+}
+
+static const struct platform_device_id mxcnd_devtype[] = {
+	{
+		.name = "imx21-nand",
+		.driver_data = (kernel_ulong_t) &imx21_nand_devtype_data,
+	}, {
+		.name = "imx27-nand",
+		.driver_data = (kernel_ulong_t) &imx27_nand_devtype_data,
+	}, {
+		.name = "imx25-nand",
+		.driver_data = (kernel_ulong_t) &imx25_nand_devtype_data,
+	}, {
+		.name = "imx51-nand",
+		.driver_data = (kernel_ulong_t) &imx51_nand_devtype_data,
+	}, {
+		.name = "imx53-nand",
+		.driver_data = (kernel_ulong_t) &imx53_nand_devtype_data,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, mxcnd_devtype);
+
+#ifdef CONFIG_OF
+static const struct of_device_id mxcnd_dt_ids[] = {
+	{
+		.compatible = "fsl,imx21-nand",
+		.data = &imx21_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx27-nand",
+		.data = &imx27_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx25-nand",
+		.data = &imx25_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx51-nand",
+		.data = &imx51_nand_devtype_data,
+	}, {
+		.compatible = "fsl,imx53-nand",
+		.data = &imx53_nand_devtype_data,
+	},
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxcnd_dt_ids);
+
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+	struct device_node *np = host->dev->of_node;
+	const struct of_device_id *of_id =
+		of_match_device(mxcnd_dt_ids, host->dev);
+
+	if (!np)
+		return 1;
+
+	host->devtype_data = of_id->data;
+
+	return 0;
+}
+#else
+static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
+{
+	return 1;
+}
+#endif
+
+static int mxcnd_probe(struct platform_device *pdev)
+{
+	struct nand_chip *this;
+	struct mtd_info *mtd;
+	struct mxc_nand_host *host;
+	struct resource *res;
+	int err = 0;
+
+	/* Allocate memory for MTD device structure and private data */
+	host = devm_kzalloc(&pdev->dev, sizeof(struct mxc_nand_host),
+			GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	/* allocate a temporary buffer for the nand_scan_ident() */
+	host->data_buf = devm_kzalloc(&pdev->dev, PAGE_SIZE, GFP_KERNEL);
+	if (!host->data_buf)
+		return -ENOMEM;
+
+	host->dev = &pdev->dev;
+	/* structures must be linked */
+	this = &host->nand;
+	mtd = nand_to_mtd(this);
+	mtd->dev.parent = &pdev->dev;
+	mtd->name = DRIVER_NAME;
+
+	/* 50 us command delay time */
+	this->chip_delay = 5;
+
+	nand_set_controller_data(this, host);
+	nand_set_flash_node(this, pdev->dev.of_node);
+	this->dev_ready = mxc_nand_dev_ready;
+	this->cmdfunc = mxc_nand_command;
+	this->read_byte = mxc_nand_read_byte;
+	this->read_word = mxc_nand_read_word;
+	this->write_buf = mxc_nand_write_buf;
+	this->read_buf = mxc_nand_read_buf;
+	this->onfi_set_features = mxc_nand_onfi_set_features;
+	this->onfi_get_features = mxc_nand_onfi_get_features;
+
+	host->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(host->clk))
+		return PTR_ERR(host->clk);
+
+	err = mxcnd_probe_dt(host);
+	if (err > 0) {
+		struct mxc_nand_platform_data *pdata =
+					dev_get_platdata(&pdev->dev);
+		if (pdata) {
+			host->pdata = *pdata;
+			host->devtype_data = (struct mxc_nand_devtype_data *)
+						pdev->id_entry->driver_data;
+		} else {
+			err = -ENODEV;
+		}
+	}
+	if (err < 0)
+		return err;
+
+	this->setup_data_interface = host->devtype_data->setup_data_interface;
+
+	if (host->devtype_data->needs_ip) {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+		host->regs_ip = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(host->regs_ip))
+			return PTR_ERR(host->regs_ip);
+
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	} else {
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	}
+
+	host->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(host->base))
+		return PTR_ERR(host->base);
+
+	host->main_area0 = host->base;
+
+	if (host->devtype_data->regs_offset)
+		host->regs = host->base + host->devtype_data->regs_offset;
+	host->spare0 = host->base + host->devtype_data->spare0_offset;
+	if (host->devtype_data->axi_offset)
+		host->regs_axi = host->base + host->devtype_data->axi_offset;
+
+	this->ecc.bytes = host->devtype_data->eccbytes;
+	host->eccsize = host->devtype_data->eccsize;
+
+	this->select_chip = host->devtype_data->select_chip;
+	this->ecc.size = 512;
+	mtd_set_ooblayout(mtd, host->devtype_data->ooblayout);
+
+	if (host->pdata.hw_ecc) {
+		this->ecc.mode = NAND_ECC_HW;
+	} else {
+		this->ecc.mode = NAND_ECC_SOFT;
+		this->ecc.algo = NAND_ECC_HAMMING;
+	}
+
+	/* NAND bus width determines access functions used by upper layer */
+	if (host->pdata.width == 2)
+		this->options |= NAND_BUSWIDTH_16;
+
+	/* update flash based bbt */
+	if (host->pdata.flash_bbt)
+		this->bbt_options |= NAND_BBT_USE_FLASH;
+
+	init_completion(&host->op_completion);
+
+	host->irq = platform_get_irq(pdev, 0);
+	if (host->irq < 0)
+		return host->irq;
+
+	/*
+	 * Use host->devtype_data->irq_control() here instead of irq_control()
+	 * because we must not disable_irq_nosync without having requested the
+	 * irq.
+	 */
+	host->devtype_data->irq_control(host, 0);
+
+	err = devm_request_irq(&pdev->dev, host->irq, mxc_nfc_irq,
+			0, DRIVER_NAME, host);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(host->clk);
+	if (err)
+		return err;
+	host->clk_act = 1;
+
+	/*
+	 * Now that we "own" the interrupt make sure the interrupt mask bit is
+	 * cleared on i.MX21. Otherwise we can't read the interrupt status bit
+	 * on this machine.
+	 */
+	if (host->devtype_data->irqpending_quirk) {
+		disable_irq_nosync(host->irq);
+		host->devtype_data->irq_control(host, 1);
+	}
+
+	/* first scan to find the device and get the page size */
+	if (nand_scan_ident(mtd, is_imx25_nfc(host) ? 4 : 1, NULL)) {
+		err = -ENXIO;
+		goto escan;
+	}
+
+	switch (this->ecc.mode) {
+	case NAND_ECC_HW:
+		this->ecc.calculate = mxc_nand_calculate_ecc;
+		this->ecc.hwctl = mxc_nand_enable_hwecc;
+		this->ecc.correct = host->devtype_data->correct_data;
+		break;
+
+	case NAND_ECC_SOFT:
+		break;
+
+	default:
+		err = -EINVAL;
+		goto escan;
+	}
+
+	if (this->bbt_options & NAND_BBT_USE_FLASH) {
+		this->bbt_td = &bbt_main_descr;
+		this->bbt_md = &bbt_mirror_descr;
+	}
+
+	/* allocate the right size buffer now */
+	devm_kfree(&pdev->dev, (void *)host->data_buf);
+	host->data_buf = devm_kzalloc(&pdev->dev, mtd->writesize + mtd->oobsize,
+					GFP_KERNEL);
+	if (!host->data_buf) {
+		err = -ENOMEM;
+		goto escan;
+	}
+
+	/* Call preset again, with correct writesize this time */
+	host->devtype_data->preset(mtd);
+
+	if (!this->ecc.bytes) {
+		if (host->eccsize == 8)
+			this->ecc.bytes = 18;
+		else if (host->eccsize == 4)
+			this->ecc.bytes = 9;
+	}
+
+	/*
+	 * Experimentation shows that i.MX NFC can only handle up to 218 oob
+	 * bytes. Limit used_oobsize to 218 so as to not confuse copy_spare()
+	 * into copying invalid data to/from the spare IO buffer, as this
+	 * might cause ECC data corruption when doing sub-page write to a
+	 * partially written page.
+	 */
+	host->used_oobsize = min(mtd->oobsize, 218U);
+
+	if (this->ecc.mode == NAND_ECC_HW) {
+		if (is_imx21_nfc(host) || is_imx27_nfc(host))
+			this->ecc.strength = 1;
+		else
+			this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
+	}
+
+	/* second phase scan */
+	if (nand_scan_tail(mtd)) {
+		err = -ENXIO;
+		goto escan;
+	}
+
+	/* Register the partitions */
+	mtd_device_parse_register(mtd, part_probes,
+			NULL,
+			host->pdata.parts,
+			host->pdata.nr_parts);
+
+	platform_set_drvdata(pdev, host);
+
+	return 0;
+
+escan:
+	if (host->clk_act)
+		clk_disable_unprepare(host->clk);
+
+	return err;
+}
+
+static int mxcnd_remove(struct platform_device *pdev)
+{
+	struct mxc_nand_host *host = platform_get_drvdata(pdev);
+
+	nand_release(nand_to_mtd(&host->nand));
+	if (host->clk_act)
+		clk_disable_unprepare(host->clk);
+
+	return 0;
+}
+
+static struct platform_driver mxcnd_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .of_match_table = of_match_ptr(mxcnd_dt_ids),
+	},
+	.id_table = mxcnd_devtype,
+	.probe = mxcnd_probe,
+	.remove = mxcnd_remove,
+};
+module_platform_driver(mxcnd_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC NAND MTD driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/rawnand/nand_base.c b/drivers/mtd/nand/rawnand/nand_base.c
new file mode 100644
index 000000000000..56b08a897115
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nand_base.c
@@ -0,0 +1,4840 @@ 
+/*
+ *  Overview:
+ *   This is the generic MTD driver for NAND flash devices. It should be
+ *   capable of working with almost all NAND chips currently available.
+ *
+ *	Additional technical information is available on
+ *	http://www.linux-mtd.infradead.org/doc/nand.html
+ *
+ *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
+ *		  2002-2006 Thomas Gleixner (tglx@linutronix.de)
+ *
+ *  Credits:
+ *	David Woodhouse for adding multichip support
+ *
+ *	Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ *	rework for 2K page size chips
+ *
+ *  TODO:
+ *	Enable cached programming for 2k page size chips
+ *	Check, if mtd->ecctype should be set to MTD_ECC_HW
+ *	if we have HW ECC support.
+ *	BBT table is not serialized, has to be fixed
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+
+static int nand_get_device(struct mtd_info *mtd, int new_state);
+
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+			     struct mtd_oob_ops *ops);
+
+/* Define default oob placement schemes for large and small page devices */
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->offset = 0;
+		oobregion->length = 4;
+	} else {
+		oobregion->offset = 6;
+		oobregion->length = ecc->total - 4;
+	}
+
+	return 0;
+}
+
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	if (mtd->oobsize == 16) {
+		if (section)
+			return -ERANGE;
+
+		oobregion->length = 8;
+		oobregion->offset = 8;
+	} else {
+		oobregion->length = 2;
+		if (!section)
+			oobregion->offset = 3;
+		else
+			oobregion->offset = 6;
+	}
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+	.ecc = nand_ooblayout_ecc_sp,
+	.free = nand_ooblayout_free_sp,
+};
+EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
+
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = ecc->total;
+	oobregion->offset = mtd->oobsize - oobregion->length;
+
+	return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = mtd->oobsize - ecc->total - 2;
+	oobregion->offset = 2;
+
+	return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+	.ecc = nand_ooblayout_ecc_lp,
+	.free = nand_ooblayout_free_lp,
+};
+EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
+
+static int check_offs_len(struct mtd_info *mtd,
+					loff_t ofs, uint64_t len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret = 0;
+
+	/* Start address must align on block boundary */
+	if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
+		pr_debug("%s: unaligned address\n", __func__);
+		ret = -EINVAL;
+	}
+
+	/* Length must align on block boundary */
+	if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
+		pr_debug("%s: length not block aligned\n", __func__);
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+/**
+ * nand_release_device - [GENERIC] release chip
+ * @mtd: MTD device structure
+ *
+ * Release chip lock and wake up anyone waiting on the device.
+ */
+static void nand_release_device(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	/* Release the controller and the chip */
+	spin_lock(&chip->controller->lock);
+	chip->controller->active = NULL;
+	chip->state = FL_READY;
+	wake_up(&chip->controller->wq);
+	spin_unlock(&chip->controller->lock);
+}
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	return readb(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ *
+ */
+static uint8_t nand_read_byte16(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	return (uint8_t) cpu_to_le16(readw(chip->IO_ADDR_R));
+}
+
+/**
+ * nand_read_word - [DEFAULT] read one word from the chip
+ * @mtd: MTD device structure
+ *
+ * Default read function for 16bit buswidth without endianness conversion.
+ */
+static u16 nand_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	return readw(chip->IO_ADDR_R);
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @mtd: MTD device structure
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	switch (chipnr) {
+	case -1:
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
+		break;
+	case 0:
+		break;
+
+	default:
+		BUG();
+	}
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct mtd_info *mtd, uint8_t byte)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	chip->write_buf(mtd, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @mtd: MTD device structure
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct mtd_info *mtd, uint8_t byte)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint16_t word = byte;
+
+	/*
+	 * It's not entirely clear what should happen to I/O[15:8] when writing
+	 * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+	 *
+	 *    When the host supports a 16-bit bus width, only data is
+	 *    transferred at the 16-bit width. All address and command line
+	 *    transfers shall use only the lower 8-bits of the data bus. During
+	 *    command transfers, the host may place any value on the upper
+	 *    8-bits of the data bus. During address transfers, the host shall
+	 *    set the upper 8-bits of the data bus to 00h.
+	 *
+	 * One user of the write_byte callback is nand_onfi_set_features. The
+	 * four parameters are specified to be written to I/O[7:0], but this is
+	 * neither an address nor a command transfer. Let's assume a 0 on the
+	 * upper I/O lines is OK.
+	 */
+	chip->write_buf(mtd, (uint8_t *)&word, 2);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	iowrite8_rep(chip->IO_ADDR_W, buf, len);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	ioread8_rep(chip->IO_ADDR_R, buf, len);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	u16 *p = (u16 *) buf;
+
+	iowrite16_rep(chip->IO_ADDR_W, p, len >> 1);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store date
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	u16 *p = (u16 *) buf;
+
+	ioread16_rep(chip->IO_ADDR_R, p, len >> 1);
+}
+
+/**
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check if the block is bad.
+ */
+static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+	int page, res = 0, i = 0;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	u16 bad;
+
+	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+		ofs += mtd->erasesize - mtd->writesize;
+
+	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+	do {
+		if (chip->options & NAND_BUSWIDTH_16) {
+			chip->cmdfunc(mtd, NAND_CMD_READOOB,
+					chip->badblockpos & 0xFE, page);
+			bad = cpu_to_le16(chip->read_word(mtd));
+			if (chip->badblockpos & 0x1)
+				bad >>= 8;
+			else
+				bad &= 0xFF;
+		} else {
+			chip->cmdfunc(mtd, NAND_CMD_READOOB, chip->badblockpos,
+					page);
+			bad = chip->read_byte(mtd);
+		}
+
+		if (likely(chip->badblockbits == 8))
+			res = bad != 0xFF;
+		else
+			res = hweight8(bad) < chip->badblockbits;
+		ofs += mtd->writesize;
+		page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+		i++;
+	} while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+
+	return res;
+}
+
+/**
+ * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This is the default implementation, which can be overridden by a hardware
+ * specific driver. It provides the details for writing a bad block marker to a
+ * block.
+ */
+static int nand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtd_oob_ops ops;
+	uint8_t buf[2] = { 0, 0 };
+	int ret = 0, res, i = 0;
+
+	memset(&ops, 0, sizeof(ops));
+	ops.oobbuf = buf;
+	ops.ooboffs = chip->badblockpos;
+	if (chip->options & NAND_BUSWIDTH_16) {
+		ops.ooboffs &= ~0x01;
+		ops.len = ops.ooblen = 2;
+	} else {
+		ops.len = ops.ooblen = 1;
+	}
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	/* Write to first/last page(s) if necessary */
+	if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
+		ofs += mtd->erasesize - mtd->writesize;
+	do {
+		res = nand_do_write_oob(mtd, ofs, &ops);
+		if (!ret)
+			ret = res;
+
+		i++;
+		ofs += mtd->writesize;
+	} while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+
+	return ret;
+}
+
+/**
+ * nand_block_markbad_lowlevel - mark a block bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * This function performs the generic NAND bad block marking steps (i.e., bad
+ * block table(s) and/or marker(s)). We only allow the hardware driver to
+ * specify how to write bad block markers to OOB (chip->block_markbad).
+ *
+ * We try operations in the following order:
+ *  (1) erase the affected block, to allow OOB marker to be written cleanly
+ *  (2) write bad block marker to OOB area of affected block (unless flag
+ *      NAND_BBT_NO_OOB_BBM is present)
+ *  (3) update the BBT
+ * Note that we retain the first error encountered in (2) or (3), finish the
+ * procedures, and dump the error in the end.
+ */
+static int nand_block_markbad_lowlevel(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int res, ret = 0;
+
+	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
+		struct erase_info einfo;
+
+		/* Attempt erase before marking OOB */
+		memset(&einfo, 0, sizeof(einfo));
+		einfo.mtd = mtd;
+		einfo.addr = ofs;
+		einfo.len = 1ULL << chip->phys_erase_shift;
+		nand_erase_nand(mtd, &einfo, 0);
+
+		/* Write bad block marker to OOB */
+		nand_get_device(mtd, FL_WRITING);
+		ret = chip->block_markbad(mtd, ofs);
+		nand_release_device(mtd);
+	}
+
+	/* Mark block bad in BBT */
+	if (chip->bbt) {
+		res = nand_markbad_bbt(mtd, ofs);
+		if (!ret)
+			ret = res;
+	}
+
+	if (!ret)
+		mtd->ecc_stats.badblocks++;
+
+	return ret;
+}
+
+/**
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @mtd: MTD device structure
+ *
+ * Check if the device is write protected. The function expects that the
+ * device is already selected.
+ */
+static int nand_check_wp(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	/* Broken xD cards report WP despite being writable */
+	if (chip->options & NAND_BROKEN_XD)
+		return 0;
+
+	/* Check the WP bit */
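+	/* The WP status bit reads 1 when the device is writable, hence the
+	 * inverted return value below. */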
+	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+	return (chip->read_byte(mtd) & NAND_STATUS_WP) ? 0 : 1;
+}
+
+/**
+ * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ *
+ * Check if the block is marked as reserved.
+ */
+static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (!chip->bbt)
+		return 0;
+	/* Return info from the table */
+	return nand_isreserved_bbt(mtd, ofs);
+}
+
+/**
+ * nand_block_checkbad - [GENERIC] Check if a block is marked bad
+ * @mtd: MTD device structure
+ * @ofs: offset from device start
+ * @allowbbt: 1, if it is allowed to access the bbt area
+ *
+ * Check if the block is bad, either by reading the bad block table or by
+ * calling the scan function.
+ */
+static int nand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int allowbbt)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (!chip->bbt)
+		return chip->block_bad(mtd, ofs);
+
+	/* Return info from the table */
+	return nand_isbad_bbt(mtd, ofs, allowbbt);
+}
+
+/**
+ * panic_nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout
+ *
+ * Helper function for nand_wait_ready used when needing to wait in interrupt
+ * context.
+ */
+static void panic_nand_wait_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int i;
+
+	/* Wait for the device to get ready */
+	for (i = 0; i < timeo; i++) {
+		if (chip->dev_ready(mtd))
+			break;
+		touch_softlockup_watchdog();
+		mdelay(1);
+	}
+}
+
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @mtd: MTD device structure
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
+void nand_wait_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	unsigned long timeo = 400;
+
+	if (in_interrupt() || oops_in_progress)
+		return panic_nand_wait_ready(mtd, timeo);
+
+	/* Wait until command is processed or timeout occurs */
+	timeo = jiffies + msecs_to_jiffies(timeo);
+	do {
+		if (chip->dev_ready(mtd))
+			return;
+		cond_resched();
+	} while (time_before(jiffies, timeo));
+
+	if (!chip->dev_ready(mtd))
+		pr_warn_ratelimited("timeout while waiting for chip to become ready\n");
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @mtd: MTD device structure
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct mtd_info *mtd, unsigned long timeo)
+{
+	register struct nand_chip *chip = mtd_to_nand(mtd);
+
+	timeo = jiffies + msecs_to_jiffies(timeo);
+	do {
+		if ((chip->read_byte(mtd) & NAND_STATUS_READY))
+			break;
+		touch_softlockup_watchdog();
+	} while (time_before(jiffies, timeo));
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
+ */
+static void nand_command(struct mtd_info *mtd, unsigned int command,
+			 int column, int page_addr)
+{
+	register struct nand_chip *chip = mtd_to_nand(mtd);
+	int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+	/* Write out the command to the device */
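+	/*
+	 * For illustration: in the SEQIN pre-processing below, columns 0-255
+	 * are reached via READ0, columns 256-511 via READ1 (column - 256) and
+	 * columns >= writesize via READOOB (column - writesize).
+	 */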
+	if (command == NAND_CMD_SEQIN) {
+		int readcmd;
+
+		if (column >= mtd->writesize) {
+			/* OOB area */
+			column -= mtd->writesize;
+			readcmd = NAND_CMD_READOOB;
+		} else if (column < 256) {
+			/* First 256 bytes --> READ0 */
+			readcmd = NAND_CMD_READ0;
+		} else {
+			column -= 256;
+			readcmd = NAND_CMD_READ1;
+		}
+		chip->cmd_ctrl(mtd, readcmd, ctrl);
+		ctrl &= ~NAND_CTRL_CHANGE;
+	}
+	chip->cmd_ctrl(mtd, command, ctrl);
+
+	/* Address cycle, when necessary */
+	ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+	/* Serially input address */
+	if (column != -1) {
+		/* Adjust columns for 16 bit buswidth */
+		if (chip->options & NAND_BUSWIDTH_16 &&
+				!nand_opcode_8bits(command))
+			column >>= 1;
+		chip->cmd_ctrl(mtd, column, ctrl);
+		ctrl &= ~NAND_CTRL_CHANGE;
+	}
+	if (page_addr != -1) {
+		chip->cmd_ctrl(mtd, page_addr, ctrl);
+		ctrl &= ~NAND_CTRL_CHANGE;
+		chip->cmd_ctrl(mtd, page_addr >> 8, ctrl);
+		/* One more address cycle for devices > 32MiB */
+		if (chip->chipsize > (32 << 20))
+			chip->cmd_ctrl(mtd, page_addr >> 16, ctrl);
+	}
+	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+	/*
+	 * Program and erase have their own busy handlers; status and
+	 * sequential data input need no delay.
+	 */
+	switch (command) {
+
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_STATUS:
+		return;
+
+	case NAND_CMD_RESET:
+		if (chip->dev_ready)
+			break;
+		udelay(chip->chip_delay);
+		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+			       NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+		chip->cmd_ctrl(mtd,
+			       NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+		nand_wait_status_ready(mtd, 250);
+		return;
+
+		/* This applies to read commands */
+	default:
+		/*
+		 * If we don't have access to the busy pin, we apply the given
+		 * command delay
+		 */
+		if (!chip->dev_ready) {
+			udelay(chip->chip_delay);
+			return;
+		}
+	}
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine.
+	 */
+	ndelay(100);
+
+	nand_wait_ready(mtd);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @mtd: MTD device structure
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct mtd_info *mtd, unsigned int command,
+			    int column, int page_addr)
+{
+	register struct nand_chip *chip = mtd_to_nand(mtd);
+
+	/* Emulate NAND_CMD_READOOB */
+	if (command == NAND_CMD_READOOB) {
+		column += mtd->writesize;
+		command = NAND_CMD_READ0;
+	}
+
+	/* Command latch cycle */
+	chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+	if (column != -1 || page_addr != -1) {
+		int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+		/* Serially input address */
+		if (column != -1) {
+			/* Adjust columns for 16 bit buswidth */
+			if (chip->options & NAND_BUSWIDTH_16 &&
+					!nand_opcode_8bits(command))
+				column >>= 1;
+			chip->cmd_ctrl(mtd, column, ctrl);
+			ctrl &= ~NAND_CTRL_CHANGE;
+
+			/* Only output a single address cycle for 8-bit opcodes. */
+			if (!nand_opcode_8bits(command))
+				chip->cmd_ctrl(mtd, column >> 8, ctrl);
+		}
+		if (page_addr != -1) {
+			chip->cmd_ctrl(mtd, page_addr, ctrl);
+			chip->cmd_ctrl(mtd, page_addr >> 8,
+				       NAND_NCE | NAND_ALE);
+			/* One more address cycle for devices > 128MiB */
+			if (chip->chipsize > (128 << 20))
+				chip->cmd_ctrl(mtd, page_addr >> 16,
+					       NAND_NCE | NAND_ALE);
+		}
+	}
+	chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+
+	/*
+	 * Program and erase have their own busy handlers; sequential data
+	 * input and status need no delay.
+	 */
+	switch (command) {
+
+	case NAND_CMD_CACHEDPROG:
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_RNDIN:
+	case NAND_CMD_STATUS:
+		return;
+
+	case NAND_CMD_RESET:
+		if (chip->dev_ready)
+			break;
+		udelay(chip->chip_delay);
+		chip->cmd_ctrl(mtd, NAND_CMD_STATUS,
+			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+			       NAND_NCE | NAND_CTRL_CHANGE);
+		/* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+		nand_wait_status_ready(mtd, 250);
+		return;
+
+	case NAND_CMD_RNDOUT:
+		/* No ready / busy check necessary */
+		chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART,
+			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+			       NAND_NCE | NAND_CTRL_CHANGE);
+		return;
+
+	case NAND_CMD_READ0:
+		chip->cmd_ctrl(mtd, NAND_CMD_READSTART,
+			       NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+		chip->cmd_ctrl(mtd, NAND_CMD_NONE,
+			       NAND_NCE | NAND_CTRL_CHANGE);
+
+		/* This applies to read commands */
+	default:
+		/*
+		 * If we don't have access to the busy pin, we apply the given
+		 * command delay.
+		 */
+		if (!chip->dev_ready) {
+			udelay(chip->chip_delay);
+			return;
+		}
+	}
+
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine.
+	 */
+	ndelay(100);
+
+	nand_wait_ready(mtd);
+}
+
+/**
+ * panic_nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: the nand chip descriptor
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Used when in panic, no locks are taken.
+ */
+static void panic_nand_get_device(struct nand_chip *chip,
+		      struct mtd_info *mtd, int new_state)
+{
+	/* Hardware controller shared among independent devices */
+	chip->controller->active = chip;
+	chip->state = new_state;
+}
+
+/**
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @mtd: MTD device structure
+ * @new_state: the state which is requested
+ *
+ * Get the device and lock it for exclusive access
+ */
+static int
+nand_get_device(struct mtd_info *mtd, int new_state)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	spinlock_t *lock = &chip->controller->lock;
+	wait_queue_head_t *wq = &chip->controller->wq;
+	DECLARE_WAITQUEUE(wait, current);
+retry:
+	spin_lock(lock);
+
+	/* Hardware controller shared among independent devices */
+	if (!chip->controller->active)
+		chip->controller->active = chip;
+
+	if (chip->controller->active == chip && chip->state == FL_READY) {
+		chip->state = new_state;
+		spin_unlock(lock);
+		return 0;
+	}
+	if (new_state == FL_PM_SUSPENDED) {
+		if (chip->controller->active->state == FL_PM_SUSPENDED) {
+			chip->state = FL_PM_SUSPENDED;
+			spin_unlock(lock);
+			return 0;
+		}
+	}
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	add_wait_queue(wq, &wait);
+	spin_unlock(lock);
+	schedule();
+	remove_wait_queue(wq, &wait);
+	goto retry;
+}
+
+/**
+ * panic_nand_wait - [GENERIC] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ * @timeo: timeout
+ *
+ * Wait for command done. This is a helper function for nand_wait used when
+ * we are in interrupt context. May happen when in panic and trying to write
+ * an oops through mtdoops.
+ */
+static void panic_nand_wait(struct mtd_info *mtd, struct nand_chip *chip,
+			    unsigned long timeo)
+{
+	int i;
+	for (i = 0; i < timeo; i++) {
+		if (chip->dev_ready) {
+			if (chip->dev_ready(mtd))
+				break;
+		} else {
+			if (chip->read_byte(mtd) & NAND_STATUS_READY)
+				break;
+		}
+		mdelay(1);
+	}
+}
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only.
+ */
+static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+
+	int status;
+	unsigned long timeo = 400;
+
+	/*
+	 * Apply this short delay always to ensure that we do wait tWB in any
+	 * case on any machine.
+	 */
+	ndelay(100);
+
+	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+
+	if (in_interrupt() || oops_in_progress)
+		panic_nand_wait(mtd, chip, timeo);
+	else {
+		timeo = jiffies + msecs_to_jiffies(timeo);
+		do {
+			if (chip->dev_ready) {
+				if (chip->dev_ready(mtd))
+					break;
+			} else {
+				if (chip->read_byte(mtd) & NAND_STATUS_READY)
+					break;
+			}
+			cond_resched();
+		} while (time_before(jiffies, timeo));
+	}
+
+	status = (int)chip->read_byte(mtd);
+	/* This can happen in case of a timeout or a buggy dev_ready */
+	WARN_ON(!(status & NAND_STATUS_READY));
+	return status;
+}
+
+/**
+ * nand_reset_data_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ *
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_reset_data_interface(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	const struct nand_data_interface *conf;
+	int ret;
+
+	if (!chip->setup_data_interface)
+		return 0;
+
+	/*
+	 * The ONFI specification says:
+	 * "
+	 * To transition from NV-DDR or NV-DDR2 to the SDR data
+	 * interface, the host shall use the Reset (FFh) command
+	 * using SDR timing mode 0. A device in any timing mode is
+	 * required to recognize Reset (FFh) command issued in SDR
+	 * timing mode 0.
+	 * "
+	 *
+	 * Configure the data interface in SDR mode and set the
+	 * timings to timing mode 0.
+	 */
+
+	conf = nand_get_default_data_interface();
+	ret = chip->setup_data_interface(mtd, conf, false);
+	if (ret)
+		pr_err("Failed to configure data interface to SDR timing mode 0\n");
+
+	return ret;
+}
+
+/**
+ * nand_setup_data_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Configure the data interface and timings that were previously selected for
+ * this chip (see nand_init_data_interface()). The timing mode is changed on
+ * the chip side first (through SET FEATURES when the chip is ONFI compliant),
+ * and only then on the controller side.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_data_interface(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
+
+	if (!chip->setup_data_interface || !chip->data_interface)
+		return 0;
+
+	/*
+	 * Ensure the timing mode has been changed on the chip side
+	 * before changing timings on the controller side.
+	 */
+	if (chip->onfi_version) {
+		u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = {
+			chip->onfi_timing_mode_default,
+		};
+
+		ret = chip->onfi_set_features(mtd, chip,
+				ONFI_FEATURE_ADDR_TIMING_MODE,
+				tmode_param);
+		if (ret)
+			goto err;
+	}
+
+	ret = chip->setup_data_interface(mtd, chip->data_interface, false);
+err:
+	return ret;
+}
+
+/**
+ * nand_init_data_interface - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver.
+ * First tries to retrieve supported timing modes from ONFI information,
+ * and if the NAND chip does not support ONFI, relies on the
+ * ->onfi_timing_mode_default specified in the nand_ids table. After this
+ * function nand_chip->data_interface is initialized with the best timing mode
+ * available.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_init_data_interface(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int modes, mode, ret;
+
+	if (!chip->setup_data_interface)
+		return 0;
+
+	/*
+	 * First try to identify the best timings from ONFI parameters and
+	 * if the NAND does not support ONFI, fall back to the default ONFI
+	 * timing mode.
+	 */
+	modes = onfi_get_async_timing_mode(chip);
+	if (modes == ONFI_TIMING_MODE_UNKNOWN) {
+		if (!chip->onfi_timing_mode_default)
+			return 0;
+
+		modes = GENMASK(chip->onfi_timing_mode_default, 0);
+	}
+
+	chip->data_interface = kzalloc(sizeof(*chip->data_interface),
+				       GFP_KERNEL);
+	if (!chip->data_interface)
+		return -ENOMEM;
+
+	for (mode = fls(modes) - 1; mode >= 0; mode--) {
+		ret = onfi_init_data_interface(chip, chip->data_interface,
+					       NAND_SDR_IFACE, mode);
+		if (ret)
+			continue;
+
+		ret = chip->setup_data_interface(mtd, chip->data_interface,
+						 true);
+		if (!ret) {
+			chip->onfi_timing_mode_default = mode;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static void nand_release_data_interface(struct nand_chip *chip)
+{
+	kfree(chip->data_interface);
+}
+
+/**
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ *
+ * Returns 0 for success or negative error code otherwise
+ */
+int nand_reset(struct nand_chip *chip)
+{
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
+
+	ret = nand_reset_data_interface(chip);
+	if (ret)
+		return ret;
+
+	chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+
+	ret = nand_setup_data_interface(chip);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ * @invert: when = 0, unlock the range of blocks within the lower and
+ *                    upper boundary address
+ *          when = 1, unlock the range of blocks outside the boundaries
+ *                    of the lower and upper boundary address
+ *
+ * Returns unlock status.
+ */
+static int __nand_unlock(struct mtd_info *mtd, loff_t ofs,
+					uint64_t len, int invert)
+{
+	int ret = 0;
+	int status, page;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	/* Submit address of first page to unlock */
+	page = ofs >> chip->page_shift;
+	chip->cmdfunc(mtd, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
+
+	/* Submit address of last page to unlock */
+	page = (ofs + len) >> chip->page_shift;
+	chip->cmdfunc(mtd, NAND_CMD_UNLOCK2, -1,
+				(page | invert) & chip->pagemask);
+
+	/* Call wait ready function */
+	status = chip->waitfunc(mtd, chip);
+	/* See if device thinks it succeeded */
+	if (status & NAND_STATUS_FAIL) {
+		pr_debug("%s: error status = 0x%08x\n",
+					__func__, status);
+		ret = -EIO;
+	}
+
+	return ret;
+}
+
+/**
+ * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
+ * @mtd: mtd info
+ * @ofs: offset to start unlock from
+ * @len: length to unlock
+ *
+ * Returns unlock status.
+ */
+int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	int ret = 0;
+	int chipnr;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)ofs, len);
+
+	if (check_offs_len(mtd, ofs, len))
+		return -EINVAL;
+
+	/* Align to last block address if size addresses end of the device */
+	if (ofs + len == mtd->size)
+		len -= mtd->erasesize;
+
+	nand_get_device(mtd, FL_UNLOCKING);
+
+	/* Shift to get chip number */
+	chipnr = ofs >> chip->chip_shift;
+
+	chip->select_chip(mtd, chipnr);
+
+	/*
+	 * Reset the chip.
+	 * If we want to check WP through READ STATUS (bit 7), we must reset
+	 * the chip first, because some operations (e.g. erasing or programming
+	 * a locked block) can also clear bit 7 of the status register.
+	 */
+	nand_reset(chip);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		pr_debug("%s: device is write protected!\n",
+					__func__);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = __nand_unlock(mtd, ofs, len, 0);
+
+out:
+	chip->select_chip(mtd, -1);
+	nand_release_device(mtd);
+
+	return ret;
+}
+EXPORT_SYMBOL(nand_unlock);
+
+/**
+ * nand_lock - [REPLACEABLE] locks all blocks present in the device
+ * @mtd: mtd info
+ * @ofs: offset to start lock from
+ * @len: length to lock
+ *
+ * This feature is not supported in many NAND parts. 'Micron' NAND parts do
+ * have this feature, but it only allows locking all blocks, not a specified
+ * range of blocks. For now, the 'lock' feature is implemented by making use
+ * of 'unlock'.
+ *
+ * Returns lock status.
+ */
+int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
+{
+	int ret = 0;
+	int chipnr, status, page;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)ofs, len);
+
+	if (check_offs_len(mtd, ofs, len))
+		return -EINVAL;
+
+	nand_get_device(mtd, FL_LOCKING);
+
+	/* Shift to get chip number */
+	chipnr = ofs >> chip->chip_shift;
+
+	chip->select_chip(mtd, chipnr);
+
+	/*
+	 * Reset the chip.
+	 * If we want to check WP through READ STATUS (bit 7), we must reset
+	 * the chip first, because some operations (e.g. erasing or programming
+	 * a locked block) can also clear bit 7 of the status register.
+	 */
+	nand_reset(chip);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		pr_debug("%s: device is write protected!\n",
+					__func__);
+		status = MTD_ERASE_FAILED;
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Submit address of first page to lock */
+	page = ofs >> chip->page_shift;
+	chip->cmdfunc(mtd, NAND_CMD_LOCK, -1, page & chip->pagemask);
+
+	/* Call wait ready function */
+	status = chip->waitfunc(mtd, chip);
+	/* See if device thinks it succeeded */
+	if (status & NAND_STATUS_FAIL) {
+		pr_debug("%s: error status = 0x%08x\n",
+					__func__, status);
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = __nand_unlock(mtd, ofs, len, 0x1);
+
+out:
+	chip->select_chip(mtd, -1);
+	nand_release_device(mtd);
+
+	return ret;
+}
+EXPORT_SYMBOL(nand_lock);
+
+/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf() exits before testing
+ * the whole buffer if the number of bitflips exceeds the bitflips_threshold
+ * value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+	const unsigned char *bitmap = buf;
+	int bitflips = 0;
+	int weight;
+
+	for (; len && ((uintptr_t)bitmap) % sizeof(long);
+	     len--, bitmap++) {
+		weight = hweight8(*bitmap);
+		bitflips += BITS_PER_BYTE - weight;
+		if (unlikely(bitflips > bitflips_threshold))
+			return -EBADMSG;
+	}
+
+	for (; len >= sizeof(long);
+	     len -= sizeof(long), bitmap += sizeof(long)) {
+		weight = hweight_long(*((unsigned long *)bitmap));
+		bitflips += BITS_PER_LONG - weight;
+		if (unlikely(bitflips > bitflips_threshold))
+			return -EBADMSG;
+	}
+
+	for (; len > 0; len--, bitmap++) {
+		weight = hweight8(*bitmap);
+		bitflips += BITS_PER_BYTE - weight;
+		if (unlikely(bitflips > bitflips_threshold))
+			return -EBADMSG;
+	}
+
+	return bitflips;
+}
+
+/**
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ *				 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a data buffer and its associated ECC and OOB data contains only
+ * 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * the region is considered not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms work on pre-defined block sizes which are usually
+ *    different from the NAND page size. When fixing bitflips, ECC engines
+ *    report the number of errors per chunk, but the NAND core infrastructure
+ *    expects you to return the maximum number of bitflips for the whole page.
+ *    This is why you should always use this function on a single chunk and
+ *    not on the whole page. After checking each chunk you should update your
+ *    max_bitflips value accordingly (see the sketch after this function).
+ * 2/ When checking for bitflips in erased pages you should not only check
+ *    the payload data but also the associated ECC data, because a user might
+ *    have programmed almost all bits to 1 except a few. In that case, the
+ *    chunk should not be considered erased, and checking the ECC bytes
+ *    catches this case.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ *    data are protected by the ECC engine.
+ *    It could also be used if you support subpages and want to attach some
+ *    extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
+ */
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+				void *ecc, int ecclen,
+				void *extraoob, int extraooblen,
+				int bitflips_threshold)
+{
+	int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+
+	data_bitflips = nand_check_erased_buf(data, datalen,
+					      bitflips_threshold);
+	if (data_bitflips < 0)
+		return data_bitflips;
+
+	bitflips_threshold -= data_bitflips;
+
+	ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+	if (ecc_bitflips < 0)
+		return ecc_bitflips;
+
+	bitflips_threshold -= ecc_bitflips;
+
+	extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+						  bitflips_threshold);
+	if (extraoob_bitflips < 0)
+		return extraoob_bitflips;
+
+	if (data_bitflips)
+		memset(data, 0xff, datalen);
+
+	if (ecc_bitflips)
+		memset(ecc, 0xff, ecclen);
+
+	if (extraoob_bitflips)
+		memset(extraoob, 0xff, extraooblen);
+
+	return data_bitflips + ecc_bitflips + extraoob_bitflips;
+}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
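+
+/*
+ * Illustrative sketch (not part of the original code): how a controller
+ * driver's ->read_page() hook might use nand_check_erased_ecc_chunk() on a
+ * per-chunk basis, as recommended above. foo_read_chunk(), foo_correct() and
+ * the ECC byte placement in oob_poi are hypothetical and driver specific.
+ */
+#if 0
+static int foo_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+			 uint8_t *buf, int oob_required, int page)
+{
+	unsigned int max_bitflips = 0;
+	int i, stat;
+
+	for (i = 0; i < chip->ecc.steps; i++) {
+		uint8_t *data = buf + i * chip->ecc.size;
+		uint8_t *ecc = chip->oob_poi + i * chip->ecc.bytes;
+
+		/* Hypothetical helpers reading and correcting one chunk */
+		foo_read_chunk(mtd, data, ecc);
+		stat = foo_correct(mtd, data, ecc);
+		if (stat == -EBADMSG) {
+			/* Check each chunk separately, not the whole page */
+			stat = nand_check_erased_ecc_chunk(data, chip->ecc.size,
+							   ecc, chip->ecc.bytes,
+							   NULL, 0,
+							   chip->ecc.strength);
+		}
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+
+	return max_bitflips;
+}
+#endif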
+
+/**
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+			      uint8_t *buf, int oob_required, int page)
+{
+	chip->read_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
+				       struct nand_chip *chip, uint8_t *buf,
+				       int oob_required, int page)
+{
+	int eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	uint8_t *oob = chip->oob_poi;
+	int steps, size;
+
+	for (steps = chip->ecc.steps; steps > 0; steps--) {
+		chip->read_buf(mtd, buf, eccsize);
+		buf += eccsize;
+
+		if (chip->ecc.prepad) {
+			chip->read_buf(mtd, oob, chip->ecc.prepad);
+			oob += chip->ecc.prepad;
+		}
+
+		chip->read_buf(mtd, oob, eccbytes);
+		oob += eccbytes;
+
+		if (chip->ecc.postpad) {
+			chip->read_buf(mtd, oob, chip->ecc.postpad);
+			oob += chip->ecc.postpad;
+		}
+	}
+
+	size = mtd->oobsize - (oob - chip->oob_poi);
+	if (size)
+		chip->read_buf(mtd, oob, size);
+
+	return 0;
+}
+
+/**
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ */
+static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *p = buf;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	uint8_t *ecc_code = chip->buffers->ecccode;
+	unsigned int max_bitflips = 0;
+
+	chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	eccsteps = chip->ecc.steps;
+	p = buf;
+
+	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int stat;
+
+		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+	return max_bitflips;
+}
+
+/**
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
+ */
+static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
+			uint32_t data_offs, uint32_t readlen, uint8_t *bufpoi,
+			int page)
+{
+	int start_step, end_step, num_steps, ret;
+	uint8_t *p;
+	int data_col_addr, i, gaps = 0;
+	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+	int index, section = 0;
+	unsigned int max_bitflips = 0;
+	struct mtd_oob_region oobregion = { };
+
+	/* Column address within the page, aligned to ECC size (256 bytes) */
+	start_step = data_offs / chip->ecc.size;
+	end_step = (data_offs + readlen - 1) / chip->ecc.size;
+	num_steps = end_step - start_step + 1;
+	index = start_step * chip->ecc.bytes;
+
+	/* Data size aligned to ecc.size */
+	datafrag_len = num_steps * chip->ecc.size;
+	eccfrag_len = num_steps * chip->ecc.bytes;
+
+	data_col_addr = start_step * chip->ecc.size;
+	/* If we read data that is not page aligned */
+	if (data_col_addr != 0)
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, data_col_addr, -1);
+
+	p = bufpoi + data_col_addr;
+	chip->read_buf(mtd, p, datafrag_len);
+
+	/* Calculate ECC */
+	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+		chip->ecc.calculate(mtd, p, &chip->buffers->ecccalc[i]);
+
+	/*
+	 * Performance is better if we position offsets according to ecc.pos.
+	 * Let's make sure that there are no gaps in the ECC positions.
+	 */
+	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
+	if (ret)
+		return ret;
+
+	if (oobregion.length < eccfrag_len)
+		gaps = 1;
+
+	if (gaps) {
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
+		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	} else {
+		/*
+		 * Send the command to read the particular ECC bytes, taking
+		 * care of buswidth alignment in read_buf.
+		 */
+		aligned_pos = oobregion.offset & ~(busw - 1);
+		aligned_len = eccfrag_len;
+		if (oobregion.offset & (busw - 1))
+			aligned_len++;
+		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
+		    (busw - 1))
+			aligned_len++;
+
+		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			      mtd->writesize + aligned_pos, -1);
+		chip->read_buf(mtd, &chip->oob_poi[aligned_pos], aligned_len);
+	}
+
+	ret = mtd_ooblayout_get_eccbytes(mtd, chip->buffers->ecccode,
+					 chip->oob_poi, index, eccfrag_len);
+	if (ret)
+		return ret;
+
+	p = bufpoi + data_col_addr;
+	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+		int stat;
+
+		stat = chip->ecc.correct(mtd, p,
+			&chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
+		if (stat == -EBADMSG &&
+		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+			/* check for empty pages with bitflips */
+			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+						&chip->buffers->ecccode[i],
+						chip->ecc.bytes,
+						NULL, 0,
+						chip->ecc.strength);
+		}
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+	return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *p = buf;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	uint8_t *ecc_code = chip->buffers->ecccode;
+	unsigned int max_bitflips = 0;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		chip->ecc.hwctl(mtd, NAND_ECC_READ);
+		chip->read_buf(mtd, p, eccsize);
+		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+	}
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	eccsteps = chip->ecc.steps;
+	p = buf;
+
+	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int stat;
+
+		stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
+		if (stat == -EBADMSG &&
+		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+			/* check for empty pages with bitflips */
+			stat = nand_check_erased_ecc_chunk(p, eccsize,
+						&ecc_code[i], eccbytes,
+						NULL, 0,
+						chip->ecc.strength);
+		}
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+	return max_bitflips;
+}
+
+/**
+ * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Hardware ECC for large page chips, which requires the OOB to be read first.
+ * For this ECC mode, the write_page method is re-used from ECC_HW. These
+ * methods read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME
+ * support which, with multiple ECC steps, follows the "infix ECC" scheme and
+ * reads/writes ECC from the data area, overwriting the NAND manufacturer's
+ * bad block markings.
+ */
+static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
+	struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
+{
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *p = buf;
+	uint8_t *ecc_code = chip->buffers->ecccode;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	unsigned int max_bitflips = 0;
+
+	/* Read the OOB area first */
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int stat;
+
+		chip->ecc.hwctl(mtd, NAND_ECC_READ);
+		chip->read_buf(mtd, p, eccsize);
+		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+		stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
+		if (stat == -EBADMSG &&
+		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+			/* check for empty pages with bitflips */
+			stat = nand_check_erased_ecc_chunk(p, eccsize,
+						&ecc_code[i], eccbytes,
+						NULL, 0,
+						chip->ecc.strength);
+		}
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+	return max_bitflips;
+}
+
+/**
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+				   uint8_t *buf, int oob_required, int page)
+{
+	int i, eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+	uint8_t *p = buf;
+	uint8_t *oob = chip->oob_poi;
+	unsigned int max_bitflips = 0;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		int stat;
+
+		chip->ecc.hwctl(mtd, NAND_ECC_READ);
+		chip->read_buf(mtd, p, eccsize);
+
+		if (chip->ecc.prepad) {
+			chip->read_buf(mtd, oob, chip->ecc.prepad);
+			oob += chip->ecc.prepad;
+		}
+
+		chip->ecc.hwctl(mtd, NAND_ECC_READSYN);
+		chip->read_buf(mtd, oob, eccbytes);
+		stat = chip->ecc.correct(mtd, p, oob, NULL);
+
+		oob += eccbytes;
+
+		if (chip->ecc.postpad) {
+			chip->read_buf(mtd, oob, chip->ecc.postpad);
+			oob += chip->ecc.postpad;
+		}
+
+		if (stat == -EBADMSG &&
+		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+			/* check for empty pages with bitflips */
+			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+							   oob - eccpadbytes,
+							   eccpadbytes,
+							   NULL, 0,
+							   chip->ecc.strength);
+		}
+
+		if (stat < 0) {
+			mtd->ecc_stats.failed++;
+		} else {
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max_t(unsigned int, max_bitflips, stat);
+		}
+	}
+
+	/* Calculate remaining oob bytes */
+	i = mtd->oobsize - (oob - chip->oob_poi);
+	if (i)
+		chip->read_buf(mtd, oob, i);
+
+	return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @mtd: mtd info structure
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct mtd_info *mtd, uint8_t *oob,
+				  struct mtd_oob_ops *ops, size_t len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
+
+	switch (ops->mode) {
+
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_RAW:
+		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+		return oob + len;
+
+	case MTD_OPS_AUTO_OOB:
+		ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
+						  ops->ooboffs, len);
+		BUG_ON(ret);
+		return oob + len;
+
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @mtd: MTD device structure
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+	if (retry_mode >= chip->read_retries)
+		return -EINVAL;
+
+	if (!chip->setup_read_retry)
+		return -EOPNOTSUPP;
+
+	return chip->setup_read_retry(mtd, retry_mode);
+}
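+
+/*
+ * Illustrative sketch (not part of the original code): a vendor-specific
+ * ->setup_read_retry() hook typically selects the retry mode with a SET
+ * FEATURES command. The sketch assumes the ONFI_FEATURE_ADDR_READ_RETRY
+ * definition and a Micron-style feature layout; foo_setup_read_retry() is
+ * hypothetical.
+ */
+#if 0
+static int foo_setup_read_retry(struct mtd_info *mtd, int retry_mode)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = { retry_mode };
+
+	/* Ask the chip to switch to the requested retry mode */
+	return chip->onfi_set_features(mtd, chip,
+				       ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+#endif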
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	int chipnr, page, realpage, col, bytes, aligned, oob_required;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret = 0;
+	uint32_t readlen = ops->len;
+	uint32_t oobreadlen = ops->ooblen;
+	uint32_t max_oobsize = mtd_oobavail(mtd, ops);
+
+	uint8_t *bufpoi, *oob, *buf;
+	int use_bufpoi;
+	unsigned int max_bitflips = 0;
+	int retry_mode = 0;
+	bool ecc_fail = false;
+
+	chipnr = (int)(from >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+
+	realpage = (int)(from >> chip->page_shift);
+	page = realpage & chip->pagemask;
+
+	col = (int)(from & (mtd->writesize - 1));
+
+	buf = ops->datbuf;
+	oob = ops->oobbuf;
+	oob_required = oob ? 1 : 0;
+
+	while (1) {
+		unsigned int ecc_failures = mtd->ecc_stats.failed;
+
+		bytes = min(mtd->writesize - col, readlen);
+		aligned = (bytes == mtd->writesize);
+
+		if (!aligned)
+			use_bufpoi = 1;
+		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+			use_bufpoi = !virt_addr_valid(buf);
+		else
+			use_bufpoi = 0;
+
+		/* Is the current page in the buffer? */
+		if (realpage != chip->pagebuf || oob) {
+			bufpoi = use_bufpoi ? chip->buffers->databuf : buf;
+
+			if (use_bufpoi && aligned)
+				pr_debug("%s: using read bounce buffer for buf@%p\n",
+						 __func__, buf);
+
+read_retry:
+			chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
+
+			/*
+			 * Now read the page into the buffer.  Absent an error,
+			 * the read methods return max bitflips per ecc step.
+			 */
+			if (unlikely(ops->mode == MTD_OPS_RAW))
+				ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
+							      oob_required,
+							      page);
+			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+				 !oob)
+				ret = chip->ecc.read_subpage(mtd, chip,
+							col, bytes, bufpoi,
+							page);
+			else
+				ret = chip->ecc.read_page(mtd, chip, bufpoi,
+							  oob_required, page);
+			if (ret < 0) {
+				if (use_bufpoi)
+					/* Invalidate page cache */
+					chip->pagebuf = -1;
+				break;
+			}
+
+			max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+			/* Transfer unaligned data */
+			if (use_bufpoi) {
+				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+				    !(mtd->ecc_stats.failed - ecc_failures) &&
+				    (ops->mode != MTD_OPS_RAW)) {
+					chip->pagebuf = realpage;
+					chip->pagebuf_bitflips = ret;
+				} else {
+					/* Invalidate page cache */
+					chip->pagebuf = -1;
+				}
+				memcpy(buf, chip->buffers->databuf + col, bytes);
+			}
+
+			if (unlikely(oob)) {
+				int toread = min(oobreadlen, max_oobsize);
+
+				if (toread) {
+					oob = nand_transfer_oob(mtd,
+						oob, ops, toread);
+					oobreadlen -= toread;
+				}
+			}
+
+			if (chip->options & NAND_NEED_READRDY) {
+				/* Apply delay or wait for ready/busy pin */
+				if (!chip->dev_ready)
+					udelay(chip->chip_delay);
+				else
+					nand_wait_ready(mtd);
+			}
+
+			if (mtd->ecc_stats.failed - ecc_failures) {
+				if (retry_mode + 1 < chip->read_retries) {
+					retry_mode++;
+					ret = nand_setup_read_retry(mtd,
+							retry_mode);
+					if (ret < 0)
+						break;
+
+					/* Reset failures; retry */
+					mtd->ecc_stats.failed = ecc_failures;
+					goto read_retry;
+				} else {
+					/* No more retry modes; real failure */
+					ecc_fail = true;
+				}
+			}
+
+			buf += bytes;
+		} else {
+			memcpy(buf, chip->buffers->databuf + col, bytes);
+			buf += bytes;
+			max_bitflips = max_t(unsigned int, max_bitflips,
+					     chip->pagebuf_bitflips);
+		}
+
+		readlen -= bytes;
+
+		/* Reset to retry mode 0 */
+		if (retry_mode) {
+			ret = nand_setup_read_retry(mtd, 0);
+			if (ret < 0)
+				break;
+			retry_mode = 0;
+		}
+
+		if (!readlen)
+			break;
+
+		/* For subsequent reads align to page boundary */
+		col = 0;
+		/* Increment page address */
+		realpage++;
+
+		page = realpage & chip->pagemask;
+		/* Check, if we cross a chip boundary */
+		if (!page) {
+			chipnr++;
+			chip->select_chip(mtd, -1);
+			chip->select_chip(mtd, chipnr);
+		}
+	}
+	chip->select_chip(mtd, -1);
+
+	ops->retlen = ops->len - (size_t) readlen;
+	if (oob)
+		ops->oobretlen = ops->ooblen - oobreadlen;
+
+	if (ret < 0)
+		return ret;
+
+	if (ecc_fail)
+		return -EBADMSG;
+
+	return max_bitflips;
+}
+
+/**
+ * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ops
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @len: number of bytes to read
+ * @retlen: pointer to variable to store the number of read bytes
+ * @buf: the databuffer to put data
+ *
+ * Get hold of the chip and call nand_do_read_ops.
+ */
+static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
+		     size_t *retlen, uint8_t *buf)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+
+	nand_get_device(mtd, FL_READING);
+	memset(&ops, 0, sizeof(ops));
+	ops.len = len;
+	ops.datbuf = buf;
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ret = nand_do_read_ops(mtd, from, &ops);
+	*retlen = ops.retlen;
+	nand_release_device(mtd);
+	return ret;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+EXPORT_SYMBOL(nand_read_oob_std);
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ *			    with syndromes
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			   int page)
+{
+	int length = mtd->oobsize;
+	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+	int eccsize = chip->ecc.size;
+	uint8_t *bufpoi = chip->oob_poi;
+	int i, toread, sndrnd = 0, pos;
+
+	chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page);
+	for (i = 0; i < chip->ecc.steps; i++) {
+		if (sndrnd) {
+			pos = eccsize + i * (eccsize + chunk);
+			if (mtd->writesize > 512)
+				chip->cmdfunc(mtd, NAND_CMD_RNDOUT, pos, -1);
+			else
+				chip->cmdfunc(mtd, NAND_CMD_READ0, pos, page);
+		} else
+			sndrnd = 1;
+		toread = min_t(int, length, chunk);
+		chip->read_buf(mtd, bufpoi, toread);
+		bufpoi += toread;
+		length -= toread;
+	}
+	if (length > 0)
+		chip->read_buf(mtd, bufpoi, length);
+
+	return 0;
+}
+EXPORT_SYMBOL(nand_read_oob_syndrome);
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page)
+{
+	int status = 0;
+	const uint8_t *buf = chip->oob_poi;
+	int length = mtd->oobsize;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page);
+	chip->write_buf(mtd, buf, length);
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+EXPORT_SYMBOL(nand_write_oob_std);
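+
+/*
+ * Illustrative sketch (not part of the original code): controller drivers
+ * with no special OOB constraints can plug the exported default helpers into
+ * their ECC ops at initialization time; foo_init_ecc_ops() is hypothetical.
+ */
+#if 0
+static void foo_init_ecc_ops(struct nand_chip *chip)
+{
+	chip->ecc.read_oob = nand_read_oob_std;
+	chip->ecc.write_oob = nand_write_oob_std;
+}
+#endif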
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ *			     with syndrome - only for large page flash
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+int nand_write_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
+			    int page)
+{
+	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+	int eccsize = chip->ecc.size, length = mtd->oobsize;
+	int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
+	const uint8_t *bufpoi = chip->oob_poi;
+
+	/*
+	 * data-ecc-data-ecc ... ecc-oob
+	 * or
+	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+	 */
+	if (!chip->ecc.prepad && !chip->ecc.postpad) {
+		pos = steps * (eccsize + chunk);
+		steps = 0;
+	} else
+		pos = eccsize;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, pos, page);
+	for (i = 0; i < steps; i++) {
+		if (sndcmd) {
+			if (mtd->writesize <= 512) {
+				uint32_t fill = 0xFFFFFFFF;
+
+				len = eccsize;
+				while (len > 0) {
+					int num = min_t(int, len, 4);
+					chip->write_buf(mtd, (uint8_t *)&fill,
+							num);
+					len -= num;
+				}
+			} else {
+				pos = eccsize + i * (eccsize + chunk);
+				chip->cmdfunc(mtd, NAND_CMD_RNDIN, pos, -1);
+			}
+		} else
+			sndcmd = 1;
+		len = min_t(int, length, chunk);
+		chip->write_buf(mtd, bufpoi, len);
+		bufpoi += len;
+		length -= len;
+	}
+	if (length > 0)
+		chip->write_buf(mtd, bufpoi, length);
+
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+EXPORT_SYMBOL(nand_write_oob_syndrome);
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	int page, realpage, chipnr;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtd_ecc_stats stats;
+	int readlen = ops->ooblen;
+	int len;
+	uint8_t *buf = ops->oobbuf;
+	int ret = 0;
+
+	pr_debug("%s: from = 0x%08Lx, len = %i\n",
+			__func__, (unsigned long long)from, readlen);
+
+	stats = mtd->ecc_stats;
+
+	len = mtd_oobavail(mtd, ops);
+
+	if (unlikely(ops->ooboffs >= len)) {
+		pr_debug("%s: attempt to start read outside oob\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/* Do not allow reads past end of device */
+	if (unlikely(from >= mtd->size ||
+		     ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
+					(from >> chip->page_shift)) * len)) {
+		pr_debug("%s: attempt to read beyond end of device\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	chipnr = (int)(from >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+
+	/* Shift to get page */
+	realpage = (int)(from >> chip->page_shift);
+	page = realpage & chip->pagemask;
+
+	while (1) {
+		if (ops->mode == MTD_OPS_RAW)
+			ret = chip->ecc.read_oob_raw(mtd, chip, page);
+		else
+			ret = chip->ecc.read_oob(mtd, chip, page);
+
+		if (ret < 0)
+			break;
+
+		len = min(len, readlen);
+		buf = nand_transfer_oob(mtd, buf, ops, len);
+
+		if (chip->options & NAND_NEED_READRDY) {
+			/* Apply delay or wait for ready/busy pin */
+			if (!chip->dev_ready)
+				udelay(chip->chip_delay);
+			else
+				nand_wait_ready(mtd);
+		}
+
+		readlen -= len;
+		if (!readlen)
+			break;
+
+		/* Increment page address */
+		realpage++;
+
+		page = realpage & chip->pagemask;
+		/* Check, if we cross a chip boundary */
+		if (!page) {
+			chipnr++;
+			chip->select_chip(mtd, -1);
+			chip->select_chip(mtd, chipnr);
+		}
+	}
+	chip->select_chip(mtd, -1);
+
+	ops->oobretlen = ops->ooblen - readlen;
+
+	if (ret < 0)
+		return ret;
+
+	if (mtd->ecc_stats.failed - stats.failed)
+		return -EBADMSG;
+
+	return  mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+			 struct mtd_oob_ops *ops)
+{
+	int ret;
+
+	ops->retlen = 0;
+
+	/* Do not allow reads past end of device */
+	if (ops->datbuf && (from + ops->len) > mtd->size) {
+		pr_debug("%s: attempt to read beyond end of device\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ops->mode != MTD_OPS_PLACE_OOB &&
+	    ops->mode != MTD_OPS_AUTO_OOB &&
+	    ops->mode != MTD_OPS_RAW)
+		return -ENOTSUPP;
+
+	nand_get_device(mtd, FL_READING);
+
+	if (!ops->datbuf)
+		ret = nand_do_read_oob(mtd, from, ops);
+	else
+		ret = nand_do_read_ops(mtd, from, ops);
+
+	nand_release_device(mtd);
+	return ret;
+}
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
+			       const uint8_t *buf, int oob_required, int page)
+{
+	chip->write_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static int nand_write_page_raw_syndrome(struct mtd_info *mtd,
+					struct nand_chip *chip,
+					const uint8_t *buf, int oob_required,
+					int page)
+{
+	int eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	uint8_t *oob = chip->oob_poi;
+	int steps, size;
+
+	for (steps = chip->ecc.steps; steps > 0; steps--) {
+		chip->write_buf(mtd, buf, eccsize);
+		buf += eccsize;
+
+		if (chip->ecc.prepad) {
+			chip->write_buf(mtd, oob, chip->ecc.prepad);
+			oob += chip->ecc.prepad;
+		}
+
+		chip->write_buf(mtd, oob, eccbytes);
+		oob += eccbytes;
+
+		if (chip->ecc.postpad) {
+			chip->write_buf(mtd, oob, chip->ecc.postpad);
+			oob += chip->ecc.postpad;
+		}
+	}
+
+	size = mtd->oobsize - (oob - chip->oob_poi);
+	if (size)
+		chip->write_buf(mtd, oob, size);
+
+	return 0;
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
+				 const uint8_t *buf, int oob_required,
+				 int page)
+{
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	const uint8_t *p = buf;
+
+	/* Software ECC calculation */
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	return chip->ecc.write_page_raw(mtd, chip, buf, 1, page);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				  const uint8_t *buf, int oob_required,
+				  int page)
+{
+	int i, eccsize = chip->ecc.size, ret;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	const uint8_t *p = buf;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+		chip->write_buf(mtd, p, eccsize);
+		chip->ecc.calculate(mtd, p, &ecc_calc[i]);
+	}
+
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @mtd:	mtd info structure
+ * @chip:	nand chip info structure
+ * @offset:	column address of subpage within the page
+ * @data_len:	data length
+ * @buf:	data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_subpage_hwecc(struct mtd_info *mtd,
+				struct nand_chip *chip, uint32_t offset,
+				uint32_t data_len, const uint8_t *buf,
+				int oob_required, int page)
+{
+	uint8_t *oob_buf  = chip->oob_poi;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	int ecc_size      = chip->ecc.size;
+	int ecc_bytes     = chip->ecc.bytes;
+	int ecc_steps     = chip->ecc.steps;
+	uint32_t start_step = offset / ecc_size;
+	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
+	int oob_bytes       = mtd->oobsize / ecc_steps;
+	int step, ret;
+
+	for (step = 0; step < ecc_steps; step++) {
+		/* configure controller for WRITE access */
+		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+		/* write data (untouched subpages already masked by 0xFF) */
+		chip->write_buf(mtd, buf, ecc_size);
+
+		/* mask ECC of un-touched subpages by padding 0xFF */
+		if ((step < start_step) || (step > end_step))
+			memset(ecc_calc, 0xff, ecc_bytes);
+		else
+			chip->ecc.calculate(mtd, buf, ecc_calc);
+
+		/* mask OOB of un-touched subpages by padding 0xFF */
+		/* if oob_required, preserve OOB metadata of written subpage */
+		if (!oob_required || (step < start_step) || (step > end_step))
+			memset(oob_buf, 0xff, oob_bytes);
+
+		buf += ecc_size;
+		ecc_calc += ecc_bytes;
+		oob_buf  += oob_bytes;
+	}
+
+	/*
+	 * Copy the calculated ECC for the whole page to chip->oob_poi; this
+	 * includes the masked value (0xFF) for unwritten subpages.
+	 */
+	ecc_calc = chip->buffers->ecccalc;
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* write OOB buffer to NAND device */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @mtd: mtd info structure
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_write_page_syndrome(struct mtd_info *mtd,
+				    struct nand_chip *chip,
+				    const uint8_t *buf, int oob_required,
+				    int page)
+{
+	int i, eccsize = chip->ecc.size;
+	int eccbytes = chip->ecc.bytes;
+	int eccsteps = chip->ecc.steps;
+	const uint8_t *p = buf;
+	uint8_t *oob = chip->oob_poi;
+
+	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+
+		chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+		chip->write_buf(mtd, p, eccsize);
+
+		if (chip->ecc.prepad) {
+			chip->write_buf(mtd, oob, chip->ecc.prepad);
+			oob += chip->ecc.prepad;
+		}
+
+		chip->ecc.calculate(mtd, p, oob);
+		chip->write_buf(mtd, oob, eccbytes);
+		oob += eccbytes;
+
+		if (chip->ecc.postpad) {
+			chip->write_buf(mtd, oob, chip->ecc.postpad);
+			oob += chip->ecc.postpad;
+		}
+	}
+
+	/* Calculate remaining oob bytes */
+	i = mtd->oobsize - (oob - chip->oob_poi);
+	if (i)
+		chip->write_buf(mtd, oob, i);
+
+	return 0;
+}
+
+/**
+ * nand_write_page - [REPLACEABLE] write one page
+ * @mtd: MTD device structure
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @cached: cached programming
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+		uint32_t offset, int data_len, const uint8_t *buf,
+		int oob_required, int page, int cached, int raw)
+{
+	int status, subpage;
+
+	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+		chip->ecc.write_subpage)
+		subpage = offset || (data_len < mtd->writesize);
+	else
+		subpage = 0;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
+
+	if (unlikely(raw))
+		status = chip->ecc.write_page_raw(mtd, chip, buf,
+						  oob_required, page);
+	else if (subpage)
+		status = chip->ecc.write_subpage(mtd, chip, offset, data_len,
+						 buf, oob_required, page);
+	else
+		status = chip->ecc.write_page(mtd, chip, buf, oob_required,
+					      page);
+
+	if (status < 0)
+		return status;
+
+	/*
+	 * Cached programming disabled for now. Not sure if it's worth the
+	 * trouble. The speed gain is not very impressive (2.3->2.6 Mib/s).
+	 */
+	cached = 0;
+
+	if (!cached || !NAND_HAS_CACHEPROG(chip)) {
+
+		chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+		status = chip->waitfunc(mtd, chip);
+		/*
+		 * See if operation failed and additional status checks are
+		 * available.
+		 */
+		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+			status = chip->errstat(mtd, chip, FL_WRITING, status,
+					       page);
+
+		if (status & NAND_STATUS_FAIL)
+			return -EIO;
+	} else {
+		chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1);
+		status = chip->waitfunc(mtd, chip);
+	}
+
+	return 0;
+}
+
+/**
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @mtd: MTD device structure
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
+ */
+static uint8_t *nand_fill_oob(struct mtd_info *mtd, uint8_t *oob, size_t len,
+			      struct mtd_oob_ops *ops)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
+
+	/*
+	 * Initialise to all 0xFF, to avoid the possibility of left over OOB
+	 * data from a previous OOB read.
+	 */
+	memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+	switch (ops->mode) {
+
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_RAW:
+		memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+		return oob + len;
+
+	case MTD_OPS_AUTO_OOB:
+		ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+						  ops->ooboffs, len);
+		BUG_ON(ret);
+		return oob + len;
+
+	default:
+		BUG();
+	}
+	return NULL;
+}
+
+#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
+			     struct mtd_oob_ops *ops)
+{
+	int chipnr, realpage, page, blockmask, column;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint32_t writelen = ops->len;
+
+	uint32_t oobwritelen = ops->ooblen;
+	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
+
+	uint8_t *oob = ops->oobbuf;
+	uint8_t *buf = ops->datbuf;
+	int ret;
+	int oob_required = oob ? 1 : 0;
+
+	ops->retlen = 0;
+	if (!writelen)
+		return 0;
+
+	/* Reject writes that are not page aligned */
+	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+		pr_notice("%s: attempt to write non page aligned data\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	column = to & (mtd->writesize - 1);
+
+	chipnr = (int)(to >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		ret = -EIO;
+		goto err_out;
+	}
+
+	realpage = (int)(to >> chip->page_shift);
+	page = realpage & chip->pagemask;
+	blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+
+	/* Invalidate the page cache, when we write to the cached page */
+	if (to <= ((loff_t)chip->pagebuf << chip->page_shift) &&
+	    ((loff_t)chip->pagebuf << chip->page_shift) < (to + ops->len))
+		chip->pagebuf = -1;
+
+	/* Don't allow multipage oob writes with offset */
+	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+		ret = -EINVAL;
+		goto err_out;
+	}
+
+	while (1) {
+		int bytes = mtd->writesize;
+		int cached = writelen > bytes && page != blockmask;
+		uint8_t *wbuf = buf;
+		int use_bufpoi;
+		int part_pagewr = (column || writelen < mtd->writesize);
+
+		if (part_pagewr)
+			use_bufpoi = 1;
+		else if (chip->options & NAND_USE_BOUNCE_BUFFER)
+			use_bufpoi = !virt_addr_valid(buf);
+		else
+			use_bufpoi = 0;
+
+		/* Partial page write, or need to use bounce buffer? */
+		if (use_bufpoi) {
+			pr_debug("%s: using write bounce buffer for buf@%p\n",
+					 __func__, buf);
+			cached = 0;
+			if (part_pagewr)
+				bytes = min_t(int, bytes - column, writelen);
+			chip->pagebuf = -1;
+			memset(chip->buffers->databuf, 0xff, mtd->writesize);
+			memcpy(&chip->buffers->databuf[column], buf, bytes);
+			wbuf = chip->buffers->databuf;
+		}
+
+		if (unlikely(oob)) {
+			size_t len = min(oobwritelen, oobmaxlen);
+			oob = nand_fill_oob(mtd, oob, len, ops);
+			oobwritelen -= len;
+		} else {
+			/* We still need to erase leftover OOB data */
+			memset(chip->oob_poi, 0xff, mtd->oobsize);
+		}
+		ret = chip->write_page(mtd, chip, column, bytes, wbuf,
+					oob_required, page, cached,
+					(ops->mode == MTD_OPS_RAW));
+		if (ret)
+			break;
+
+		writelen -= bytes;
+		if (!writelen)
+			break;
+
+		column = 0;
+		buf += bytes;
+		realpage++;
+
+		page = realpage & chip->pagemask;
+		/* Check, if we cross a chip boundary */
+		if (!page) {
+			chipnr++;
+			chip->select_chip(mtd, -1);
+			chip->select_chip(mtd, chipnr);
+		}
+	}
+
+	ops->retlen = ops->len - writelen;
+	if (unlikely(oob))
+		ops->oobretlen = ops->ooblen;
+
+err_out:
+	chip->select_chip(mtd, -1);
+	return ret;
+}
+
+/**
+ * panic_nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC. Used when performing writes in interrupt context, this
+ * may for example be called by mtdoops when writing an oops while in panic.
+ */
+static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			    size_t *retlen, const uint8_t *buf)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct mtd_oob_ops ops;
+	int ret;
+
+	/* Wait for the device to get ready */
+	panic_nand_wait(mtd, chip, 400);
+
+	/* Grab the device */
+	panic_nand_get_device(chip, mtd, FL_WRITING);
+
+	memset(&ops, 0, sizeof(ops));
+	ops.len = len;
+	ops.datbuf = (uint8_t *)buf;
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	ret = nand_do_write_ops(mtd, to, &ops);
+
+	*retlen = ops.retlen;
+	return ret;
+}
+
+/**
+ * nand_write - [MTD Interface] NAND write with ECC
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @len: number of bytes to write
+ * @retlen: pointer to variable to store the number of written bytes
+ * @buf: the data to write
+ *
+ * NAND write with ECC.
+ */
+static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
+			  size_t *retlen, const uint8_t *buf)
+{
+	struct mtd_oob_ops ops;
+	int ret;
+
+	nand_get_device(mtd, FL_WRITING);
+	memset(&ops, 0, sizeof(ops));
+	ops.len = len;
+	ops.datbuf = (uint8_t *)buf;
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ret = nand_do_write_ops(mtd, to, &ops);
+	*retlen = ops.retlen;
+	nand_release_device(mtd);
+	return ret;
+}
+
+/**
+ * nand_do_write_oob - [INTERN] NAND write out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ *
+ * NAND write out-of-band.
+ */
+static int nand_do_write_oob(struct mtd_info *mtd, loff_t to,
+			     struct mtd_oob_ops *ops)
+{
+	int chipnr, page, status, len;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	pr_debug("%s: to = 0x%08x, len = %i\n",
+			 __func__, (unsigned int)to, (int)ops->ooblen);
+
+	len = mtd_oobavail(mtd, ops);
+
+	/* Do not allow write past end of page */
+	if ((ops->ooboffs + ops->ooblen) > len) {
+		pr_debug("%s: attempt to write past end of page\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (unlikely(ops->ooboffs >= len)) {
+		pr_debug("%s: attempt to start write outside oob\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/* Do not allow write past end of device */
+	if (unlikely(to >= mtd->size ||
+		     ops->ooboffs + ops->ooblen >
+			((mtd->size >> chip->page_shift) -
+			 (to >> chip->page_shift)) * len)) {
+		pr_debug("%s: attempt to write beyond end of device\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	chipnr = (int)(to >> chip->chip_shift);
+	chip->select_chip(mtd, chipnr);
+
+	/* Shift to get page */
+	page = (int)(to >> chip->page_shift);
+
+	/*
+	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+	 * of my DiskOnChip 2000 test units) will clear the whole data page too
+	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
+	 * it in the doc2000 driver in August 1999.  dwmw2.
+	 */
+	nand_reset(chip);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		chip->select_chip(mtd, -1);
+		return -EROFS;
+	}
+
+	/* Invalidate the page cache, if we write to the cached page */
+	if (page == chip->pagebuf)
+		chip->pagebuf = -1;
+
+	nand_fill_oob(mtd, ops->oobbuf, ops->ooblen, ops);
+
+	if (ops->mode == MTD_OPS_RAW)
+		status = chip->ecc.write_oob_raw(mtd, chip, page & chip->pagemask);
+	else
+		status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask);
+
+	chip->select_chip(mtd, -1);
+
+	if (status)
+		return status;
+
+	ops->oobretlen = ops->ooblen;
+
+	return 0;
+}
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+			  struct mtd_oob_ops *ops)
+{
+	int ret = -ENOTSUPP;
+
+	ops->retlen = 0;
+
+	/* Do not allow writes past end of device */
+	if (ops->datbuf && (to + ops->len) > mtd->size) {
+		pr_debug("%s: attempt to write beyond end of device\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	nand_get_device(mtd, FL_WRITING);
+
+	switch (ops->mode) {
+	case MTD_OPS_PLACE_OOB:
+	case MTD_OPS_AUTO_OOB:
+	case MTD_OPS_RAW:
+		break;
+
+	default:
+		goto out;
+	}
+
+	if (!ops->datbuf)
+		ret = nand_do_write_oob(mtd, to, ops);
+	else
+		ret = nand_do_write_ops(mtd, to, ops);
+
+out:
+	nand_release_device(mtd);
+	return ret;
+}
+
+/**
+ * single_erase - [GENERIC] NAND standard block erase command function
+ * @mtd: MTD device structure
+ * @page: the page address of the block which will be erased
+ *
+ * Standard erase command for NAND chips. Returns NAND status.
+ */
+static int single_erase(struct mtd_info *mtd, int page)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	/* Send commands to erase a block */
+	chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page);
+	chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1);
+
+	return chip->waitfunc(mtd, chip);
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+	return nand_erase_nand(mtd, instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
+		    int allowbbt)
+{
+	int page, status, pages_per_block, ret, chipnr;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	loff_t len;
+
+	pr_debug("%s: start = 0x%012llx, len = %llu\n",
+			__func__, (unsigned long long)instr->addr,
+			(unsigned long long)instr->len);
+
+	if (check_offs_len(mtd, instr->addr, instr->len))
+		return -EINVAL;
+
+	/* Grab the lock and see if the device is available */
+	nand_get_device(mtd, FL_ERASING);
+
+	/* Shift to get first page */
+	page = (int)(instr->addr >> chip->page_shift);
+	chipnr = (int)(instr->addr >> chip->chip_shift);
+
+	/* Calculate pages in each block */
+	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+	/* Select the NAND device */
+	chip->select_chip(mtd, chipnr);
+
+	/* Check, if it is write protected */
+	if (nand_check_wp(mtd)) {
+		pr_debug("%s: device is write protected!\n",
+				__func__);
+		instr->state = MTD_ERASE_FAILED;
+		goto erase_exit;
+	}
+
+	/* Loop through the pages */
+	len = instr->len;
+
+	instr->state = MTD_ERASING;
+
+	while (len) {
+		/* Check if we have a bad block, we do not erase bad blocks! */
+		if (nand_block_checkbad(mtd, ((loff_t) page) <<
+					chip->page_shift, allowbbt)) {
+			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+				    __func__, page);
+			instr->state = MTD_ERASE_FAILED;
+			goto erase_exit;
+		}
+
+		/*
+		 * Invalidate the page cache, if we erase the block which
+		 * contains the current cached page.
+		 */
+		if (page <= chip->pagebuf && chip->pagebuf <
+		    (page + pages_per_block))
+			chip->pagebuf = -1;
+
+		status = chip->erase(mtd, page & chip->pagemask);
+
+		/*
+		 * See if operation failed and additional status checks are
+		 * available
+		 */
+		if ((status & NAND_STATUS_FAIL) && (chip->errstat))
+			status = chip->errstat(mtd, chip, FL_ERASING,
+					       status, page);
+
+		/* See if block erase succeeded */
+		if (status & NAND_STATUS_FAIL) {
+			pr_debug("%s: failed erase, page 0x%08x\n",
+					__func__, page);
+			instr->state = MTD_ERASE_FAILED;
+			instr->fail_addr =
+				((loff_t)page << chip->page_shift);
+			goto erase_exit;
+		}
+
+		/* Increment page address and decrement length */
+		len -= (1ULL << chip->phys_erase_shift);
+		page += pages_per_block;
+
+		/* Check, if we cross a chip boundary */
+		if (len && !(page & chip->pagemask)) {
+			chipnr++;
+			chip->select_chip(mtd, -1);
+			chip->select_chip(mtd, chipnr);
+		}
+	}
+	instr->state = MTD_ERASE_DONE;
+
+erase_exit:
+
+	ret = instr->state == MTD_ERASE_DONE ? 0 : -EIO;
+
+	/* Deselect and wake up anyone waiting on the device */
+	chip->select_chip(mtd, -1);
+	nand_release_device(mtd);
+
+	/* Do call back function */
+	if (!ret)
+		mtd_erase_callback(instr);
+
+	/* Return more or less happy */
+	return ret;
+}
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+	pr_debug("%s: called\n", __func__);
+
+	/* Grab the lock and see if the device is available */
+	nand_get_device(mtd, FL_SYNCING);
+	/* Release it and go back */
+	nand_release_device(mtd);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int chipnr = (int)(offs >> chip->chip_shift);
+	int ret;
+
+	/* Select the NAND device */
+	nand_get_device(mtd, FL_READING);
+	chip->select_chip(mtd, chipnr);
+
+	ret = nand_block_checkbad(mtd, offs, 0);
+
+	chip->select_chip(mtd, -1);
+	nand_release_device(mtd);
+
+	return ret;
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	int ret;
+
+	ret = nand_block_isbad(mtd, ofs);
+	if (ret) {
+		/* If it was bad already, return success and do nothing */
+		if (ret > 0)
+			return 0;
+		return ret;
+	}
+
+	return nand_block_markbad_lowlevel(mtd, ofs);
+}
+
+/**
+ * nand_onfi_set_features - [REPLACEABLE] set features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four-byte array.
+ */
+static int nand_onfi_set_features(struct mtd_info *mtd, struct nand_chip *chip,
+			int addr, uint8_t *subfeature_param)
+{
+	int status;
+	int i;
+
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
+		return -EINVAL;
+
+	chip->cmdfunc(mtd, NAND_CMD_SET_FEATURES, addr, -1);
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		chip->write_byte(mtd, subfeature_param[i]);
+
+	status = chip->waitfunc(mtd, chip);
+	if (status & NAND_STATUS_FAIL)
+		return -EIO;
+	return 0;
+}
+
+/**
+ * nand_onfi_get_features - [REPLACEABLE] get features for ONFI nand
+ * @mtd: MTD device structure
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four-byte array.
+ */
+static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip,
+			int addr, uint8_t *subfeature_param)
+{
+	int i;
+
+	if (!chip->onfi_version ||
+	    !(le16_to_cpu(chip->onfi_params.opt_cmd)
+	      & ONFI_OPT_CMD_SET_GET_FEATURES))
+		return -EINVAL;
+
+	chip->cmdfunc(mtd, NAND_CMD_GET_FEATURES, addr, -1);
+	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+		*subfeature_param++ = chip->read_byte(mtd);
+	return 0;
+}
+
+/**
+ * nand_suspend - [MTD Interface] Suspend the NAND flash
+ * @mtd: MTD device structure
+ */
+static int nand_suspend(struct mtd_info *mtd)
+{
+	return nand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/**
+ * nand_resume - [MTD Interface] Resume the NAND flash
+ * @mtd: MTD device structure
+ */
+static void nand_resume(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (chip->state == FL_PM_SUSPENDED)
+		nand_release_device(mtd);
+	else
+		pr_err("%s called for a chip which is not in suspended state\n",
+			__func__);
+}
+
+/**
+ * nand_shutdown - [MTD Interface] Finish the current NAND operation and
+ *                 prevent further operations
+ * @mtd: MTD device structure
+ */
+static void nand_shutdown(struct mtd_info *mtd)
+{
+	nand_get_device(mtd, FL_PM_SUSPENDED);
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip, int busw)
+{
+	/* check for proper chip_delay setup, set 20us if not */
+	if (!chip->chip_delay)
+		chip->chip_delay = 20;
+
+	/* check if a user-supplied command function was given */
+	if (chip->cmdfunc == NULL)
+		chip->cmdfunc = nand_command;
+
+	/* check if a user-supplied wait function was given */
+	if (chip->waitfunc == NULL)
+		chip->waitfunc = nand_wait;
+
+	if (!chip->select_chip)
+		chip->select_chip = nand_select_chip;
+
+	/* set for ONFI nand */
+	if (!chip->onfi_set_features)
+		chip->onfi_set_features = nand_onfi_set_features;
+	if (!chip->onfi_get_features)
+		chip->onfi_get_features = nand_onfi_get_features;
+
+	/* If called twice, pointers that depend on busw may need to be reset */
+	if (!chip->read_byte || chip->read_byte == nand_read_byte)
+		chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
+	if (!chip->read_word)
+		chip->read_word = nand_read_word;
+	if (!chip->block_bad)
+		chip->block_bad = nand_block_bad;
+	if (!chip->block_markbad)
+		chip->block_markbad = nand_default_block_markbad;
+	if (!chip->write_buf || chip->write_buf == nand_write_buf)
+		chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
+	if (!chip->write_byte || chip->write_byte == nand_write_byte)
+		chip->write_byte = busw ? nand_write_byte16 : nand_write_byte;
+	if (!chip->read_buf || chip->read_buf == nand_read_buf)
+		chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
+	if (!chip->scan_bbt)
+		chip->scan_bbt = nand_default_bbt;
+
+	if (!chip->controller) {
+		chip->controller = &chip->hwcontrol;
+		nand_hw_control_init(chip->controller);
+	}
+}
+
+/* Sanitize ONFI strings so we can safely print them */
+static void sanitize_string(uint8_t *s, size_t len)
+{
+	ssize_t i;
+
+	/* Null terminate */
+	s[len - 1] = 0;
+
+	/* Remove non printable chars */
+	for (i = 0; i < len - 1; i++) {
+		if (s[i] < ' ' || s[i] > 127)
+			s[i] = '?';
+	}
+
+	/* Remove trailing spaces */
+	strim(s);
+}
+
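+/*
+ * Bit-wise CRC-16 used for the ONFI/JEDEC parameter pages: MSB first,
+ * polynomial 0x8005 (x^16 + x^15 + x^2 + 1), seeded with ONFI_CRC_BASE by
+ * the callers below. A parameter page copy is accepted when, for instance,
+ * onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) equals the CRC field stored
+ * in the page itself.
+ */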
+static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+	int i;
+	while (len--) {
+		crc ^= *p++ << 8;
+		for (i = 0; i < 8; i++)
+			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+	}
+
+	return crc;
+}
+
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct mtd_info *mtd,
+		struct nand_chip *chip, struct nand_onfi_params *p)
+{
+	struct onfi_ext_param_page *ep;
+	struct onfi_ext_section *s;
+	struct onfi_ext_ecc_info *ecc;
+	uint8_t *cursor;
+	int ret = -EINVAL;
+	int len;
+	int i;
+
+	len = le16_to_cpu(p->ext_param_page_length) * 16;
+	ep = kmalloc(len, GFP_KERNEL);
+	if (!ep)
+		return -ENOMEM;
+
+	/* Send our own NAND_CMD_PARAM. */
+	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+
+	/* Use the Change Read Column command to skip the ONFI param pages. */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			sizeof(*p) * p->num_of_param_pages, -1);
+
+	/* Read out the Extended Parameter Page. */
+	chip->read_buf(mtd, (uint8_t *)ep, len);
+	if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+		!= le16_to_cpu(ep->crc))) {
+		pr_debug("fail in the CRC.\n");
+		goto ext_out;
+	}
+
+	/*
+	 * Check the signature.
+	 * This does not strictly follow the ONFI spec and may be changed
+	 * in the future.
+	 */
+	if (strncmp(ep->sig, "EPPS", 4)) {
+		pr_debug("The signature is invalid.\n");
+		goto ext_out;
+	}
+
+	/* find the ECC section. */
+	cursor = (uint8_t *)(ep + 1);
+	for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+		s = ep->sections + i;
+		if (s->type == ONFI_SECTION_TYPE_2)
+			break;
+		cursor += s->length * 16;
+	}
+	if (i == ONFI_EXT_SECTION_MAX) {
+		pr_debug("We can not find the ECC section.\n");
+		goto ext_out;
+	}
+
+	/* get the info we want. */
+	ecc = (struct onfi_ext_ecc_info *)cursor;
+
+	if (!ecc->codeword_size) {
+		pr_debug("Invalid codeword size\n");
+		goto ext_out;
+	}
+
+	chip->ecc_strength_ds = ecc->ecc_bits;
+	chip->ecc_step_ds = 1 << ecc->codeword_size;
+	ret = 0;
+
+ext_out:
+	kfree(ep);
+	return ret;
+}
+
+static int nand_setup_read_retry_micron(struct mtd_info *mtd, int retry_mode)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	uint8_t feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+	return chip->onfi_set_features(mtd, chip, ONFI_FEATURE_ADDR_READ_RETRY,
+			feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static void nand_onfi_detect_micron(struct nand_chip *chip,
+		struct nand_onfi_params *p)
+{
+	struct nand_onfi_vendor_micron *micron = (void *)p->vendor;
+
+	if (le16_to_cpu(p->vendor_revision) < 1)
+		return;
+
+	chip->read_retries = micron->read_retry_options;
+	chip->setup_read_retry = nand_setup_read_retry_micron;
+}
+
+/*
+ * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
+					int *busw)
+{
+	struct nand_onfi_params *p = &chip->onfi_params;
+	int i, j;
+	int val;
+
+	/* Try ONFI for unknown chip or LP */
+	chip->cmdfunc(mtd, NAND_CMD_READID, 0x20, -1);
+	if (chip->read_byte(mtd) != 'O' || chip->read_byte(mtd) != 'N' ||
+		chip->read_byte(mtd) != 'F' || chip->read_byte(mtd) != 'I')
+		return 0;
+
+	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0, -1);
+	for (i = 0; i < 3; i++) {
+		for (j = 0; j < sizeof(*p); j++)
+			((uint8_t *)p)[j] = chip->read_byte(mtd);
+		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
+				le16_to_cpu(p->crc)) {
+			break;
+		}
+	}
+
+	if (i == 3) {
+		pr_err("Could not find valid ONFI parameter page; aborting\n");
+		return 0;
+	}
+
+	/* Check version */
+	val = le16_to_cpu(p->revision);
+	if (val & (1 << 5))
+		chip->onfi_version = 23;
+	else if (val & (1 << 4))
+		chip->onfi_version = 22;
+	else if (val & (1 << 3))
+		chip->onfi_version = 21;
+	else if (val & (1 << 2))
+		chip->onfi_version = 20;
+	else if (val & (1 << 1))
+		chip->onfi_version = 10;
+
+	if (!chip->onfi_version) {
+		pr_info("unsupported ONFI version: %d\n", val);
+		return 0;
+	}
+
+	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+	sanitize_string(p->model, sizeof(p->model));
+	if (!mtd->name)
+		mtd->name = p->model;
+
+	mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+	/*
+	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
+	 * (don't ask me who thought of this...). MTD assumes that these
+	 * dimensions will be power-of-2, so just truncate the remaining area.
+	 */
+	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+	mtd->erasesize *= mtd->writesize;
+
+	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+	/* See erasesize comment */
+	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+	chip->bits_per_cell = p->bits_per_cell;
+
+	if (onfi_feature(chip) & ONFI_FEATURE_16_BIT_BUS)
+		*busw = NAND_BUSWIDTH_16;
+	else
+		*busw = 0;
+
+	if (p->ecc_bits != 0xff) {
+		chip->ecc_strength_ds = p->ecc_bits;
+		chip->ecc_step_ds = 512;
+	} else if (chip->onfi_version >= 21 &&
+		(onfi_feature(chip) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+		/*
+		 * nand_flash_detect_ext_param_page() uses the Change Read
+		 * Column command, which may not be supported by the current
+		 * chip->cmdfunc, so try to update chip->cmdfunc now. We do
+		 * not replace a user-supplied command function.
+		 */
+		if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+			chip->cmdfunc = nand_command_lp;
+
+		/* The Extended Parameter Page is supported since ONFI 2.1. */
+		if (nand_flash_detect_ext_param_page(mtd, chip, p))
+			pr_warn("Failed to detect ONFI extended param page\n");
+	} else {
+		pr_warn("Could not retrieve ONFI ECC requirements\n");
+	}
+
+	if (p->jedec_id == NAND_MFR_MICRON)
+		nand_onfi_detect_micron(chip, p);
+
+	return 1;
+}
+
+/*
+ * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
+ */
+static int nand_flash_detect_jedec(struct mtd_info *mtd, struct nand_chip *chip,
+					int *busw)
+{
+	struct nand_jedec_params *p = &chip->jedec_params;
+	struct jedec_ecc_info *ecc;
+	int val;
+	int i, j;
+
+	/* Try JEDEC for unknown chip or LP */
+	chip->cmdfunc(mtd, NAND_CMD_READID, 0x40, -1);
+	if (chip->read_byte(mtd) != 'J' || chip->read_byte(mtd) != 'E' ||
+		chip->read_byte(mtd) != 'D' || chip->read_byte(mtd) != 'E' ||
+		chip->read_byte(mtd) != 'C')
+		return 0;
+
+	chip->cmdfunc(mtd, NAND_CMD_PARAM, 0x40, -1);
+	for (i = 0; i < 3; i++) {
+		for (j = 0; j < sizeof(*p); j++)
+			((uint8_t *)p)[j] = chip->read_byte(mtd);
+
+		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
+				le16_to_cpu(p->crc))
+			break;
+	}
+
+	if (i == 3) {
+		pr_err("Could not find valid JEDEC parameter page; aborting\n");
+		return 0;
+	}
+
+	/* Check version */
+	val = le16_to_cpu(p->revision);
+	if (val & (1 << 2))
+		chip->jedec_version = 10;
+	else if (val & (1 << 1))
+		chip->jedec_version = 1; /* vendor specific version */
+
+	if (!chip->jedec_version) {
+		pr_info("unsupported JEDEC version: %d\n", val);
+		return 0;
+	}
+
+	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+	sanitize_string(p->model, sizeof(p->model));
+	if (!mtd->name)
+		mtd->name = p->model;
+
+	mtd->writesize = le32_to_cpu(p->byte_per_page);
+
+	/* See the comment in nand_flash_detect_onfi() about power-of-2 truncation. */
+	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+	mtd->erasesize *= mtd->writesize;
+
+	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+
+	/* See the comment in nand_flash_detect_onfi() about power-of-2 truncation. */
+	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
+	chip->bits_per_cell = p->bits_per_cell;
+
+	if (jedec_feature(chip) & JEDEC_FEATURE_16_BIT_BUS)
+		*busw = NAND_BUSWIDTH_16;
+	else
+		*busw = 0;
+
+	/* ECC info */
+	ecc = &p->ecc_info[0];
+
+	if (ecc->codeword_size >= 9) {
+		chip->ecc_strength_ds = ecc->ecc_bits;
+		chip->ecc_step_ds = 1 << ecc->codeword_size;
+	} else {
+		pr_warn("Invalid codeword size\n");
+	}
+
+	return 1;
+}
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+	int i, j;
+	for (i = 0; i < period; i++)
+		for (j = i + period; j < arrlen; j += period)
+			if (id_data[i] != id_data[j])
+				return 0;
+	return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
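+ * For example, an 8-byte ID that reads back as {A, B, C, D, E, A, B, C}
+ * wraps with a period of 5, so the reported ID length is 5.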
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+	int last_nonzero, period;
+
+	/* Find last non-zero byte */
+	for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+		if (id_data[last_nonzero])
+			break;
+
+	/* All zeros */
+	if (last_nonzero < 0)
+		return 0;
+
+	/* Calculate wraparound period */
+	for (period = 1; period < arrlen; period++)
+		if (nand_id_has_period(id_data, arrlen, period))
+			break;
+
+	/* There's a repeated pattern */
+	if (period < arrlen)
+		return period;
+
+	/* There are trailing zeros */
+	if (last_nonzero < arrlen - 1)
+		return last_nonzero + 1;
+
+	/* No pattern detected */
+	return arrlen;
+}
+
+/* Extract the bits of per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+	int bits;
+
+	bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+	bits >>= NAND_CI_CELLTYPE_SHIFT;
+	return bits + 1;
+}
+
+/*
+ * Many new NAND chips share similar device ID codes, which represent the size of the
+ * chip. The rest of the parameters must be decoded according to generic or
+ * manufacturer-specific "extended ID" decoding patterns.
+ */
+static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
+				u8 id_data[8], int *busw)
+{
+	int extid, id_len;
+	/* The 3rd id byte holds MLC / multichip data */
+	chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+	/* The 4th id byte is the important one */
+	extid = id_data[3];
+
+	id_len = nand_id_len(id_data, 8);
+
+	/*
+	 * Field definitions are in the following datasheets:
+	 * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
+	 * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
+	 * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
+	 *
+	 * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+	 * ID to decide what to do.
+	 */
+	if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+			!nand_is_slc(chip) && id_data[5] != 0x00) {
+		/* Calc pagesize */
+		mtd->writesize = 2048 << (extid & 0x03);
+		extid >>= 2;
+		/* Calc oobsize */
+		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+		case 1:
+			mtd->oobsize = 128;
+			break;
+		case 2:
+			mtd->oobsize = 218;
+			break;
+		case 3:
+			mtd->oobsize = 400;
+			break;
+		case 4:
+			mtd->oobsize = 436;
+			break;
+		case 5:
+			mtd->oobsize = 512;
+			break;
+		case 6:
+			mtd->oobsize = 640;
+			break;
+		case 7:
+		default: /* Other cases are "reserved" (unknown) */
+			mtd->oobsize = 1024;
+			break;
+		}
+		extid >>= 2;
+		/* Calc blocksize */
+		mtd->erasesize = (128 * 1024) <<
+			(((extid >> 1) & 0x04) | (extid & 0x03));
+		*busw = 0;
+	} else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
+			!nand_is_slc(chip)) {
+		unsigned int tmp;
+
+		/* Calc pagesize */
+		mtd->writesize = 2048 << (extid & 0x03);
+		extid >>= 2;
+		/* Calc oobsize */
+		switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
+		case 0:
+			mtd->oobsize = 128;
+			break;
+		case 1:
+			mtd->oobsize = 224;
+			break;
+		case 2:
+			mtd->oobsize = 448;
+			break;
+		case 3:
+			mtd->oobsize = 64;
+			break;
+		case 4:
+			mtd->oobsize = 32;
+			break;
+		case 5:
+			mtd->oobsize = 16;
+			break;
+		default:
+			mtd->oobsize = 640;
+			break;
+		}
+		extid >>= 2;
+		/* Calc blocksize */
+		tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
+		if (tmp < 0x03)
+			mtd->erasesize = (128 * 1024) << tmp;
+		else if (tmp == 0x03)
+			mtd->erasesize = 768 * 1024;
+		else
+			mtd->erasesize = (64 * 1024) << tmp;
+		*busw = 0;
+	} else {
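+		/*
+		 * Illustrative example: a 4th ID byte of 0x95 decodes to a
+		 * 2048 byte page, 64 byte OOB, 128 KiB erase block and an
+		 * 8-bit bus using the shifts below.
+		 */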
+		/* Calc pagesize */
+		mtd->writesize = 1024 << (extid & 0x03);
+		extid >>= 2;
+		/* Calc oobsize */
+		mtd->oobsize = (8 << (extid & 0x01)) *
+			(mtd->writesize >> 9);
+		extid >>= 2;
+		/* Calc blocksize; blocksize is a multiple of 64 KiB */
+		mtd->erasesize = (64 * 1024) << (extid & 0x03);
+		extid >>= 2;
+		/* Get buswidth information */
+		*busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
+		/*
+		 * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+		 * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+		 * follows:
+		 * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+		 *                         110b -> 24nm
+		 * - ID byte 5, bit[7]:    1 -> BENAND, 0 -> raw SLC
+		 */
+		if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
+				nand_is_slc(chip) &&
+				(id_data[5] & 0x7) == 0x6 /* 24nm */ &&
+				!(id_data[4] & 0x80) /* !BENAND */) {
+			mtd->oobsize = 32 * mtd->writesize >> 9;
+		}
+
+	}
+}
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
+				struct nand_flash_dev *type, u8 id_data[8],
+				int *busw)
+{
+	int maf_id = id_data[0];
+
+	mtd->erasesize = type->erasesize;
+	mtd->writesize = type->pagesize;
+	mtd->oobsize = mtd->writesize / 32;
+	*busw = type->options & NAND_BUSWIDTH_16;
+
+	/* All legacy ID NAND are small-page, SLC */
+	chip->bits_per_cell = 1;
+
+	/*
+	 * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+	 * some Spansion chips have erasesize that conflicts with size
+	 * listed in nand_ids table.
+	 * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+	 */
+	if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
+			&& id_data[6] == 0x00 && id_data[7] == 0x00
+			&& mtd->writesize == 512) {
+		mtd->erasesize = 128 * 1024;
+		mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+	}
+}
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct mtd_info *mtd,
+				    struct nand_chip *chip, u8 id_data[8])
+{
+	int maf_id = id_data[0];
+
+	/* Set the bad block position */
+	if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+		chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
+	else
+		chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+
+	/*
+	 * Bad block marker is stored in the last page of each block on Samsung
+	 * and Hynix MLC devices; stored in first two pages of each block on
+	 * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
+	 * AMD/Spansion, and Macronix.  All others scan only the first page.
+	 */
+	if (!nand_is_slc(chip) &&
+			(maf_id == NAND_MFR_SAMSUNG ||
+			 maf_id == NAND_MFR_HYNIX))
+		chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
+	else if ((nand_is_slc(chip) &&
+				(maf_id == NAND_MFR_SAMSUNG ||
+				 maf_id == NAND_MFR_HYNIX ||
+				 maf_id == NAND_MFR_TOSHIBA ||
+				 maf_id == NAND_MFR_AMD ||
+				 maf_id == NAND_MFR_MACRONIX)) ||
+			(mtd->writesize == 2048 &&
+			 maf_id == NAND_MFR_MICRON))
+		chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+	return type->id_len;
+}
+
+static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip,
+		   struct nand_flash_dev *type, u8 *id_data, int *busw)
+{
+	if (!strncmp(type->id, id_data, type->id_len)) {
+		mtd->writesize = type->pagesize;
+		mtd->erasesize = type->erasesize;
+		mtd->oobsize = type->oobsize;
+
+		chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+		chip->chipsize = (uint64_t)type->chipsize << 20;
+		chip->options |= type->options;
+		chip->ecc_strength_ds = NAND_ECC_STRENGTH(type);
+		chip->ecc_step_ds = NAND_ECC_STEP(type);
+		chip->onfi_timing_mode_default =
+					type->onfi_timing_mode_default;
+
+		*busw = type->options & NAND_BUSWIDTH_16;
+
+		if (!mtd->name)
+			mtd->name = type->name;
+
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Get the flash and manufacturer id and lookup if the type is supported.
+ */
+static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
+						  struct nand_chip *chip,
+						  int *maf_id, int *dev_id,
+						  struct nand_flash_dev *type)
+{
+	int busw;
+	int i, maf_idx;
+	u8 id_data[8];
+
+	/* Select the device */
+	chip->select_chip(mtd, 0);
+
+	/*
+	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+	 * after power-up.
+	 */
+	nand_reset(chip);
+
+	/* Send the command for reading device ID */
+	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+	/* Read manufacturer and device IDs */
+	*maf_id = chip->read_byte(mtd);
+	*dev_id = chip->read_byte(mtd);
+
+	/*
+	 * Try again to make sure, as on some systems bus-hold or other
+	 * interface concerns can cause random data to appear that looks
+	 * like a plausible NAND flash. If the two results do not match,
+	 * ignore the device completely.
+	 */
+
+	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+	/* Read entire ID string */
+	for (i = 0; i < 8; i++)
+		id_data[i] = chip->read_byte(mtd);
+
+	if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
+		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+			*maf_id, *dev_id, id_data[0], id_data[1]);
+		return ERR_PTR(-ENODEV);
+	}
+
+	if (!type)
+		type = nand_flash_ids;
+
+	for (; type->name != NULL; type++) {
+		if (is_full_id_nand(type)) {
+			if (find_full_id_nand(mtd, chip, type, id_data, &busw))
+				goto ident_done;
+		} else if (*dev_id == type->dev_id) {
+			break;
+		}
+	}
+
+	chip->onfi_version = 0;
+	if (!type->name || !type->pagesize) {
+		/* Check if the chip is ONFI compliant */
+		if (nand_flash_detect_onfi(mtd, chip, &busw))
+			goto ident_done;
+
+		/* Check if the chip is JEDEC compliant */
+		if (nand_flash_detect_jedec(mtd, chip, &busw))
+			goto ident_done;
+	}
+
+	if (!type->name)
+		return ERR_PTR(-ENODEV);
+
+	if (!mtd->name)
+		mtd->name = type->name;
+
+	chip->chipsize = (uint64_t)type->chipsize << 20;
+
+	if (!type->pagesize) {
+		/* Decode parameters from extended ID */
+		nand_decode_ext_id(mtd, chip, id_data, &busw);
+	} else {
+		nand_decode_id(mtd, chip, type, id_data, &busw);
+	}
+	/* Get chip options */
+	chip->options |= type->options;
+
+	/*
+	 * Check if chip is not a Samsung device. Do not clear the
+	 * options for chips which do not have an extended id.
+	 */
+	if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
+		chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
+ident_done:
+
+	/* Try to identify manufacturer */
+	for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
+		if (nand_manuf_ids[maf_idx].id == *maf_id)
+			break;
+	}
+
+	if (chip->options & NAND_BUSWIDTH_AUTO) {
+		WARN_ON(chip->options & NAND_BUSWIDTH_16);
+		chip->options |= busw;
+		nand_set_defaults(chip, busw);
+	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+		/*
+		 * Check if the bus width is correct. Hardware drivers should
+		 * have set up the chip correctly!
+		 */
+		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+			*maf_id, *dev_id);
+		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name, mtd->name);
+		pr_warn("bus width %d instead %d bit\n",
+			   (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
+			   busw ? 16 : 8);
+		return ERR_PTR(-EINVAL);
+	}
+
+	nand_decode_bbm_options(mtd, chip, id_data);
+
+	/* Calculate the address shift from the page size */
+	chip->page_shift = ffs(mtd->writesize) - 1;
+	/* Convert chipsize to number of pages per chip -1 */
+	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+
+	chip->bbt_erase_shift = chip->phys_erase_shift =
+		ffs(mtd->erasesize) - 1;
+	if (chip->chipsize & 0xffffffff)
+		chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
+	else {
+		chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
+		chip->chip_shift += 32 - 1;
+	}
+
+	chip->badblockbits = 8;
+	chip->erase = single_erase;
+
+	/* Do not replace user supplied command function! */
+	if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
+		chip->cmdfunc = nand_command_lp;
+
+	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+		*maf_id, *dev_id);
+
+	if (chip->onfi_version)
+		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+				chip->onfi_params.model);
+	else if (chip->jedec_version)
+		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+				chip->jedec_params.model);
+	else
+		pr_info("%s %s\n", nand_manuf_ids[maf_idx].name,
+				type->name);
+
+	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
+		(int)(chip->chipsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
+	return type;
+}
+
+static const char * const nand_ecc_modes[] = {
+	[NAND_ECC_NONE]		= "none",
+	[NAND_ECC_SOFT]		= "soft",
+	[NAND_ECC_HW]		= "hw",
+	[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
+	[NAND_ECC_HW_OOB_FIRST]	= "hw_oob_first",
+};
+
+static int of_get_nand_ecc_mode(struct device_node *np)
+{
+	const char *pm;
+	int err, i;
+
+	err = of_property_read_string(np, "nand-ecc-mode", &pm);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(nand_ecc_modes); i++)
+		if (!strcasecmp(pm, nand_ecc_modes[i]))
+			return i;
+
+	/*
+	 * For backward compatibility we also support a few obsolete values
+	 * that no longer have a mapping in nand_ecc_modes_t (they were
+	 * merged with other enums).
+	 */
+	if (!strcasecmp(pm, "soft_bch"))
+		return NAND_ECC_SOFT;
+
+	return -ENODEV;
+}
+
+static const char * const nand_ecc_algos[] = {
+	[NAND_ECC_HAMMING]	= "hamming",
+	[NAND_ECC_BCH]		= "bch",
+};
+
+static int of_get_nand_ecc_algo(struct device_node *np)
+{
+	const char *pm;
+	int err, i;
+
+	err = of_property_read_string(np, "nand-ecc-algo", &pm);
+	if (!err) {
+		for (i = NAND_ECC_HAMMING; i < ARRAY_SIZE(nand_ecc_algos); i++)
+			if (!strcasecmp(pm, nand_ecc_algos[i]))
+				return i;
+		return -ENODEV;
+	}
+
+	/*
+	 * For backward compatibility we also read "nand-ecc-mode" and check
+	 * for some obsolete values that used to specify the ECC algorithm.
+	 */
+	err = of_property_read_string(np, "nand-ecc-mode", &pm);
+	if (err < 0)
+		return err;
+
+	if (!strcasecmp(pm, "soft"))
+		return NAND_ECC_HAMMING;
+	else if (!strcasecmp(pm, "soft_bch"))
+		return NAND_ECC_BCH;
+
+	return -ENODEV;
+}
+
+static int of_get_nand_ecc_step_size(struct device_node *np)
+{
+	int ret;
+	u32 val;
+
+	ret = of_property_read_u32(np, "nand-ecc-step-size", &val);
+	return ret ? ret : val;
+}
+
+static int of_get_nand_ecc_strength(struct device_node *np)
+{
+	int ret;
+	u32 val;
+
+	ret = of_property_read_u32(np, "nand-ecc-strength", &val);
+	return ret ? ret : val;
+}
+
+static int of_get_nand_bus_width(struct device_node *np)
+{
+	u32 val;
+
+	if (of_property_read_u32(np, "nand-bus-width", &val))
+		return 8;
+
+	switch (val) {
+	case 8:
+	case 16:
+		return val;
+	default:
+		return -EIO;
+	}
+}
+
+static bool of_get_nand_on_flash_bbt(struct device_node *np)
+{
+	return of_property_read_bool(np, "nand-on-flash-bbt");
+}
+
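+/*
+ * nand_dt_init() parses the generic NAND DT properties handled by the
+ * helpers above. A hypothetical node (not taken from any real board) could
+ * look like:
+ *
+ *	nand@0 {
+ *		nand-bus-width = <8>;
+ *		nand-on-flash-bbt;
+ *		nand-ecc-mode = "hw";
+ *		nand-ecc-strength = <4>;
+ *		nand-ecc-step-size = <512>;
+ *	};
+ */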
+static int nand_dt_init(struct nand_chip *chip)
+{
+	struct device_node *dn = nand_get_flash_node(chip);
+	int ecc_mode, ecc_algo, ecc_strength, ecc_step;
+
+	if (!dn)
+		return 0;
+
+	if (of_get_nand_bus_width(dn) == 16)
+		chip->options |= NAND_BUSWIDTH_16;
+
+	if (of_get_nand_on_flash_bbt(dn))
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	ecc_mode = of_get_nand_ecc_mode(dn);
+	ecc_algo = of_get_nand_ecc_algo(dn);
+	ecc_strength = of_get_nand_ecc_strength(dn);
+	ecc_step = of_get_nand_ecc_step_size(dn);
+
+	if ((ecc_step >= 0 && !(ecc_strength >= 0)) ||
+	    (!(ecc_step >= 0) && ecc_strength >= 0)) {
+		pr_err("must set both strength and step size in DT\n");
+		return -EINVAL;
+	}
+
+	if (ecc_mode >= 0)
+		chip->ecc.mode = ecc_mode;
+
+	if (ecc_algo >= 0)
+		chip->ecc.algo = ecc_algo;
+
+	if (ecc_strength >= 0)
+		chip->ecc.strength = ecc_strength;
+
+	if (ecc_step > 0)
+		chip->ecc.size = ecc_step;
+
+	if (of_property_read_bool(dn, "nand-ecc-maximize"))
+		chip->ecc.options |= NAND_ECC_MAXIMIZE;
+
+	return 0;
+}
+
+/**
+ * nand_scan_ident - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ */
+int nand_scan_ident(struct mtd_info *mtd, int maxchips,
+		    struct nand_flash_dev *table)
+{
+	int i, nand_maf_id, nand_dev_id;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_flash_dev *type;
+	int ret;
+
+	ret = nand_dt_init(chip);
+	if (ret)
+		return ret;
+
+	if (!mtd->name && mtd->dev.parent)
+		mtd->name = dev_name(mtd->dev.parent);
+
+	if ((!chip->cmdfunc || !chip->select_chip) && !chip->cmd_ctrl) {
+		/*
+		 * Default functions assigned for chip_select() and
+		 * cmdfunc() both expect cmd_ctrl() to be populated,
+		 * so we need to check that that's the case
+		 */
+		pr_err("chip.cmd_ctrl() callback is not provided");
+		return -EINVAL;
+	}
+	/* Set the default functions */
+	nand_set_defaults(chip, chip->options & NAND_BUSWIDTH_16);
+
+	/* Read the flash type */
+	type = nand_get_flash_type(mtd, chip, &nand_maf_id,
+				   &nand_dev_id, table);
+
+	if (IS_ERR(type)) {
+		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+			pr_warn("No NAND device found\n");
+		chip->select_chip(mtd, -1);
+		return PTR_ERR(type);
+	}
+
+	ret = nand_init_data_interface(chip);
+	if (ret)
+		return ret;
+
+	chip->select_chip(mtd, -1);
+
+	/* Check for a chip array */
+	for (i = 1; i < maxchips; i++) {
+		chip->select_chip(mtd, i);
+		/* See comment in nand_get_flash_type for reset */
+		nand_reset(chip);
+		/* Send the command for reading device ID */
+		chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+		/* Read manufacturer and device IDs */
+		if (nand_maf_id != chip->read_byte(mtd) ||
+		    nand_dev_id != chip->read_byte(mtd)) {
+			chip->select_chip(mtd, -1);
+			break;
+		}
+		chip->select_chip(mtd, -1);
+	}
+	if (i > 1)
+		pr_info("%d chips detected\n", i);
+
+	/* Store the number of chips and calc total size for mtd */
+	chip->numchips = i;
+	mtd->size = i * chip->chipsize;
+
+	return 0;
+}
+EXPORT_SYMBOL(nand_scan_ident);
+
+static int nand_set_ecc_soft_ops(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
+		return -EINVAL;
+
+	switch (ecc->algo) {
+	case NAND_ECC_HAMMING:
+		ecc->calculate = nand_calculate_ecc;
+		ecc->correct = nand_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
+		if (!ecc->size)
+			ecc->size = 256;
+		ecc->bytes = 3;
+		ecc->strength = 1;
+		return 0;
+	case NAND_ECC_BCH:
+		if (!mtd_nand_has_bch()) {
+			WARN(1, "CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+			return -EINVAL;
+		}
+		ecc->calculate = nand_bch_calculate_ecc;
+		ecc->correct = nand_bch_correct_data;
+		ecc->read_page = nand_read_page_swecc;
+		ecc->read_subpage = nand_read_subpage;
+		ecc->write_page = nand_write_page_swecc;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->write_oob = nand_write_oob_std;
+
+		/*
+		 * Board driver should supply ecc.size and ecc.strength
+		 * values to select how many bits are correctable.
+		 * Otherwise, default to 4 bits for large page devices.
+		 */
+		if (!ecc->size && (mtd->oobsize >= 64)) {
+			ecc->size = 512;
+			ecc->strength = 4;
+		}
+
+		/*
+		 * If no ECC placement scheme was provided, pick up the
+		 * default large page one.
+		 */
+		if (!mtd->ooblayout) {
+			/* handle large page devices only */
+			if (mtd->oobsize < 64) {
+				WARN(1, "OOB layout is required when using software BCH on small pages\n");
+				return -EINVAL;
+			}
+
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+		}
+
+		/*
+		 * We can only maximize ECC config when the default layout is
+		 * used, otherwise we don't know how many bytes can really be
+		 * used.
+		 */
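+		/*
+		 * Illustrative numbers: a 4 KiB page with 224 OOB bytes gives
+		 * 4 steps of 1024 bytes, (224 - 2) / 4 = 55 ECC bytes per
+		 * step, and a strength of 55 * 8 / fls(8 * 1024) = 31 bits.
+		 */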
+		if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
+		    ecc->options & NAND_ECC_MAXIMIZE) {
+			int steps, bytes;
+
+			/* Always prefer 1k ECC steps over 512 byte ones */
+			ecc->size = 1024;
+			steps = mtd->writesize / ecc->size;
+
+			/* Reserve 2 bytes for the BBM */
+			bytes = (mtd->oobsize - 2) / steps;
+			ecc->strength = bytes * 8 / fls(8 * ecc->size);
+		}
+
+		/* See nand_bch_init() for details. */
+		ecc->bytes = 0;
+		ecc->priv = nand_bch_init(mtd);
+		if (!ecc->priv) {
+			WARN(1, "BCH ECC initialization failed!\n");
+			return -EINVAL;
+		}
+		return 0;
+	default:
+		WARN(1, "Unsupported ECC algorithm!\n");
+		return -EINVAL;
+	}
+}
+
+/*
+ * Check if the chip configuration meets the datasheet requirements.
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
+ *
+ * (1) A / B >= X / Y
+ * (2) A >= X
+ *
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
+ */
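+/*
+ * Illustrative example: correcting 4 bits per 512 bytes on a chip that
+ * requires 8 bits per 512 bytes fails requirement (2) (4 < 8), so the
+ * check below returns false.
+ */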
+static bool nand_ecc_strength_good(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int corr, ds_corr;
+
+	if (ecc->size == 0 || chip->ecc_step_ds == 0)
+		/* Not enough information */
+		return true;
+
+	/*
+	 * We get the number of corrected bits per page to compare
+	 * the correction density.
+	 */
+	corr = (mtd->writesize * ecc->strength) / ecc->size;
+	ds_corr = (mtd->writesize * chip->ecc_strength_ds) / chip->ecc_step_ds;
+
+	return corr >= ds_corr && ecc->strength >= chip->ecc_strength_ds;
+}
+
+/**
+ * nand_scan_tail - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ *
+ * This is the second phase of the normal nand_scan() function. It fills out
+ * all the uninitialized function pointers with the defaults and scans for a
+ * bad block table if appropriate.
+ */
+int nand_scan_tail(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	struct nand_buffers *nbuf;
+	int ret;
+
+	/* New bad blocks should be marked in OOB, flash-based BBT, or both */
+	if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+		   !(chip->bbt_options & NAND_BBT_USE_FLASH)))
+		return -EINVAL;
+
+	if (!(chip->options & NAND_OWN_BUFFERS)) {
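+		/*
+		 * One allocation, laid out as: struct nand_buffers, ecccalc
+		 * (mtd->oobsize), ecccode (mtd->oobsize), then databuf
+		 * (mtd->writesize + mtd->oobsize for page data and OOB).
+		 */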
+		nbuf = kzalloc(sizeof(*nbuf) + mtd->writesize
+				+ mtd->oobsize * 3, GFP_KERNEL);
+		if (!nbuf)
+			return -ENOMEM;
+		nbuf->ecccalc = (uint8_t *)(nbuf + 1);
+		nbuf->ecccode = nbuf->ecccalc + mtd->oobsize;
+		nbuf->databuf = nbuf->ecccode + mtd->oobsize;
+
+		chip->buffers = nbuf;
+	} else {
+		if (!chip->buffers)
+			return -ENOMEM;
+	}
+
+	/* Set the internal oob buffer location, just after the page data */
+	chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+
+	/*
+	 * If no default placement scheme is given, select an appropriate one.
+	 */
+	if (!mtd->ooblayout &&
+	    !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
+		switch (mtd->oobsize) {
+		case 8:
+		case 16:
+			mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
+			break;
+		case 64:
+		case 128:
+			mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+			break;
+		default:
+			WARN(1, "No oob scheme defined for oobsize %d\n",
+				mtd->oobsize);
+			ret = -EINVAL;
+			goto err_free;
+		}
+	}
+
+	if (!chip->write_page)
+		chip->write_page = nand_write_page;
+
+	/*
+	 * Check the ECC mode; if 3 byte/512 byte hardware ECC is selected
+	 * and we have a 256 byte pagesize, fall back to software ECC.
+	 */
+
+	switch (ecc->mode) {
+	case NAND_ECC_HW_OOB_FIRST:
+		/* Similar to NAND_ECC_HW, but a separate read_page handler */
+		if (!ecc->calculate || !ecc->correct || !ecc->hwctl) {
+			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+			ret = -EINVAL;
+			goto err_free;
+		}
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc_oob_first;
+
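+		/* fall through */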
+	case NAND_ECC_HW:
+		/* Use standard hwecc read page function? */
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_hwecc;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_hwecc;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_std;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_std;
+		if (!ecc->read_subpage)
+			ecc->read_subpage = nand_read_subpage;
+		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+			ecc->write_subpage = nand_write_subpage_hwecc;
+
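+		/* fall through */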
+	case NAND_ECC_HW_SYNDROME:
+		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+		    (!ecc->read_page ||
+		     ecc->read_page == nand_read_page_hwecc ||
+		     !ecc->write_page ||
+		     ecc->write_page == nand_write_page_hwecc)) {
+			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+			ret = -EINVAL;
+			goto err_free;
+		}
+		/* Use standard syndrome read/write page function? */
+		if (!ecc->read_page)
+			ecc->read_page = nand_read_page_syndrome;
+		if (!ecc->write_page)
+			ecc->write_page = nand_write_page_syndrome;
+		if (!ecc->read_page_raw)
+			ecc->read_page_raw = nand_read_page_raw_syndrome;
+		if (!ecc->write_page_raw)
+			ecc->write_page_raw = nand_write_page_raw_syndrome;
+		if (!ecc->read_oob)
+			ecc->read_oob = nand_read_oob_syndrome;
+		if (!ecc->write_oob)
+			ecc->write_oob = nand_write_oob_syndrome;
+
+		if (mtd->writesize >= ecc->size) {
+			if (!ecc->strength) {
+				WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
+				ret = -EINVAL;
+				goto err_free;
+			}
+			break;
+		}
+		pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+			ecc->size, mtd->writesize);
+		ecc->mode = NAND_ECC_SOFT;
+		ecc->algo = NAND_ECC_HAMMING;
+
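+		/* fall through */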
+	case NAND_ECC_SOFT:
+		ret = nand_set_ecc_soft_ops(mtd);
+		if (ret) {
+			ret = -EINVAL;
+			goto err_free;
+		}
+		break;
+
+	case NAND_ECC_NONE:
+		pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
+		ecc->read_page = nand_read_page_raw;
+		ecc->write_page = nand_write_page_raw;
+		ecc->read_oob = nand_read_oob_std;
+		ecc->read_page_raw = nand_read_page_raw;
+		ecc->write_page_raw = nand_write_page_raw;
+		ecc->write_oob = nand_write_oob_std;
+		ecc->size = mtd->writesize;
+		ecc->bytes = 0;
+		ecc->strength = 0;
+		break;
+
+	default:
+		WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
+		ret = -EINVAL;
+		goto err_free;
+	}
+
+	/* For many systems, the standard OOB write also works for raw */
+	if (!ecc->read_oob_raw)
+		ecc->read_oob_raw = ecc->read_oob;
+	if (!ecc->write_oob_raw)
+		ecc->write_oob_raw = ecc->write_oob;
+
+	/* propagate ecc info to mtd_info */
+	mtd->ecc_strength = ecc->strength;
+	mtd->ecc_step_size = ecc->size;
+
+	/*
+	 * Set the number of read / write steps for one page depending on ECC
+	 * mode.
+	 */
+	ecc->steps = mtd->writesize / ecc->size;
+	if (ecc->steps * ecc->size != mtd->writesize) {
+		WARN(1, "Invalid ECC parameters\n");
+		ret = -EINVAL;
+		goto err_free;
+	}
+	ecc->total = ecc->steps * ecc->bytes;
+
+	/*
+	 * The number of bytes available for a client to place data into
+	 * the out of band area.
+	 */
+	ret = mtd_ooblayout_count_freebytes(mtd);
+	if (ret < 0)
+		ret = 0;
+
+	mtd->oobavail = ret;
+
+	/* ECC sanity check: warn if it's too weak */
+	if (!nand_ecc_strength_good(mtd))
+		pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+			mtd->name);
+
+	/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
+	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
+		switch (ecc->steps) {
+		case 2:
+			mtd->subpage_sft = 1;
+			break;
+		case 4:
+		case 8:
+		case 16:
+			mtd->subpage_sft = 2;
+			break;
+		}
+	}
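+	/* e.g. 4 ECC steps on a 2 KiB SLC page: subpage_sft = 2, 512 B subpages */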
+	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
+
+	/* Initialize state */
+	chip->state = FL_READY;
+
+	/* Invalidate the pagebuffer reference */
+	chip->pagebuf = -1;
+
+	/* Large page NAND with SOFT_ECC should support subpage reads */
+	switch (ecc->mode) {
+	case NAND_ECC_SOFT:
+		if (chip->page_shift > 9)
+			chip->options |= NAND_SUBPAGE_READ;
+		break;
+
+	default:
+		break;
+	}
+
+	/* Fill in remaining MTD driver data */
+	mtd->type = nand_is_slc(chip) ? MTD_NANDFLASH : MTD_MLCNANDFLASH;
+	mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
+						MTD_CAP_NANDFLASH;
+	mtd->_erase = nand_erase;
+	mtd->_point = NULL;
+	mtd->_unpoint = NULL;
+	mtd->_read = nand_read;
+	mtd->_write = nand_write;
+	mtd->_panic_write = panic_nand_write;
+	mtd->_read_oob = nand_read_oob;
+	mtd->_write_oob = nand_write_oob;
+	mtd->_sync = nand_sync;
+	mtd->_lock = NULL;
+	mtd->_unlock = NULL;
+	mtd->_suspend = nand_suspend;
+	mtd->_resume = nand_resume;
+	mtd->_reboot = nand_shutdown;
+	mtd->_block_isreserved = nand_block_isreserved;
+	mtd->_block_isbad = nand_block_isbad;
+	mtd->_block_markbad = nand_block_markbad;
+	mtd->writebufsize = mtd->writesize;
+
+	/*
+	 * Initialize bitflip_threshold to its default prior to the scan_bbt() call.
+	 * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
+	 * properly set.
+	 */
+	if (!mtd->bitflip_threshold)
+		mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+
+	/* Check, if we should skip the bad block table scan */
+	if (chip->options & NAND_SKIP_BBTSCAN)
+		return 0;
+
+	/* Build bad block table */
+	return chip->scan_bbt(mtd);
+err_free:
+	if (!(chip->options & NAND_OWN_BUFFERS))
+		kfree(chip->buffers);
+	return ret;
+}
+EXPORT_SYMBOL(nand_scan_tail);
+
+/*
+ * is_module_text_address() isn't exported, and it's mostly a pointless
+ * test if this is a module _anyway_ -- they'd have to try _really_ hard
+ * to call us from in-kernel code if the core NAND support is modular.
+ */
+#ifdef MODULE
+#define caller_is_module() (1)
+#else
+#define caller_is_module() \
+	is_module_text_address((unsigned long)__builtin_return_address(0))
+#endif
+
+/**
+ * nand_scan - [NAND Interface] Scan for the NAND device
+ * @mtd: MTD device structure
+ * @maxchips: number of chips to scan for
+ *
+ * This fills out all the uninitialized function pointers with the defaults.
+ * The flash ID is read and the mtd/chip structures are filled with the
+ * appropriate values.
+ */
+int nand_scan(struct mtd_info *mtd, int maxchips)
+{
+	int ret;
+
+	ret = nand_scan_ident(mtd, maxchips, NULL);
+	if (!ret)
+		ret = nand_scan_tail(mtd);
+	return ret;
+}
+EXPORT_SYMBOL(nand_scan);
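+
+/*
+ * Typical controller driver usage (sketch, error handling trimmed): either
+ * call nand_scan(mtd, 1) directly, or split it to tweak ECC settings based
+ * on the detected chip:
+ *
+ *	ret = nand_scan_ident(mtd, 1, NULL);
+ *	if (ret)
+ *		return ret;
+ *	// adjust chip->ecc.mode/size/strength here
+ *	ret = nand_scan_tail(mtd);
+ */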
+
+/**
+ * nand_cleanup - [NAND Interface] Free resources held by the NAND device
+ * @chip: NAND chip object
+ */
+void nand_cleanup(struct nand_chip *chip)
+{
+	if (chip->ecc.mode == NAND_ECC_SOFT &&
+	    chip->ecc.algo == NAND_ECC_BCH)
+		nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
+
+	nand_release_data_interface(chip);
+
+	/* Free bad block table memory */
+	kfree(chip->bbt);
+	if (!(chip->options & NAND_OWN_BUFFERS))
+		kfree(chip->buffers);
+
+	/* Free bad block descriptor memory */
+	if (chip->badblock_pattern && chip->badblock_pattern->options
+			& NAND_BBT_DYNAMICSTRUCT)
+		kfree(chip->badblock_pattern);
+}
+EXPORT_SYMBOL_GPL(nand_cleanup);
+
+/**
+ * nand_release - [NAND Interface] Unregister the MTD device and free resources
+ *		  held by the NAND device
+ * @mtd: MTD device structure
+ */
+void nand_release(struct mtd_info *mtd)
+{
+	mtd_device_unregister(mtd);
+	nand_cleanup(mtd_to_nand(mtd));
+}
+EXPORT_SYMBOL_GPL(nand_release);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/rawnand/nand_bbt.c b/drivers/mtd/nand/rawnand/nand_bbt.c
new file mode 100644
index 000000000000..2915b6739bf8
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nand_bbt.c
@@ -0,0 +1,1452 @@ 
+/*
+ *  Overview:
+ *   Bad block table support for the NAND driver
+ *
+ *  Copyright © 2004 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Description:
+ *
+ * When nand_scan_bbt is called, it tries to find the bad block table
+ * depending on the options in the BBT descriptor(s). If no flash based BBT
+ * (NAND_BBT_USE_FLASH) is specified then the device is scanned for factory
+ * marked good / bad blocks. This information is used to create a memory BBT.
+ * Once a new bad block is discovered, the "factory" information is updated
+ * on the device.
+ * If a flash based BBT is specified then the function first tries to find the
+ * BBT on flash. If a BBT is found then the contents are read and the memory
+ * based BBT is created. If a mirrored BBT is selected then the mirror is
+ * searched too and the versions are compared. If the mirror has a greater
+ * version number, then the mirror BBT is used to build the memory based BBT.
+ * If the tables are not versioned, then we "or" the bad block information.
+ * If one of the BBTs is out of date or does not exist it is (re)created.
+ * If no BBT exists at all then the device is scanned for factory marked
+ * good / bad blocks and the bad block tables are created.
+ *
+ * For manufacturer-created BBTs, like the one found on M-SYS DOC devices,
+ * the BBT is searched and read but never created.
+ *
+ * The auto-generated bad block table is located in the last good blocks
+ * of the device. The table is mirrored, so it can be updated safely.
+ * The table is marked in the OOB area with an ident pattern and a version
+ * number which indicates which of both tables is more up to date. If the NAND
+ * controller needs the complete OOB area for the ECC information then the
+ * option NAND_BBT_NO_OOB should be used (along with NAND_BBT_USE_FLASH, of
+ * course): it moves the ident pattern and the version byte into the data area
+ * and the OOB area will remain untouched.
+ *
+ * The table uses 2 bits per block
+ * 11b:		block is good
+ * 00b:		block is factory marked bad
+ * 01b, 10b:	block is marked bad due to wear
+ *
+ * The memory bad block table uses the following scheme:
+ * 00b:		block is good
+ * 01b:		block is marked bad due to wear
+ * 10b:		block is reserved (to protect the bbt area)
+ * 11b:		block is factory marked bad
+ *
+ * Multichip devices like DOC store the bad block info per floor.
+ *
+ * The following assumptions are made:
+ * - bbts start at a page boundary, if autolocated on a block boundary
+ * - the space necessary for a bbt in FLASH does not exceed a block boundary
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/vmalloc.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#define BBT_BLOCK_GOOD		0x00
+#define BBT_BLOCK_WORN		0x01
+#define BBT_BLOCK_RESERVED	0x02
+#define BBT_BLOCK_FACTORY_BAD	0x03
+
+#define BBT_ENTRY_MASK		0x03
+#define BBT_ENTRY_SHIFT		2
+
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs);
+
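+/*
+ * The in-memory BBT packs four 2-bit entries per byte: block N lives in
+ * chip->bbt[N >> 2], at bit offset (N & 3) * 2. E.g. block 5 is stored in
+ * bits 3:2 of chip->bbt[1].
+ */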
+static inline uint8_t bbt_get_entry(struct nand_chip *chip, int block)
+{
+	uint8_t entry = chip->bbt[block >> BBT_ENTRY_SHIFT];
+	entry >>= (block & BBT_ENTRY_MASK) * 2;
+	return entry & BBT_ENTRY_MASK;
+}
+
+static inline void bbt_mark_entry(struct nand_chip *chip, int block,
+		uint8_t mark)
+{
+	uint8_t msk = (mark & BBT_ENTRY_MASK) << ((block & BBT_ENTRY_MASK) * 2);
+	chip->bbt[block >> BBT_ENTRY_SHIFT] |= msk;
+}
+
+static int check_pattern_no_oob(uint8_t *buf, struct nand_bbt_descr *td)
+{
+	if (memcmp(buf, td->pattern, td->len))
+		return -1;
+	return 0;
+}
+
+/**
+ * check_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @len: the length of buffer to search
+ * @paglen: the pagelength
+ * @td: search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers.
+ */
+static int check_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
+{
+	if (td->options & NAND_BBT_NO_OOB)
+		return check_pattern_no_oob(buf, td);
+
+	/* Compare the pattern */
+	if (memcmp(buf + paglen + td->offs, td->pattern, td->len))
+		return -1;
+
+	return 0;
+}
+
+/**
+ * check_short_pattern - [GENERIC] check if a pattern is in the buffer
+ * @buf: the buffer to search
+ * @td:	search pattern descriptor
+ *
+ * Check for a pattern at the given place. Used to search bad block tables and
+ * good / bad block identifiers. Same as check_pattern, but no optional empty
+ * check.
+ */
+static int check_short_pattern(uint8_t *buf, struct nand_bbt_descr *td)
+{
+	/* Compare the pattern */
+	if (memcmp(buf + td->offs, td->pattern, td->len))
+		return -1;
+	return 0;
+}
+
+/**
+ * add_marker_len - compute the length of the marker in data area
+ * @td: BBT descriptor used for computation
+ *
+ * The length will be 0 if the marker is located in OOB area.
+ */
+static u32 add_marker_len(struct nand_bbt_descr *td)
+{
+	u32 len;
+
+	if (!(td->options & NAND_BBT_NO_OOB))
+		return 0;
+
+	len = td->len;
+	if (td->options & NAND_BBT_VERSION)
+		len++;
+	return len;
+}
+
+/**
+ * read_bbt - [GENERIC] Read the bad block table starting from page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @page: the starting page
+ * @num: the number of bbt descriptors to read
+ * @td: the bbt description table
+ * @offs: block number offset in the table
+ *
+ * Read the bad block table starting from page.
+ */
+static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
+		struct nand_bbt_descr *td, int offs)
+{
+	int res, ret = 0, i, j, act = 0;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	size_t retlen, len, totlen;
+	loff_t from;
+	int bits = td->options & NAND_BBT_NRBITS_MSK;
+	uint8_t msk = (uint8_t)((1 << bits) - 1);
+	u32 marker_len;
+	int reserved_block_code = td->reserved_block_code;
+
+	totlen = (num * bits) >> 3;
+	marker_len = add_marker_len(td);
+	from = ((loff_t)page) << this->page_shift;
+
+	while (totlen) {
+		len = min(totlen, (size_t)(1 << this->bbt_erase_shift));
+		if (marker_len) {
+			/*
+			 * In case the BBT marker is not in the OOB area it
+			 * will be just in the first page.
+			 */
+			len -= marker_len;
+			from += marker_len;
+			marker_len = 0;
+		}
+		res = mtd_read(mtd, from, len, &retlen, buf);
+		if (res < 0) {
+			if (mtd_is_eccerr(res)) {
+				pr_info("nand_bbt: ECC error in BBT at 0x%012llx\n",
+					from & ~mtd->writesize);
+				return res;
+			} else if (mtd_is_bitflip(res)) {
+				pr_info("nand_bbt: corrected error in BBT at 0x%012llx\n",
+					from & ~mtd->writesize);
+				ret = res;
+			} else {
+				pr_info("nand_bbt: error reading BBT\n");
+				return res;
+			}
+		}
+
+		/* Analyse data */
+		for (i = 0; i < len; i++) {
+			uint8_t dat = buf[i];
+			for (j = 0; j < 8; j += bits, act++) {
+				uint8_t tmp = (dat >> j) & msk;
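+				/* All bits set: the block is marked good, skip it */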
+				if (tmp == msk)
+					continue;
+				if (reserved_block_code && (tmp == reserved_block_code)) {
+					pr_info("nand_read_bbt: reserved block at 0x%012llx\n",
+						 (loff_t)(offs + act) <<
+						 this->bbt_erase_shift);
+					bbt_mark_entry(this, offs + act,
+							BBT_BLOCK_RESERVED);
+					mtd->ecc_stats.bbtblocks++;
+					continue;
+				}
+				/*
+				 * Leave it for now, if it's matured we can
+				 * move this message to pr_debug.
+				 */
+				pr_info("nand_read_bbt: bad block at 0x%012llx\n",
+					 (loff_t)(offs + act) <<
+					 this->bbt_erase_shift);
+				/* Factory marked bad or worn out? */
+				if (tmp == 0)
+					bbt_mark_entry(this, offs + act,
+							BBT_BLOCK_FACTORY_BAD);
+				else
+					bbt_mark_entry(this, offs + act,
+							BBT_BLOCK_WORN);
+				mtd->ecc_stats.badblocks++;
+			}
+		}
+		totlen -= len;
+		from += len;
+	}
+	return ret;
+}
+
+/**
+ * read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @chip: read the table for a specific chip, -1 to read all chips; applies only if
+ *        NAND_BBT_PERCHIP option is set
+ *
+ * Read the bad block table for all chips starting at a given page. We assume
+ * that the bbt bits are in consecutive order.
+ */
+static int read_abs_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td, int chip)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int res = 0, i;
+
+	if (td->options & NAND_BBT_PERCHIP) {
+		int offs = 0;
+		for (i = 0; i < this->numchips; i++) {
+			if (chip == -1 || chip == i)
+				res = read_bbt(mtd, buf, td->pages[i],
+					this->chipsize >> this->bbt_erase_shift,
+					td, offs);
+			if (res)
+				return res;
+			offs += this->chipsize >> this->bbt_erase_shift;
+		}
+	} else {
+		res = read_bbt(mtd, buf, td->pages[0],
+				mtd->size >> this->bbt_erase_shift, td, 0);
+		if (res)
+			return res;
+	}
+	return 0;
+}
+
+/* BBT marker is in the first page, no OOB */
+static int scan_read_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+			 struct nand_bbt_descr *td)
+{
+	size_t retlen;
+	size_t len;
+
+	len = td->len;
+	if (td->options & NAND_BBT_VERSION)
+		len++;
+
+	return mtd_read(mtd, offs, len, &retlen, buf);
+}
+
+/**
+ * scan_read_oob - [GENERIC] Scan data+OOB region to buffer
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @offs: offset at which to scan
+ * @len: length of data region to read
+ *
+ * Scan read data from data+OOB. May traverse multiple pages, interleaving
+ * page,OOB,page,OOB,... in buf. Completes transfer and returns the "strongest"
+ * ECC condition (error or bitflip). May quit on the first (non-ECC) error.
+ */
+static int scan_read_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+			 size_t len)
+{
+	struct mtd_oob_ops ops;
+	int res, ret = 0;
+
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+
+	while (len > 0) {
+		ops.datbuf = buf;
+		ops.len = min(len, (size_t)mtd->writesize);
+		ops.oobbuf = buf + ops.len;
+
+		res = mtd_read_oob(mtd, offs, &ops);
+		if (res) {
+			if (!mtd_is_bitflip_or_eccerr(res))
+				return res;
+			else if (mtd_is_eccerr(res) || !ret)
+				ret = res;
+		}
+
+		buf += mtd->oobsize + mtd->writesize;
+		len -= mtd->writesize;
+		offs += mtd->writesize;
+	}
+	return ret;
+}
+
+static int scan_read(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
+			 size_t len, struct nand_bbt_descr *td)
+{
+	if (td->options & NAND_BBT_NO_OOB)
+		return scan_read_data(mtd, buf, offs, td);
+	else
+		return scan_read_oob(mtd, buf, offs, len);
+}
+
+/* Scan write data with oob to flash */
+static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
+			  uint8_t *buf, uint8_t *oob)
+{
+	struct mtd_oob_ops ops;
+
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+	ops.datbuf = buf;
+	ops.oobbuf = oob;
+	ops.len = len;
+
+	return mtd_write_oob(mtd, offs, &ops);
+}
+
+static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+	u32 ver_offs = td->veroffs;
+
+	if (!(td->options & NAND_BBT_NO_OOB))
+		ver_offs += mtd->writesize;
+	return ver_offs;
+}
+
+/**
+ * read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md:	descriptor for the bad block table mirror
+ *
+ * Read the bad block table(s) for all chips starting at a given page. We
+ * assume that the bbt bits are in consecutive order.
+ */
+static void read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
+			  struct nand_bbt_descr *td, struct nand_bbt_descr *md)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	/* Read the primary version, if available */
+	if (td->options & NAND_BBT_VERSION) {
+		scan_read(mtd, buf, (loff_t)td->pages[0] << this->page_shift,
+			      mtd->writesize, td);
+		td->version[0] = buf[bbt_get_ver_offs(mtd, td)];
+		pr_info("Bad block table at page %d, version 0x%02X\n",
+			 td->pages[0], td->version[0]);
+	}
+
+	/* Read the mirror version, if available */
+	if (md && (md->options & NAND_BBT_VERSION)) {
+		scan_read(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
+			      mtd->writesize, md);
+		md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
+		pr_info("Bad block table at page %d, version 0x%02X\n",
+			 md->pages[0], md->version[0]);
+	}
+}
+
+/* Scan a given block partially */
+static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
+			   loff_t offs, uint8_t *buf, int numpages)
+{
+	struct mtd_oob_ops ops;
+	int j, ret;
+
+	ops.ooblen = mtd->oobsize;
+	ops.oobbuf = buf;
+	ops.ooboffs = 0;
+	ops.datbuf = NULL;
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	for (j = 0; j < numpages; j++) {
+		/*
+		 * Read the full oob until read_oob is fixed to handle single
+		 * byte reads for 16 bit buswidth.
+		 */
+		ret = mtd_read_oob(mtd, offs, &ops);
+		/* Ignore ECC errors when checking for BBM */
+		if (ret && !mtd_is_bitflip_or_eccerr(ret))
+			return ret;
+
+		if (check_short_pattern(buf, bd))
+			return 1;
+
+		offs += mtd->writesize;
+	}
+	return 0;
+}
+
+/**
+ * create_bbt - [GENERIC] Create a bad block table by scanning the device
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ * @chip: create the table for a specific chip, -1 to create for all chips; applies only
+ *        if NAND_BBT_PERCHIP option is set
+ *
+ * Create a bad block table by scanning the device for the given good/bad block
+ * identify pattern.
+ */
+static int create_bbt(struct mtd_info *mtd, uint8_t *buf,
+	struct nand_bbt_descr *bd, int chip)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int i, numblocks, numpages;
+	int startblock;
+	loff_t from;
+
+	pr_info("Scanning device for bad blocks\n");
+
+	if (bd->options & NAND_BBT_SCAN2NDPAGE)
+		numpages = 2;
+	else
+		numpages = 1;
+
+	if (chip == -1) {
+		numblocks = mtd->size >> this->bbt_erase_shift;
+		startblock = 0;
+		from = 0;
+	} else {
+		if (chip >= this->numchips) {
+			pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
+			       chip + 1, this->numchips);
+			return -EINVAL;
+		}
+		numblocks = this->chipsize >> this->bbt_erase_shift;
+		startblock = chip * numblocks;
+		numblocks += startblock;
+		from = (loff_t)startblock << this->bbt_erase_shift;
+	}
+
+	if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
+		from += mtd->erasesize - (mtd->writesize * numpages);
+
+	for (i = startblock; i < numblocks; i++) {
+		int ret;
+
+		BUG_ON(bd->options & NAND_BBT_NO_OOB);
+
+		ret = scan_block_fast(mtd, bd, from, buf, numpages);
+		if (ret < 0)
+			return ret;
+
+		if (ret) {
+			bbt_mark_entry(this, i, BBT_BLOCK_FACTORY_BAD);
+			pr_warn("Bad eraseblock %d at 0x%012llx\n",
+				i, (unsigned long long)from);
+			mtd->ecc_stats.badblocks++;
+		}
+
+		from += (1 << this->bbt_erase_shift);
+	}
+	return 0;
+}
+
+/**
+ * search_bbt - [GENERIC] scan the device for a specific bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ *
+ * Read the bad block table by searching for a given ident pattern. Search is
+ * performed either from the beginning up or from the end of the device
+ * downwards. The search always starts at the start of a block. If the option
+ * NAND_BBT_PERCHIP is given, each chip is searched for a bbt, which contains
+ * the bad block information of this chip. This is necessary to provide support
+ * for certain DOC devices.
+ *
+ * The bbt ident pattern resides in the oob area of the first page in a block.
+ */
+static int search_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *td)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int i, chips;
+	int startblock, block, dir;
+	int scanlen = mtd->writesize + mtd->oobsize;
+	int bbtblocks;
+	int blocktopage = this->bbt_erase_shift - this->page_shift;
+
+	/* Search direction top -> down? */
+	if (td->options & NAND_BBT_LASTBLOCK) {
+		startblock = (mtd->size >> this->bbt_erase_shift) - 1;
+		dir = -1;
+	} else {
+		startblock = 0;
+		dir = 1;
+	}
+
+	/* Do we have a bbt per chip? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		chips = this->numchips;
+		bbtblocks = this->chipsize >> this->bbt_erase_shift;
+		startblock &= bbtblocks - 1;
+	} else {
+		chips = 1;
+		bbtblocks = mtd->size >> this->bbt_erase_shift;
+	}
+
+	for (i = 0; i < chips; i++) {
+		/* Reset version information */
+		td->version[i] = 0;
+		td->pages[i] = -1;
+		/* Scan the maximum number of blocks */
+		for (block = 0; block < td->maxblocks; block++) {
+
+			int actblock = startblock + dir * block;
+			loff_t offs = (loff_t)actblock << this->bbt_erase_shift;
+
+			/* Read first page */
+			scan_read(mtd, buf, offs, mtd->writesize, td);
+			if (!check_pattern(buf, scanlen, mtd->writesize, td)) {
+				td->pages[i] = actblock << blocktopage;
+				if (td->options & NAND_BBT_VERSION) {
+					offs = bbt_get_ver_offs(mtd, td);
+					td->version[i] = buf[offs];
+				}
+				break;
+			}
+		}
+		startblock += this->chipsize >> this->bbt_erase_shift;
+	}
+	/* Check, if we found a bbt for each requested chip */
+	for (i = 0; i < chips; i++) {
+		if (td->pages[i] == -1)
+			pr_warn("Bad block table not found for chip %d\n", i);
+		else
+			pr_info("Bad block table found at page %d, version 0x%02X\n",
+				td->pages[i], td->version[i]);
+	}
+	return 0;
+}
+
+/**
+ * search_read_bbts - [GENERIC] scan the device for bad block table(s)
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ *
+ * Search and read the bad block table(s).
+ */
+static void search_read_bbts(struct mtd_info *mtd, uint8_t *buf,
+			     struct nand_bbt_descr *td,
+			     struct nand_bbt_descr *md)
+{
+	/* Search the primary table */
+	search_bbt(mtd, buf, td);
+
+	/* Search the mirror table */
+	if (md)
+		search_bbt(mtd, buf, md);
+}
+
+/**
+ * get_bbt_block - Get the first valid eraseblock suitable to store a BBT
+ * @this: the NAND device
+ * @td: the BBT description
+ * @md: the mirror BBT descriptor
+ * @chip: the CHIP selector
+ *
+ * This function returns a positive block number pointing to a valid eraseblock
+ * suitable to store a BBT (i.e. in the range reserved for the BBT), or -ENOSPC
+ * if all blocks are already used or marked bad. If td->pages[chip] was already
+ * pointing to a valid block we re-use it, otherwise we search for the next
+ * valid one.
+ */
+static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
+			 struct nand_bbt_descr *md, int chip)
+{
+	int startblock, dir, page, numblocks, i;
+
+	/*
+	 * There was already a version of the table, reuse the page. This
+	 * applies for absolute placement too, as we have the page number in
+	 * td->pages.
+	 */
+	if (td->pages[chip] != -1)
+		return td->pages[chip] >>
+				(this->bbt_erase_shift - this->page_shift);
+
+	numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+	if (!(td->options & NAND_BBT_PERCHIP))
+		numblocks *= this->numchips;
+
+	/*
+	 * Automatic placement of the bad block table. Search direction
+	 * top -> down?
+	 */
+	if (td->options & NAND_BBT_LASTBLOCK) {
+		startblock = numblocks * (chip + 1) - 1;
+		dir = -1;
+	} else {
+		startblock = chip * numblocks;
+		dir = 1;
+	}
+
+	for (i = 0; i < td->maxblocks; i++) {
+		int block = startblock + dir * i;
+
+		/* Check, if the block is bad */
+		switch (bbt_get_entry(this, block)) {
+		case BBT_BLOCK_WORN:
+		case BBT_BLOCK_FACTORY_BAD:
+			continue;
+		}
+
+		page = block << (this->bbt_erase_shift - this->page_shift);
+
+		/* Check, if the block is used by the mirror table */
+		if (!md || md->pages[chip] != page)
+			return block;
+	}
+
+	return -ENOSPC;
+}
+
+/**
+ * mark_bbt_block_bad - Mark one of the blocks reserved for the BBT as bad
+ * @this: the NAND device
+ * @td: the BBT description
+ * @chip: the CHIP selector
+ * @block: the BBT block to mark
+ *
+ * Blocks reserved for the BBT can become bad. This function is a helper to mark
+ * such blocks as bad. It takes care of updating the in-memory BBT, marking the
+ * block as bad using a bad block marker and invalidating the associated
+ * td->pages[] entry.
+ */
+static void mark_bbt_block_bad(struct nand_chip *this,
+			       struct nand_bbt_descr *td,
+			       int chip, int block)
+{
+	struct mtd_info *mtd = nand_to_mtd(this);
+	loff_t to;
+	int res;
+
+	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+	to = (loff_t)block << this->bbt_erase_shift;
+	res = this->block_markbad(mtd, to);
+	if (res)
+		pr_warn("nand_bbt: error %d while marking block %d bad\n",
+			res, block);
+
+	td->pages[chip] = -1;
+}
+
+/**
+ * write_bbt - [GENERIC] (Re)write the bad block table
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @td: descriptor for the bad block table
+ * @md: descriptor for the bad block table mirror
+ * @chipsel: selector for a specific chip, -1 for all
+ *
+ * (Re)write the bad block table.
+ */
+static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
+		     struct nand_bbt_descr *td, struct nand_bbt_descr *md,
+		     int chipsel)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct erase_info einfo;
+	int i, res, chip = 0;
+	int bits, page, offs, numblocks, sft, sftmsk;
+	int nrchips, pageoffs, ooboffs;
+	uint8_t msk[4];
+	uint8_t rcode = td->reserved_block_code;
+	size_t retlen, len = 0;
+	loff_t to;
+	struct mtd_oob_ops ops;
+
+	ops.ooblen = mtd->oobsize;
+	ops.ooboffs = 0;
+	ops.datbuf = NULL;
+	ops.mode = MTD_OPS_PLACE_OOB;
+
+	if (!rcode)
+		rcode = 0xff;
+	/* Write bad block table per chip rather than per device? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+		/* Full device write or specific chip? */
+		if (chipsel == -1) {
+			nrchips = this->numchips;
+		} else {
+			nrchips = chipsel + 1;
+			chip = chipsel;
+		}
+	} else {
+		numblocks = (int)(mtd->size >> this->bbt_erase_shift);
+		nrchips = 1;
+	}
+
+	/* Loop through the chips */
+	while (chip < nrchips) {
+		int block;
+
+		block = get_bbt_block(this, td, md, chip);
+		if (block < 0) {
+			pr_err("No space left to write bad block table\n");
+			res = block;
+			goto outerr;
+		}
+
+		/*
+		 * get_bbt_block() returns a block number, shift the value to
+		 * get a page number.
+		 */
+		page = block << (this->bbt_erase_shift - this->page_shift);
+
+		/* Set up shift count and masks for the flash table */
+		bits = td->options & NAND_BBT_NRBITS_MSK;
+		msk[2] = ~rcode;
+		switch (bits) {
+		case 1: sft = 3; sftmsk = 0x07; msk[0] = 0x00; msk[1] = 0x01;
+			msk[3] = 0x01;
+			break;
+		case 2: sft = 2; sftmsk = 0x06; msk[0] = 0x00; msk[1] = 0x01;
+			msk[3] = 0x03;
+			break;
+		case 4: sft = 1; sftmsk = 0x04; msk[0] = 0x00; msk[1] = 0x0C;
+			msk[3] = 0x0f;
+			break;
+		case 8: sft = 0; sftmsk = 0x00; msk[0] = 0x00; msk[1] = 0x0F;
+			msk[3] = 0xff;
+			break;
+		default: return -EINVAL;
+		}
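+
+		/*
+		 * The msk[] table set up above is indexed by the in-memory
+		 * BBT state of a block (BBT_BLOCK_GOOD/WORN/RESERVED/
+		 * FACTORY_BAD) and holds the bits that are cleared for that
+		 * state when building the on-flash table further down.
+		 */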
+
+		to = ((loff_t)page) << this->page_shift;
+
+		/* Must we save the block contents? */
+		if (td->options & NAND_BBT_SAVECONTENT) {
+			/* Make it block aligned */
+			to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
+			len = 1 << this->bbt_erase_shift;
+			res = mtd_read(mtd, to, len, &retlen, buf);
+			if (res < 0) {
+				if (retlen != len) {
+					pr_info("nand_bbt: error reading block for writing the bad block table\n");
+					return res;
+				}
+				pr_warn("nand_bbt: ECC error while reading block for writing bad block table\n");
+			}
+			/* Read oob data */
+			ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
+			ops.oobbuf = &buf[len];
+			res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
+			if (res < 0 || ops.oobretlen != ops.ooblen)
+				goto outerr;
+
+			/* Calc the byte offset in the buffer */
+			pageoffs = page - (int)(to >> this->page_shift);
+			offs = pageoffs << this->page_shift;
+			/* Preset the bbt area with 0xff */
+			memset(&buf[offs], 0xff, (size_t)(numblocks >> sft));
+			ooboffs = len + (pageoffs * mtd->oobsize);
+
+		} else if (td->options & NAND_BBT_NO_OOB) {
+			ooboffs = 0;
+			offs = td->len;
+			/* The version byte */
+			if (td->options & NAND_BBT_VERSION)
+				offs++;
+			/* Calc length */
+			len = (size_t)(numblocks >> sft);
+			len += offs;
+			/* Make it page aligned! */
+			len = ALIGN(len, mtd->writesize);
+			/* Preset the buffer with 0xff */
+			memset(buf, 0xff, len);
+			/* Pattern is located at the beginning of the first page */
+			memcpy(buf, td->pattern, td->len);
+		} else {
+			/* Calc length */
+			len = (size_t)(numblocks >> sft);
+			/* Make it page aligned! */
+			len = ALIGN(len, mtd->writesize);
+			/* Preset the buffer with 0xff */
+			memset(buf, 0xff, len +
+			       (len >> this->page_shift)* mtd->oobsize);
+			offs = 0;
+			ooboffs = len;
+			/* Pattern is located in the oob area of the first page */
+			memcpy(&buf[ooboffs + td->offs], td->pattern, td->len);
+		}
+
+		if (td->options & NAND_BBT_VERSION)
+			buf[ooboffs + td->veroffs] = td->version[chip];
+
+		/* Walk through the memory table */
+		for (i = 0; i < numblocks; i++) {
+			uint8_t dat;
+			int sftcnt = (i << (3 - sft)) & sftmsk;
+			dat = bbt_get_entry(this, chip * numblocks + i);
+			/* Do not store the reserved bbt blocks! */
+			buf[offs + (i >> sft)] &= ~(msk[dat] << sftcnt);
+		}
+
+		memset(&einfo, 0, sizeof(einfo));
+		einfo.mtd = mtd;
+		einfo.addr = to;
+		einfo.len = 1 << this->bbt_erase_shift;
+		res = nand_erase_nand(mtd, &einfo, 1);
+		if (res < 0) {
+			pr_warn("nand_bbt: error while erasing BBT block %d\n",
+				res);
+			mark_bbt_block_bad(this, td, chip, block);
+			continue;
+		}
+
+		res = scan_write_bbt(mtd, to, len, buf,
+				td->options & NAND_BBT_NO_OOB ? NULL :
+				&buf[len]);
+		if (res < 0) {
+			pr_warn("nand_bbt: error while writing BBT block %d\n",
+				res);
+			mark_bbt_block_bad(this, td, chip, block);
+			continue;
+		}
+
+		pr_info("Bad block table written to 0x%012llx, version 0x%02X\n",
+			 (unsigned long long)to, td->version[chip]);
+
+		/* Mark it as used */
+		td->pages[chip++] = page;
+	}
+	return 0;
+
+ outerr:
+	pr_warn("nand_bbt: error while writing bad block table %d\n", res);
+	return res;
+}
+
+/**
+ * nand_memory_bbt - [GENERIC] create a memory based bad block table
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function creates a memory based bbt by scanning the device for
+ * manufacturer / software marked good / bad blocks.
+ */
+static inline int nand_memory_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+
+	return create_bbt(mtd, this->buffers->databuf, bd, -1);
+}
+
+/**
+ * check_create - [GENERIC] create and write bbt(s) if necessary
+ * @mtd: MTD device structure
+ * @buf: temporary buffer
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks the results of the previous call to read_bbt and creates
+ * / updates the bbt(s) if necessary. Creation is necessary if no bbt was found
+ * for the chip/device. Update is necessary if one of the tables is missing or
+ * the version number of one table is lower than that of the other.
+ */
+static int check_create(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd)
+{
+	int i, chips, writeops, create, chipsel, res, res2;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct nand_bbt_descr *td = this->bbt_td;
+	struct nand_bbt_descr *md = this->bbt_md;
+	struct nand_bbt_descr *rd, *rd2;
+
+	/* Do we have a bbt per chip? */
+	if (td->options & NAND_BBT_PERCHIP)
+		chips = this->numchips;
+	else
+		chips = 1;
+
+	for (i = 0; i < chips; i++) {
+		writeops = 0;
+		create = 0;
+		rd = NULL;
+		rd2 = NULL;
+		res = res2 = 0;
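+		/*
+		 * writeops is a bitmask: 0x01 means the main table must be
+		 * (re)written, 0x02 the mirror table, 0x03 both.
+		 */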
+		/* Per chip or per device? */
+		chipsel = (td->options & NAND_BBT_PERCHIP) ? i : -1;
+		/* Mirrored table available? */
+		if (md) {
+			if (td->pages[i] == -1 && md->pages[i] == -1) {
+				create = 1;
+				writeops = 0x03;
+			} else if (td->pages[i] == -1) {
+				rd = md;
+				writeops = 0x01;
+			} else if (md->pages[i] == -1) {
+				rd = td;
+				writeops = 0x02;
+			} else if (td->version[i] == md->version[i]) {
+				rd = td;
+				if (!(td->options & NAND_BBT_VERSION))
+					rd2 = md;
+			} else if (((int8_t)(td->version[i] - md->version[i])) > 0) {
+				rd = td;
+				writeops = 0x02;
+			} else {
+				rd = md;
+				writeops = 0x01;
+			}
+		} else {
+			if (td->pages[i] == -1) {
+				create = 1;
+				writeops = 0x01;
+			} else {
+				rd = td;
+			}
+		}
+
+		if (create) {
+			/* Create the bad block table by scanning the device? */
+			if (!(td->options & NAND_BBT_CREATE))
+				continue;
+
+			/* Create the table in memory by scanning the chip(s) */
+			if (!(this->bbt_options & NAND_BBT_CREATE_EMPTY))
+				create_bbt(mtd, buf, bd, chipsel);
+
+			td->version[i] = 1;
+			if (md)
+				md->version[i] = 1;
+		}
+
+		/* Read back first? */
+		if (rd) {
+			res = read_abs_bbt(mtd, buf, rd, chipsel);
+			if (mtd_is_eccerr(res)) {
+				/* Mark table as invalid */
+				rd->pages[i] = -1;
+				rd->version[i] = 0;
+				i--;
+				continue;
+			}
+		}
+		/* If they weren't versioned, read both */
+		if (rd2) {
+			res2 = read_abs_bbt(mtd, buf, rd2, chipsel);
+			if (mtd_is_eccerr(res2)) {
+				/* Mark table as invalid */
+				rd2->pages[i] = -1;
+				rd2->version[i] = 0;
+				i--;
+				continue;
+			}
+		}
+
+		/* Scrub the flash table(s)? */
+		if (mtd_is_bitflip(res) || mtd_is_bitflip(res2))
+			writeops = 0x03;
+
+		/* Update version numbers before writing */
+		if (md) {
+			td->version[i] = max(td->version[i], md->version[i]);
+			md->version[i] = td->version[i];
+		}
+
+		/* Write the bad block table to the device? */
+		if ((writeops & 0x01) && (td->options & NAND_BBT_WRITE)) {
+			res = write_bbt(mtd, buf, td, md, chipsel);
+			if (res < 0)
+				return res;
+		}
+
+		/* Write the mirror bad block table to the device? */
+		if ((writeops & 0x02) && md && (md->options & NAND_BBT_WRITE)) {
+			res = write_bbt(mtd, buf, md, td, chipsel);
+			if (res < 0)
+				return res;
+		}
+	}
+	return 0;
+}
+
+/**
+ * mark_bbt_regions - [GENERIC] mark the bad block table regions
+ * @mtd: MTD device structure
+ * @td: bad block table descriptor
+ *
+ * The bad block table regions are marked as "bad" to prevent accidental
+ * erasures / writes. The regions are identified by the mark 0x02.
+ */
+static void mark_bbt_region(struct mtd_info *mtd, struct nand_bbt_descr *td)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int i, j, chips, block, nrblocks, update;
+	uint8_t oldval;
+
+	/* Do we have a bbt per chip? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		chips = this->numchips;
+		nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+	} else {
+		chips = 1;
+		nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
+	}
+
+	for (i = 0; i < chips; i++) {
+		if ((td->options & NAND_BBT_ABSPAGE) ||
+		    !(td->options & NAND_BBT_WRITE)) {
+			if (td->pages[i] == -1)
+				continue;
+			block = td->pages[i] >> (this->bbt_erase_shift - this->page_shift);
+			oldval = bbt_get_entry(this, block);
+			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+			if ((oldval != BBT_BLOCK_RESERVED) &&
+					td->reserved_block_code)
+				nand_update_bbt(mtd, (loff_t)block <<
+						this->bbt_erase_shift);
+			continue;
+		}
+		update = 0;
+		if (td->options & NAND_BBT_LASTBLOCK)
+			block = ((i + 1) * nrblocks) - td->maxblocks;
+		else
+			block = i * nrblocks;
+		for (j = 0; j < td->maxblocks; j++) {
+			oldval = bbt_get_entry(this, block);
+			bbt_mark_entry(this, block, BBT_BLOCK_RESERVED);
+			if (oldval != BBT_BLOCK_RESERVED)
+				update = 1;
+			block++;
+		}
+		/*
+		 * If we want reserved blocks to be recorded to flash, and some
+		 * new ones have been marked, then we need to update the stored
+		 * bbts.  This should only happen once.
+		 */
+		if (update && td->reserved_block_code)
+			nand_update_bbt(mtd, (loff_t)(block - 1) <<
+					this->bbt_erase_shift);
+	}
+}
+
+/**
+ * verify_bbt_descr - verify the bad block description
+ * @mtd: MTD device structure
+ * @bd: the table to verify
+ *
+ * This function performs a few sanity checks on the bad block description
+ * table.
+ */
+static void verify_bbt_descr(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	u32 pattern_len;
+	u32 bits;
+	u32 table_size;
+
+	if (!bd)
+		return;
+
+	pattern_len = bd->len;
+	bits = bd->options & NAND_BBT_NRBITS_MSK;
+
+	BUG_ON((this->bbt_options & NAND_BBT_NO_OOB) &&
+			!(this->bbt_options & NAND_BBT_USE_FLASH));
+	BUG_ON(!bits);
+
+	if (bd->options & NAND_BBT_VERSION)
+		pattern_len++;
+
+	if (bd->options & NAND_BBT_NO_OOB) {
+		BUG_ON(!(this->bbt_options & NAND_BBT_USE_FLASH));
+		BUG_ON(!(this->bbt_options & NAND_BBT_NO_OOB));
+		BUG_ON(bd->offs);
+		if (bd->options & NAND_BBT_VERSION)
+			BUG_ON(bd->veroffs != bd->len);
+		BUG_ON(bd->options & NAND_BBT_SAVECONTENT);
+	}
+
+	if (bd->options & NAND_BBT_PERCHIP)
+		table_size = this->chipsize >> this->bbt_erase_shift;
+	else
+		table_size = mtd->size >> this->bbt_erase_shift;
+	table_size >>= 3;
+	table_size *= bits;
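+	/* table_size is now the on-flash table size in bytes: (blocks / 8) * bits */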
+	if (bd->options & NAND_BBT_NO_OOB)
+		table_size += pattern_len;
+	BUG_ON(table_size > (1 << this->bbt_erase_shift));
+}
+
+/**
+ * nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
+ * @mtd: MTD device structure
+ * @bd: descriptor for the good/bad block search pattern
+ *
+ * The function checks whether a bad block table (or tables) is already
+ * available. If not, it scans the device for manufacturer-marked good / bad
+ * blocks and writes the bad block table(s) to the selected place.
+ *
+ * The bad block table memory is allocated here. It must be freed by calling
+ * the nand_free_bbt function.
+ */
+static int nand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int len, res;
+	uint8_t *buf;
+	struct nand_bbt_descr *td = this->bbt_td;
+	struct nand_bbt_descr *md = this->bbt_md;
+
+	len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
+	/*
+	 * Allocate memory (2 bits per block) and clear the in-memory bad block
+	 * table.
+	 */
+	this->bbt = kzalloc(len, GFP_KERNEL);
+	if (!this->bbt)
+		return -ENOMEM;
+
+	/*
+	 * If no primary table descriptor is given, scan the device to build a
+	 * memory based bad block table.
+	 */
+	if (!td) {
+		if ((res = nand_memory_bbt(mtd, bd))) {
+			pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
+			goto err;
+		}
+		return 0;
+	}
+	verify_bbt_descr(mtd, td);
+	verify_bbt_descr(mtd, md);
+
+	/* Allocate a temporary buffer for one eraseblock incl. oob */
+	len = (1 << this->bbt_erase_shift);
+	len += (len >> this->page_shift) * mtd->oobsize;
+	buf = vmalloc(len);
+	if (!buf) {
+		res = -ENOMEM;
+		goto err;
+	}
+
+	/* Is the bbt at a given page? */
+	if (td->options & NAND_BBT_ABSPAGE) {
+		read_abs_bbts(mtd, buf, td, md);
+	} else {
+		/* Search the bad block table using a pattern in oob */
+		search_read_bbts(mtd, buf, td, md);
+	}
+
+	res = check_create(mtd, buf, bd);
+	if (res)
+		goto err;
+
+	/* Prevent the bbt regions from erasing / writing */
+	mark_bbt_region(mtd, td);
+	if (md)
+		mark_bbt_region(mtd, md);
+
+	vfree(buf);
+	return 0;
+
+err:
+	kfree(this->bbt);
+	this->bbt = NULL;
+	return res;
+}
+
+/**
+ * nand_update_bbt - update bad block table(s)
+ * @mtd: MTD device structure
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+static int nand_update_bbt(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int len, res = 0;
+	int chip, chipsel;
+	uint8_t *buf;
+	struct nand_bbt_descr *td = this->bbt_td;
+	struct nand_bbt_descr *md = this->bbt_md;
+
+	if (!this->bbt || !td)
+		return -EINVAL;
+
+	/* Allocate a temporary buffer for one eraseblock incl. oob */
+	len = (1 << this->bbt_erase_shift);
+	len += (len >> this->page_shift) * mtd->oobsize;
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* Do we have a bbt per chip? */
+	if (td->options & NAND_BBT_PERCHIP) {
+		chip = (int)(offs >> this->chip_shift);
+		chipsel = chip;
+	} else {
+		chip = 0;
+		chipsel = -1;
+	}
+
+	td->version[chip]++;
+	if (md)
+		md->version[chip]++;
+
+	/* Write the bad block table to the device? */
+	if (td->options & NAND_BBT_WRITE) {
+		res = write_bbt(mtd, buf, td, md, chipsel);
+		if (res < 0)
+			goto out;
+	}
+	/* Write the mirror bad block table to the device? */
+	if (md && (md->options & NAND_BBT_WRITE)) {
+		res = write_bbt(mtd, buf, md, td, chipsel);
+	}
+
+ out:
+	kfree(buf);
+	return res;
+}
+
+/*
+ * Define some generic bad / good block scan pattern which are used
+ * while scanning a device for factory marked good / bad blocks.
+ */
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+/* Generic flash bbt descriptors */
+static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
+static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	8,
+	.len = 4,
+	.veroffs = 12,
+	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
+	.offs =	8,
+	.len = 4,
+	.veroffs = 12,
+	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+	.pattern = mirror_pattern
+};
+
+static struct nand_bbt_descr bbt_main_no_oob_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+		| NAND_BBT_NO_OOB,
+	.len = 4,
+	.veroffs = 4,
+	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_no_oob_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP
+		| NAND_BBT_NO_OOB,
+	.len = 4,
+	.veroffs = 4,
+	.maxblocks = NAND_BBT_SCAN_MAXBLOCKS,
+	.pattern = mirror_pattern
+};
+
+#define BADBLOCK_SCAN_MASK (~NAND_BBT_NO_OOB)
+/**
+ * nand_create_badblock_pattern - [INTERN] Creates a BBT descriptor structure
+ * @this: NAND chip to create descriptor for
+ *
+ * This function allocates and initializes a nand_bbt_descr for BBM detection
+ * based on the properties of @this. The new descriptor is stored in
+ * this->badblock_pattern. Thus, this->badblock_pattern should be NULL when
+ * passed to this function.
+ */
+static int nand_create_badblock_pattern(struct nand_chip *this)
+{
+	struct nand_bbt_descr *bd;
+	if (this->badblock_pattern) {
+		pr_warn("Bad block pattern already allocated; not replacing\n");
+		return -EINVAL;
+	}
+	bd = kzalloc(sizeof(*bd), GFP_KERNEL);
+	if (!bd)
+		return -ENOMEM;
+	bd->options = this->bbt_options & BADBLOCK_SCAN_MASK;
+	bd->offs = this->badblockpos;
+	bd->len = (this->options & NAND_BUSWIDTH_16) ? 2 : 1;
+	bd->pattern = scan_ff_pattern;
+	bd->options |= NAND_BBT_DYNAMICSTRUCT;
+	this->badblock_pattern = bd;
+	return 0;
+}
+
+/**
+ * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
+ * @mtd: MTD device structure
+ *
+ * This function selects the default bad block table support for the device and
+ * calls the nand_scan_bbt function.
+ */
+int nand_default_bbt(struct mtd_info *mtd)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int ret;
+
+	/* Is a flash based bad block table requested? */
+	if (this->bbt_options & NAND_BBT_USE_FLASH) {
+		/* Use the default pattern descriptors */
+		if (!this->bbt_td) {
+			if (this->bbt_options & NAND_BBT_NO_OOB) {
+				this->bbt_td = &bbt_main_no_oob_descr;
+				this->bbt_md = &bbt_mirror_no_oob_descr;
+			} else {
+				this->bbt_td = &bbt_main_descr;
+				this->bbt_md = &bbt_mirror_descr;
+			}
+		}
+	} else {
+		this->bbt_td = NULL;
+		this->bbt_md = NULL;
+	}
+
+	if (!this->badblock_pattern) {
+		ret = nand_create_badblock_pattern(this);
+		if (ret)
+			return ret;
+	}
+
+	return nand_scan_bbt(mtd, this->badblock_pattern);
+}
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int block;
+
+	block = (int)(offs >> this->bbt_erase_shift);
+	return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
+
+/**
+ * nand_isbad_bbt - [NAND Interface] Check if a block is bad
+ * @mtd: MTD device structure
+ * @offs: offset in the device
+ * @allowbbt: allow access to bad block table region
+ */
+int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int block, res;
+
+	block = (int)(offs >> this->bbt_erase_shift);
+	res = bbt_get_entry(this, block);
+
+	pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
+		 (unsigned int)offs, block, res);
+
+	switch (res) {
+	case BBT_BLOCK_GOOD:
+		return 0;
+	case BBT_BLOCK_WORN:
+		return 1;
+	case BBT_BLOCK_RESERVED:
+		return allowbbt ? 0 : 1;
+	}
+	return 1;
+}
+
+/**
+ * nand_markbad_bbt - [NAND Interface] Mark a block bad in the BBT
+ * @mtd: MTD device structure
+ * @offs: offset of the bad block
+ */
+int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	int block, ret = 0;
+
+	block = (int)(offs >> this->bbt_erase_shift);
+
+	/* Mark bad block in memory */
+	bbt_mark_entry(this, block, BBT_BLOCK_WORN);
+
+	/* Update flash-based bad block table */
+	if (this->bbt_options & NAND_BBT_USE_FLASH)
+		ret = nand_update_bbt(mtd, offs);
+
+	return ret;
+}
diff --git a/drivers/mtd/nand/rawnand/nand_bch.c b/drivers/mtd/nand/rawnand/nand_bch.c
new file mode 100644
index 000000000000..505441c9373b
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nand_bch.c
@@ -0,0 +1,234 @@ 
+/*
+ * This file provides ECC correction for more than 1 bit per block of data,
+ * using binary BCH codes. It relies on the generic BCH library lib/bch.c.
+ *
+ * Copyright © 2011 Ivan Djelic <ivan.djelic@parrot.com>
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/bch.h>
+
+/**
+ * struct nand_bch_control - private NAND BCH control structure
+ * @bch:       BCH control structure
+ * @errloc:    error location array
+ * @eccmask:   XOR ecc mask, allows erased pages to be decoded as valid
+ */
+struct nand_bch_control {
+	struct bch_control   *bch;
+	unsigned int         *errloc;
+	unsigned char        *eccmask;
+};
+
+/**
+ * nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
+ * @mtd:	MTD block structure
+ * @buf:	input buffer with raw data
+ * @code:	output buffer with ECC
+ */
+int nand_bch_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+			   unsigned char *code)
+{
+	const struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_bch_control *nbc = chip->ecc.priv;
+	unsigned int i;
+
+	memset(code, 0, chip->ecc.bytes);
+	encode_bch(nbc->bch, buf, chip->ecc.size, code);
+
+	/* apply mask so that an erased page is a valid codeword */
+	for (i = 0; i < chip->ecc.bytes; i++)
+		code[i] ^= nbc->eccmask[i];
+
+	return 0;
+}
+EXPORT_SYMBOL(nand_bch_calculate_ecc);
+
+/**
+ * nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd:	MTD block structure
+ * @buf:	raw data read from the chip
+ * @read_ecc:	ECC from the chip
+ * @calc_ecc:	the ECC calculated from raw data
+ *
+ * Detect and correct bit errors for a data byte block
+ */
+int nand_bch_correct_data(struct mtd_info *mtd, unsigned char *buf,
+			  unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+	const struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_bch_control *nbc = chip->ecc.priv;
+	unsigned int *errloc = nbc->errloc;
+	int i, count;
+
+	count = decode_bch(nbc->bch, NULL, chip->ecc.size, read_ecc, calc_ecc,
+			   NULL, errloc);
+	if (count > 0) {
+		for (i = 0; i < count; i++) {
+			if (errloc[i] < (chip->ecc.size*8))
+				/* error is located in data, correct it */
+				buf[errloc[i] >> 3] ^= (1 << (errloc[i] & 7));
+			/* else error in ecc, no action needed */
+
+			pr_debug("%s: corrected bitflip %u\n", __func__,
+					errloc[i]);
+		}
+	} else if (count < 0) {
+		printk(KERN_ERR "ecc unrecoverable error\n");
+		count = -EBADMSG;
+	}
+	return count;
+}
+EXPORT_SYMBOL(nand_bch_correct_data);
+
+/**
+ * nand_bch_init - [NAND Interface] Initialize NAND BCH error correction
+ * @mtd:	MTD block structure
+ *
+ * Returns:
+ *  a pointer to a new NAND BCH control structure, or NULL upon failure
+ *
+ * Initialize NAND BCH error correction. Parameters @eccsize and @eccbytes
+ * are used to compute BCH parameters m (Galois field order) and t (error
+ * correction capability). @eccbytes should be equal to the number of bytes
+ * required to store m*t bits, where m is such that 2^m-1 > @eccsize*8.
+ *
+ * Example: to configure 4 bit correction per 512 bytes, you should pass
+ * @eccsize = 512  (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
+ * @eccbytes = 7   (7 bytes are required to store m*t = 13*4 = 52 bits)
+ */
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	unsigned int m, t, eccsteps, i;
+	struct nand_bch_control *nbc = NULL;
+	unsigned char *erased_page;
+	unsigned int eccsize = nand->ecc.size;
+	unsigned int eccbytes = nand->ecc.bytes;
+	unsigned int eccstrength = nand->ecc.strength;
+
+	if (!eccbytes && eccstrength) {
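+		/*
+		 * Only the strength t was supplied: m = fls(8 * eccsize) is
+		 * the Galois field order, so m * t bits of ECC are needed,
+		 * rounded up to whole bytes.
+		 */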
+		eccbytes = DIV_ROUND_UP(eccstrength * fls(8 * eccsize), 8);
+		nand->ecc.bytes = eccbytes;
+	}
+
+	if (!eccsize || !eccbytes) {
+		printk(KERN_WARNING "ecc parameters not supplied\n");
+		goto fail;
+	}
+
+	m = fls(1+8*eccsize);
+	t = (eccbytes*8)/m;
+
+	nbc = kzalloc(sizeof(*nbc), GFP_KERNEL);
+	if (!nbc)
+		goto fail;
+
+	nbc->bch = init_bch(m, t, 0);
+	if (!nbc->bch)
+		goto fail;
+
+	/* verify that eccbytes has the expected value */
+	if (nbc->bch->ecc_bytes != eccbytes) {
+		printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
+		       eccbytes, nbc->bch->ecc_bytes);
+		goto fail;
+	}
+
+	eccsteps = mtd->writesize/eccsize;
+
+	/* Check that we have an oob layout description. */
+	if (!mtd->ooblayout) {
+		pr_warn("missing oob scheme");
+		goto fail;
+	}
+
+	/* sanity checks */
+	if (8*(eccsize+eccbytes) >= (1 << m)) {
+		printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+		goto fail;
+	}
+
+	/*
+	 * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
+	 * which is called by mtd_ooblayout_count_eccbytes().
+	 * Make sure they are properly initialized before calling
+	 * mtd_ooblayout_count_eccbytes().
+	 * FIXME: we should probably rework the sequencing in nand_scan_tail()
+	 * to avoid setting those fields twice.
+	 */
+	nand->ecc.steps = eccsteps;
+	nand->ecc.total = eccsteps * eccbytes;
+	if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
+		printk(KERN_WARNING "invalid ecc layout\n");
+		goto fail;
+	}
+
+	nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
+	nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+	if (!nbc->eccmask || !nbc->errloc)
+		goto fail;
+	/*
+	 * compute and store the inverted ecc of an erased ecc block
+	 */
+	erased_page = kmalloc(eccsize, GFP_KERNEL);
+	if (!erased_page)
+		goto fail;
+
+	memset(erased_page, 0xff, eccsize);
+	memset(nbc->eccmask, 0, eccbytes);
+	encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
+	kfree(erased_page);
+
+	for (i = 0; i < eccbytes; i++)
+		nbc->eccmask[i] ^= 0xff;
+
+	if (!eccstrength)
+		nand->ecc.strength = (eccbytes * 8) / fls(8 * eccsize);
+
+	return nbc;
+fail:
+	nand_bch_free(nbc);
+	return NULL;
+}
+EXPORT_SYMBOL(nand_bch_init);
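+
+/*
+ * Usage sketch (hypothetical driver code, not part of this interface): the
+ * caller is expected to fill in the ECC geometry before requesting a BCH
+ * context, roughly along these lines:
+ *
+ *	chip->ecc.size = 512;
+ *	chip->ecc.strength = 4;		(nand_bch_init() then derives ecc.bytes = 7)
+ *	chip->ecc.priv = nand_bch_init(mtd);
+ *	if (!chip->ecc.priv)
+ *		... handle the failure ...
+ */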
+
+/**
+ * nand_bch_free - [NAND Interface] Release NAND BCH ECC resources
+ * @nbc:	NAND BCH control structure
+ */
+void nand_bch_free(struct nand_bch_control *nbc)
+{
+	if (nbc) {
+		free_bch(nbc->bch);
+		kfree(nbc->errloc);
+		kfree(nbc->eccmask);
+		kfree(nbc);
+	}
+}
+EXPORT_SYMBOL(nand_bch_free);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ivan Djelic <ivan.djelic@parrot.com>");
+MODULE_DESCRIPTION("NAND software BCH ECC support");
diff --git a/drivers/mtd/nand/rawnand/nand_ecc.c b/drivers/mtd/nand/rawnand/nand_ecc.c
new file mode 100644
index 000000000000..7613a0388044
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nand_ecc.c
@@ -0,0 +1,533 @@ 
+/*
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
+ *
+ * drivers/mtd/nand/nand_ecc.c
+ *
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ *                  Author: Frans Meulenbroeks
+ *
+ * Completely replaces the previous ECC implementation which was written by:
+ *   Steven J. Hill (sjhill@realitydiluted.com)
+ *   Thomas Gleixner (tglx@linutronix.de)
+ *
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/mtd/nand_ecc.txt
+ *
+ * This file is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 or (at your option) any
+ * later version.
+ *
+ * This file is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this file; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * The STANDALONE macro is useful when running the code outside the kernel
+ * e.g. when running the code in a testbed or a benchmark program.
+ * When STANDALONE is used, the module related macros are commented out
+ * as well as the linux include files.
+ * Instead a private definition of mtd_info is given to satisfy the compiler
+ * (the code does not use mtd_info, so the code does not care)
+ */
+#ifndef STANDALONE
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <asm/byteorder.h>
+#else
+#include <stdint.h>
+struct mtd_info;
+#define EXPORT_SYMBOL(x)  /* x */
+
+#define MODULE_LICENSE(x)	/* x */
+#define MODULE_AUTHOR(x)	/* x */
+#define MODULE_DESCRIPTION(x)	/* x */
+
+#define pr_err printf
+#endif
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
+ */
+static const char invparity[256] = {
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+	1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
+
+/*
+ * bitsperbyte contains the number of bits per byte.
+ * This is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance).
+ */
+static const char bitsperbyte[256] = {
+	0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+	4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
+
+/*
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ECC data that identify the faulty location.
+ * This is only used for repairing parity;
+ * see the comments in nand_correct_data for more details.
+ */
+static const char addressbits[256] = {
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+	0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+	0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+	0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+	0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+	0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
+};
+
+/**
+ * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ *			 block
+ * @buf:	input buffer with raw data
+ * @eccsize:	data bytes per ECC step (256 or 512)
+ * @code:	output buffer with ECC
+ */
+void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
+		       unsigned char *code)
+{
+	int i;
+	const uint32_t *bp = (uint32_t *)buf;
+	/* 256 or 512 bytes/ecc  */
+	const uint32_t eccsize_mult = eccsize >> 8;
+	uint32_t cur;		/* current value in buffer */
+	/* rp0..rp15..rp17 are the various accumulated parities (per byte) */
+	uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+	uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+	uint32_t uninitialized_var(rp17);	/* to make compiler happy */
+	uint32_t par;		/* the cumulative parity for all data */
+	uint32_t tmppar;	/* the cumulative parity for this iteration;
+				   for rp12, rp14 and rp16 at the end of the
+				   loop */
+
+	par = 0;
+	rp4 = 0;
+	rp6 = 0;
+	rp8 = 0;
+	rp10 = 0;
+	rp12 = 0;
+	rp14 = 0;
+	rp16 = 0;
+
+	/*
+	 * The loop is unrolled a number of times;
+	 * This avoids if statements to decide on which rp value to update
+	 * Also we process the data by longwords.
+	 * Note: passing unaligned data might give a performance penalty.
+	 * It is assumed that the buffers are aligned.
+	 * tmppar is the cumulative sum of this iteration.
+	 * needed for calculating rp12, rp14, rp16 and par
+	 * also used as a performance improvement for rp6, rp8 and rp10
+	 */
+	for (i = 0; i < eccsize_mult << 2; i++) {
+		cur = *bp++;
+		tmppar = cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= tmppar;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp8 ^= tmppar;
+
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp10 ^= tmppar;
+
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp6 ^= cur;
+		rp8 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= cur;
+		rp8 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp8 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp8 ^= cur;
+
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp6 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+		rp4 ^= cur;
+		cur = *bp++;
+		tmppar ^= cur;
+
+		par ^= tmppar;
+		if ((i & 0x1) == 0)
+			rp12 ^= tmppar;
+		if ((i & 0x2) == 0)
+			rp14 ^= tmppar;
+		if (eccsize_mult == 2 && (i & 0x4) == 0)
+			rp16 ^= tmppar;
+	}
+
+	/*
+	 * Handle the fact that we use longword operations: we'll bring
+	 * rp4..rp14..rp16 back to single byte entities by shifting and
+	 * xoring; first fold the upper and lower 16 bits, then the upper
+	 * and lower 8 bits.
+	 */
+	rp4 ^= (rp4 >> 16);
+	rp4 ^= (rp4 >> 8);
+	rp4 &= 0xff;
+	rp6 ^= (rp6 >> 16);
+	rp6 ^= (rp6 >> 8);
+	rp6 &= 0xff;
+	rp8 ^= (rp8 >> 16);
+	rp8 ^= (rp8 >> 8);
+	rp8 &= 0xff;
+	rp10 ^= (rp10 >> 16);
+	rp10 ^= (rp10 >> 8);
+	rp10 &= 0xff;
+	rp12 ^= (rp12 >> 16);
+	rp12 ^= (rp12 >> 8);
+	rp12 &= 0xff;
+	rp14 ^= (rp14 >> 16);
+	rp14 ^= (rp14 >> 8);
+	rp14 &= 0xff;
+	if (eccsize_mult == 2) {
+		rp16 ^= (rp16 >> 16);
+		rp16 ^= (rp16 >> 8);
+		rp16 &= 0xff;
+	}
+
+	/*
+	 * we also need to calculate the row parity for rp0..rp3
+	 * This is present in par, because par is now
+	 * rp3 rp3 rp2 rp2 in little endian and
+	 * rp2 rp2 rp3 rp3 in big endian
+	 * as well as
+	 * rp1 rp0 rp1 rp0 in little endian and
+	 * rp0 rp1 rp0 rp1 in big endian
+	 * First calculate rp2 and rp3
+	 */
+#ifdef __BIG_ENDIAN
+	rp2 = (par >> 16);
+	rp2 ^= (rp2 >> 8);
+	rp2 &= 0xff;
+	rp3 = par & 0xffff;
+	rp3 ^= (rp3 >> 8);
+	rp3 &= 0xff;
+#else
+	rp3 = (par >> 16);
+	rp3 ^= (rp3 >> 8);
+	rp3 &= 0xff;
+	rp2 = par & 0xffff;
+	rp2 ^= (rp2 >> 8);
+	rp2 &= 0xff;
+#endif
+
+	/* reduce par to 16 bits then calculate rp1 and rp0 */
+	par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+	rp0 = (par >> 8) & 0xff;
+	rp1 = (par & 0xff);
+#else
+	rp1 = (par >> 8) & 0xff;
+	rp0 = (par & 0xff);
+#endif
+
+	/* finally reduce par to 8 bits */
+	par ^= (par >> 8);
+	par &= 0xff;
+
+	/*
+	 * and calculate rp5..rp15..rp17
+	 * note that par = rp4 ^ rp5 and due to the commutative property
+	 * of the ^ operator we can say:
+	 * rp5 = (par ^ rp4);
+	 * The & 0xff seems superfluous, but benchmarking showed that
+	 * leaving it out gives slightly worse results. No idea why; probably
+	 * it has to do with the way the Pentium pipeline is organized.
+	 */
+	rp5 = (par ^ rp4) & 0xff;
+	rp7 = (par ^ rp6) & 0xff;
+	rp9 = (par ^ rp8) & 0xff;
+	rp11 = (par ^ rp10) & 0xff;
+	rp13 = (par ^ rp12) & 0xff;
+	rp15 = (par ^ rp14) & 0xff;
+	if (eccsize_mult == 2)
+		rp17 = (par ^ rp16) & 0xff;
+
+	/*
+	 * Finally calculate the ECC bits.
+	 * Again here it might seem that there are performance optimisations
+	 * possible, but benchmarks showed that on the system this was developed
+	 * on, the code below is the fastest.
+	 */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+	code[0] =
+	    (invparity[rp7] << 7) |
+	    (invparity[rp6] << 6) |
+	    (invparity[rp5] << 5) |
+	    (invparity[rp4] << 4) |
+	    (invparity[rp3] << 3) |
+	    (invparity[rp2] << 2) |
+	    (invparity[rp1] << 1) |
+	    (invparity[rp0]);
+	code[1] =
+	    (invparity[rp15] << 7) |
+	    (invparity[rp14] << 6) |
+	    (invparity[rp13] << 5) |
+	    (invparity[rp12] << 4) |
+	    (invparity[rp11] << 3) |
+	    (invparity[rp10] << 2) |
+	    (invparity[rp9] << 1)  |
+	    (invparity[rp8]);
+#else
+	code[1] =
+	    (invparity[rp7] << 7) |
+	    (invparity[rp6] << 6) |
+	    (invparity[rp5] << 5) |
+	    (invparity[rp4] << 4) |
+	    (invparity[rp3] << 3) |
+	    (invparity[rp2] << 2) |
+	    (invparity[rp1] << 1) |
+	    (invparity[rp0]);
+	code[0] =
+	    (invparity[rp15] << 7) |
+	    (invparity[rp14] << 6) |
+	    (invparity[rp13] << 5) |
+	    (invparity[rp12] << 4) |
+	    (invparity[rp11] << 3) |
+	    (invparity[rp10] << 2) |
+	    (invparity[rp9] << 1)  |
+	    (invparity[rp8]);
+#endif
+	if (eccsize_mult == 1)
+		code[2] =
+		    (invparity[par & 0xf0] << 7) |
+		    (invparity[par & 0x0f] << 6) |
+		    (invparity[par & 0xcc] << 5) |
+		    (invparity[par & 0x33] << 4) |
+		    (invparity[par & 0xaa] << 3) |
+		    (invparity[par & 0x55] << 2) |
+		    3;
+	else
+		code[2] =
+		    (invparity[par & 0xf0] << 7) |
+		    (invparity[par & 0x0f] << 6) |
+		    (invparity[par & 0xcc] << 5) |
+		    (invparity[par & 0x33] << 4) |
+		    (invparity[par & 0xaa] << 3) |
+		    (invparity[par & 0x55] << 2) |
+		    (invparity[rp17] << 1) |
+		    (invparity[rp16] << 0);
+}
+EXPORT_SYMBOL(__nand_calculate_ecc);
+
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ *			 block
+ * @mtd:	MTD block structure
+ * @buf:	input buffer with raw data
+ * @code:	output buffer with ECC
+ */
+int nand_calculate_ecc(struct mtd_info *mtd, const unsigned char *buf,
+		       unsigned char *code)
+{
+	__nand_calculate_ecc(buf,
+			mtd_to_nand(mtd)->ecc.size, code);
+
+	return 0;
+}
+EXPORT_SYMBOL(nand_calculate_ecc);
+
+/**
+ * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @buf:	raw data read from the chip
+ * @read_ecc:	ECC from the chip
+ * @calc_ecc:	the ECC calculated from raw data
+ * @eccsize:	data bytes per ECC step (256 or 512)
+ *
+ * Detect and correct a 1 bit error for eccsize byte block
+ */
+int __nand_correct_data(unsigned char *buf,
+			unsigned char *read_ecc, unsigned char *calc_ecc,
+			unsigned int eccsize)
+{
+	unsigned char b0, b1, b2, bit_addr;
+	unsigned int byte_addr;
+	/* 256 or 512 bytes/ecc  */
+	const uint32_t eccsize_mult = eccsize >> 8;
+
+	/*
+	 * b0 to b2 indicate which bit is faulty (if any);
+	 * we might need the xor result more than once,
+	 * so keep them in local vars.
+	 */
+#ifdef CONFIG_MTD_NAND_ECC_SMC
+	b0 = read_ecc[0] ^ calc_ecc[0];
+	b1 = read_ecc[1] ^ calc_ecc[1];
+#else
+	b0 = read_ecc[1] ^ calc_ecc[1];
+	b1 = read_ecc[0] ^ calc_ecc[0];
+#endif
+	b2 = read_ecc[2] ^ calc_ecc[2];
+
+	/* check if there are any bitfaults */
+
+	/* repeated if statements are slightly more efficient than switch ... */
+	/* ordered in order of likelihood */
+
+	if ((b0 | b1 | b2) == 0)
+		return 0;	/* no error */
+
+	if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+	    (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+	    ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+	     (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+	/* single bit error */
+		/*
+		 * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+		 * byte, cp 5/3/1 indicate the faulty bit.
+		 * A lookup table (called addressbits) is used to filter
+		 * the bits from the byte they are in.
+		 * A marginal optimisation is possible by having three
+		 * different lookup tables.
+		 * One as we have now (for b0), one for b2
+		 * (that would avoid the >> 1), and one for b1 (with all values
+		 * << 4). However it was felt that introducing two more tables
+		 * hardly justify the gain.
+		 *
+		 * The b2 shift is there to get rid of the lowest two bits.
+		 * We could also do addressbits[b2] >> 1 but for the
+		 * performance it does not make any difference
+		 */
+		if (eccsize_mult == 1)
+			byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+		else
+			byte_addr = (addressbits[b2 & 0x3] << 8) +
+				    (addressbits[b1] << 4) + addressbits[b0];
+		bit_addr = addressbits[b2 >> 2];
+		/* flip the bit */
+		buf[byte_addr] ^= (1 << bit_addr);
+		return 1;
+
+	}
+	/* count nr of bits; use table lookup, faster than calculating it */
+	if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+		return 1;	/* error in ECC data; no action needed */
+
+	pr_err("%s: uncorrectable ECC error\n", __func__);
+	return -EBADMSG;
+}
+EXPORT_SYMBOL(__nand_correct_data);
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @mtd:	MTD block structure
+ * @buf:	raw data read from the chip
+ * @read_ecc:	ECC from the chip
+ * @calc_ecc:	the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+		      unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+	return __nand_correct_data(buf, read_ecc, calc_ecc,
+				   mtd_to_nand(mtd)->ecc.size);
+}
+EXPORT_SYMBOL(nand_correct_data);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks@gmail.com>");
+MODULE_DESCRIPTION("Generic NAND ECC support");
diff --git a/drivers/mtd/nand/rawnand/nand_ids.c b/drivers/mtd/nand/rawnand/nand_ids.c
new file mode 100644
index 000000000000..80550dbf9467
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nand_ids.c
@@ -0,0 +1,193 @@ 
+/*
+ *  Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/sizes.h>
+
+#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
+
+#define SP_OPTIONS NAND_NEED_READRDY
+#define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16)
+
+/*
+ * The chip ID list:
+ *    name, device ID, page size, chip size in MiB, eraseblock size, options
+ *
+ * If page size and eraseblock size are 0, the sizes are taken from the
+ * extended chip ID.
+ */
+struct nand_flash_dev nand_flash_ids[] = {
+	/*
+	 * Some incompatible NAND chips share device IDs and so must be
+	 * listed by full ID. We list them first so that we can easily identify
+	 * the most specific match.
+	 */
+	{"TC58NVG0S3E 1G 3.3V 8-bit",
+		{ .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+		  SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512),
+		  2 },
+	{"TC58NVG2S0F 4G 3.3V 8-bit",
+		{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
+		  SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+	{"TC58NVG3S0F 8G 3.3V 8-bit",
+		{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
+		  SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+	{"TC58NVG5D2 32G 3.3V 8-bit",
+		{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
+		  SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+	{"TC58NVG6D2 64G 3.3V 8-bit",
+		{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
+		  SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+	{"SDTNRGAMA 64G 3.3V 8-bit",
+		{ .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+		  SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+	{"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
+		{ .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+		  SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+		  NAND_ECC_INFO(40, SZ_1K), 4 },
+
+	LEGACY_ID_NAND("NAND 4MiB 5V 8-bit",   0x6B, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+	LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit",  0x33, 16, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit",  0x73, 16, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit",  0x35, 32, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit",  0x75, 32, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit",  0x36, 64, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit",  0x76, 64, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x78, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit",  0x39, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit",  0x79, 128, SZ_16K, SP_OPTIONS),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+	LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+	LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
+
+	/*
+	 * These are the new chips with large page size. Their page size and
+	 * eraseblock size are determined from the extended ID bytes.
+	 */
+
+	/* 512 Megabit */
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA2,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit",  0xA0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF2,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xD0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit",  0xF0,  64, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2,  64, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0,  64, LP_OPTIONS16),
+
+	/* 1 Gigabit */
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit",  0xA1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xF1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit",  0xD1, 128, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
+
+	/* 2 Gigabit */
+	EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit",  0xAA, 256, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit",  0xDA, 256, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
+
+	/* 4 Gigabit */
+	EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit",  0xAC, 512, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit",  0xDC, 512, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
+
+	/* 8 Gigabit */
+	EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit",  0xA3, 1024, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit",  0xD3, 1024, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
+
+	/* 16 Gigabit */
+	EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit",  0xA5, 2048, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit",  0xD5, 2048, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
+
+	/* 32 Gigabit */
+	EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit",  0xA7, 4096, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit",  0xD7, 4096, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
+
+	/* 64 Gigabit */
+	EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit",  0xAE, 8192, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit",  0xDE, 8192, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
+
+	/* 128 Gigabit */
+	EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit",  0x1A, 16384, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit",  0x3A, 16384, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
+
+	/* 256 Gigabit */
+	EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit",  0x1C, 32768, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit",  0x3C, 32768, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
+
+	/* 512 Gigabit */
+	EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit",  0x1E, 65536, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit",  0x3E, 65536, LP_OPTIONS),
+	EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+	EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
+
+	{NULL}
+};
+
+/* Manufacturer IDs */
+struct nand_manufacturers nand_manuf_ids[] = {
+	{NAND_MFR_TOSHIBA, "Toshiba"},
+	{NAND_MFR_ESMT, "ESMT"},
+	{NAND_MFR_SAMSUNG, "Samsung"},
+	{NAND_MFR_FUJITSU, "Fujitsu"},
+	{NAND_MFR_NATIONAL, "National"},
+	{NAND_MFR_RENESAS, "Renesas"},
+	{NAND_MFR_STMICRO, "ST Micro"},
+	{NAND_MFR_HYNIX, "Hynix"},
+	{NAND_MFR_MICRON, "Micron"},
+	{NAND_MFR_AMD, "AMD/Spansion"},
+	{NAND_MFR_MACRONIX, "Macronix"},
+	{NAND_MFR_EON, "Eon"},
+	{NAND_MFR_SANDISK, "SanDisk"},
+	{NAND_MFR_INTEL, "Intel"},
+	{NAND_MFR_ATO, "ATO"},
+	{0x0, "Unknown"}
+};
+
+EXPORT_SYMBOL(nand_manuf_ids);
+EXPORT_SYMBOL(nand_flash_ids);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("Nand device & manufacturer IDs");
diff --git a/drivers/mtd/nand/rawnand/nand_timings.c b/drivers/mtd/nand/rawnand/nand_timings.c
new file mode 100644
index 000000000000..5cf237268284
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nand_timings.c
@@ -0,0 +1,311 @@ 
+/*
+ *  Copyright (C) 2014 Free Electrons
+ *
+ *  Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/mtd/rawnand.h>
+
+static const struct nand_data_interface onfi_sdr_timings[] = {
+	/* Mode 0 */
+	{
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 20000,
+			.tALS_min = 50000,
+			.tAR_min = 25000,
+			.tCEA_max = 100000,
+			.tCEH_min = 20000,
+			.tCH_min = 20000,
+			.tCHZ_max = 100000,
+			.tCLH_min = 20000,
+			.tCLR_min = 20000,
+			.tCLS_min = 50000,
+			.tCOH_min = 0,
+			.tCS_min = 70000,
+			.tDH_min = 20000,
+			.tDS_min = 40000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 10000,
+			.tITC_max = 1000000,
+			.tRC_min = 100000,
+			.tREA_max = 40000,
+			.tREH_min = 30000,
+			.tRHOH_min = 0,
+			.tRHW_min = 200000,
+			.tRHZ_max = 200000,
+			.tRLOH_min = 0,
+			.tRP_min = 50000,
+			.tRR_min = 40000,
+			.tRST_max = 250000000000ULL,
+			.tWB_max = 200000,
+			.tWC_min = 100000,
+			.tWH_min = 30000,
+			.tWHR_min = 120000,
+			.tWP_min = 50000,
+			.tWW_min = 100000,
+		},
+	},
+	/* Mode 1 */
+	{
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 10000,
+			.tALS_min = 25000,
+			.tAR_min = 10000,
+			.tCEA_max = 45000,
+			.tCEH_min = 20000,
+			.tCH_min = 10000,
+			.tCHZ_max = 50000,
+			.tCLH_min = 10000,
+			.tCLR_min = 10000,
+			.tCLS_min = 25000,
+			.tCOH_min = 15000,
+			.tCS_min = 35000,
+			.tDH_min = 10000,
+			.tDS_min = 20000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 50000,
+			.tREA_max = 30000,
+			.tREH_min = 15000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRP_min = 25000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 45000,
+			.tWH_min = 15000,
+			.tWHR_min = 80000,
+			.tWP_min = 25000,
+			.tWW_min = 100000,
+		},
+	},
+	/* Mode 2 */
+	{
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 10000,
+			.tALS_min = 15000,
+			.tAR_min = 10000,
+			.tCEA_max = 30000,
+			.tCEH_min = 20000,
+			.tCH_min = 10000,
+			.tCHZ_max = 50000,
+			.tCLH_min = 10000,
+			.tCLR_min = 10000,
+			.tCLS_min = 15000,
+			.tCOH_min = 15000,
+			.tCS_min = 25000,
+			.tDH_min = 5000,
+			.tDS_min = 15000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 35000,
+			.tREA_max = 25000,
+			.tREH_min = 15000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tRP_min = 17000,
+			.tWC_min = 35000,
+			.tWH_min = 15000,
+			.tWHR_min = 80000,
+			.tWP_min = 17000,
+			.tWW_min = 100000,
+		},
+	},
+	/* Mode 3 */
+	{
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 5000,
+			.tALS_min = 10000,
+			.tAR_min = 10000,
+			.tCEA_max = 25000,
+			.tCEH_min = 20000,
+			.tCH_min = 5000,
+			.tCHZ_max = 50000,
+			.tCLH_min = 5000,
+			.tCLR_min = 10000,
+			.tCLS_min = 10000,
+			.tCOH_min = 15000,
+			.tCS_min = 25000,
+			.tDH_min = 5000,
+			.tDS_min = 10000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 30000,
+			.tREA_max = 20000,
+			.tREH_min = 10000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 0,
+			.tRP_min = 15000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 30000,
+			.tWH_min = 10000,
+			.tWHR_min = 80000,
+			.tWP_min = 15000,
+			.tWW_min = 100000,
+		},
+	},
+	/* Mode 4 */
+	{
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 5000,
+			.tALS_min = 10000,
+			.tAR_min = 10000,
+			.tCEA_max = 25000,
+			.tCEH_min = 20000,
+			.tCH_min = 5000,
+			.tCHZ_max = 30000,
+			.tCLH_min = 5000,
+			.tCLR_min = 10000,
+			.tCLS_min = 10000,
+			.tCOH_min = 15000,
+			.tCS_min = 20000,
+			.tDH_min = 5000,
+			.tDS_min = 10000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 25000,
+			.tREA_max = 20000,
+			.tREH_min = 10000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 5000,
+			.tRP_min = 12000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 25000,
+			.tWH_min = 10000,
+			.tWHR_min = 80000,
+			.tWP_min = 12000,
+			.tWW_min = 100000,
+		},
+	},
+	/* Mode 5 */
+	{
+		.type = NAND_SDR_IFACE,
+		.timings.sdr = {
+			.tADL_min = 400000,
+			.tALH_min = 5000,
+			.tALS_min = 10000,
+			.tAR_min = 10000,
+			.tCEA_max = 25000,
+			.tCEH_min = 20000,
+			.tCH_min = 5000,
+			.tCHZ_max = 30000,
+			.tCLH_min = 5000,
+			.tCLR_min = 10000,
+			.tCLS_min = 10000,
+			.tCOH_min = 15000,
+			.tCS_min = 15000,
+			.tDH_min = 5000,
+			.tDS_min = 7000,
+			.tFEAT_max = 1000000,
+			.tIR_min = 0,
+			.tITC_max = 1000000,
+			.tRC_min = 20000,
+			.tREA_max = 16000,
+			.tREH_min = 7000,
+			.tRHOH_min = 15000,
+			.tRHW_min = 100000,
+			.tRHZ_max = 100000,
+			.tRLOH_min = 5000,
+			.tRP_min = 10000,
+			.tRR_min = 20000,
+			.tRST_max = 500000000,
+			.tWB_max = 100000,
+			.tWC_min = 20000,
+			.tWH_min = 7000,
+			.tWHR_min = 80000,
+			.tWP_min = 10000,
+			.tWW_min = 100000,
+		},
+	},
+};
+
+/**
+ * onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
+ * timings according to the given ONFI timing mode
+ * @mode: ONFI timing mode
+ */
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
+{
+	if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
+		return ERR_PTR(-EINVAL);
+
+	return &onfi_sdr_timings[mode].timings.sdr;
+}
+EXPORT_SYMBOL(onfi_async_timing_mode_to_sdr_timings);
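A usage sketch for the helper above (the function name is illustrative): the values in onfi_sdr_timings are picoseconds, so a controller driver typically converts them to its own clock units.

/* Illustrative: write-pulse width in nanoseconds for an ONFI SDR mode. */
static int example_twp_ns(int onfi_mode)
{
	const struct nand_sdr_timings *sdr;

	sdr = onfi_async_timing_mode_to_sdr_timings(onfi_mode);
	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Timings are stored in picoseconds; round up to nanoseconds */
	return DIV_ROUND_UP(sdr->tWP_min, 1000);
}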
+
+/**
+ * onfi_init_data_interface - [NAND Interface] Initialize a data interface from
+ * given ONFI mode
+ * @chip: The NAND chip the interface is initialized for
+ * @iface: The data interface to be initialized
+ * @type: The data interface type (only NAND_SDR_IFACE is supported)
+ * @timing_mode: The ONFI timing mode
+ */
+int onfi_init_data_interface(struct nand_chip *chip,
+			     struct nand_data_interface *iface,
+			     enum nand_data_interface_type type,
+			     int timing_mode)
+{
+	if (type != NAND_SDR_IFACE)
+		return -EINVAL;
+
+	if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings))
+		return -EINVAL;
+
+	*iface = onfi_sdr_timings[timing_mode];
+
+	/*
+	 * TODO: initialize timings that cannot be deduced from timing mode:
+	 * tR, tPROG, tCCS, ...
+	 * This information is part of the ONFI parameter page.
+	 */
+
+	return 0;
+}
+EXPORT_SYMBOL(onfi_init_data_interface);
+
+/**
+ * nand_get_default_data_interface - [NAND Interface] Retrieve NAND
+ * data interface for mode 0. This is used as default timing after
+ * reset.
+ */
+const struct nand_data_interface *nand_get_default_data_interface(void)
+{
+	return &onfi_sdr_timings[0];
+}
+EXPORT_SYMBOL(nand_get_default_data_interface);
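onfi_init_data_interface() and nand_get_default_data_interface() are meant to be combined by controller drivers; a minimal sketch (the function name is illustrative):

/*
 * Illustrative: start from the reset-safe mode 0 timings, then switch to
 * the requested ONFI SDR mode. On failure, iface keeps the mode 0 default.
 */
static int example_pick_iface(struct nand_chip *chip,
			      struct nand_data_interface *iface, int mode)
{
	*iface = *nand_get_default_data_interface();

	return onfi_init_data_interface(chip, iface, NAND_SDR_IFACE, mode);
}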
diff --git a/drivers/mtd/nand/rawnand/nandsim.c b/drivers/mtd/nand/rawnand/nandsim.c
new file mode 100644
index 000000000000..9c16635b5338
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nandsim.c
@@ -0,0 +1,2431 @@ 
+/*
+ * NAND flash simulator.
+ *
+ * Author: Artem B. Bityuckiy <dedekind@oktetlabs.ru>, <dedekind@infradead.org>
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ *
+ * Note: NS means "NAND Simulator".
+ * Note: Input means input TO flash chip, output means output FROM chip.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/vmalloc.h>
+#include <linux/math64.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+
+/* Default simulator parameters values */
+#if !defined(CONFIG_NANDSIM_FIRST_ID_BYTE)  || \
+    !defined(CONFIG_NANDSIM_SECOND_ID_BYTE) || \
+    !defined(CONFIG_NANDSIM_THIRD_ID_BYTE)  || \
+    !defined(CONFIG_NANDSIM_FOURTH_ID_BYTE)
+#define CONFIG_NANDSIM_FIRST_ID_BYTE  0x98
+#define CONFIG_NANDSIM_SECOND_ID_BYTE 0x39
+#define CONFIG_NANDSIM_THIRD_ID_BYTE  0xFF /* No byte */
+#define CONFIG_NANDSIM_FOURTH_ID_BYTE 0xFF /* No byte */
+#endif
+
+#ifndef CONFIG_NANDSIM_ACCESS_DELAY
+#define CONFIG_NANDSIM_ACCESS_DELAY 25
+#endif
+#ifndef CONFIG_NANDSIM_PROGRAMM_DELAY
+#define CONFIG_NANDSIM_PROGRAMM_DELAY 200
+#endif
+#ifndef CONFIG_NANDSIM_ERASE_DELAY
+#define CONFIG_NANDSIM_ERASE_DELAY 2
+#endif
+#ifndef CONFIG_NANDSIM_OUTPUT_CYCLE
+#define CONFIG_NANDSIM_OUTPUT_CYCLE 40
+#endif
+#ifndef CONFIG_NANDSIM_INPUT_CYCLE
+#define CONFIG_NANDSIM_INPUT_CYCLE  50
+#endif
+#ifndef CONFIG_NANDSIM_BUS_WIDTH
+#define CONFIG_NANDSIM_BUS_WIDTH  8
+#endif
+#ifndef CONFIG_NANDSIM_DO_DELAYS
+#define CONFIG_NANDSIM_DO_DELAYS  0
+#endif
+#ifndef CONFIG_NANDSIM_LOG
+#define CONFIG_NANDSIM_LOG        0
+#endif
+#ifndef CONFIG_NANDSIM_DBG
+#define CONFIG_NANDSIM_DBG        0
+#endif
+#ifndef CONFIG_NANDSIM_MAX_PARTS
+#define CONFIG_NANDSIM_MAX_PARTS  32
+#endif
+
+static uint access_delay   = CONFIG_NANDSIM_ACCESS_DELAY;
+static uint programm_delay = CONFIG_NANDSIM_PROGRAMM_DELAY;
+static uint erase_delay    = CONFIG_NANDSIM_ERASE_DELAY;
+static uint output_cycle   = CONFIG_NANDSIM_OUTPUT_CYCLE;
+static uint input_cycle    = CONFIG_NANDSIM_INPUT_CYCLE;
+static uint bus_width      = CONFIG_NANDSIM_BUS_WIDTH;
+static uint do_delays      = CONFIG_NANDSIM_DO_DELAYS;
+static uint log            = CONFIG_NANDSIM_LOG;
+static uint dbg            = CONFIG_NANDSIM_DBG;
+static unsigned long parts[CONFIG_NANDSIM_MAX_PARTS];
+static unsigned int parts_num;
+static char *badblocks = NULL;
+static char *weakblocks = NULL;
+static char *weakpages = NULL;
+static unsigned int bitflips = 0;
+static char *gravepages = NULL;
+static unsigned int overridesize = 0;
+static char *cache_file = NULL;
+static unsigned int bbt;
+static unsigned int bch;
+static u_char id_bytes[8] = {
+	[0] = CONFIG_NANDSIM_FIRST_ID_BYTE,
+	[1] = CONFIG_NANDSIM_SECOND_ID_BYTE,
+	[2] = CONFIG_NANDSIM_THIRD_ID_BYTE,
+	[3] = CONFIG_NANDSIM_FOURTH_ID_BYTE,
+	[4 ... 7] = 0xFF,
+};
+
+module_param_array(id_bytes, byte, NULL, 0400);
+module_param_named(first_id_byte, id_bytes[0], byte, 0400);
+module_param_named(second_id_byte, id_bytes[1], byte, 0400);
+module_param_named(third_id_byte, id_bytes[2], byte, 0400);
+module_param_named(fourth_id_byte, id_bytes[3], byte, 0400);
+module_param(access_delay,   uint, 0400);
+module_param(programm_delay, uint, 0400);
+module_param(erase_delay,    uint, 0400);
+module_param(output_cycle,   uint, 0400);
+module_param(input_cycle,    uint, 0400);
+module_param(bus_width,      uint, 0400);
+module_param(do_delays,      uint, 0400);
+module_param(log,            uint, 0400);
+module_param(dbg,            uint, 0400);
+module_param_array(parts, ulong, &parts_num, 0400);
+module_param(badblocks,      charp, 0400);
+module_param(weakblocks,     charp, 0400);
+module_param(weakpages,      charp, 0400);
+module_param(bitflips,       uint, 0400);
+module_param(gravepages,     charp, 0400);
+module_param(overridesize,   uint, 0400);
+module_param(cache_file,     charp, 0400);
+module_param(bbt,	     uint, 0400);
+module_param(bch,	     uint, 0400);
+
+MODULE_PARM_DESC(id_bytes,       "The ID bytes returned by NAND Flash 'read ID' command");
+MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID) (obsolete)");
+MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID) (obsolete)");
+MODULE_PARM_DESC(third_id_byte,  "The third byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(fourth_id_byte, "The fourth byte returned by NAND Flash 'read ID' command (obsolete)");
+MODULE_PARM_DESC(access_delay,   "Initial page access delay (microseconds)");
+MODULE_PARM_DESC(programm_delay, "Page program delay (microseconds)");
+MODULE_PARM_DESC(erase_delay,    "Sector erase delay (milliseconds)");
+MODULE_PARM_DESC(output_cycle,   "Word output (from flash) time (nanoseconds)");
+MODULE_PARM_DESC(input_cycle,    "Word input (to flash) time (nanoseconds)");
+MODULE_PARM_DESC(bus_width,      "Chip's bus width (8- or 16-bit)");
+MODULE_PARM_DESC(do_delays,      "Simulate NAND delays using busy-waits if not zero");
+MODULE_PARM_DESC(log,            "Perform logging if not zero");
+MODULE_PARM_DESC(dbg,            "Output debug information if not zero");
+MODULE_PARM_DESC(parts,          "Partition sizes (in erase blocks) separated by commas");
+/* Page and erase block positions for the following parameters are independent of any partitions */
+MODULE_PARM_DESC(badblocks,      "Erase blocks that are initially marked bad, separated by commas");
+MODULE_PARM_DESC(weakblocks,     "Weak erase blocks [: remaining erase cycles (defaults to 3)]"
+				 " separated by commas e.g. 113:2 means eb 113"
+				 " can be erased only twice before failing");
+MODULE_PARM_DESC(weakpages,      "Weak pages [: maximum writes (defaults to 3)]"
+				 " separated by commas e.g. 1401:2 means page 1401"
+				 " can be written only twice before failing");
+MODULE_PARM_DESC(bitflips,       "Maximum number of random bit flips per page (zero by default)");
+MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (defaults to 3)]"
+				 " separated by commas e.g. 1401:2 means page 1401"
+				 " can be read only twice before failing");
+MODULE_PARM_DESC(overridesize,   "Specifies the NAND Flash size overriding the ID bytes. "
+				 "The size is specified in erase blocks and as the exponent of a power of two"
+				 " e.g. 5 means a size of 32 erase blocks");
+MODULE_PARM_DESC(cache_file,     "File to use to cache nand pages instead of memory");
+MODULE_PARM_DESC(bbt,		 "0 OOB, 1 BBT with marker in OOB, 2 BBT with marker in data area");
+MODULE_PARM_DESC(bch,		 "Enable BCH ecc and set how many bits should "
+				 "be correctable in 512-byte blocks");
+
+/* The largest possible page size */
+#define NS_LARGEST_PAGE_SIZE	4096
+
+/* The prefix for simulator output */
+#define NS_OUTPUT_PREFIX "[nandsim]"
+
+/* Simulator's output macros (logging, debugging, warning, error) */
+#define NS_LOG(args...) \
+	do { if (log) printk(KERN_DEBUG NS_OUTPUT_PREFIX " log: " args); } while(0)
+#define NS_DBG(args...) \
+	do { if (dbg) printk(KERN_DEBUG NS_OUTPUT_PREFIX " debug: " args); } while(0)
+#define NS_WARN(args...) \
+	do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
+#define NS_ERR(args...) \
+	do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
+#define NS_INFO(args...) \
+	do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
+
+/* Busy-wait delay macros (microseconds, milliseconds) */
+#define NS_UDELAY(us) \
+        do { if (do_delays) udelay(us); } while(0)
+#define NS_MDELAY(ms) \
+        do { if (do_delays) mdelay(ms); } while(0)
+
+/* Is the nandsim structure initialized ? */
+#define NS_IS_INITIALIZED(ns) ((ns)->geom.totsz != 0)
+
+/* Good operation completion status */
+#define NS_STATUS_OK(ns) (NAND_STATUS_READY | (NAND_STATUS_WP * ((ns)->lines.wp == 0)))
+
+/* Operation failed completion status */
+#define NS_STATUS_FAILED(ns) (NAND_STATUS_FAIL | NS_STATUS_OK(ns))
+
+/* Calculate the page offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET(ns) \
+	(((ns)->regs.row * (ns)->geom.pgszoob) + (ns)->regs.column)
+
+/* Calculate the OOB offset in flash RAM image by (row, column) address */
+#define NS_RAW_OFFSET_OOB(ns) (NS_RAW_OFFSET(ns) + ns->geom.pgsz)
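/*
 * Worked example (illustrative): with 2048-byte pages and 64 bytes of OOB
 * (pgszoob = 2112), row = 3 and column = 5 give a raw offset of
 * 3 * 2112 + 5 = 6341 into the RAM image, and with column = 0 the OOB of
 * that page starts at 3 * 2112 + 2048 = 8384.
 */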
+
+/* After a command is input, the simulator goes to one of the following states */
+#define STATE_CMD_READ0        0x00000001 /* read data from the beginning of page */
+#define STATE_CMD_READ1        0x00000002 /* read data from the second half of page */
+#define STATE_CMD_READSTART    0x00000003 /* read data second command (large page devices) */
+#define STATE_CMD_PAGEPROG     0x00000004 /* start page program */
+#define STATE_CMD_READOOB      0x00000005 /* read OOB area */
+#define STATE_CMD_ERASE1       0x00000006 /* sector erase first command */
+#define STATE_CMD_STATUS       0x00000007 /* read status */
+#define STATE_CMD_SEQIN        0x00000009 /* sequential data input */
+#define STATE_CMD_READID       0x0000000A /* read ID */
+#define STATE_CMD_ERASE2       0x0000000B /* sector erase second command */
+#define STATE_CMD_RESET        0x0000000C /* reset */
+#define STATE_CMD_RNDOUT       0x0000000D /* random output command */
+#define STATE_CMD_RNDOUTSTART  0x0000000E /* random output start command */
+#define STATE_CMD_MASK         0x0000000F /* command states mask */
+
+/* After an address is input, the simulator goes to one of these states */
+#define STATE_ADDR_PAGE        0x00000010 /* full (row, column) address is accepted */
+#define STATE_ADDR_SEC         0x00000020 /* sector address was accepted */
+#define STATE_ADDR_COLUMN      0x00000030 /* column address was accepted */
+#define STATE_ADDR_ZERO        0x00000040 /* one byte zero address was accepted */
+#define STATE_ADDR_MASK        0x00000070 /* address states mask */
+
+/* During data input/output the simulator is in these states */
+#define STATE_DATAIN           0x00000100 /* waiting for data input */
+#define STATE_DATAIN_MASK      0x00000100 /* data input states mask */
+
+#define STATE_DATAOUT          0x00001000 /* waiting for page data output */
+#define STATE_DATAOUT_ID       0x00002000 /* waiting for ID bytes output */
+#define STATE_DATAOUT_STATUS   0x00003000 /* waiting for status output */
+#define STATE_DATAOUT_MASK     0x00007000 /* data output states mask */
+
+/* Previous operation is done, ready to accept new requests */
+#define STATE_READY            0x00000000
+
+/* This state is used to mark that the next state isn't known yet */
+#define STATE_UNKNOWN          0x10000000
+
+/* Simulator's actions bit masks */
+#define ACTION_CPY       0x00100000 /* copy page/OOB to the internal buffer */
+#define ACTION_PRGPAGE   0x00200000 /* program the internal buffer to flash */
+#define ACTION_SECERASE  0x00300000 /* erase sector */
+#define ACTION_ZEROOFF   0x00400000 /* don't add any offset to address */
+#define ACTION_HALFOFF   0x00500000 /* add to address half of page */
+#define ACTION_OOBOFF    0x00600000 /* add to address OOB offset */
+#define ACTION_MASK      0x00700000 /* action mask */
+
+#define NS_OPER_NUM      13 /* Number of operations supported by the simulator */
+#define NS_OPER_STATES   6  /* Maximum number of states in operation */
+
+#define OPT_ANY          0xFFFFFFFF /* any chip supports this operation */
+#define OPT_PAGE512      0x00000002 /* 512-byte  page chips */
+#define OPT_PAGE2048     0x00000008 /* 2048-byte page chips */
+#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
+#define OPT_PAGE4096     0x00000080 /* 4096-byte page chips */
+#define OPT_LARGEPAGE    (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
+#define OPT_SMALLPAGE    (OPT_PAGE512) /* 512-byte page chips */
+
+/* Remove action bits from state */
+#define NS_STATE(x) ((x) & ~ACTION_MASK)
+
+/*
+ * Maximum previous states which need to be saved. Currently saving is
+ * only needed for the page program operation preceded by a read command
+ * (which is only valid for 512-byte pages).
+ */
+#define NS_MAX_PREVSTATES 1
+
+/* Maximum page cache pages needed to read or write a NAND page to the cache_file */
+#define NS_MAX_HELD_PAGES 16
+
+struct nandsim_debug_info {
+	struct dentry *dfs_root;
+	struct dentry *dfs_wear_report;
+};
+
+/*
+ * A union to represent flash memory contents and flash buffer.
+ */
+union ns_mem {
+	u_char *byte;    /* for byte access */
+	uint16_t *word;  /* for 16-bit word access */
+};
+
+/*
+ * The structure which describes all the internal simulator data.
+ */
+struct nandsim {
+	struct mtd_partition partitions[CONFIG_NANDSIM_MAX_PARTS];
+	unsigned int nbparts;
+
+	uint busw;              /* flash chip bus width (8 or 16) */
+	u_char ids[8];          /* chip's ID bytes */
+	uint32_t options;       /* chip's characteristic bits */
+	uint32_t state;         /* current chip state */
+	uint32_t nxstate;       /* next expected state */
+
+	uint32_t *op;           /* current operation, NULL if operation isn't known yet */
+	uint32_t pstates[NS_MAX_PREVSTATES]; /* previous states */
+	uint16_t npstates;      /* number of previous states saved */
+	uint16_t stateidx;      /* current state index */
+
+	/* The simulated NAND flash pages array */
+	union ns_mem *pages;
+
+	/* Slab allocator for nand pages */
+	struct kmem_cache *nand_pages_slab;
+
+	/* Internal buffer of page + OOB size bytes */
+	union ns_mem buf;
+
+	/* NAND flash "geometry" */
+	struct {
+		uint64_t totsz;     /* total flash size, bytes */
+		uint32_t secsz;     /* flash sector (erase block) size, bytes */
+		uint pgsz;          /* NAND flash page size, bytes */
+		uint oobsz;         /* page OOB area size, bytes */
+		uint64_t totszoob;  /* total flash size including OOB, bytes */
+		uint pgszoob;       /* page size including OOB, bytes */
+		uint secszoob;      /* sector size including OOB, bytes */
+		uint pgnum;         /* total number of pages */
+		uint pgsec;         /* number of pages per sector */
+		uint secshift;      /* bits number in sector size */
+		uint pgshift;       /* bits number in page size */
+		uint pgaddrbytes;   /* bytes per page address */
+		uint secaddrbytes;  /* bytes per sector address */
+		uint idbytes;       /* the number of ID bytes that this chip outputs */
+	} geom;
+
+	/* NAND flash internal registers */
+	struct {
+		unsigned command; /* the command register */
+		u_char   status;  /* the status register */
+		uint     row;     /* the page number */
+		uint     column;  /* the offset within page */
+		uint     count;   /* internal counter */
+		uint     num;     /* number of bytes which must be processed */
+		uint     off;     /* fixed page offset */
+	} regs;
+
+	/* NAND flash lines state */
+	struct {
+		int ce;  /* chip Enable */
+		int cle; /* command Latch Enable */
+		int ale; /* address Latch Enable */
+		int wp;  /* write Protect */
+	} lines;
+
+	/* Fields needed when using a cache file */
+	struct file *cfile; /* Open file */
+	unsigned long *pages_written; /* Which pages have been written */
+	void *file_buf;
+	struct page *held_pages[NS_MAX_HELD_PAGES];
+	int held_cnt;
+
+	struct nandsim_debug_info dbg;
+};
+
+/*
+ * Operations array. To perform any operation the simulator must pass
+ * through the correspondent states chain.
+ */
+static struct nandsim_operations {
+	uint32_t reqopts;  /* options which are required to perform the operation */
+	uint32_t states[NS_OPER_STATES]; /* operation's states */
+} ops[NS_OPER_NUM] = {
+	/* Read page + OOB from the beginning */
+	{OPT_SMALLPAGE, {STATE_CMD_READ0 | ACTION_ZEROOFF, STATE_ADDR_PAGE | ACTION_CPY,
+			STATE_DATAOUT, STATE_READY}},
+	/* Read page + OOB from the second half */
+	{OPT_PAGE512_8BIT, {STATE_CMD_READ1 | ACTION_HALFOFF, STATE_ADDR_PAGE | ACTION_CPY,
+			STATE_DATAOUT, STATE_READY}},
+	/* Read OOB */
+	{OPT_SMALLPAGE, {STATE_CMD_READOOB | ACTION_OOBOFF, STATE_ADDR_PAGE | ACTION_CPY,
+			STATE_DATAOUT, STATE_READY}},
+	/* Program page starting from the beginning */
+	{OPT_ANY, {STATE_CMD_SEQIN, STATE_ADDR_PAGE, STATE_DATAIN,
+			STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+	/* Program page starting from the beginning */
+	{OPT_SMALLPAGE, {STATE_CMD_READ0, STATE_CMD_SEQIN | ACTION_ZEROOFF, STATE_ADDR_PAGE,
+			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+	/* Program page starting from the second half */
+	{OPT_PAGE512, {STATE_CMD_READ1, STATE_CMD_SEQIN | ACTION_HALFOFF, STATE_ADDR_PAGE,
+			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+	/* Program OOB */
+	{OPT_SMALLPAGE, {STATE_CMD_READOOB, STATE_CMD_SEQIN | ACTION_OOBOFF, STATE_ADDR_PAGE,
+			      STATE_DATAIN, STATE_CMD_PAGEPROG | ACTION_PRGPAGE, STATE_READY}},
+	/* Erase sector */
+	{OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}},
+	/* Read status */
+	{OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}},
+	/* Read ID */
+	{OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}},
+	/* Large page devices read page */
+	{OPT_LARGEPAGE, {STATE_CMD_READ0, STATE_ADDR_PAGE, STATE_CMD_READSTART | ACTION_CPY,
+			       STATE_DATAOUT, STATE_READY}},
+	/* Large page devices random page read */
+	{OPT_LARGEPAGE, {STATE_CMD_RNDOUT, STATE_ADDR_COLUMN, STATE_CMD_RNDOUTSTART | ACTION_CPY,
+			       STATE_DATAOUT, STATE_READY}},
+};
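/*
 * Illustrative walk-through of one chain above: a read ID sequence is
 * accepted as STATE_CMD_READID (the 0x90 command), then STATE_ADDR_ZERO
 * (the single zero address byte), then STATE_DATAOUT_ID while the host
 * clocks the ID bytes out, and finally STATE_READY.
 */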
+
+struct weak_block {
+	struct list_head list;
+	unsigned int erase_block_no;
+	unsigned int max_erases;
+	unsigned int erases_done;
+};
+
+static LIST_HEAD(weak_blocks);
+
+struct weak_page {
+	struct list_head list;
+	unsigned int page_no;
+	unsigned int max_writes;
+	unsigned int writes_done;
+};
+
+static LIST_HEAD(weak_pages);
+
+struct grave_page {
+	struct list_head list;
+	unsigned int page_no;
+	unsigned int max_reads;
+	unsigned int reads_done;
+};
+
+static LIST_HEAD(grave_pages);
+
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+
+/* MTD structure for NAND controller */
+static struct mtd_info *nsmtd;
+
+static int nandsim_debugfs_show(struct seq_file *m, void *private)
+{
+	unsigned long wmin = -1, wmax = 0, avg;
+	unsigned long deciles[10], decile_max[10], tot = 0;
+	unsigned int i;
+
+	/* Calc wear stats */
+	for (i = 0; i < wear_eb_count; ++i) {
+		unsigned long wear = erase_block_wear[i];
+		if (wear < wmin)
+			wmin = wear;
+		if (wear > wmax)
+			wmax = wear;
+		tot += wear;
+	}
+
+	for (i = 0; i < 9; ++i) {
+		deciles[i] = 0;
+		decile_max[i] = (wmax * (i + 1) + 5) / 10;
+	}
+	deciles[9] = 0;
+	decile_max[9] = wmax;
+	for (i = 0; i < wear_eb_count; ++i) {
+		int d;
+		unsigned long wear = erase_block_wear[i];
+		for (d = 0; d < 10; ++d)
+			if (wear <= decile_max[d]) {
+				deciles[d] += 1;
+				break;
+			}
+	}
+	avg = tot / wear_eb_count;
+
+	/* Output wear report */
+	seq_printf(m, "Total numbers of erases:  %lu\n", tot);
+	seq_printf(m, "Number of erase blocks:   %u\n", wear_eb_count);
+	seq_printf(m, "Average number of erases: %lu\n", avg);
+	seq_printf(m, "Maximum number of erases: %lu\n", wmax);
+	seq_printf(m, "Minimum number of erases: %lu\n", wmin);
+	for (i = 0; i < 10; ++i) {
+		unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+		if (from > decile_max[i])
+			continue;
+		seq_printf(m, "Number of ebs with erase counts from %lu to %lu : %lu\n",
+			from,
+			decile_max[i],
+			deciles[i]);
+	}
+
+	return 0;
+}
+
+static int nandsim_debugfs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nandsim_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations dfs_fops = {
+	.open		= nandsim_debugfs_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+/**
+ * nandsim_debugfs_create - initialize debugfs
+ * @dev: nandsim device description object
+ *
+ * This function creates all debugfs files for nandsim device @dev. Returns zero in
+ * case of success and a negative error code in case of failure.
+ */
+static int nandsim_debugfs_create(struct nandsim *dev)
+{
+	struct nandsim_debug_info *dbg = &dev->dbg;
+	struct dentry *dent;
+	int err;
+
+	if (!IS_ENABLED(CONFIG_DEBUG_FS))
+		return 0;
+
+	dent = debugfs_create_dir("nandsim", NULL);
+	if (IS_ERR_OR_NULL(dent)) {
+		int err = dent ? -ENODEV : PTR_ERR(dent);
+
+		NS_ERR("cannot create \"nandsim\" debugfs directory, err %d\n",
+			err);
+		return err;
+	}
+	dbg->dfs_root = dent;
+
+	dent = debugfs_create_file("wear_report", S_IRUSR,
+				   dbg->dfs_root, dev, &dfs_fops);
+	if (IS_ERR_OR_NULL(dent))
+		goto out_remove;
+	dbg->dfs_wear_report = dent;
+
+	return 0;
+
+out_remove:
+	debugfs_remove_recursive(dbg->dfs_root);
+	err = dent ? PTR_ERR(dent) : -ENODEV;
+	return err;
+}
+
+/**
+ * nandsim_debugfs_remove - destroy all debugfs files
+ */
+static void nandsim_debugfs_remove(struct nandsim *ns)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_FS))
+		debugfs_remove_recursive(ns->dbg.dfs_root);
+}
+
+/*
+ * Allocate array of page pointers, create slab allocation for an array
+ * and initialize the array by NULL pointers.
+ *
+ * RETURNS: 0 if success, -ENOMEM if memory alloc fails.
+ */
+static int __init alloc_device(struct nandsim *ns)
+{
+	struct file *cfile;
+	int i, err;
+
+	if (cache_file) {
+		cfile = filp_open(cache_file, O_CREAT | O_RDWR | O_LARGEFILE, 0600);
+		if (IS_ERR(cfile))
+			return PTR_ERR(cfile);
+		if (!(cfile->f_mode & FMODE_CAN_READ)) {
+			NS_ERR("alloc_device: cache file not readable\n");
+			err = -EINVAL;
+			goto err_close;
+		}
+		if (!(cfile->f_mode & FMODE_CAN_WRITE)) {
+			NS_ERR("alloc_device: cache file not writeable\n");
+			err = -EINVAL;
+			goto err_close;
+		}
+		ns->pages_written = vzalloc(BITS_TO_LONGS(ns->geom.pgnum) *
+					    sizeof(unsigned long));
+		if (!ns->pages_written) {
+			NS_ERR("alloc_device: unable to allocate pages written array\n");
+			err = -ENOMEM;
+			goto err_close;
+		}
+		ns->file_buf = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+		if (!ns->file_buf) {
+			NS_ERR("alloc_device: unable to allocate file buf\n");
+			err = -ENOMEM;
+			goto err_free;
+		}
+		ns->cfile = cfile;
+		return 0;
+	}
+
+	ns->pages = vmalloc(ns->geom.pgnum * sizeof(union ns_mem));
+	if (!ns->pages) {
+		NS_ERR("alloc_device: unable to allocate page array\n");
+		return -ENOMEM;
+	}
+	for (i = 0; i < ns->geom.pgnum; i++) {
+		ns->pages[i].byte = NULL;
+	}
+	ns->nand_pages_slab = kmem_cache_create("nandsim",
+						ns->geom.pgszoob, 0, 0, NULL);
+	if (!ns->nand_pages_slab) {
+		NS_ERR("cache_create: unable to create kmem_cache\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+
+err_free:
+	vfree(ns->pages_written);
+err_close:
+	filp_close(cfile, NULL);
+	return err;
+}
+
+/*
+ * Free any allocated pages, and free the array of page pointers.
+ */
+static void free_device(struct nandsim *ns)
+{
+	int i;
+
+	if (ns->cfile) {
+		kfree(ns->file_buf);
+		vfree(ns->pages_written);
+		filp_close(ns->cfile, NULL);
+		return;
+	}
+
+	if (ns->pages) {
+		for (i = 0; i < ns->geom.pgnum; i++) {
+			if (ns->pages[i].byte)
+				kmem_cache_free(ns->nand_pages_slab,
+						ns->pages[i].byte);
+		}
+		kmem_cache_destroy(ns->nand_pages_slab);
+		vfree(ns->pages);
+	}
+}
+
+static char __init *get_partition_name(int i)
+{
+	return kasprintf(GFP_KERNEL, "NAND simulator partition %d", i);
+}
+
+/*
+ * Initialize the nandsim structure.
+ *
+ * RETURNS: 0 if success, -ERRNO if failure.
+ */
+static int __init init_nandsim(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nandsim   *ns   = nand_get_controller_data(chip);
+	int i, ret = 0;
+	uint64_t remains;
+	uint64_t next_offset;
+
+	if (NS_IS_INITIALIZED(ns)) {
+		NS_ERR("init_nandsim: nandsim is already initialized\n");
+		return -EIO;
+	}
+
+	/* Force mtd to not do delays */
+	chip->chip_delay = 0;
+
+	/* Initialize the NAND flash parameters */
+	ns->busw = chip->options & NAND_BUSWIDTH_16 ? 16 : 8;
+	ns->geom.totsz    = mtd->size;
+	ns->geom.pgsz     = mtd->writesize;
+	ns->geom.oobsz    = mtd->oobsize;
+	ns->geom.secsz    = mtd->erasesize;
+	ns->geom.pgszoob  = ns->geom.pgsz + ns->geom.oobsz;
+	ns->geom.pgnum    = div_u64(ns->geom.totsz, ns->geom.pgsz);
+	ns->geom.totszoob = ns->geom.totsz + (uint64_t)ns->geom.pgnum * ns->geom.oobsz;
+	ns->geom.secshift = ffs(ns->geom.secsz) - 1;
+	ns->geom.pgshift  = chip->page_shift;
+	ns->geom.pgsec    = ns->geom.secsz / ns->geom.pgsz;
+	ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec;
+	ns->options = 0;
+
+	if (ns->geom.pgsz == 512) {
+		ns->options |= OPT_PAGE512;
+		if (ns->busw == 8)
+			ns->options |= OPT_PAGE512_8BIT;
+	} else if (ns->geom.pgsz == 2048) {
+		ns->options |= OPT_PAGE2048;
+	} else if (ns->geom.pgsz == 4096) {
+		ns->options |= OPT_PAGE4096;
+	} else {
+		NS_ERR("init_nandsim: unknown page size %u\n", ns->geom.pgsz);
+		return -EIO;
+	}
+
+	if (ns->options & OPT_SMALLPAGE) {
+		if (ns->geom.totsz <= (32 << 20)) {
+			ns->geom.pgaddrbytes  = 3;
+			ns->geom.secaddrbytes = 2;
+		} else {
+			ns->geom.pgaddrbytes  = 4;
+			ns->geom.secaddrbytes = 3;
+		}
+	} else {
+		if (ns->geom.totsz <= (128 << 20)) {
+			ns->geom.pgaddrbytes  = 4;
+			ns->geom.secaddrbytes = 2;
+		} else {
+			ns->geom.pgaddrbytes  = 5;
+			ns->geom.secaddrbytes = 3;
+		}
+	}
+
+	/* Fill the partition_info structure */
+	if (parts_num > ARRAY_SIZE(ns->partitions)) {
+		NS_ERR("too many partitions.\n");
+		return -EINVAL;
+	}
+	remains = ns->geom.totsz;
+	next_offset = 0;
+	for (i = 0; i < parts_num; ++i) {
+		uint64_t part_sz = (uint64_t)parts[i] * ns->geom.secsz;
+
+		if (!part_sz || part_sz > remains) {
+			NS_ERR("bad partition size.\n");
+			return -EINVAL;
+		}
+		ns->partitions[i].name   = get_partition_name(i);
+		if (!ns->partitions[i].name) {
+			NS_ERR("unable to allocate memory.\n");
+			return -ENOMEM;
+		}
+		ns->partitions[i].offset = next_offset;
+		ns->partitions[i].size   = part_sz;
+		next_offset += ns->partitions[i].size;
+		remains -= ns->partitions[i].size;
+	}
+	ns->nbparts = parts_num;
+	if (remains) {
+		if (parts_num + 1 > ARRAY_SIZE(ns->partitions)) {
+			NS_ERR("too many partitions.\n");
+			return -EINVAL;
+		}
+		ns->partitions[i].name   = get_partition_name(i);
+		if (!ns->partitions[i].name) {
+			NS_ERR("unable to allocate memory.\n");
+			return -ENOMEM;
+		}
+		ns->partitions[i].offset = next_offset;
+		ns->partitions[i].size   = remains;
+		ns->nbparts += 1;
+	}
+
+	if (ns->busw == 16)
+		NS_WARN("16-bit flash support wasn't tested\n");
+
+	printk("flash size: %llu MiB\n",
+			(unsigned long long)ns->geom.totsz >> 20);
+	printk("page size: %u bytes\n",         ns->geom.pgsz);
+	printk("OOB area size: %u bytes\n",     ns->geom.oobsz);
+	printk("sector size: %u KiB\n",         ns->geom.secsz >> 10);
+	printk("pages number: %u\n",            ns->geom.pgnum);
+	printk("pages per sector: %u\n",        ns->geom.pgsec);
+	printk("bus width: %u\n",               ns->busw);
+	printk("bits in sector size: %u\n",     ns->geom.secshift);
+	printk("bits in page size: %u\n",       ns->geom.pgshift);
+	printk("bits in OOB size: %u\n",	ffs(ns->geom.oobsz) - 1);
+	printk("flash size with OOB: %llu KiB\n",
+			(unsigned long long)ns->geom.totszoob >> 10);
+	printk("page address bytes: %u\n",      ns->geom.pgaddrbytes);
+	printk("sector address bytes: %u\n",    ns->geom.secaddrbytes);
+	printk("options: %#x\n",                ns->options);
+
+	if ((ret = alloc_device(ns)) != 0)
+		return ret;
+
+	/* Allocate / initialize the internal buffer */
+	ns->buf.byte = kmalloc(ns->geom.pgszoob, GFP_KERNEL);
+	if (!ns->buf.byte) {
+		NS_ERR("init_nandsim: unable to allocate %u bytes for the internal buffer\n",
+			ns->geom.pgszoob);
+		return -ENOMEM;
+	}
+	memset(ns->buf.byte, 0xFF, ns->geom.pgszoob);
+
+	return 0;
+}
+
+/*
+ * Free the nandsim structure.
+ */
+static void free_nandsim(struct nandsim *ns)
+{
+	kfree(ns->buf.byte);
+	free_device(ns);
+
+	return;
+}
+
+static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
+{
+	char *w;
+	int zero_ok;
+	unsigned int erase_block_no;
+	loff_t offset;
+
+	if (!badblocks)
+		return 0;
+	w = badblocks;
+	do {
+		zero_ok = (*w == '0' ? 1 : 0);
+		erase_block_no = simple_strtoul(w, &w, 0);
+		if (!zero_ok && !erase_block_no) {
+			NS_ERR("invalid badblocks.\n");
+			return -EINVAL;
+		}
+		offset = (loff_t)erase_block_no * ns->geom.secsz;
+		if (mtd_block_markbad(mtd, offset)) {
+			NS_ERR("invalid badblocks.\n");
+			return -EINVAL;
+		}
+		if (*w == ',')
+			w += 1;
+	} while (*w);
+	return 0;
+}
+
+static int parse_weakblocks(void)
+{
+	char *w;
+	int zero_ok;
+	unsigned int erase_block_no;
+	unsigned int max_erases;
+	struct weak_block *wb;
+
+	if (!weakblocks)
+		return 0;
+	w = weakblocks;
+	do {
+		zero_ok = (*w == '0' ? 1 : 0);
+		erase_block_no = simple_strtoul(w, &w, 0);
+		if (!zero_ok && !erase_block_no) {
+			NS_ERR("invalid weakblocks.\n");
+			return -EINVAL;
+		}
+		max_erases = 3;
+		if (*w == ':') {
+			w += 1;
+			max_erases = simple_strtoul(w, &w, 0);
+		}
+		if (*w == ',')
+			w += 1;
+		wb = kzalloc(sizeof(*wb), GFP_KERNEL);
+		if (!wb) {
+			NS_ERR("unable to allocate memory.\n");
+			return -ENOMEM;
+		}
+		wb->erase_block_no = erase_block_no;
+		wb->max_erases = max_erases;
+		list_add(&wb->list, &weak_blocks);
+	} while (*w);
+	return 0;
+}
+
+static int erase_error(unsigned int erase_block_no)
+{
+	struct weak_block *wb;
+
+	list_for_each_entry(wb, &weak_blocks, list)
+		if (wb->erase_block_no == erase_block_no) {
+			if (wb->erases_done >= wb->max_erases)
+				return 1;
+			wb->erases_done += 1;
+			return 0;
+		}
+	return 0;
+}
+
+static int parse_weakpages(void)
+{
+	char *w;
+	int zero_ok;
+	unsigned int page_no;
+	unsigned int max_writes;
+	struct weak_page *wp;
+
+	if (!weakpages)
+		return 0;
+	w = weakpages;
+	do {
+		zero_ok = (*w == '0' ? 1 : 0);
+		page_no = simple_strtoul(w, &w, 0);
+		if (!zero_ok && !page_no) {
+			NS_ERR("invalid weakpages.\n");
+			return -EINVAL;
+		}
+		max_writes = 3;
+		if (*w == ':') {
+			w += 1;
+			max_writes = simple_strtoul(w, &w, 0);
+		}
+		if (*w == ',')
+			w += 1;
+		wp = kzalloc(sizeof(*wp), GFP_KERNEL);
+		if (!wp) {
+			NS_ERR("unable to allocate memory.\n");
+			return -ENOMEM;
+		}
+		wp->page_no = page_no;
+		wp->max_writes = max_writes;
+		list_add(&wp->list, &weak_pages);
+	} while (*w);
+	return 0;
+}
+
+static int write_error(unsigned int page_no)
+{
+	struct weak_page *wp;
+
+	list_for_each_entry(wp, &weak_pages, list)
+		if (wp->page_no == page_no) {
+			if (wp->writes_done >= wp->max_writes)
+				return 1;
+			wp->writes_done += 1;
+			return 0;
+		}
+	return 0;
+}
+
+static int parse_gravepages(void)
+{
+	char *g;
+	int zero_ok;
+	unsigned int page_no;
+	unsigned int max_reads;
+	struct grave_page *gp;
+
+	if (!gravepages)
+		return 0;
+	g = gravepages;
+	do {
+		zero_ok = (*g == '0' ? 1 : 0);
+		page_no = simple_strtoul(g, &g, 0);
+		if (!zero_ok && !page_no) {
+			NS_ERR("invalid gravepages.\n");
+			return -EINVAL;
+		}
+		max_reads = 3;
+		if (*g == ':') {
+			g += 1;
+			max_reads = simple_strtoul(g, &g, 0);
+		}
+		if (*g == ',')
+			g += 1;
+		gp = kzalloc(sizeof(*gp), GFP_KERNEL);
+		if (!gp) {
+			NS_ERR("unable to allocate memory.\n");
+			return -ENOMEM;
+		}
+		gp->page_no = page_no;
+		gp->max_reads = max_reads;
+		list_add(&gp->list, &grave_pages);
+	} while (*g);
+	return 0;
+}
+
+static int read_error(unsigned int page_no)
+{
+	struct grave_page *gp;
+
+	list_for_each_entry(gp, &grave_pages, list)
+		if (gp->page_no == page_no) {
+			if (gp->reads_done >= gp->max_reads)
+				return 1;
+			gp->reads_done += 1;
+			return 0;
+		}
+	return 0;
+}
+
+static void free_lists(void)
+{
+	struct list_head *pos, *n;
+	list_for_each_safe(pos, n, &weak_blocks) {
+		list_del(pos);
+		kfree(list_entry(pos, struct weak_block, list));
+	}
+	list_for_each_safe(pos, n, &weak_pages) {
+		list_del(pos);
+		kfree(list_entry(pos, struct weak_page, list));
+	}
+	list_for_each_safe(pos, n, &grave_pages) {
+		list_del(pos);
+		kfree(list_entry(pos, struct grave_page, list));
+	}
+	kfree(erase_block_wear);
+}
+
+static int setup_wear_reporting(struct mtd_info *mtd)
+{
+	size_t mem;
+
+	wear_eb_count = div_u64(mtd->size, mtd->erasesize);
+	mem = wear_eb_count * sizeof(unsigned long);
+	if (mem / sizeof(unsigned long) != wear_eb_count) {
+		NS_ERR("Too many erase blocks for wear reporting\n");
+		return -ENOMEM;
+	}
+	erase_block_wear = kzalloc(mem, GFP_KERNEL);
+	if (!erase_block_wear) {
+		NS_ERR("Too many erase blocks for wear reporting\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void update_wear(unsigned int erase_block_no)
+{
+	if (!erase_block_wear)
+		return;
+	total_wear += 1;
+	/*
+	 * TODO: Notify this through a debugfs entry,
+	 * instead of showing an error message.
+	 */
+	if (total_wear == 0)
+		NS_ERR("Erase counter total overflow\n");
+	erase_block_wear[erase_block_no] += 1;
+	if (erase_block_wear[erase_block_no] == 0)
+		NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+}
+
+/*
+ * Returns the string representation of 'state' state.
+ */
+static char *get_state_name(uint32_t state)
+{
+	switch (NS_STATE(state)) {
+		case STATE_CMD_READ0:
+			return "STATE_CMD_READ0";
+		case STATE_CMD_READ1:
+			return "STATE_CMD_READ1";
+		case STATE_CMD_PAGEPROG:
+			return "STATE_CMD_PAGEPROG";
+		case STATE_CMD_READOOB:
+			return "STATE_CMD_READOOB";
+		case STATE_CMD_READSTART:
+			return "STATE_CMD_READSTART";
+		case STATE_CMD_ERASE1:
+			return "STATE_CMD_ERASE1";
+		case STATE_CMD_STATUS:
+			return "STATE_CMD_STATUS";
+		case STATE_CMD_SEQIN:
+			return "STATE_CMD_SEQIN";
+		case STATE_CMD_READID:
+			return "STATE_CMD_READID";
+		case STATE_CMD_ERASE2:
+			return "STATE_CMD_ERASE2";
+		case STATE_CMD_RESET:
+			return "STATE_CMD_RESET";
+		case STATE_CMD_RNDOUT:
+			return "STATE_CMD_RNDOUT";
+		case STATE_CMD_RNDOUTSTART:
+			return "STATE_CMD_RNDOUTSTART";
+		case STATE_ADDR_PAGE:
+			return "STATE_ADDR_PAGE";
+		case STATE_ADDR_SEC:
+			return "STATE_ADDR_SEC";
+		case STATE_ADDR_ZERO:
+			return "STATE_ADDR_ZERO";
+		case STATE_ADDR_COLUMN:
+			return "STATE_ADDR_COLUMN";
+		case STATE_DATAIN:
+			return "STATE_DATAIN";
+		case STATE_DATAOUT:
+			return "STATE_DATAOUT";
+		case STATE_DATAOUT_ID:
+			return "STATE_DATAOUT_ID";
+		case STATE_DATAOUT_STATUS:
+			return "STATE_DATAOUT_STATUS";
+		case STATE_READY:
+			return "STATE_READY";
+		case STATE_UNKNOWN:
+			return "STATE_UNKNOWN";
+	}
+
+	NS_ERR("get_state_name: unknown state, BUG\n");
+	return NULL;
+}
+
+/*
+ * Check if command is valid.
+ *
+ * RETURNS: 1 if wrong command, 0 if right.
+ */
+static int check_command(int cmd)
+{
+	switch (cmd) {
+
+	case NAND_CMD_READ0:
+	case NAND_CMD_READ1:
+	case NAND_CMD_READSTART:
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_READOOB:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_STATUS:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_READID:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_RESET:
+	case NAND_CMD_RNDOUT:
+	case NAND_CMD_RNDOUTSTART:
+		return 0;
+
+	default:
+		return 1;
+	}
+}
+
+/*
+ * Returns the state the chip enters after the given command is accepted.
+ */
+static uint32_t get_state_by_command(unsigned command)
+{
+	switch (command) {
+		case NAND_CMD_READ0:
+			return STATE_CMD_READ0;
+		case NAND_CMD_READ1:
+			return STATE_CMD_READ1;
+		case NAND_CMD_PAGEPROG:
+			return STATE_CMD_PAGEPROG;
+		case NAND_CMD_READSTART:
+			return STATE_CMD_READSTART;
+		case NAND_CMD_READOOB:
+			return STATE_CMD_READOOB;
+		case NAND_CMD_ERASE1:
+			return STATE_CMD_ERASE1;
+		case NAND_CMD_STATUS:
+			return STATE_CMD_STATUS;
+		case NAND_CMD_SEQIN:
+			return STATE_CMD_SEQIN;
+		case NAND_CMD_READID:
+			return STATE_CMD_READID;
+		case NAND_CMD_ERASE2:
+			return STATE_CMD_ERASE2;
+		case NAND_CMD_RESET:
+			return STATE_CMD_RESET;
+		case NAND_CMD_RNDOUT:
+			return STATE_CMD_RNDOUT;
+		case NAND_CMD_RNDOUTSTART:
+			return STATE_CMD_RNDOUTSTART;
+	}
+
+	NS_ERR("get_state_by_command: unknown command, BUG\n");
+	return 0;
+}
+
+/*
+ * Move an address byte to the corresponding internal register.
+ */
+static inline void accept_addr_byte(struct nandsim *ns, u_char bt)
+{
+	uint byte = (uint)bt;
+
+	if (ns->regs.count < (ns->geom.pgaddrbytes - ns->geom.secaddrbytes))
+		ns->regs.column |= (byte << 8 * ns->regs.count);
+	else {
+		ns->regs.row |= (byte << 8 * (ns->regs.count -
+						ns->geom.pgaddrbytes +
+						ns->geom.secaddrbytes));
+	}
+
+	return;
+}
+
+/*
+ * Switch to STATE_READY state.
+ */
+static inline void switch_to_ready_state(struct nandsim *ns, u_char status)
+{
+	NS_DBG("switch_to_ready_state: switch to %s state\n", get_state_name(STATE_READY));
+
+	ns->state       = STATE_READY;
+	ns->nxstate     = STATE_UNKNOWN;
+	ns->op          = NULL;
+	ns->npstates    = 0;
+	ns->stateidx    = 0;
+	ns->regs.num    = 0;
+	ns->regs.count  = 0;
+	ns->regs.off    = 0;
+	ns->regs.row    = 0;
+	ns->regs.column = 0;
+	ns->regs.status = status;
+}
+
+/*
+ * If the operation isn't known yet, try to find it in the global array
+ * of supported operations.
+ *
+ * The operation can be unknown for the following reasons.
+ *   1. A new command was accepted and this is the first call to find the
+ *      corresponding states chain. In this case ns->npstates = 0;
+ *   2. There are several operations which begin with the same command(s)
+ *      (for example program from the second half and read from the
+ *      second half operations both begin with the READ1 command). In this
+ *      case the ns->pstates[] array contains previous states.
+ *
+ * Thus, the function tries to find operation containing the following
+ * states (if the 'flag' parameter is 0):
+ *    ns->pstates[0], ... ns->pstates[ns->npstates], ns->state
+ *
+ * If exactly one matching operation is found, it is accepted
+ * (ns->op, ns->state and ns->nxstate are initialized, ns->npstates is
+ * zeroed).
+ *
+ * If there are several matches, the current state is pushed to the
+ * ns->pstates.
+ *
+ * The operation can be unknown only while commands are input to the chip.
+ * As soon as an address is input, the operation must be known.
+ * In that situation the function is called with 'flag' != 0, and the
+ * operation is searched for using the following pattern:
+ *     ns->pstates[0], ... ns->pstates[ns->npstates], <address input>
+ *
+ * It is supposed that this pattern must either match one operation or
+ * none. There can't be ambiguity in that case.
+ *
+ * If no matches are found, the function does the following:
+ *   1. if there are saved states present, try to ignore them and search
+ *      again only using the last command. If nothing was found, switch
+ *      to the STATE_READY state.
+ *   2. if there are no saved states, switch to the STATE_READY state.
+ *
+ * RETURNS: -2 - no matched operations found.
+ *          -1 - several matches.
+ *           0 - operation is found.
+ */
+static int find_operation(struct nandsim *ns, uint32_t flag)
+{
+	int opsfound = 0;
+	int i, j, idx = 0;
+
+	for (i = 0; i < NS_OPER_NUM; i++) {
+
+		int found = 1;
+
+		if (!(ns->options & ops[i].reqopts))
+			/* Ignore operations we can't perform */
+			continue;
+
+		if (flag) {
+			if (!(ops[i].states[ns->npstates] & STATE_ADDR_MASK))
+				continue;
+		} else {
+			if (NS_STATE(ns->state) != NS_STATE(ops[i].states[ns->npstates]))
+				continue;
+		}
+
+		for (j = 0; j < ns->npstates; j++)
+			if (NS_STATE(ops[i].states[j]) != NS_STATE(ns->pstates[j])
+				&& (ns->options & ops[idx].reqopts)) {
+				found = 0;
+				break;
+			}
+
+		if (found) {
+			idx = i;
+			opsfound += 1;
+		}
+	}
+
+	if (opsfound == 1) {
+		/* Exact match */
+		ns->op = &ops[idx].states[0];
+		if (flag) {
+			/*
+			 * In this case find_operation() was called when
+			 * address input has just begun. The address isn't
+			 * fully input yet, so the current state must not be
+			 * one of STATE_ADDR_*; instead, the STATE_ADDR_*
+			 * state must be the next one (ns->nxstate).
+			 */
+			ns->stateidx = ns->npstates - 1;
+		} else {
+			ns->stateidx = ns->npstates;
+		}
+		ns->npstates = 0;
+		ns->state = ns->op[ns->stateidx];
+		ns->nxstate = ns->op[ns->stateidx + 1];
+		NS_DBG("find_operation: operation found, index: %d, state: %s, nxstate %s\n",
+				idx, get_state_name(ns->state), get_state_name(ns->nxstate));
+		return 0;
+	}
+
+	if (opsfound == 0) {
+		/* Nothing was found. Try to ignore previous commands (if any) and search again */
+		if (ns->npstates != 0) {
+			NS_DBG("find_operation: no operation found, try again with state %s\n",
+					get_state_name(ns->state));
+			ns->npstates = 0;
+			return find_operation(ns, 0);
+
+		}
+		NS_DBG("find_operation: no operations found\n");
+		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		return -2;
+	}
+
+	if (flag) {
+		/* This shouldn't happen */
+		NS_DBG("find_operation: BUG, operation must be known if address is input\n");
+		return -2;
+	}
+
+	NS_DBG("find_operation: there is still ambiguity\n");
+
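+	/*
+	 * Illustrative example: both the "read from the second half" and
+	 * "program from the second half" operations begin with the READ1
+	 * command, so right after READ1 more than one operation still
+	 * matches.  The current state is therefore saved in ns->pstates[]
+	 * and the next input byte resolves the ambiguity on a later call.
+	 */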
+	ns->pstates[ns->npstates++] = ns->state;
+
+	return -1;
+}
+
+static void put_pages(struct nandsim *ns)
+{
+	int i;
+
+	for (i = 0; i < ns->held_cnt; i++)
+		put_page(ns->held_pages[i]);
+}
+
+/* Get page cache pages in advance to provide NOFS memory allocation */
+static int get_pages(struct nandsim *ns, struct file *file, size_t count, loff_t pos)
+{
+	pgoff_t index, start_index, end_index;
+	struct page *page;
+	struct address_space *mapping = file->f_mapping;
+
+	start_index = pos >> PAGE_SHIFT;
+	end_index = (pos + count - 1) >> PAGE_SHIFT;
+	if (end_index - start_index + 1 > NS_MAX_HELD_PAGES)
+		return -EINVAL;
+	ns->held_cnt = 0;
+	for (index = start_index; index <= end_index; index++) {
+		page = find_get_page(mapping, index);
+		if (page == NULL) {
+			page = find_or_create_page(mapping, index, GFP_NOFS);
+			if (page == NULL) {
+				write_inode_now(mapping->host, 1);
+				page = find_or_create_page(mapping, index, GFP_NOFS);
+			}
+			if (page == NULL) {
+				put_pages(ns);
+				return -ENOMEM;
+			}
+			unlock_page(page);
+		}
+		ns->held_pages[ns->held_cnt++] = page;
+	}
+	return 0;
+}
+
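+/*
+ * The helpers below temporarily set PF_MEMALLOC around the backing-file
+ * I/O so that allocations on that path may use memory reserves and do not
+ * recurse into filesystem writeback; together with get_pages() populating
+ * the page cache with GFP_NOFS beforehand, this avoids the deadlock
+ * scenario described in prog_page().
+ */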
+static int set_memalloc(void)
+{
+	if (current->flags & PF_MEMALLOC)
+		return 0;
+	current->flags |= PF_MEMALLOC;
+	return 1;
+}
+
+static void clear_memalloc(int memalloc)
+{
+	if (memalloc)
+		current->flags &= ~PF_MEMALLOC;
+}
+
+static ssize_t read_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+{
+	ssize_t tx;
+	int err, memalloc;
+
+	err = get_pages(ns, file, count, pos);
+	if (err)
+		return err;
+	memalloc = set_memalloc();
+	tx = kernel_read(file, pos, buf, count);
+	clear_memalloc(memalloc);
+	put_pages(ns);
+	return tx;
+}
+
+static ssize_t write_file(struct nandsim *ns, struct file *file, void *buf, size_t count, loff_t pos)
+{
+	ssize_t tx;
+	int err, memalloc;
+
+	err = get_pages(ns, file, count, pos);
+	if (err)
+		return err;
+	memalloc = set_memalloc();
+	tx = kernel_write(file, buf, count, pos);
+	clear_memalloc(memalloc);
+	put_pages(ns);
+	return tx;
+}
+
+/*
+ * Returns a pointer to the current page.
+ */
+static inline union ns_mem *NS_GET_PAGE(struct nandsim *ns)
+{
+	return &(ns->pages[ns->regs.row]);
+}
+
+/*
+ * Returns a pointer to the current byte, within the current page.
+ */
+static inline u_char *NS_PAGE_BYTE_OFF(struct nandsim *ns)
+{
+	return NS_GET_PAGE(ns)->byte + ns->regs.column + ns->regs.off;
+}
+
+static int do_read_error(struct nandsim *ns, int num)
+{
+	unsigned int page_no = ns->regs.row;
+
+	if (read_error(page_no)) {
+		prandom_bytes(ns->buf.byte, num);
+		NS_WARN("simulating read error in page %u\n", page_no);
+		return 1;
+	}
+	return 0;
+}
+
+static void do_bit_flips(struct nandsim *ns, int num)
+{
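+	/*
+	 * prandom_u32() returns a 32-bit value, so the check below injects
+	 * bit flips with a probability of 2^22 / 2^32 = 1/1024 per read.
+	 */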
+	if (bitflips && prandom_u32() < (1 << 22)) {
+		int flips = 1;
+		if (bitflips > 1)
+			flips = (prandom_u32() % (int) bitflips) + 1;
+		while (flips--) {
+			int pos = prandom_u32() % (num * 8);
+			ns->buf.byte[pos / 8] ^= (1 << (pos % 8));
+			NS_WARN("read_page: flipping bit %d in page %d "
+				"reading from %d ecc: corrected=%u failed=%u\n",
+				pos, ns->regs.row, ns->regs.column + ns->regs.off,
+				nsmtd->ecc_stats.corrected, nsmtd->ecc_stats.failed);
+		}
+	}
+}
+
+/*
+ * Fill the NAND buffer with data read from the specified page.
+ */
+static void read_page(struct nandsim *ns, int num)
+{
+	union ns_mem *mypage;
+
+	if (ns->cfile) {
+		if (!test_bit(ns->regs.row, ns->pages_written)) {
+			NS_DBG("read_page: page %d not written\n", ns->regs.row);
+			memset(ns->buf.byte, 0xFF, num);
+		} else {
+			loff_t pos;
+			ssize_t tx;
+
+			NS_DBG("read_page: page %d written, reading from %d\n",
+				ns->regs.row, ns->regs.column + ns->regs.off);
+			if (do_read_error(ns, num))
+				return;
+			pos = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+			tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
+			if (tx != num) {
+				NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return;
+			}
+			do_bit_flips(ns, num);
+		}
+		return;
+	}
+
+	mypage = NS_GET_PAGE(ns);
+	if (mypage->byte == NULL) {
+		NS_DBG("read_page: page %d not allocated\n", ns->regs.row);
+		memset(ns->buf.byte, 0xFF, num);
+	} else {
+		NS_DBG("read_page: page %d allocated, reading from %d\n",
+			ns->regs.row, ns->regs.column + ns->regs.off);
+		if (do_read_error(ns, num))
+			return;
+		memcpy(ns->buf.byte, NS_PAGE_BYTE_OFF(ns), num);
+		do_bit_flips(ns, num);
+	}
+}
+
+/*
+ * Erase all pages in the specified sector.
+ */
+static void erase_sector(struct nandsim *ns)
+{
+	union ns_mem *mypage;
+	int i;
+
+	if (ns->cfile) {
+		for (i = 0; i < ns->geom.pgsec; i++)
+			if (__test_and_clear_bit(ns->regs.row + i,
+						 ns->pages_written)) {
+				NS_DBG("erase_sector: freeing page %d\n", ns->regs.row + i);
+			}
+		return;
+	}
+
+	mypage = NS_GET_PAGE(ns);
+	for (i = 0; i < ns->geom.pgsec; i++) {
+		if (mypage->byte != NULL) {
+			NS_DBG("erase_sector: freeing page %d\n", ns->regs.row+i);
+			kmem_cache_free(ns->nand_pages_slab, mypage->byte);
+			mypage->byte = NULL;
+		}
+		mypage++;
+	}
+}
+
+/*
+ * Program the specified page with the contents from the NAND buffer.
+ */
+static int prog_page(struct nandsim *ns, int num)
+{
+	int i;
+	union ns_mem *mypage;
+	u_char *pg_off;
+
+	if (ns->cfile) {
+		loff_t off;
+		ssize_t tx;
+		int all;
+
+		NS_DBG("prog_page: writing page %d\n", ns->regs.row);
+		pg_off = ns->file_buf + ns->regs.column + ns->regs.off;
+		off = (loff_t)NS_RAW_OFFSET(ns) + ns->regs.off;
+		if (!test_bit(ns->regs.row, ns->pages_written)) {
+			all = 1;
+			memset(ns->file_buf, 0xff, ns->geom.pgszoob);
+		} else {
+			all = 0;
+			tx = read_file(ns, ns->cfile, pg_off, num, off);
+			if (tx != num) {
+				NS_ERR("prog_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return -1;
+			}
+		}
+		for (i = 0; i < num; i++)
+			pg_off[i] &= ns->buf.byte[i];
+		if (all) {
+			loff_t pos = (loff_t)ns->regs.row * ns->geom.pgszoob;
+			tx = write_file(ns, ns->cfile, ns->file_buf, ns->geom.pgszoob, pos);
+			if (tx != ns->geom.pgszoob) {
+				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return -1;
+			}
+			__set_bit(ns->regs.row, ns->pages_written);
+		} else {
+			tx = write_file(ns, ns->cfile, pg_off, num, off);
+			if (tx != num) {
+				NS_ERR("prog_page: write error for page %d ret %ld\n", ns->regs.row, (long)tx);
+				return -1;
+			}
+		}
+		return 0;
+	}
+
+	mypage = NS_GET_PAGE(ns);
+	if (mypage->byte == NULL) {
+		NS_DBG("prog_page: allocating page %d\n", ns->regs.row);
+		/*
+		 * We allocate memory with GFP_NOFS because a flash FS may
+		 * utilize this. If it is holding an FS lock, then gets here,
+		 * then kernel memory alloc runs writeback which goes to the FS
+		 * again and deadlocks. This was seen in practice.
+		 */
+		mypage->byte = kmem_cache_alloc(ns->nand_pages_slab, GFP_NOFS);
+		if (mypage->byte == NULL) {
+			NS_ERR("prog_page: error allocating memory for page %d\n", ns->regs.row);
+			return -1;
+		}
+		memset(mypage->byte, 0xFF, ns->geom.pgszoob);
+	}
+
+	pg_off = NS_PAGE_BYTE_OFF(ns);
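+	/*
+	 * Model NAND programming semantics: programming can only clear bits
+	 * (1 -> 0), so AND the new data into the existing page contents.
+	 */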
+	for (i = 0; i < num; i++)
+		pg_off[i] &= ns->buf.byte[i];
+
+	return 0;
+}
+
+/*
+ * If the state has an action bit set, perform the corresponding action.
+ *
+ * RETURNS: 0 on success, -1 on error.
+ */
+static int do_state_action(struct nandsim *ns, uint32_t action)
+{
+	int num;
+	int busdiv = ns->busw == 8 ? 1 : 2;
+	unsigned int erase_block_no, page_no;
+
+	action &= ACTION_MASK;
+
+	/* Check that page address input is correct */
+	if (action != ACTION_SECERASE && ns->regs.row >= ns->geom.pgnum) {
+		NS_WARN("do_state_action: wrong page number (%#x)\n", ns->regs.row);
+		return -1;
+	}
+
+	switch (action) {
+
+	case ACTION_CPY:
+		/*
+		 * Copy page data to the internal buffer.
+		 */
+
+		/* The column must not point past the end of the page (including OOB) */
+		if (ns->regs.column >= (ns->geom.pgszoob - ns->regs.off)) {
+			NS_ERR("do_state_action: column number is too large\n");
+			break;
+		}
+		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+		read_page(ns, num);
+
+		NS_DBG("do_state_action: (ACTION_CPY:) copy %d bytes to int buf, raw offset %d\n",
+			num, NS_RAW_OFFSET(ns) + ns->regs.off);
+
+		if (ns->regs.off == 0)
+			NS_LOG("read page %d\n", ns->regs.row);
+		else if (ns->regs.off < ns->geom.pgsz)
+			NS_LOG("read page %d (second half)\n", ns->regs.row);
+		else
+			NS_LOG("read OOB of page %d\n", ns->regs.row);
+
+		NS_UDELAY(access_delay);
+		NS_UDELAY(input_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+		break;
+
+	case ACTION_SECERASE:
+		/*
+		 * Erase sector.
+		 */
+
+		if (ns->lines.wp) {
+			NS_ERR("do_state_action: device is write-protected, ignore sector erase\n");
+			return -1;
+		}
+
+		if (ns->regs.row >= ns->geom.pgnum - ns->geom.pgsec
+			|| (ns->regs.row & ~(ns->geom.secsz - 1))) {
+			NS_ERR("do_state_action: wrong sector address (%#x)\n", ns->regs.row);
+			return -1;
+		}
+
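+		/*
+		 * Reassemble the full row address: for a sector erase only
+		 * secaddrbytes address bytes were input, and accept_addr_byte()
+		 * stored the first (pgaddrbytes - secaddrbytes) of them in
+		 * ns->regs.column, so shift the row part up and merge the two.
+		 */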
+		ns->regs.row = (ns->regs.row <<
+				8 * (ns->geom.pgaddrbytes - ns->geom.secaddrbytes)) | ns->regs.column;
+		ns->regs.column = 0;
+
+		erase_block_no = ns->regs.row >> (ns->geom.secshift - ns->geom.pgshift);
+
+		NS_DBG("do_state_action: erase sector at address %#x, off = %d\n",
+				ns->regs.row, NS_RAW_OFFSET(ns));
+		NS_LOG("erase sector %u\n", erase_block_no);
+
+		erase_sector(ns);
+
+		NS_MDELAY(erase_delay);
+
+		if (erase_block_wear)
+			update_wear(erase_block_no);
+
+		if (erase_error(erase_block_no)) {
+			NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
+			return -1;
+		}
+
+		break;
+
+	case ACTION_PRGPAGE:
+		/*
+		 * Program page - move internal buffer data to the page.
+		 */
+
+		if (ns->lines.wp) {
+			NS_WARN("do_state_action: device is write-protected, programm\n");
+			return -1;
+		}
+
+		num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+		if (num != ns->regs.count) {
+			NS_ERR("do_state_action: too few bytes were input (%d instead of %d)\n",
+					ns->regs.count, num);
+			return -1;
+		}
+
+		if (prog_page(ns, num) == -1)
+			return -1;
+
+		page_no = ns->regs.row;
+
+		NS_DBG("do_state_action: copy %d bytes from int buf to (%#x, %#x), raw off = %d\n",
+			num, ns->regs.row, ns->regs.column, NS_RAW_OFFSET(ns) + ns->regs.off);
+		NS_LOG("programm page %d\n", ns->regs.row);
+
+		NS_UDELAY(programm_delay);
+		NS_UDELAY(output_cycle * ns->geom.pgsz / 1000 / busdiv);
+
+		if (write_error(page_no)) {
+			NS_WARN("simulating write failure in page %u\n", page_no);
+			return -1;
+		}
+
+		break;
+
+	case ACTION_ZEROOFF:
+		NS_DBG("do_state_action: set internal offset to 0\n");
+		ns->regs.off = 0;
+		break;
+
+	case ACTION_HALFOFF:
+		if (!(ns->options & OPT_PAGE512_8BIT)) {
+			NS_ERR("do_state_action: BUG! can't skip half of page for non-512"
+				"byte page size 8x chips\n");
+			return -1;
+		}
+		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz/2);
+		ns->regs.off = ns->geom.pgsz/2;
+		break;
+
+	case ACTION_OOBOFF:
+		NS_DBG("do_state_action: set internal offset to %d\n", ns->geom.pgsz);
+		ns->regs.off = ns->geom.pgsz;
+		break;
+
+	default:
+		NS_DBG("do_state_action: BUG! unknown action\n");
+	}
+
+	return 0;
+}
+
+/*
+ * Switch simulator's state.
+ */
+static void switch_state(struct nandsim *ns)
+{
+	if (ns->op) {
+		/*
+		 * The current operation has already been identified.
+		 * Just follow the states chain.
+		 */
+
+		ns->stateidx += 1;
+		ns->state = ns->nxstate;
+		ns->nxstate = ns->op[ns->stateidx + 1];
+
+		NS_DBG("switch_state: operation is known, switch to the next state, "
+			"state: %s, nxstate: %s\n",
+			get_state_name(ns->state), get_state_name(ns->nxstate));
+
+		/* See whether we need to do some action */
+		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			return;
+		}
+
+	} else {
+		/*
+		 * We don't yet know which operation we perform.
+		 * Try to identify it.
+		 */
+
+		/*
+		 *  The only event that causes switch_state() to be called
+		 *  with a yet unknown operation is a new command.
+		 */
+		ns->state = get_state_by_command(ns->regs.command);
+
+		NS_DBG("switch_state: operation is unknown, try to find it\n");
+
+		if (find_operation(ns, 0) != 0)
+			return;
+
+		if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			return;
+		}
+	}
+
+	/* For 16x devices column means the page offset in words */
+	if ((ns->nxstate & STATE_ADDR_MASK) && ns->busw == 16) {
+		NS_DBG("switch_state: double the column number for 16x device\n");
+		ns->regs.column <<= 1;
+	}
+
+	if (NS_STATE(ns->nxstate) == STATE_READY) {
+		/*
+		 * The current state is the last. Return to STATE_READY
+		 */
+
+		u_char status = NS_STATUS_OK(ns);
+
+		/* In case of data states, see if all bytes were input/output */
+		if ((ns->state & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK))
+			&& ns->regs.count != ns->regs.num) {
+			NS_WARN("switch_state: not all bytes were processed, %d left\n",
+					ns->regs.num - ns->regs.count);
+			status = NS_STATUS_FAILED(ns);
+		}
+
+		NS_DBG("switch_state: operation complete, switch to STATE_READY state\n");
+
+		switch_to_ready_state(ns, status);
+
+		return;
+	} else if (ns->nxstate & (STATE_DATAIN_MASK | STATE_DATAOUT_MASK)) {
+		/*
+		 * If the next state is data input/output, switch to it now
+		 */
+
+		ns->state      = ns->nxstate;
+		ns->nxstate    = ns->op[++ns->stateidx + 1];
+		ns->regs.num   = ns->regs.count = 0;
+
+		NS_DBG("switch_state: the next state is data I/O, switch, "
+			"state: %s, nxstate: %s\n",
+			get_state_name(ns->state), get_state_name(ns->nxstate));
+
+		/*
+		 * Set the internal register to the count of bytes which
+		 * are expected to be input or output
+		 */
+		switch (NS_STATE(ns->state)) {
+			case STATE_DATAIN:
+			case STATE_DATAOUT:
+				ns->regs.num = ns->geom.pgszoob - ns->regs.off - ns->regs.column;
+				break;
+
+			case STATE_DATAOUT_ID:
+				ns->regs.num = ns->geom.idbytes;
+				break;
+
+			case STATE_DATAOUT_STATUS:
+				ns->regs.count = ns->regs.num = 0;
+				break;
+
+			default:
+				NS_ERR("switch_state: BUG! unknown data state\n");
+		}
+
+	} else if (ns->nxstate & STATE_ADDR_MASK) {
+		/*
+		 * If the next state is address input, set the internal
+		 * register to the number of expected address bytes
+		 */
+
+		ns->regs.count = 0;
+
+		switch (NS_STATE(ns->nxstate)) {
+			case STATE_ADDR_PAGE:
+				ns->regs.num = ns->geom.pgaddrbytes;
+
+				break;
+			case STATE_ADDR_SEC:
+				ns->regs.num = ns->geom.secaddrbytes;
+				break;
+
+			case STATE_ADDR_ZERO:
+				ns->regs.num = 1;
+				break;
+
+			case STATE_ADDR_COLUMN:
+				/* Column address is (pgaddrbytes - secaddrbytes) bytes, normally 2 */
+				ns->regs.num = ns->geom.pgaddrbytes - ns->geom.secaddrbytes;
+				break;
+
+			default:
+				NS_ERR("switch_state: BUG! unknown address state\n");
+		}
+	} else {
+		/*
+		 * Just reset internal counters.
+		 */
+
+		ns->regs.num = 0;
+		ns->regs.count = 0;
+	}
+}
+
+static u_char ns_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nandsim *ns = nand_get_controller_data(chip);
+	u_char outb = 0x00;
+
+	/* Sanity and correctness checks */
+	if (!ns->lines.ce) {
+		NS_ERR("read_byte: chip is disabled, return %#x\n", (uint)outb);
+		return outb;
+	}
+	if (ns->lines.ale || ns->lines.cle) {
+		NS_ERR("read_byte: ALE or CLE pin is high, return %#x\n", (uint)outb);
+		return outb;
+	}
+	if (!(ns->state & STATE_DATAOUT_MASK)) {
+		NS_WARN("read_byte: unexpected data output cycle, state is %s "
+			"return %#x\n", get_state_name(ns->state), (uint)outb);
+		return outb;
+	}
+
+	/* The status register may be read as many times as desired */
+	if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS) {
+		NS_DBG("read_byte: return %#x status\n", ns->regs.status);
+		return ns->regs.status;
+	}
+
+	/* Check if there is any data in the internal buffer which may be read */
+	if (ns->regs.count == ns->regs.num) {
+		NS_WARN("read_byte: no more data to output, return %#x\n", (uint)outb);
+		return outb;
+	}
+
+	switch (NS_STATE(ns->state)) {
+		case STATE_DATAOUT:
+			if (ns->busw == 8) {
+				outb = ns->buf.byte[ns->regs.count];
+				ns->regs.count += 1;
+			} else {
+				outb = (u_char)cpu_to_le16(ns->buf.word[ns->regs.count >> 1]);
+				ns->regs.count += 2;
+			}
+			break;
+		case STATE_DATAOUT_ID:
+			NS_DBG("read_byte: read ID byte %d, total = %d\n", ns->regs.count, ns->regs.num);
+			outb = ns->ids[ns->regs.count];
+			ns->regs.count += 1;
+			break;
+		default:
+			BUG();
+	}
+
+	if (ns->regs.count == ns->regs.num) {
+		NS_DBG("read_byte: all bytes were read\n");
+
+		if (NS_STATE(ns->nxstate) == STATE_READY)
+			switch_state(ns);
+	}
+
+	return outb;
+}
+
+static void ns_nand_write_byte(struct mtd_info *mtd, u_char byte)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nandsim *ns = nand_get_controller_data(chip);
+
+	/* Sanity and correctness checks */
+	if (!ns->lines.ce) {
+		NS_ERR("write_byte: chip is disabled, ignore write\n");
+		return;
+	}
+	if (ns->lines.ale && ns->lines.cle) {
+		NS_ERR("write_byte: ALE and CLE pins are high simultaneously, ignore write\n");
+		return;
+	}
+
+	if (ns->lines.cle == 1) {
+		/*
+		 * The byte written is a command.
+		 */
+
+		if (byte == NAND_CMD_RESET) {
+			NS_LOG("reset chip\n");
+			switch_to_ready_state(ns, NS_STATUS_OK(ns));
+			return;
+		}
+
+		/* Check that the command byte is correct */
+		if (check_command(byte)) {
+			NS_ERR("write_byte: unknown command %#x\n", (uint)byte);
+			return;
+		}
+
+		if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS
+			|| NS_STATE(ns->state) == STATE_DATAOUT) {
+			int row = ns->regs.row;
+
+			switch_state(ns);
+			if (byte == NAND_CMD_RNDOUT)
+				ns->regs.row = row;
+		}
+
+		/* Check if chip is expecting command */
+		if (NS_STATE(ns->nxstate) != STATE_UNKNOWN && !(ns->nxstate & STATE_CMD_MASK)) {
+			/* Do not warn if only 2 id bytes are read */
+			if (!(ns->regs.command == NAND_CMD_READID &&
+			    NS_STATE(ns->state) == STATE_DATAOUT_ID && ns->regs.count == 2)) {
+				/*
+				 * We are in a situation where something other than a
+				 * command was expected, but a command was input. In this
+				 * case ignore the previous command(s)/state(s) and accept
+				 * the last one.
+				 */
+				NS_WARN("write_byte: command (%#x) wasn't expected, expected state is %s, "
+					"ignore previous states\n", (uint)byte, get_state_name(ns->nxstate));
+			}
+			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		}
+
+		NS_DBG("command byte corresponding to %s state accepted\n",
+			get_state_name(get_state_by_command(byte)));
+		ns->regs.command = byte;
+		switch_state(ns);
+
+	} else if (ns->lines.ale == 1) {
+		/*
+		 * The byte written is an address.
+		 */
+
+		if (NS_STATE(ns->nxstate) == STATE_UNKNOWN) {
+
+			NS_DBG("write_byte: operation isn't known yet, identify it\n");
+
+			if (find_operation(ns, 1) < 0)
+				return;
+
+			if ((ns->state & ACTION_MASK) && do_state_action(ns, ns->state) < 0) {
+				switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+				return;
+			}
+
+			ns->regs.count = 0;
+			switch (NS_STATE(ns->nxstate)) {
+				case STATE_ADDR_PAGE:
+					ns->regs.num = ns->geom.pgaddrbytes;
+					break;
+				case STATE_ADDR_SEC:
+					ns->regs.num = ns->geom.secaddrbytes;
+					break;
+				case STATE_ADDR_ZERO:
+					ns->regs.num = 1;
+					break;
+				default:
+					BUG();
+			}
+		}
+
+		/* Check that chip is expecting address */
+		if (!(ns->nxstate & STATE_ADDR_MASK)) {
+			NS_ERR("write_byte: address (%#x) isn't expected, expected state is %s, "
+				"switch to STATE_READY\n", (uint)byte, get_state_name(ns->nxstate));
+			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			return;
+		}
+
+		/* Check whether this byte is expected */
+		if (ns->regs.count == ns->regs.num) {
+			NS_ERR("write_byte: no more address bytes expected\n");
+			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			return;
+		}
+
+		accept_addr_byte(ns, byte);
+
+		ns->regs.count += 1;
+
+		NS_DBG("write_byte: address byte %#x was accepted (%d bytes input, %d expected)\n",
+				(uint)byte, ns->regs.count, ns->regs.num);
+
+		if (ns->regs.count == ns->regs.num) {
+			NS_DBG("address (%#x, %#x) is accepted\n", ns->regs.row, ns->regs.column);
+			switch_state(ns);
+		}
+
+	} else {
+		/*
+		 * The byte written is input data.
+		 */
+
+		/* Check that chip is expecting data input */
+		if (!(ns->state & STATE_DATAIN_MASK)) {
+			NS_ERR("write_byte: data input (%#x) isn't expected, state is %s, "
+				"switch to %s\n", (uint)byte,
+				get_state_name(ns->state), get_state_name(STATE_READY));
+			switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+			return;
+		}
+
+		/* Check whether this byte is expected */
+		if (ns->regs.count == ns->regs.num) {
+			NS_WARN("write_byte: %u input bytes has already been accepted, ignore write\n",
+					ns->regs.num);
+			return;
+		}
+
+		if (ns->busw == 8) {
+			ns->buf.byte[ns->regs.count] = byte;
+			ns->regs.count += 1;
+		} else {
+			ns->buf.word[ns->regs.count >> 1] = cpu_to_le16((uint16_t)byte);
+			ns->regs.count += 2;
+		}
+	}
+
+	return;
+}
+
+static void ns_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int bitmask)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nandsim *ns = nand_get_controller_data(chip);
+
+	ns->lines.cle = bitmask & NAND_CLE ? 1 : 0;
+	ns->lines.ale = bitmask & NAND_ALE ? 1 : 0;
+	ns->lines.ce = bitmask & NAND_NCE ? 1 : 0;
+
+	if (cmd != NAND_CMD_NONE)
+		ns_nand_write_byte(mtd, cmd);
+}
+
+static int ns_device_ready(struct mtd_info *mtd)
+{
+	NS_DBG("device_ready\n");
+	return 1;
+}
+
+static uint16_t ns_nand_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	NS_DBG("read_word\n");
+
+	return chip->read_byte(mtd) | (chip->read_byte(mtd) << 8);
+}
+
+static void ns_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nandsim *ns = nand_get_controller_data(chip);
+
+	/* Check that chip is expecting data input */
+	if (!(ns->state & STATE_DATAIN_MASK)) {
+		NS_ERR("write_buf: data input isn't expected, state is %s, "
+			"switch to STATE_READY\n", get_state_name(ns->state));
+		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		return;
+	}
+
+	/* Check if these are expected bytes */
+	if (ns->regs.count + len > ns->regs.num) {
+		NS_ERR("write_buf: too many input bytes\n");
+		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		return;
+	}
+
+	memcpy(ns->buf.byte + ns->regs.count, buf, len);
+	ns->regs.count += len;
+
+	if (ns->regs.count == ns->regs.num) {
+		NS_DBG("write_buf: %d bytes were written\n", ns->regs.count);
+	}
+}
+
+static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nandsim *ns = nand_get_controller_data(chip);
+
+	/* Sanity and correctness checks */
+	if (!ns->lines.ce) {
+		NS_ERR("read_buf: chip is disabled\n");
+		return;
+	}
+	if (ns->lines.ale || ns->lines.cle) {
+		NS_ERR("read_buf: ALE or CLE pin is high\n");
+		return;
+	}
+	if (!(ns->state & STATE_DATAOUT_MASK)) {
+		NS_WARN("read_buf: unexpected data output cycle, current state is %s\n",
+			get_state_name(ns->state));
+		return;
+	}
+
+	if (NS_STATE(ns->state) != STATE_DATAOUT) {
+		int i;
+
+		for (i = 0; i < len; i++)
+			buf[i] = mtd_to_nand(mtd)->read_byte(mtd);
+
+		return;
+	}
+
+	/* Check if these are expected bytes */
+	if (ns->regs.count + len > ns->regs.num) {
+		NS_ERR("read_buf: too many bytes to read\n");
+		switch_to_ready_state(ns, NS_STATUS_FAILED(ns));
+		return;
+	}
+
+	memcpy(buf, ns->buf.byte + ns->regs.count, len);
+	ns->regs.count += len;
+
+	if (ns->regs.count == ns->regs.num) {
+		if (NS_STATE(ns->nxstate) == STATE_READY)
+			switch_state(ns);
+	}
+
+	return;
+}
+
+/*
+ * Module initialization function
+ */
+static int __init ns_init_module(void)
+{
+	struct nand_chip *chip;
+	struct nandsim *nand;
+	int retval = -ENOMEM, i;
+
+	if (bus_width != 8 && bus_width != 16) {
+		NS_ERR("wrong bus width (%d), use only 8 or 16\n", bus_width);
+		return -EINVAL;
+	}
+
+	/* Allocate and initialize mtd_info, nand_chip and nandsim structures */
+	chip = kzalloc(sizeof(struct nand_chip) + sizeof(struct nandsim),
+		       GFP_KERNEL);
+	if (!chip) {
+		NS_ERR("unable to allocate core structures.\n");
+		return -ENOMEM;
+	}
+	nsmtd       = nand_to_mtd(chip);
+	nand        = (struct nandsim *)(chip + 1);
+	nand_set_controller_data(chip, (void *)nand);
+
+	/*
+	 * Register simulator's callbacks.
+	 */
+	chip->cmd_ctrl	 = ns_hwcontrol;
+	chip->read_byte  = ns_nand_read_byte;
+	chip->dev_ready  = ns_device_ready;
+	chip->write_buf  = ns_nand_write_buf;
+	chip->read_buf   = ns_nand_read_buf;
+	chip->read_word  = ns_nand_read_word;
+	chip->ecc.mode   = NAND_ECC_SOFT;
+	chip->ecc.algo   = NAND_ECC_HAMMING;
+	/*
+	 * The NAND_SKIP_BBTSCAN option is necessary for the 'overridesize'
+	 * and 'badblocks' parameters to work.
+	 */
+	chip->options   |= NAND_SKIP_BBTSCAN;
+
+	switch (bbt) {
+	case 2:
+		 chip->bbt_options |= NAND_BBT_NO_OOB;
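+		/* fall through: a no-OOB BBT is also a flash-based BBT */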
+	case 1:
+		 chip->bbt_options |= NAND_BBT_USE_FLASH;
+	case 0:
+		break;
+	default:
+		NS_ERR("bbt has to be 0..2\n");
+		retval = -EINVAL;
+		goto error;
+	}
+	/*
+	 * Perform minimum nandsim structure initialization to handle
+	 * the initial ID read command correctly
+	 */
+	if (id_bytes[6] != 0xFF || id_bytes[7] != 0xFF)
+		nand->geom.idbytes = 8;
+	else if (id_bytes[4] != 0xFF || id_bytes[5] != 0xFF)
+		nand->geom.idbytes = 6;
+	else if (id_bytes[2] != 0xFF || id_bytes[3] != 0xFF)
+		nand->geom.idbytes = 4;
+	else
+		nand->geom.idbytes = 2;
+	nand->regs.status = NS_STATUS_OK(nand);
+	nand->nxstate = STATE_UNKNOWN;
+	nand->options |= OPT_PAGE512; /* temporary value */
+	memcpy(nand->ids, id_bytes, sizeof(nand->ids));
+	if (bus_width == 16) {
+		nand->busw = 16;
+		chip->options |= NAND_BUSWIDTH_16;
+	}
+
+	nsmtd->owner = THIS_MODULE;
+
+	if ((retval = parse_weakblocks()) != 0)
+		goto error;
+
+	if ((retval = parse_weakpages()) != 0)
+		goto error;
+
+	if ((retval = parse_gravepages()) != 0)
+		goto error;
+
+	retval = nand_scan_ident(nsmtd, 1, NULL);
+	if (retval) {
+		NS_ERR("cannot scan NAND Simulator device\n");
+		if (retval > 0)
+			retval = -ENXIO;
+		goto error;
+	}
+
+	if (bch) {
+		unsigned int eccsteps, eccbytes;
+		if (!mtd_nand_has_bch()) {
+			NS_ERR("BCH ECC support is disabled\n");
+			retval = -EINVAL;
+			goto error;
+		}
+		/* use 512-byte ecc blocks */
+		eccsteps = nsmtd->writesize/512;
+		eccbytes = (bch*13+7)/8;
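+		/*
+		 * BCH over 512-byte sectors needs 13 bits (GF(2^13)) per
+		 * correctable bit error, rounded up to whole bytes: e.g.
+		 * bch == 8 needs 8 * 13 = 104 bits, i.e. 13 ECC bytes.
+		 */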
+		/* do not bother supporting small page devices */
+		if ((nsmtd->oobsize < 64) || !eccsteps) {
+			NS_ERR("bch not available on small page devices\n");
+			retval = -EINVAL;
+			goto error;
+		}
+		if ((eccbytes*eccsteps+2) > nsmtd->oobsize) {
+			NS_ERR("invalid bch value %u\n", bch);
+			retval = -EINVAL;
+			goto error;
+		}
+		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_BCH;
+		chip->ecc.size = 512;
+		chip->ecc.strength = bch;
+		chip->ecc.bytes = eccbytes;
+		NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size);
+	}
+
+	retval = nand_scan_tail(nsmtd);
+	if (retval) {
+		NS_ERR("can't register NAND Simulator\n");
+		if (retval > 0)
+			retval = -ENXIO;
+		goto error;
+	}
+
+	if (overridesize) {
+		uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
+		if (new_size >> overridesize != nsmtd->erasesize) {
+			NS_ERR("overridesize is too big\n");
+			retval = -EINVAL;
+			goto err_exit;
+		}
+		/* N.B. This relies on nand_scan not doing anything with the size before we change it */
+		nsmtd->size = new_size;
+		chip->chipsize = new_size;
+		chip->chip_shift = ffs(nsmtd->erasesize) + overridesize - 1;
+		chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+	}
+
+	if ((retval = setup_wear_reporting(nsmtd)) != 0)
+		goto err_exit;
+
+	if ((retval = nandsim_debugfs_create(nand)) != 0)
+		goto err_exit;
+
+	if ((retval = init_nandsim(nsmtd)) != 0)
+		goto err_exit;
+
+	if ((retval = chip->scan_bbt(nsmtd)) != 0)
+		goto err_exit;
+
+	if ((retval = parse_badblocks(nand, nsmtd)) != 0)
+		goto err_exit;
+
+	/* Register NAND partitions */
+	retval = mtd_device_register(nsmtd, &nand->partitions[0],
+				     nand->nbparts);
+	if (retval != 0)
+		goto err_exit;
+
+	return 0;
+
+err_exit:
+	free_nandsim(nand);
+	nand_release(nsmtd);
+	for (i = 0; i < ARRAY_SIZE(nand->partitions); ++i)
+		kfree(nand->partitions[i].name);
+error:
+	kfree(chip);
+	free_lists();
+
+	return retval;
+}
+
+module_init(ns_init_module);
+
+/*
+ * Module clean-up function
+ */
+static void __exit ns_cleanup_module(void)
+{
+	struct nand_chip *chip = mtd_to_nand(nsmtd);
+	struct nandsim *ns = nand_get_controller_data(chip);
+	int i;
+
+	nandsim_debugfs_remove(ns);
+	free_nandsim(ns);    /* Free nandsim private resources */
+	nand_release(nsmtd); /* Unregister driver */
+	for (i = 0; i < ARRAY_SIZE(ns->partitions); ++i)
+		kfree(ns->partitions[i].name);
+	kfree(mtd_to_nand(nsmtd));        /* Free other structures */
+	free_lists();
+}
+
+module_exit(ns_cleanup_module);
+
+MODULE_LICENSE ("GPL");
+MODULE_AUTHOR ("Artem B. Bityuckiy");
+MODULE_DESCRIPTION ("The NAND flash simulator");
diff --git a/drivers/mtd/nand/rawnand/ndfc.c b/drivers/mtd/nand/rawnand/ndfc.c
new file mode 100644
index 000000000000..d8a806894937
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/ndfc.c
@@ -0,0 +1,286 @@ 
+/*
+ *  Overview:
+ *   Platform independent driver for the NDFC (NAND Flash Controller)
+ *   integrated into EP440 cores
+ *
+ *   Ported to an OF platform driver by Sean MacLennan
+ *
+ *   The NDFC supports multiple chips, but this driver only supports a
+ *   single chip since I do not have access to any boards with
+ *   multiple chips.
+ *
+ *  Author: Thomas Gleixner
+ *
+ *  Copyright 2006 IBM
+ *  Copyright 2008 PIKA Technologies
+ *    Sean MacLennan <smaclennan@pikatech.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ */
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/ndfc.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <asm/io.h>
+
+#define NDFC_MAX_CS    4
+
+struct ndfc_controller {
+	struct platform_device *ofdev;
+	void __iomem *ndfcbase;
+	struct nand_chip chip;
+	int chip_select;
+	struct nand_hw_control ndfc_control;
+};
+
+static struct ndfc_controller ndfc_ctrl[NDFC_MAX_CS];
+
+static void ndfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	uint32_t ccr;
+	struct nand_chip *nchip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(nchip);
+
+	ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+	if (chip >= 0) {
+		ccr &= ~NDFC_CCR_BS_MASK;
+		ccr |= NDFC_CCR_BS(chip + ndfc->chip_select);
+	} else
+		ccr |= NDFC_CCR_RESET_CE;
+	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+}
+
+static void ndfc_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_CMD);
+	else
+		writel(cmd & 0xFF, ndfc->ndfcbase + NDFC_ALE);
+}
+
+static int ndfc_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+	return in_be32(ndfc->ndfcbase + NDFC_STAT) & NDFC_STAT_IS_READY;
+}
+
+static void ndfc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	uint32_t ccr;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+
+	ccr = in_be32(ndfc->ndfcbase + NDFC_CCR);
+	ccr |= NDFC_CCR_RESET_ECC;
+	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+	wmb();
+}
+
+static int ndfc_calculate_ecc(struct mtd_info *mtd,
+			      const u_char *dat, u_char *ecc_code)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+	uint32_t ecc;
+	uint8_t *p = (uint8_t *)&ecc;
+
+	wmb();
+	ecc = in_be32(ndfc->ndfcbase + NDFC_ECC);
+	/* The NDFC uses Smart Media (SMC) byte order */
+	ecc_code[0] = p[1];
+	ecc_code[1] = p[2];
+	ecc_code[2] = p[3];
+
+	return 0;
+}
+
+/*
+ * Speedups for buffer read/write/verify
+ *
+ * The NDFC allows 32-bit reads/writes of data, so we can speed up the
+ * buffer functions. No further checking is needed, as nand_base always
+ * reads/writes page aligned data.
+ */
+static void ndfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+	uint32_t *p = (uint32_t *) buf;
+
+	for(;len > 0; len -= 4)
+		*p++ = in_be32(ndfc->ndfcbase + NDFC_DATA);
+}
+
+static void ndfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct ndfc_controller *ndfc = nand_get_controller_data(chip);
+	uint32_t *p = (uint32_t *) buf;
+
+	for(;len > 0; len -= 4)
+		out_be32(ndfc->ndfcbase + NDFC_DATA, *p++);
+}
+
+/*
+ * Initialize chip structure
+ */
+static int ndfc_chip_init(struct ndfc_controller *ndfc,
+			  struct device_node *node)
+{
+	struct device_node *flash_np;
+	struct nand_chip *chip = &ndfc->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	int ret;
+
+	chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
+	chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
+	chip->cmd_ctrl = ndfc_hwcontrol;
+	chip->dev_ready = ndfc_ready;
+	chip->select_chip = ndfc_select_chip;
+	chip->chip_delay = 50;
+	chip->controller = &ndfc->ndfc_control;
+	chip->read_buf = ndfc_read_buf;
+	chip->write_buf = ndfc_write_buf;
+	chip->ecc.correct = nand_correct_data;
+	chip->ecc.hwctl = ndfc_enable_hwecc;
+	chip->ecc.calculate = ndfc_calculate_ecc;
+	chip->ecc.mode = NAND_ECC_HW;
+	chip->ecc.size = 256;
+	chip->ecc.bytes = 3;
+	chip->ecc.strength = 1;
+	nand_set_controller_data(chip, ndfc);
+
+	mtd->dev.parent = &ndfc->ofdev->dev;
+
+	flash_np = of_get_next_child(node, NULL);
+	if (!flash_np)
+		return -ENODEV;
+	nand_set_flash_node(chip, flash_np);
+
+	mtd->name = kasprintf(GFP_KERNEL, "%s.%s", dev_name(&ndfc->ofdev->dev),
+			      flash_np->name);
+	if (!mtd->name) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ret = nand_scan(mtd, 1);
+	if (ret)
+		goto err;
+
+	ret = mtd_device_register(mtd, NULL, 0);
+
+err:
+	of_node_put(flash_np);
+	if (ret)
+		kfree(mtd->name);
+	return ret;
+}
+
+static int ndfc_probe(struct platform_device *ofdev)
+{
+	struct ndfc_controller *ndfc;
+	const __be32 *reg;
+	u32 ccr;
+	u32 cs;
+	int err, len;
+
+	/* Read the reg property to get the chip select */
+	reg = of_get_property(ofdev->dev.of_node, "reg", &len);
+	if (reg == NULL || len != 12) {
+		dev_err(&ofdev->dev, "unable read reg property (%d)\n", len);
+		return -ENOENT;
+	}
+
+	cs = be32_to_cpu(reg[0]);
+	if (cs >= NDFC_MAX_CS) {
+		dev_err(&ofdev->dev, "invalid CS number (%d)\n", cs);
+		return -EINVAL;
+	}
+
+	ndfc = &ndfc_ctrl[cs];
+	ndfc->chip_select = cs;
+
+	nand_hw_control_init(&ndfc->ndfc_control);
+	ndfc->ofdev = ofdev;
+	dev_set_drvdata(&ofdev->dev, ndfc);
+
+	ndfc->ndfcbase = of_iomap(ofdev->dev.of_node, 0);
+	if (!ndfc->ndfcbase) {
+		dev_err(&ofdev->dev, "failed to get memory\n");
+		return -EIO;
+	}
+
+	ccr = NDFC_CCR_BS(ndfc->chip_select);
+
+	/* It is ok if ccr does not exist - just default to 0 */
+	reg = of_get_property(ofdev->dev.of_node, "ccr", NULL);
+	if (reg)
+		ccr |= be32_to_cpup(reg);
+
+	out_be32(ndfc->ndfcbase + NDFC_CCR, ccr);
+
+	/* Set the bank settings if given */
+	reg = of_get_property(ofdev->dev.of_node, "bank-settings", NULL);
+	if (reg) {
+		int offset = NDFC_BCFG0 + (ndfc->chip_select << 2);
+		out_be32(ndfc->ndfcbase + offset, be32_to_cpup(reg));
+	}
+
+	err = ndfc_chip_init(ndfc, ofdev->dev.of_node);
+	if (err) {
+		iounmap(ndfc->ndfcbase);
+		return err;
+	}
+
+	return 0;
+}
+
+static int ndfc_remove(struct platform_device *ofdev)
+{
+	struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
+	struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);
+
+	nand_release(mtd);
+	kfree(mtd->name);
+
+	return 0;
+}
+
+static const struct of_device_id ndfc_match[] = {
+	{ .compatible = "ibm,ndfc", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, ndfc_match);
+
+static struct platform_driver ndfc_driver = {
+	.driver = {
+		.name = "ndfc",
+		.of_match_table = ndfc_match,
+	},
+	.probe = ndfc_probe,
+	.remove = ndfc_remove,
+};
+
+module_platform_driver(ndfc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
+MODULE_DESCRIPTION("OF Platform driver for NDFC");
diff --git a/drivers/mtd/nand/rawnand/nuc900_nand.c b/drivers/mtd/nand/rawnand/nuc900_nand.c
new file mode 100644
index 000000000000..7bb4d2ea9342
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/nuc900_nand.c
@@ -0,0 +1,306 @@ 
+/*
+ * Copyright © 2009 Nuvoton technology corporation.
+ *
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#define REG_FMICSR   	0x00
+#define REG_SMCSR    	0xa0
+#define REG_SMISR    	0xac
+#define REG_SMCMD    	0xb0
+#define REG_SMADDR   	0xb4
+#define REG_SMDATA   	0xb8
+
+#define RESET_FMI	0x01
+#define NAND_EN		0x08
+#define READYBUSY	(0x01 << 18)
+
+#define SWRST		0x01
+#define PSIZE		(0x01 << 3)
+#define DMARWEN		(0x03 << 1)
+#define BUSWID		(0x01 << 4)
+#define ECC4EN		(0x01 << 5)
+#define WP		(0x01 << 24)
+#define NANDCS		(0x01 << 25)
+#define ENDADDR		(0x01 << 31)
+
+#define read_data_reg(dev)		\
+	__raw_readl((dev)->reg + REG_SMDATA)
+
+#define write_data_reg(dev, val)	\
+	__raw_writel((val), (dev)->reg + REG_SMDATA)
+
+#define write_cmd_reg(dev, val)		\
+	__raw_writel((val), (dev)->reg + REG_SMCMD)
+
+#define write_addr_reg(dev, val)	\
+	__raw_writel((val), (dev)->reg + REG_SMADDR)
+
+struct nuc900_nand {
+	struct nand_chip chip;
+	void __iomem *reg;
+	struct clk *clk;
+	spinlock_t lock;
+};
+
+static inline struct nuc900_nand *mtd_to_nuc900(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct nuc900_nand, chip);
+}
+
+static const struct mtd_partition partitions[] = {
+	{
+	 .name = "NAND FS 0",
+	 .offset = 0,
+	 .size = 8 * 1024 * 1024
+	},
+	{
+	 .name = "NAND FS 1",
+	 .offset = MTDPART_OFS_APPEND,
+	 .size = MTDPART_SIZ_FULL
+	}
+};
+
+static unsigned char nuc900_nand_read_byte(struct mtd_info *mtd)
+{
+	unsigned char ret;
+	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+
+	ret = (unsigned char)read_data_reg(nand);
+
+	return ret;
+}
+
+static void nuc900_nand_read_buf(struct mtd_info *mtd,
+				 unsigned char *buf, int len)
+{
+	int i;
+	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+
+	for (i = 0; i < len; i++)
+		buf[i] = (unsigned char)read_data_reg(nand);
+}
+
+static void nuc900_nand_write_buf(struct mtd_info *mtd,
+				  const unsigned char *buf, int len)
+{
+	int i;
+	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+
+	for (i = 0; i < len; i++)
+		write_data_reg(nand, buf[i]);
+}
+
+static int nuc900_check_rb(struct nuc900_nand *nand)
+{
+	unsigned int val;
+	spin_lock(&nand->lock);
+	val = __raw_readl(nand->reg + REG_SMISR);
+	val &= READYBUSY;
+	spin_unlock(&nand->lock);
+
+	return val;
+}
+
+static int nuc900_nand_devready(struct mtd_info *mtd)
+{
+	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+	int ready;
+
+	ready = (nuc900_check_rb(nand)) ? 1 : 0;
+	return ready;
+}
+
+static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command,
+				   int column, int page_addr)
+{
+	register struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nuc900_nand *nand = mtd_to_nuc900(mtd);
+
+	if (command == NAND_CMD_READOOB) {
+		column += mtd->writesize;
+		command = NAND_CMD_READ0;
+	}
+
+	write_cmd_reg(nand, command & 0xff);
+
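+	/*
+	 * The ENDADDR bit (bit 31) in the address register write presumably
+	 * marks the last address cycle of the sequence for the controller.
+	 */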
+	if (column != -1 || page_addr != -1) {
+
+		if (column != -1) {
+			if (chip->options & NAND_BUSWIDTH_16 &&
+					!nand_opcode_8bits(command))
+				column >>= 1;
+			write_addr_reg(nand, column);
+			write_addr_reg(nand, column >> 8 | ENDADDR);
+		}
+		if (page_addr != -1) {
+			write_addr_reg(nand, page_addr);
+
+			if (chip->chipsize > (128 << 20)) {
+				write_addr_reg(nand, page_addr >> 8);
+				write_addr_reg(nand, page_addr >> 16 | ENDADDR);
+			} else {
+				write_addr_reg(nand, page_addr >> 8 | ENDADDR);
+			}
+		}
+	}
+
+	switch (command) {
+	case NAND_CMD_CACHEDPROG:
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+	case NAND_CMD_ERASE2:
+	case NAND_CMD_SEQIN:
+	case NAND_CMD_RNDIN:
+	case NAND_CMD_STATUS:
+		return;
+
+	case NAND_CMD_RESET:
+		if (chip->dev_ready)
+			break;
+		udelay(chip->chip_delay);
+
+		write_cmd_reg(nand, NAND_CMD_STATUS);
+		write_cmd_reg(nand, command);
+
+		while (!nuc900_check_rb(nand))
+			;
+
+		return;
+
+	case NAND_CMD_RNDOUT:
+		write_cmd_reg(nand, NAND_CMD_RNDOUTSTART);
+		return;
+
+	case NAND_CMD_READ0:
+
+		write_cmd_reg(nand, NAND_CMD_READSTART);
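+		/* fall through - wait for the device to become ready */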
+	default:
+
+		if (!chip->dev_ready) {
+			udelay(chip->chip_delay);
+			return;
+		}
+	}
+
+	/* Apply this short delay always to ensure that we do wait tWB in
+	 * any case on any machine. */
+	ndelay(100);
+
+	while (!chip->dev_ready(mtd))
+		;
+}
+
+
+static void nuc900_nand_enable(struct nuc900_nand *nand)
+{
+	unsigned int val;
+	spin_lock(&nand->lock);
+	__raw_writel(RESET_FMI, (nand->reg + REG_FMICSR));
+
+	val = __raw_readl(nand->reg + REG_FMICSR);
+
+	if (!(val & NAND_EN))
+		__raw_writel(val | NAND_EN, nand->reg + REG_FMICSR);
+
+	val = __raw_readl(nand->reg + REG_SMCSR);
+
+	val &= ~(SWRST|PSIZE|DMARWEN|BUSWID|ECC4EN|NANDCS);
+	val |= WP;
+
+	__raw_writel(val, nand->reg + REG_SMCSR);
+
+	spin_unlock(&nand->lock);
+}
+
+static int nuc900_nand_probe(struct platform_device *pdev)
+{
+	struct nuc900_nand *nuc900_nand;
+	struct nand_chip *chip;
+	struct mtd_info *mtd;
+	struct resource *res;
+
+	nuc900_nand = devm_kzalloc(&pdev->dev, sizeof(struct nuc900_nand),
+				   GFP_KERNEL);
+	if (!nuc900_nand)
+		return -ENOMEM;
+	chip = &(nuc900_nand->chip);
+	mtd = nand_to_mtd(chip);
+
+	mtd->dev.parent		= &pdev->dev;
+	spin_lock_init(&nuc900_nand->lock);
+
+	nuc900_nand->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(nuc900_nand->clk))
+		return -ENOENT;
+	clk_enable(nuc900_nand->clk);
+
+	chip->cmdfunc		= nuc900_nand_command_lp;
+	chip->dev_ready		= nuc900_nand_devready;
+	chip->read_byte		= nuc900_nand_read_byte;
+	chip->write_buf		= nuc900_nand_write_buf;
+	chip->read_buf		= nuc900_nand_read_buf;
+	chip->chip_delay	= 50;
+	chip->options		= 0;
+	chip->ecc.mode		= NAND_ECC_SOFT;
+	chip->ecc.algo		= NAND_ECC_HAMMING;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nuc900_nand->reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(nuc900_nand->reg))
+		return PTR_ERR(nuc900_nand->reg);
+
+	nuc900_nand_enable(nuc900_nand);
+
+	if (nand_scan(mtd, 1))
+		return -ENXIO;
+
+	mtd_device_register(mtd, partitions, ARRAY_SIZE(partitions));
+
+	platform_set_drvdata(pdev, nuc900_nand);
+
+	return 0;
+}
+
+static int nuc900_nand_remove(struct platform_device *pdev)
+{
+	struct nuc900_nand *nuc900_nand = platform_get_drvdata(pdev);
+
+	nand_release(nand_to_mtd(&nuc900_nand->chip));
+	clk_disable(nuc900_nand->clk);
+
+	return 0;
+}
+
+static struct platform_driver nuc900_nand_driver = {
+	.probe		= nuc900_nand_probe,
+	.remove		= nuc900_nand_remove,
+	.driver		= {
+		.name	= "nuc900-fmi",
+	},
+};
+
+module_platform_driver(nuc900_nand_driver);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-fmi");
diff --git a/drivers/mtd/nand/rawnand/omap2.c b/drivers/mtd/nand/rawnand/omap2.c
new file mode 100644
index 000000000000..ebfa1751051d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/omap2.c
@@ -0,0 +1,2214 @@ 
+/*
+ * Copyright © 2004 Texas Instruments, Jian Zhang <jzhang@ti.com>
+ * Copyright © 2004 Micron Technology Inc.
+ * Copyright © 2004 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/omap-dma.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/mtd/nand_bch.h>
+#include <linux/platform_data/elm.h>
+
+#include <linux/omap-gpmc.h>
+#include <linux/platform_data/mtd-nand-omap2.h>
+
+#define	DRIVER_NAME	"omap2-nand"
+#define	OMAP_NAND_TIMEOUT_MS	5000
+
+#define NAND_Ecc_P1e		(1 << 0)
+#define NAND_Ecc_P2e		(1 << 1)
+#define NAND_Ecc_P4e		(1 << 2)
+#define NAND_Ecc_P8e		(1 << 3)
+#define NAND_Ecc_P16e		(1 << 4)
+#define NAND_Ecc_P32e		(1 << 5)
+#define NAND_Ecc_P64e		(1 << 6)
+#define NAND_Ecc_P128e		(1 << 7)
+#define NAND_Ecc_P256e		(1 << 8)
+#define NAND_Ecc_P512e		(1 << 9)
+#define NAND_Ecc_P1024e		(1 << 10)
+#define NAND_Ecc_P2048e		(1 << 11)
+
+#define NAND_Ecc_P1o		(1 << 16)
+#define NAND_Ecc_P2o		(1 << 17)
+#define NAND_Ecc_P4o		(1 << 18)
+#define NAND_Ecc_P8o		(1 << 19)
+#define NAND_Ecc_P16o		(1 << 20)
+#define NAND_Ecc_P32o		(1 << 21)
+#define NAND_Ecc_P64o		(1 << 22)
+#define NAND_Ecc_P128o		(1 << 23)
+#define NAND_Ecc_P256o		(1 << 24)
+#define NAND_Ecc_P512o		(1 << 25)
+#define NAND_Ecc_P1024o		(1 << 26)
+#define NAND_Ecc_P2048o		(1 << 27)
+
+#define TF(value)	(value ? 1 : 0)
+
+#define P2048e(a)	(TF(a & NAND_Ecc_P2048e)	<< 0)
+#define P2048o(a)	(TF(a & NAND_Ecc_P2048o)	<< 1)
+#define P1e(a)		(TF(a & NAND_Ecc_P1e)		<< 2)
+#define P1o(a)		(TF(a & NAND_Ecc_P1o)		<< 3)
+#define P2e(a)		(TF(a & NAND_Ecc_P2e)		<< 4)
+#define P2o(a)		(TF(a & NAND_Ecc_P2o)		<< 5)
+#define P4e(a)		(TF(a & NAND_Ecc_P4e)		<< 6)
+#define P4o(a)		(TF(a & NAND_Ecc_P4o)		<< 7)
+
+#define P8e(a)		(TF(a & NAND_Ecc_P8e)		<< 0)
+#define P8o(a)		(TF(a & NAND_Ecc_P8o)		<< 1)
+#define P16e(a)		(TF(a & NAND_Ecc_P16e)		<< 2)
+#define P16o(a)		(TF(a & NAND_Ecc_P16o)		<< 3)
+#define P32e(a)		(TF(a & NAND_Ecc_P32e)		<< 4)
+#define P32o(a)		(TF(a & NAND_Ecc_P32o)		<< 5)
+#define P64e(a)		(TF(a & NAND_Ecc_P64e)		<< 6)
+#define P64o(a)		(TF(a & NAND_Ecc_P64o)		<< 7)
+
+#define P128e(a)	(TF(a & NAND_Ecc_P128e)		<< 0)
+#define P128o(a)	(TF(a & NAND_Ecc_P128o)		<< 1)
+#define P256e(a)	(TF(a & NAND_Ecc_P256e)		<< 2)
+#define P256o(a)	(TF(a & NAND_Ecc_P256o)		<< 3)
+#define P512e(a)	(TF(a & NAND_Ecc_P512e)		<< 4)
+#define P512o(a)	(TF(a & NAND_Ecc_P512o)		<< 5)
+#define P1024e(a)	(TF(a & NAND_Ecc_P1024e)	<< 6)
+#define P1024o(a)	(TF(a & NAND_Ecc_P1024o)	<< 7)
+
+#define P8e_s(a)	(TF(a & NAND_Ecc_P8e)		<< 0)
+#define P8o_s(a)	(TF(a & NAND_Ecc_P8o)		<< 1)
+#define P16e_s(a)	(TF(a & NAND_Ecc_P16e)		<< 2)
+#define P16o_s(a)	(TF(a & NAND_Ecc_P16o)		<< 3)
+#define P1e_s(a)	(TF(a & NAND_Ecc_P1e)		<< 4)
+#define P1o_s(a)	(TF(a & NAND_Ecc_P1o)		<< 5)
+#define P2e_s(a)	(TF(a & NAND_Ecc_P2e)		<< 6)
+#define P2o_s(a)	(TF(a & NAND_Ecc_P2o)		<< 7)
+
+#define P4e_s(a)	(TF(a & NAND_Ecc_P4e)		<< 0)
+#define P4o_s(a)	(TF(a & NAND_Ecc_P4o)		<< 1)
+
+#define	PREFETCH_CONFIG1_CS_SHIFT	24
+#define	ECC_CONFIG_CS_SHIFT		1
+#define	CS_MASK				0x7
+#define	ENABLE_PREFETCH			(0x1 << 7)
+#define	DMA_MPU_MODE_SHIFT		2
+#define	ECCSIZE0_SHIFT			12
+#define	ECCSIZE1_SHIFT			22
+#define	ECC1RESULTSIZE			0x1
+#define	ECCCLEAR			0x100
+#define	ECC1				0x1
+#define	PREFETCH_FIFOTHRESHOLD_MAX	0x40
+#define	PREFETCH_FIFOTHRESHOLD(val)	((val) << 8)
+#define	PREFETCH_STATUS_COUNT(val)	(val & 0x00003fff)
+#define	PREFETCH_STATUS_FIFO_CNT(val)	((val >> 24) & 0x7F)
+#define	STATUS_BUFF_EMPTY		0x00000001
+
+#define SECTOR_BYTES		512
+/* 4 bit padding to make byte aligned, 56 = 52 + 4 */
+#define BCH4_BIT_PAD		4
+
+/* GPMC ecc engine settings for read */
+#define BCH_WRAPMODE_1		1	/* BCH wrap mode 1 */
+#define BCH8R_ECC_SIZE0		0x1a	/* ecc_size0 = 26 */
+#define BCH8R_ECC_SIZE1		0x2	/* ecc_size1 = 2 */
+#define BCH4R_ECC_SIZE0		0xd	/* ecc_size0 = 13 */
+#define BCH4R_ECC_SIZE1		0x3	/* ecc_size1 = 3 */
+
+/* GPMC ecc engine settings for write */
+#define BCH_WRAPMODE_6		6	/* BCH wrap mode 6 */
+#define BCH_ECC_SIZE0		0x0	/* ecc_size0 = 0, no oob protection */
+#define BCH_ECC_SIZE1		0x20	/* ecc_size1 = 32 */
+
+#define BADBLOCK_MARKER_LENGTH		2
+
+static u_char bch16_vector[] = {0xf5, 0x24, 0x1c, 0xd0, 0x61, 0xb3, 0xf1, 0x55,
+				0x2e, 0x2c, 0x86, 0xa3, 0xed, 0x36, 0x1b, 0x78,
+				0x48, 0x76, 0xa9, 0x3b, 0x97, 0xd1, 0x7a, 0x93,
+				0x07, 0x0e};
+static u_char bch8_vector[] = {0xf3, 0xdb, 0x14, 0x16, 0x8b, 0xd2, 0xbe, 0xcc,
+	0xac, 0x6b, 0xff, 0x99, 0x7b};
+static u_char bch4_vector[] = {0x00, 0x6b, 0x31, 0xdd, 0x41, 0xbc, 0x10};
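+
+/*
+ * These vectors appear to be the ECC bytes of a fully erased (all 0xFF)
+ * page for the BCH16/BCH8/BCH4 schemes; presumably the correction path
+ * compares the ECC bytes read from OOB against them so that erased pages
+ * can be treated as error-free.
+ */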
+
+/* Shared among all NAND instances to synchronize access to the ECC Engine */
+static struct nand_hw_control omap_gpmc_controller = {
+	.lock = __SPIN_LOCK_UNLOCKED(omap_gpmc_controller.lock),
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(omap_gpmc_controller.wq),
+};
+
+struct omap_nand_info {
+	struct nand_chip		nand;
+	struct platform_device		*pdev;
+
+	int				gpmc_cs;
+	bool				dev_ready;
+	enum nand_io			xfer_type;
+	int				devsize;
+	enum omap_ecc			ecc_opt;
+	struct device_node		*elm_of_node;
+
+	unsigned long			phys_base;
+	struct completion		comp;
+	struct dma_chan			*dma;
+	int				gpmc_irq_fifo;
+	int				gpmc_irq_count;
+	enum {
+		OMAP_NAND_IO_READ = 0,	/* read */
+		OMAP_NAND_IO_WRITE,	/* write */
+	} iomode;
+	u_char				*buf;
+	int					buf_len;
+	/* Interface to GPMC */
+	struct gpmc_nand_regs		reg;
+	struct gpmc_nand_ops		*ops;
+	bool				flash_bbt;
+	/* fields specific for BCHx_HW ECC scheme */
+	struct device			*elm_dev;
+	/* NAND ready gpio */
+	struct gpio_desc		*ready_gpiod;
+};
+
+static inline struct omap_nand_info *mtd_to_omap(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct omap_nand_info, nand);
+}
+
+/**
+ * omap_prefetch_enable - configures and starts prefetch transfer
+ * @cs: cs (chip select) number
+ * @fifo_th: fifo threshold to be used for read/ write
+ * @dma_mode: dma mode enable (1) or disable (0)
+ * @u32_count: number of bytes to be transferred
+ * @is_write: prefetch read(0) or write post(1) mode
+ */
+static int omap_prefetch_enable(int cs, int fifo_th, int dma_mode,
+	unsigned int u32_count, int is_write, struct omap_nand_info *info)
+{
+	u32 val;
+
+	if (fifo_th > PREFETCH_FIFOTHRESHOLD_MAX)
+		return -1;
+
+	if (readl(info->reg.gpmc_prefetch_control))
+		return -EBUSY;
+
+	/* Set the amount of bytes to be prefetched */
+	writel(u32_count, info->reg.gpmc_prefetch_config2);
+
+	/* Set dma/mpu mode, the prefetch read / post write, and
+	 * enable the engine. Also set which cs has requested it.
+	 */
+	val = ((cs << PREFETCH_CONFIG1_CS_SHIFT) |
+		PREFETCH_FIFOTHRESHOLD(fifo_th) | ENABLE_PREFETCH |
+		(dma_mode << DMA_MPU_MODE_SHIFT) | (is_write & 0x1));
+	writel(val, info->reg.gpmc_prefetch_config1);
+
+	/*  Start the prefetch engine */
+	writel(0x1, info->reg.gpmc_prefetch_control);
+
+	return 0;
+}
+
+/**
+ * omap_prefetch_reset - disables and stops the prefetch engine
+ */
+static int omap_prefetch_reset(int cs, struct omap_nand_info *info)
+{
+	u32 config1;
+
+	/* check if the same module/cs is trying to reset */
+	config1 = readl(info->reg.gpmc_prefetch_config1);
+	if (((config1 >> PREFETCH_CONFIG1_CS_SHIFT) & CS_MASK) != cs)
+		return -EINVAL;
+
+	/* Stop the PFPW engine */
+	writel(0x0, info->reg.gpmc_prefetch_control);
+
+	/* Reset/disable the PFPW engine */
+	writel(0x0, info->reg.gpmc_prefetch_config1);
+
+	return 0;
+}
+
+/**
+ * omap_hwcontrol - hardware specific access to control-lines
+ * @mtd: MTD device structure
+ * @cmd: command to device
+ * @ctrl:
+ * NAND_NCE: bit 0 -> don't care
+ * NAND_CLE: bit 1 -> Command Latch
+ * NAND_ALE: bit 2 -> Address Latch
+ *
+ * NOTE: boards may use different bits for these!!
+ */
+static void omap_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+
+	if (cmd != NAND_CMD_NONE) {
+		if (ctrl & NAND_CLE)
+			writeb(cmd, info->reg.gpmc_nand_command);
+
+		else if (ctrl & NAND_ALE)
+			writeb(cmd, info->reg.gpmc_nand_address);
+
+		else /* NAND_NCE */
+			writeb(cmd, info->reg.gpmc_nand_data);
+	}
+}
+
+/**
+ * omap_read_buf8 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf8(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+
+	ioread8_rep(nand->IO_ADDR_R, buf, len);
+}
+
+/**
+ * omap_write_buf8 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf8(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	u_char *p = (u_char *)buf;
+	bool status;
+
+	while (len--) {
+		iowrite8(*p++, info->nand.IO_ADDR_W);
+		/* wait until buffer is available for write */
+		do {
+			status = info->ops->nand_writebuffer_empty();
+		} while (!status);
+	}
+}
+
+/**
+ * omap_read_buf16 - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf16(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+
+	ioread16_rep(nand->IO_ADDR_R, buf, len / 2);
+}
+
+/**
+ * omap_write_buf16 - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf16(struct mtd_info *mtd, const u_char * buf, int len)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	u16 *p = (u16 *) buf;
+	bool status;
+	/* FIXME try bursts of writesw() or DMA ... */
+	len >>= 1;
+
+	while (len--) {
+		iowrite16(*p++, info->nand.IO_ADDR_W);
+		/* wait until buffer is available for write */
+		do {
+			status = info->ops->nand_writebuffer_empty();
+		} while (!status);
+	}
+}
+
+/**
+ * omap_read_buf_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	uint32_t r_count = 0;
+	int ret = 0;
+	u32 *p = (u32 *)buf;
+
+	/* take care of subpage reads */
+	if (len % 4) {
+		if (info->nand.options & NAND_BUSWIDTH_16)
+			omap_read_buf16(mtd, buf, len % 4);
+		else
+			omap_read_buf8(mtd, buf, len % 4);
+		p = (u32 *) (buf + len % 4);
+		len -= len % 4;
+	}
+
+	/* configure and start prefetch transfer */
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x0, info);
+	if (ret) {
+		/* PFPW engine is busy, use cpu copy method */
+		if (info->nand.options & NAND_BUSWIDTH_16)
+			omap_read_buf16(mtd, (u_char *)p, len);
+		else
+			omap_read_buf8(mtd, (u_char *)p, len);
+	} else {
+		do {
+			r_count = readl(info->reg.gpmc_prefetch_status);
+			r_count = PREFETCH_STATUS_FIFO_CNT(r_count);
+			r_count = r_count >> 2;
+			ioread32_rep(info->nand.IO_ADDR_R, p, r_count);
+			p += r_count;
+			len -= r_count << 2;
+		} while (len);
+		/* disable and stop the PFPW engine */
+		omap_prefetch_reset(info->gpmc_cs, info);
+	}
+}
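+
+/*
+ * Worked example for the FIFO drain loop above (illustrative values):
+ * the prefetch status reports the FIFO fill level in bytes, so with
+ * e.g. 64 bytes pending, r_count = 64 >> 2 = 16 and ioread32_rep()
+ * pulls sixteen 32-bit words out of the FIFO, after which len is
+ * reduced by r_count << 2 = 64 bytes.
+ */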
+
+/**
+ * omap_write_buf_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	uint32_t w_count = 0;
+	int i = 0, ret = 0;
+	u16 *p = (u16 *)buf;
+	unsigned long tim, limit;
+	u32 val;
+
+	/* take care of subpage writes */
+	if (len % 2 != 0) {
+		writeb(*buf, info->nand.IO_ADDR_W);
+		p = (u16 *)(buf + 1);
+		len--;
+	}
+
+	/*  configure and start prefetch transfer */
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX, 0x0, len, 0x1, info);
+	if (ret) {
+		/* PFPW engine is busy, use cpu copy method */
+		if (info->nand.options & NAND_BUSWIDTH_16)
+			omap_write_buf16(mtd, (u_char *)p, len);
+		else
+			omap_write_buf8(mtd, (u_char *)p, len);
+	} else {
+		while (len) {
+			w_count = readl(info->reg.gpmc_prefetch_status);
+			w_count = PREFETCH_STATUS_FIFO_CNT(w_count);
+			w_count = w_count >> 1;
+			for (i = 0; (i < w_count) && len; i++, len -= 2)
+				iowrite16(*p++, info->nand.IO_ADDR_W);
+		}
+		/* wait for the data to be flushed out before resetting the prefetch */
+		tim = 0;
+		limit = (loops_per_jiffy *
+					msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+		do {
+			cpu_relax();
+			val = readl(info->reg.gpmc_prefetch_status);
+			val = PREFETCH_STATUS_COUNT(val);
+		} while (val && (tim++ < limit));
+
+		/* disable and stop the PFPW engine */
+		omap_prefetch_reset(info->gpmc_cs, info);
+	}
+}
+
+/*
+ * omap_nand_dma_callback: callback on the completion of dma transfer
+ * @data: pointer to completion data structure
+ */
+static void omap_nand_dma_callback(void *data)
+{
+	complete((struct completion *) data);
+}
+
+/*
+ * omap_nand_dma_transfer: configure and start dma transfer
+ * @mtd: MTD device structure
+ * @addr: virtual address in RAM of source/destination
+ * @len: number of data bytes to be transferred
+ * @is_write: flag for read/write operation
+ */
+static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
+					unsigned int len, int is_write)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct dma_async_tx_descriptor *tx;
+	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE :
+							DMA_FROM_DEVICE;
+	struct scatterlist sg;
+	unsigned long tim, limit;
+	unsigned n;
+	int ret;
+	u32 val;
+
+	if (!virt_addr_valid(addr))
+		goto out_copy;
+
+	sg_init_one(&sg, addr, len);
+	n = dma_map_sg(info->dma->device->dev, &sg, 1, dir);
+	if (n == 0) {
+		dev_err(&info->pdev->dev,
+			"Couldn't DMA map a %d byte buffer\n", len);
+		goto out_copy;
+	}
+
+	tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
+		is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+		DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+	if (!tx)
+		goto out_copy_unmap;
+
+	tx->callback = omap_nand_dma_callback;
+	tx->callback_param = &info->comp;
+	dmaengine_submit(tx);
+
+	init_completion(&info->comp);
+
+	/* setup and start DMA using dma_addr */
+	dma_async_issue_pending(info->dma);
+
+	/*  configure and start prefetch transfer */
+	ret = omap_prefetch_enable(info->gpmc_cs,
+		PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write, info);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy_unmap;
+
+	wait_for_completion(&info->comp);
+	tim = 0;
+	limit = (loops_per_jiffy * msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+
+	do {
+		cpu_relax();
+		val = readl(info->reg.gpmc_prefetch_status);
+		val = PREFETCH_STATUS_COUNT(val);
+	} while (val && (tim++ < limit));
+
+	/* disable and stop the PFPW engine */
+	omap_prefetch_reset(info->gpmc_cs, info);
+
+	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+	return 0;
+
+out_copy_unmap:
+	dma_unmap_sg(info->dma->device->dev, &sg, 1, dir);
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
+			: omap_write_buf16(mtd, (u_char *) addr, len);
+	else
+		is_write == 0 ? omap_read_buf8(mtd, (u_char *) addr, len)
+			: omap_write_buf8(mtd, (u_char *) addr, len);
+	return 0;
+}
+
+/**
+ * omap_read_buf_dma_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_dma_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	if (len <= mtd->oobsize)
+		omap_read_buf_pref(mtd, buf, len);
+	else
+		/* start transfer in DMA mode */
+		omap_nand_dma_transfer(mtd, buf, len, 0x0);
+}
+
+/**
+ * omap_write_buf_dma_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_dma_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	if (len <= mtd->oobsize)
+		omap_write_buf_pref(mtd, buf, len);
+	else
+		/* start transfer in DMA mode */
+		omap_nand_dma_transfer(mtd, (u_char *) buf, len, 0x1);
+}
+
+/*
+ * omap_nand_irq - GPMC irq handler
+ * @this_irq: gpmc irq number
+ * @dev: omap_nand_info structure pointer is passed here
+ */
+static irqreturn_t omap_nand_irq(int this_irq, void *dev)
+{
+	struct omap_nand_info *info = (struct omap_nand_info *) dev;
+	u32 bytes;
+
+	bytes = readl(info->reg.gpmc_prefetch_status);
+	bytes = PREFETCH_STATUS_FIFO_CNT(bytes);
+	bytes = bytes  & 0xFFFC; /* io in multiple of 4 bytes */
+	if (info->iomode == OMAP_NAND_IO_WRITE) { /* checks for write io */
+		if (this_irq == info->gpmc_irq_count)
+			goto done;
+
+		if (info->buf_len && (info->buf_len < bytes))
+			bytes = info->buf_len;
+		else if (!info->buf_len)
+			bytes = 0;
+		iowrite32_rep(info->nand.IO_ADDR_W,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+		info->buf_len -= bytes;
+
+	} else {
+		ioread32_rep(info->nand.IO_ADDR_R,
+						(u32 *)info->buf, bytes >> 2);
+		info->buf = info->buf + bytes;
+
+		if (this_irq == info->gpmc_irq_count)
+			goto done;
+	}
+
+	return IRQ_HANDLED;
+
+done:
+	complete(&info->comp);
+
+	disable_irq_nosync(info->gpmc_irq_fifo);
+	disable_irq_nosync(info->gpmc_irq_count);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * omap_read_buf_irq_pref - read data from NAND controller into buffer
+ * @mtd: MTD device structure
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ */
+static void omap_read_buf_irq_pref(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int ret = 0;
+
+	if (len <= mtd->oobsize) {
+		omap_read_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_READ;
+	info->buf = buf;
+	init_completion(&info->comp);
+
+	/*  configure and start prefetch transfer */
+	ret = omap_prefetch_enable(info->gpmc_cs,
+			PREFETCH_FIFOTHRESHOLD_MAX/2, 0x0, len, 0x0, info);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+
+	enable_irq(info->gpmc_irq_count);
+	enable_irq(info->gpmc_irq_fifo);
+
+	/* waiting for read to complete */
+	wait_for_completion(&info->comp);
+
+	/* disable and stop the PFPW engine */
+	omap_prefetch_reset(info->gpmc_cs, info);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_read_buf16(mtd, buf, len);
+	else
+		omap_read_buf8(mtd, buf, len);
+}
+
+/*
+ * omap_write_buf_irq_pref - write buffer to NAND controller
+ * @mtd: MTD device structure
+ * @buf: data buffer
+ * @len: number of bytes to write
+ */
+static void omap_write_buf_irq_pref(struct mtd_info *mtd,
+					const u_char *buf, int len)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int ret = 0;
+	unsigned long tim, limit;
+	u32 val;
+
+	if (len <= mtd->oobsize) {
+		omap_write_buf_pref(mtd, buf, len);
+		return;
+	}
+
+	info->iomode = OMAP_NAND_IO_WRITE;
+	info->buf = (u_char *) buf;
+	init_completion(&info->comp);
+
+	/* configure and start prefetch transfer : size=24 */
+	ret = omap_prefetch_enable(info->gpmc_cs,
+		(PREFETCH_FIFOTHRESHOLD_MAX * 3) / 8, 0x0, len, 0x1, info);
+	if (ret)
+		/* PFPW engine is busy, use cpu copy method */
+		goto out_copy;
+
+	info->buf_len = len;
+
+	enable_irq(info->gpmc_irq_count);
+	enable_irq(info->gpmc_irq_fifo);
+
+	/* waiting for write to complete */
+	wait_for_completion(&info->comp);
+
+	/* wait for the data to be flushed out before resetting the prefetch */
+	tim = 0;
+	limit = (loops_per_jiffy *  msecs_to_jiffies(OMAP_NAND_TIMEOUT_MS));
+	do {
+		val = readl(info->reg.gpmc_prefetch_status);
+		val = PREFETCH_STATUS_COUNT(val);
+		cpu_relax();
+	} while (val && (tim++ < limit));
+
+	/* disable and stop the PFPW engine */
+	omap_prefetch_reset(info->gpmc_cs, info);
+	return;
+
+out_copy:
+	if (info->nand.options & NAND_BUSWIDTH_16)
+		omap_write_buf16(mtd, buf, len);
+	else
+		omap_write_buf8(mtd, buf, len);
+}
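+
+/*
+ * Note on the IRQ-mode FIFO thresholds (illustrative, assuming
+ * PREFETCH_FIFOTHRESHOLD_MAX is 64 as the "size=24" comment above
+ * implies): the read path programs a threshold of MAX / 2 = 32 bytes
+ * while the write path programs (MAX * 3) / 8 = 24 bytes.
+ */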
+
+/**
+ * gen_true_ecc - This function will generate true ECC value
+ * @ecc_buf: buffer to store ecc code
+ *
+ * This generated true ECC value can be used when correcting
+ * data read from NAND flash memory core
+ */
+static void gen_true_ecc(u8 *ecc_buf)
+{
+	u32 tmp = ecc_buf[0] | (ecc_buf[1] << 16) |
+		((ecc_buf[2] & 0xF0) << 20) | ((ecc_buf[2] & 0x0F) << 8);
+
+	ecc_buf[0] = ~(P64o(tmp) | P64e(tmp) | P32o(tmp) | P32e(tmp) |
+			P16o(tmp) | P16e(tmp) | P8o(tmp) | P8e(tmp));
+	ecc_buf[1] = ~(P1024o(tmp) | P1024e(tmp) | P512o(tmp) | P512e(tmp) |
+			P256o(tmp) | P256e(tmp) | P128o(tmp) | P128e(tmp));
+	ecc_buf[2] = ~(P4o(tmp) | P4e(tmp) | P2o(tmp) | P2e(tmp) | P1o(tmp) |
+			P1e(tmp) | P2048o(tmp) | P2048e(tmp));
+}
+
+/**
+ * omap_compare_ecc - Detect (2 bits) and correct (1 bit) error in data
+ * @ecc_data1:  ecc code from nand spare area
+ * @ecc_data2:  ecc code from hardware register obtained from hardware ecc
+ * @page_data:  page data
+ *
+ * This function compares two ECCs and indicates if there is an error.
+ * If the error can be corrected it will be corrected in the buffer.
+ * If there is no error, %0 is returned. If there is an error but it
+ * was corrected, %1 is returned. Otherwise, %-EBADMSG is returned.
+ */
+static int omap_compare_ecc(u8 *ecc_data1,	/* read from NAND memory */
+			    u8 *ecc_data2,	/* read from register */
+			    u8 *page_data)
+{
+	uint	i;
+	u8	tmp0_bit[8], tmp1_bit[8], tmp2_bit[8];
+	u8	comp0_bit[8], comp1_bit[8], comp2_bit[8];
+	u8	ecc_bit[24];
+	u8	ecc_sum = 0;
+	u8	find_bit = 0;
+	uint	find_byte = 0;
+	int	isEccFF;
+
+	isEccFF = ((*(u32 *)ecc_data1 & 0xFFFFFF) == 0xFFFFFF);
+
+	gen_true_ecc(ecc_data1);
+	gen_true_ecc(ecc_data2);
+
+	for (i = 0; i <= 2; i++) {
+		*(ecc_data1 + i) = ~(*(ecc_data1 + i));
+		*(ecc_data2 + i) = ~(*(ecc_data2 + i));
+	}
+
+	for (i = 0; i < 8; i++) {
+		tmp0_bit[i]     = *ecc_data1 % 2;
+		*ecc_data1	= *ecc_data1 / 2;
+	}
+
+	for (i = 0; i < 8; i++) {
+		tmp1_bit[i]	 = *(ecc_data1 + 1) % 2;
+		*(ecc_data1 + 1) = *(ecc_data1 + 1) / 2;
+	}
+
+	for (i = 0; i < 8; i++) {
+		tmp2_bit[i]	 = *(ecc_data1 + 2) % 2;
+		*(ecc_data1 + 2) = *(ecc_data1 + 2) / 2;
+	}
+
+	for (i = 0; i < 8; i++) {
+		comp0_bit[i]     = *ecc_data2 % 2;
+		*ecc_data2       = *ecc_data2 / 2;
+	}
+
+	for (i = 0; i < 8; i++) {
+		comp1_bit[i]     = *(ecc_data2 + 1) % 2;
+		*(ecc_data2 + 1) = *(ecc_data2 + 1) / 2;
+	}
+
+	for (i = 0; i < 8; i++) {
+		comp2_bit[i]     = *(ecc_data2 + 2) % 2;
+		*(ecc_data2 + 2) = *(ecc_data2 + 2) / 2;
+	}
+
+	for (i = 0; i < 6; i++)
+		ecc_bit[i] = tmp2_bit[i + 2] ^ comp2_bit[i + 2];
+
+	for (i = 0; i < 8; i++)
+		ecc_bit[i + 6] = tmp0_bit[i] ^ comp0_bit[i];
+
+	for (i = 0; i < 8; i++)
+		ecc_bit[i + 14] = tmp1_bit[i] ^ comp1_bit[i];
+
+	ecc_bit[22] = tmp2_bit[0] ^ comp2_bit[0];
+	ecc_bit[23] = tmp2_bit[1] ^ comp2_bit[1];
+
+	for (i = 0; i < 24; i++)
+		ecc_sum += ecc_bit[i];
+
+	switch (ecc_sum) {
+	case 0:
+		/* Not reached because this function is not called if
+		 *  ECC values are equal
+		 */
+		return 0;
+
+	case 1:
+		/* Uncorrectable error */
+		pr_debug("ECC UNCORRECTED_ERROR 1\n");
+		return -EBADMSG;
+
+	case 11:
+		/* Uncorrectable error */
+		pr_debug("ECC UNCORRECTED_ERROR B\n");
+		return -EBADMSG;
+
+	case 12:
+		/* Correctable error */
+		find_byte = (ecc_bit[23] << 8) +
+			    (ecc_bit[21] << 7) +
+			    (ecc_bit[19] << 6) +
+			    (ecc_bit[17] << 5) +
+			    (ecc_bit[15] << 4) +
+			    (ecc_bit[13] << 3) +
+			    (ecc_bit[11] << 2) +
+			    (ecc_bit[9]  << 1) +
+			    ecc_bit[7];
+
+		find_bit = (ecc_bit[5] << 2) + (ecc_bit[3] << 1) + ecc_bit[1];
+
+		pr_debug("Correcting single bit ECC error at offset: "
+				"%d, bit: %d\n", find_byte, find_bit);
+
+		page_data[find_byte] ^= (1 << find_bit);
+
+		return 1;
+	default:
+		if (isEccFF) {
+			if (ecc_data2[0] == 0 &&
+			    ecc_data2[1] == 0 &&
+			    ecc_data2[2] == 0)
+				return 0;
+		}
+		pr_debug("UNCORRECTED_ERROR default\n");
+		return -EBADMSG;
+	}
+}
+
+/**
+ * omap_correct_data - Compares the ECC read with HW generated ECC
+ * @mtd: MTD device structure
+ * @dat: page data
+ * @read_ecc: ecc read from nand flash
+ * @calc_ecc: ecc read from HW ECC registers
+ *
+ * Compares the ECC read from the NAND spare area with the ECC register
+ * values and, if they mismatch, calls omap_compare_ecc() for error
+ * detection and correction. If there are no errors, %0 is returned. If
+ * there were errors and all of them were corrected, the number of
+ * corrected errors is returned. If uncorrectable errors exist, a negative
+ * error code (%-EBADMSG) is returned.
+ */
+static int omap_correct_data(struct mtd_info *mtd, u_char *dat,
+				u_char *read_ecc, u_char *calc_ecc)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int blockCnt = 0, i = 0, ret = 0;
+	int stat = 0;
+
+	/* Ex NAND_ECC_HW12_2048 */
+	if ((info->nand.ecc.mode == NAND_ECC_HW) &&
+			(info->nand.ecc.size  == 2048))
+		blockCnt = 4;
+	else
+		blockCnt = 1;
+
+	for (i = 0; i < blockCnt; i++) {
+		if (memcmp(read_ecc, calc_ecc, 3) != 0) {
+			ret = omap_compare_ecc(read_ecc, calc_ecc, dat);
+			if (ret < 0)
+				return ret;
+			/* keep track of the number of corrected errors */
+			stat += ret;
+		}
+		read_ecc += 3;
+		calc_ecc += 3;
+		dat      += 512;
+	}
+	return stat;
+}
+
+/**
+ * omap_calculate_ecc - Generate non-inverted ECC bytes.
+ * @mtd: MTD device structure
+ * @dat: The pointer to data on which ecc is computed
+ * @ecc_code: The ecc_code buffer
+ *
+ * Using non-inverted ECC can be considered ugly since writing a blank
+ * page (i.e. padding) will clear the ECC bytes. This is no problem as
+ * long as nobody tries to write data to the seemingly unused page.
+ * Reading an erased page will produce an ECC mismatch between generated
+ * and read ECC bytes that has to be dealt with separately.
+ */
+static int omap_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+				u_char *ecc_code)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	u32 val;
+
+	val = readl(info->reg.gpmc_ecc_config);
+	if (((val >> ECC_CONFIG_CS_SHIFT) & CS_MASK) != info->gpmc_cs)
+		return -EINVAL;
+
+	/* read ecc result */
+	val = readl(info->reg.gpmc_ecc1_result);
+	*ecc_code++ = val;          /* P128e, ..., P1e */
+	*ecc_code++ = val >> 16;    /* P128o, ..., P1o */
+	/* P2048o, P1024o, P512o, P256o, P2048e, P1024e, P512e, P256e */
+	*ecc_code++ = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0);
+
+	return 0;
+}
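+
+/*
+ * Worked example for the unpacking above (illustrative value): with
+ * gpmc_ecc1_result = 0x0A0B0C0D, the three ECC bytes become
+ *   ecc_code[0] = 0x0D                  (bits  7:0, P128e..P1e)
+ *   ecc_code[1] = 0x0B                  (bits 23:16, P128o..P1o)
+ *   ecc_code[2] = ((val >> 8) & 0x0f) | ((val >> 20) & 0xf0)
+ *               = 0x0C | 0xA0 = 0xAC    (bits 11:8 and 27:24)
+ */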
+
+/**
+ * omap_enable_hwecc - This function enables the hardware ecc functionality
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ */
+static void omap_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	unsigned int dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+	u32 val;
+
+	/* clear ecc and enable bits */
+	val = ECCCLEAR | ECC1;
+	writel(val, info->reg.gpmc_ecc_control);
+
+	/* program ecc and result sizes */
+	val = ((((info->nand.ecc.size >> 1) - 1) << ECCSIZE1_SHIFT) |
+			 ECC1RESULTSIZE);
+	writel(val, info->reg.gpmc_ecc_size_config);
+
+	switch (mode) {
+	case NAND_ECC_READ:
+	case NAND_ECC_WRITE:
+		writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+		break;
+	case NAND_ECC_READSYN:
+		writel(ECCCLEAR, info->reg.gpmc_ecc_control);
+		break;
+	default:
+		dev_info(&info->pdev->dev,
+			"error: unrecognized Mode[%d]!\n", mode);
+		break;
+	}
+
+	/* (ECC 16 or 8 bit col) | ( CS  )  | ECC Enable */
+	val = (dev_width << 7) | (info->gpmc_cs << 1) | (0x1);
+	writel(val, info->reg.gpmc_ecc_config);
+}
+
+/**
+ * omap_wait - wait until the command is done
+ * @mtd: MTD device structure
+ * @chip: NAND Chip structure
+ *
+ * The wait function is called during program and erase operations. Given
+ * the way it is called from the MTD layer, we should wait until the NAND
+ * chip is ready after the program/erase operation has completed.
+ *
+ * Erase can take up to 400ms and program up to 20ms according to
+ * general NAND and SmartMedia specs
+ */
+static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	unsigned long timeo = jiffies;
+	int status, state = this->state;
+
+	if (state == FL_ERASING)
+		timeo += msecs_to_jiffies(400);
+	else
+		timeo += msecs_to_jiffies(20);
+
+	writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command);
+	while (time_before(jiffies, timeo)) {
+		status = readb(info->reg.gpmc_nand_data);
+		if (status & NAND_STATUS_READY)
+			break;
+		cond_resched();
+	}
+
+	status = readb(info->reg.gpmc_nand_data);
+	return status;
+}
+
+/**
+ * omap_dev_ready - checks the NAND Ready GPIO line
+ * @mtd: MTD device structure
+ *
+ * Returns true if ready and false if busy.
+ */
+static int omap_dev_ready(struct mtd_info *mtd)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+
+	return gpiod_get_value(info->ready_gpiod);
+}
+
+/**
+ * omap_enable_hwecc_bch - Program GPMC to perform BCH ECC calculation
+ * @mtd: MTD device structure
+ * @mode: Read/Write mode
+ *
+ * When using BCH with SW correction (i.e. no ELM), sector size is set
+ * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode
+ * for both reading and writing with:
+ * eccsize0 = 0  (no additional protected byte in spare area)
+ * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
+ */
+static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode)
+{
+	unsigned int bch_type;
+	unsigned int dev_width, nsectors;
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	enum omap_ecc ecc_opt = info->ecc_opt;
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	u32 val, wr_mode;
+	unsigned int ecc_size1, ecc_size0;
+
+	/* GPMC configurations for calculating ECC */
+	switch (ecc_opt) {
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+		bch_type = 0;
+		nsectors = 1;
+		wr_mode	  = BCH_WRAPMODE_6;
+		ecc_size0 = BCH_ECC_SIZE0;
+		ecc_size1 = BCH_ECC_SIZE1;
+		break;
+	case OMAP_ECC_BCH4_CODE_HW:
+		bch_type = 0;
+		nsectors = chip->ecc.steps;
+		if (mode == NAND_ECC_READ) {
+			wr_mode	  = BCH_WRAPMODE_1;
+			ecc_size0 = BCH4R_ECC_SIZE0;
+			ecc_size1 = BCH4R_ECC_SIZE1;
+		} else {
+			wr_mode   = BCH_WRAPMODE_6;
+			ecc_size0 = BCH_ECC_SIZE0;
+			ecc_size1 = BCH_ECC_SIZE1;
+		}
+		break;
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+		bch_type = 1;
+		nsectors = 1;
+		wr_mode	  = BCH_WRAPMODE_6;
+		ecc_size0 = BCH_ECC_SIZE0;
+		ecc_size1 = BCH_ECC_SIZE1;
+		break;
+	case OMAP_ECC_BCH8_CODE_HW:
+		bch_type = 1;
+		nsectors = chip->ecc.steps;
+		if (mode == NAND_ECC_READ) {
+			wr_mode	  = BCH_WRAPMODE_1;
+			ecc_size0 = BCH8R_ECC_SIZE0;
+			ecc_size1 = BCH8R_ECC_SIZE1;
+		} else {
+			wr_mode   = BCH_WRAPMODE_6;
+			ecc_size0 = BCH_ECC_SIZE0;
+			ecc_size1 = BCH_ECC_SIZE1;
+		}
+		break;
+	case OMAP_ECC_BCH16_CODE_HW:
+		bch_type = 0x2;
+		nsectors = chip->ecc.steps;
+		if (mode == NAND_ECC_READ) {
+			wr_mode	  = 0x01;
+			ecc_size0 = 52; /* ECC bits in nibbles per sector */
+			ecc_size1 = 0;  /* non-ECC bits in nibbles per sector */
+		} else {
+			wr_mode	  = 0x01;
+			ecc_size0 = 0;  /* extra bits in nibbles per sector */
+			ecc_size1 = 52; /* OOB bits in nibbles per sector */
+		}
+		break;
+	default:
+		return;
+	}
+
+	writel(ECC1, info->reg.gpmc_ecc_control);
+
+	/* Configure ecc size for BCH */
+	val = (ecc_size1 << ECCSIZE1_SHIFT) | (ecc_size0 << ECCSIZE0_SHIFT);
+	writel(val, info->reg.gpmc_ecc_size_config);
+
+	dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
+
+	/* BCH configuration */
+	val = ((1                        << 16) | /* enable BCH */
+	       (bch_type		 << 12) | /* BCH4/BCH8/BCH16 */
+	       (wr_mode                  <<  8) | /* wrap mode */
+	       (dev_width                <<  7) | /* bus width */
+	       (((nsectors-1) & 0x7)     <<  4) | /* number of sectors */
+	       (info->gpmc_cs            <<  1) | /* ECC CS */
+	       (0x1));                            /* enable ECC */
+
+	writel(val, info->reg.gpmc_ecc_config);
+
+	/* Clear ecc and enable bits */
+	writel(ECCCLEAR | ECC1, info->reg.gpmc_ecc_control);
+}
+
+static u8  bch4_polynomial[] = {0x28, 0x13, 0xcc, 0x39, 0x96, 0xac, 0x7f};
+static u8  bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
+				0x97, 0x79, 0xe5, 0x24, 0xb5};
+
+/**
+ * omap_calculate_ecc_bch - Generate BCH ECC bytes
+ * @mtd:	MTD device structure
+ * @dat:	The pointer to data on which ecc is computed
+ * @ecc_calc:	Buffer in which the calculated ECC bytes are stored
+ *
+ * Supports calculating BCH4/8/16 ECC vectors for the page
+ */
+static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
+					const u_char *dat, u_char *ecc_calc)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	int eccbytes	= info->nand.ecc.bytes;
+	struct gpmc_nand_regs	*gpmc_regs = &info->reg;
+	u8 *ecc_code;
+	unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
+	u32 val;
+	int i, j;
+
+	nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
+	for (i = 0; i < nsectors; i++) {
+		ecc_code = ecc_calc;
+		switch (info->ecc_opt) {
+		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+		case OMAP_ECC_BCH8_CODE_HW:
+			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+			bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
+			bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
+			*ecc_code++ = (bch_val4 & 0xFF);
+			*ecc_code++ = ((bch_val3 >> 24) & 0xFF);
+			*ecc_code++ = ((bch_val3 >> 16) & 0xFF);
+			*ecc_code++ = ((bch_val3 >> 8) & 0xFF);
+			*ecc_code++ = (bch_val3 & 0xFF);
+			*ecc_code++ = ((bch_val2 >> 24) & 0xFF);
+			*ecc_code++ = ((bch_val2 >> 16) & 0xFF);
+			*ecc_code++ = ((bch_val2 >> 8) & 0xFF);
+			*ecc_code++ = (bch_val2 & 0xFF);
+			*ecc_code++ = ((bch_val1 >> 24) & 0xFF);
+			*ecc_code++ = ((bch_val1 >> 16) & 0xFF);
+			*ecc_code++ = ((bch_val1 >> 8) & 0xFF);
+			*ecc_code++ = (bch_val1 & 0xFF);
+			break;
+		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+		case OMAP_ECC_BCH4_CODE_HW:
+			bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
+			bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
+			*ecc_code++ = ((bch_val2 >> 12) & 0xFF);
+			*ecc_code++ = ((bch_val2 >> 4) & 0xFF);
+			*ecc_code++ = ((bch_val2 & 0xF) << 4) |
+				((bch_val1 >> 28) & 0xF);
+			*ecc_code++ = ((bch_val1 >> 20) & 0xFF);
+			*ecc_code++ = ((bch_val1 >> 12) & 0xFF);
+			*ecc_code++ = ((bch_val1 >> 4) & 0xFF);
+			*ecc_code++ = ((bch_val1 & 0xF) << 4);
+			break;
+		case OMAP_ECC_BCH16_CODE_HW:
+			val = readl(gpmc_regs->gpmc_bch_result6[i]);
+			ecc_code[0]  = ((val >>  8) & 0xFF);
+			ecc_code[1]  = ((val >>  0) & 0xFF);
+			val = readl(gpmc_regs->gpmc_bch_result5[i]);
+			ecc_code[2]  = ((val >> 24) & 0xFF);
+			ecc_code[3]  = ((val >> 16) & 0xFF);
+			ecc_code[4]  = ((val >>  8) & 0xFF);
+			ecc_code[5]  = ((val >>  0) & 0xFF);
+			val = readl(gpmc_regs->gpmc_bch_result4[i]);
+			ecc_code[6]  = ((val >> 24) & 0xFF);
+			ecc_code[7]  = ((val >> 16) & 0xFF);
+			ecc_code[8]  = ((val >>  8) & 0xFF);
+			ecc_code[9]  = ((val >>  0) & 0xFF);
+			val = readl(gpmc_regs->gpmc_bch_result3[i]);
+			ecc_code[10] = ((val >> 24) & 0xFF);
+			ecc_code[11] = ((val >> 16) & 0xFF);
+			ecc_code[12] = ((val >>  8) & 0xFF);
+			ecc_code[13] = ((val >>  0) & 0xFF);
+			val = readl(gpmc_regs->gpmc_bch_result2[i]);
+			ecc_code[14] = ((val >> 24) & 0xFF);
+			ecc_code[15] = ((val >> 16) & 0xFF);
+			ecc_code[16] = ((val >>  8) & 0xFF);
+			ecc_code[17] = ((val >>  0) & 0xFF);
+			val = readl(gpmc_regs->gpmc_bch_result1[i]);
+			ecc_code[18] = ((val >> 24) & 0xFF);
+			ecc_code[19] = ((val >> 16) & 0xFF);
+			ecc_code[20] = ((val >>  8) & 0xFF);
+			ecc_code[21] = ((val >>  0) & 0xFF);
+			val = readl(gpmc_regs->gpmc_bch_result0[i]);
+			ecc_code[22] = ((val >> 24) & 0xFF);
+			ecc_code[23] = ((val >> 16) & 0xFF);
+			ecc_code[24] = ((val >>  8) & 0xFF);
+			ecc_code[25] = ((val >>  0) & 0xFF);
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		/* ECC scheme specific syndrome customizations */
+		switch (info->ecc_opt) {
+		case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+			/* Add constant polynomial to remainder, so that
+			 * ECC of blank pages results in 0x0 on reading back */
+			for (j = 0; j < eccbytes; j++)
+				ecc_calc[j] ^= bch4_polynomial[j];
+			break;
+		case OMAP_ECC_BCH4_CODE_HW:
+			/* Set  8th ECC byte as 0x0 for ROM compatibility */
+			ecc_calc[eccbytes - 1] = 0x0;
+			break;
+		case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+			/* Add constant polynomial to remainder, so that
+			 * ECC of blank pages results in 0x0 on reading back */
+			for (j = 0; j < eccbytes; j++)
+				ecc_calc[j] ^= bch8_polynomial[j];
+			break;
+		case OMAP_ECC_BCH8_CODE_HW:
+			/* Set 14th ECC byte as 0x0 for ROM compatibility */
+			ecc_calc[eccbytes - 1] = 0x0;
+			break;
+		case OMAP_ECC_BCH16_CODE_HW:
+			break;
+		default:
+			return -EINVAL;
+		}
+
+	ecc_calc += eccbytes;
+	}
+
+	return 0;
+}
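+
+/*
+ * Worked example for the sector loop above (illustrative values): for a
+ * 2KiB page with 512-byte ECC steps, omap_enable_hwecc_bch() programs
+ * (4 - 1) into bits 6:4 of gpmc_ecc_config, so the read-back here gives
+ * nsectors = ((val >> 4) & 0x7) + 1 = 4 and one set of BCH result
+ * registers is unpacked per sector.
+ */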
+
+/**
+ * erased_sector_bitflips - count bit flips
+ * @data:	data sector buffer
+ * @oob:	oob buffer
+ * @info:	omap_nand_info
+ *
+ * Check whether the bit flips in an erased page fall below the correctable
+ * level. If they do, report the page as erased with correctable bit flips
+ * (the buffers are cleaned back to 0xFF); otherwise return 0 so the caller
+ * hands the page to the ELM for a full check.
+ */
+static int erased_sector_bitflips(u_char *data, u_char *oob,
+		struct omap_nand_info *info)
+{
+	int flip_bits = 0, i;
+
+	for (i = 0; i < info->nand.ecc.size; i++) {
+		flip_bits += hweight8(~data[i]);
+		if (flip_bits > info->nand.ecc.strength)
+			return 0;
+	}
+
+	for (i = 0; i < info->nand.ecc.bytes - 1; i++) {
+		flip_bits += hweight8(~oob[i]);
+		if (flip_bits > info->nand.ecc.strength)
+			return 0;
+	}
+
+	/*
+	 * Bit flips falls in correctable level.
+	 * Fill data area with 0xFF
+	 */
+	if (flip_bits) {
+		memset(data, 0xFF, info->nand.ecc.size);
+		memset(oob, 0xFF, info->nand.ecc.bytes);
+	}
+
+	return flip_bits;
+}
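+
+/*
+ * Worked example (illustrative values): a data byte read back as 0xFB
+ * from an erased sector contributes hweight8(~0xFB) = hweight8(0x04) = 1
+ * flipped bit. While the running total stays at or below ecc.strength
+ * the sector is treated as erased and both buffers are rewritten to
+ * 0xFF; one flip more than that and 0 is returned so the caller hands
+ * the sector to the ELM for a full check.
+ */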
+
+/**
+ * omap_elm_correct_data - corrects page data area in case error reported
+ * @mtd:	MTD device structure
+ * @data:	page data
+ * @read_ecc:	ecc read from nand flash
+ * @calc_ecc:	ecc read from HW ECC registers
+ *
+ * The calculated ECC vector is reported as zero for error-free pages.
+ * For a non-zero ECC vector, erased pages are filtered out first, and
+ * the data is then processed via the ELM to locate bit-flips.
+ */
+static int omap_elm_correct_data(struct mtd_info *mtd, u_char *data,
+				u_char *read_ecc, u_char *calc_ecc)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct nand_ecc_ctrl *ecc = &info->nand.ecc;
+	int eccsteps = info->nand.ecc.steps;
+	int i , j, stat = 0;
+	int eccflag, actual_eccbytes;
+	struct elm_errorvec err_vec[ERROR_VECTOR_MAX];
+	u_char *ecc_vec = calc_ecc;
+	u_char *spare_ecc = read_ecc;
+	u_char *erased_ecc_vec;
+	u_char *buf;
+	int bitflip_count;
+	bool is_error_reported = false;
+	u32 bit_pos, byte_pos, error_max, pos;
+	int err;
+
+	switch (info->ecc_opt) {
+	case OMAP_ECC_BCH4_CODE_HW:
+		/* omit  7th ECC byte reserved for ROM code compatibility */
+		actual_eccbytes = ecc->bytes - 1;
+		erased_ecc_vec = bch4_vector;
+		break;
+	case OMAP_ECC_BCH8_CODE_HW:
+		/* omit 14th ECC byte reserved for ROM code compatibility */
+		actual_eccbytes = ecc->bytes - 1;
+		erased_ecc_vec = bch8_vector;
+		break;
+	case OMAP_ECC_BCH16_CODE_HW:
+		actual_eccbytes = ecc->bytes;
+		erased_ecc_vec = bch16_vector;
+		break;
+	default:
+		dev_err(&info->pdev->dev, "invalid driver configuration\n");
+		return -EINVAL;
+	}
+
+	/* Initialize elm error vector to zero */
+	memset(err_vec, 0, sizeof(err_vec));
+
+	for (i = 0; i < eccsteps ; i++) {
+		eccflag = 0;	/* initialize eccflag */
+
+		/*
+		 * Check any error reported,
+		 * In case of error, non zero ecc reported.
+		 */
+		for (j = 0; j < actual_eccbytes; j++) {
+			if (calc_ecc[j] != 0) {
+				eccflag = 1; /* non zero ecc, error present */
+				break;
+			}
+		}
+
+		if (eccflag == 1) {
+			if (memcmp(calc_ecc, erased_ecc_vec,
+						actual_eccbytes) == 0) {
+				/*
+				 * calc_ecc[] matches pattern for ECC(all 0xff)
+				 * so this is definitely an erased-page
+				 */
+			} else {
+				buf = &data[info->nand.ecc.size * i];
+				/*
+				 * count number of 0-bits in read_buf.
+				 * This check can be removed once a similar
+				 * check is introduced in generic NAND driver
+				 */
+				bitflip_count = erased_sector_bitflips(
+						buf, read_ecc, info);
+				if (bitflip_count) {
+					/*
+					 * number of 0-bits within ECC limits
+					 * So this may be an erased-page
+					 */
+					stat += bitflip_count;
+				} else {
+					/*
+					 * Too many 0-bits. It may be a
+					 * - programmed-page, OR
+					 * - erased-page with many bit-flips
+					 * So this page requires check by ELM
+					 */
+					err_vec[i].error_reported = true;
+					is_error_reported = true;
+				}
+			}
+		}
+
+		/* Update the ecc vector */
+		calc_ecc += ecc->bytes;
+		read_ecc += ecc->bytes;
+	}
+
+	/* Check if any error reported */
+	if (!is_error_reported)
+		return stat;
+
+	/* Decode BCH error using ELM module */
+	elm_decode_bch_error_page(info->elm_dev, ecc_vec, err_vec);
+
+	err = 0;
+	for (i = 0; i < eccsteps; i++) {
+		if (err_vec[i].error_uncorrectable) {
+			dev_err(&info->pdev->dev,
+				"uncorrectable bit-flips found\n");
+			err = -EBADMSG;
+		} else if (err_vec[i].error_reported) {
+			for (j = 0; j < err_vec[i].error_count; j++) {
+				switch (info->ecc_opt) {
+				case OMAP_ECC_BCH4_CODE_HW:
+					/* Add 4 bits to take care of padding */
+					pos = err_vec[i].error_loc[j] +
+						BCH4_BIT_PAD;
+					break;
+				case OMAP_ECC_BCH8_CODE_HW:
+				case OMAP_ECC_BCH16_CODE_HW:
+					pos = err_vec[i].error_loc[j];
+					break;
+				default:
+					return -EINVAL;
+				}
+				error_max = (ecc->size + actual_eccbytes) * 8;
+				/* Calculate bit position of error */
+				bit_pos = pos % 8;
+
+				/* Calculate byte position of error */
+				byte_pos = (error_max - pos - 1) / 8;
+
+				if (pos < error_max) {
+					if (byte_pos < 512) {
+						pr_debug("bitflip@dat[%d]=%x\n",
+						     byte_pos, data[byte_pos]);
+						data[byte_pos] ^= 1 << bit_pos;
+					} else {
+						pr_debug("bitflip@oob[%d]=%x\n",
+							(byte_pos - 512),
+						     spare_ecc[byte_pos - 512]);
+						spare_ecc[byte_pos - 512] ^=
+							1 << bit_pos;
+					}
+				} else {
+					dev_err(&info->pdev->dev,
+						"invalid bit-flip @ %d:%d\n",
+						byte_pos, bit_pos);
+					err = -EBADMSG;
+				}
+			}
+		}
+
+		/* Update number of correctable errors */
+		stat += err_vec[i].error_count;
+
+		/* Update page data with sector size */
+		data += ecc->size;
+		spare_ecc += ecc->bytes;
+	}
+
+	return (err) ? err : stat;
+}
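+
+/*
+ * Worked example for the position mapping above (illustrative values):
+ * for BCH8, ecc->size = 512 and actual_eccbytes = 13, so
+ * error_max = (512 + 13) * 8 = 4200 bits. An ELM-reported error_loc of
+ * 4199 gives bit_pos = 4199 % 8 = 7 and
+ * byte_pos = (4200 - 4199 - 1) / 8 = 0, i.e. bit 7 of the first data
+ * byte of the sector is flipped back; byte_pos values of 512 and above
+ * land in the spare-area ECC bytes instead.
+ */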
+
+/**
+ * omap_write_page_bch - BCH ecc based write page function for entire page
+ * @mtd:		mtd info structure
+ * @chip:		nand chip info structure
+ * @buf:		data buffer
+ * @oob_required:	must write chip->oob_poi to OOB
+ * @page:		page
+ *
+ * Custom write page method developed to support multi-sector writing in one shot
+ */
+static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+			       const uint8_t *buf, int oob_required, int page)
+{
+	int ret;
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+
+	/* Enable GPMC ecc engine */
+	chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
+
+	/* Write data */
+	chip->write_buf(mtd, buf, mtd->writesize);
+
+	/* Update ecc vector from GPMC result registers */
+	chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
+
+	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	/* Write ecc vector to OOB area */
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/**
+ * omap_read_page_bch - BCH ecc based page read function for entire page
+ * @mtd:		mtd info structure
+ * @chip:		nand chip info structure
+ * @buf:		buffer to store read data
+ * @oob_required:	caller requires OOB data read to chip->oob_poi
+ * @page:		page number to read
+ *
+ * For BCH ecc scheme, GPMC used for syndrome calculation and ELM module
+ * used for error correction.
+ * Custom method developed to support ELM error correction and multi-sector
+ * reading. On read, the page data area is read along with the OOB data with
+ * the ECC engine enabled; the ECC vector is updated after the OOB data has
+ * been read. For error-free pages the ECC vector is reported as zero.
+ */
+static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	uint8_t *ecc_calc = chip->buffers->ecccalc;
+	uint8_t *ecc_code = chip->buffers->ecccode;
+	int stat, ret;
+	unsigned int max_bitflips = 0;
+
+	/* Enable GPMC ecc engine */
+	chip->ecc.hwctl(mtd, NAND_ECC_READ);
+
+	/* Read data */
+	chip->read_buf(mtd, buf, mtd->writesize);
+
+	/* Read oob bytes */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
+		      mtd->writesize + BADBLOCK_MARKER_LENGTH, -1);
+	chip->read_buf(mtd, chip->oob_poi + BADBLOCK_MARKER_LENGTH,
+		       chip->ecc.total);
+
+	/* Calculate ecc bytes */
+	chip->ecc.calculate(mtd, buf, ecc_calc);
+
+	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+					 chip->ecc.total);
+	if (ret)
+		return ret;
+
+	stat = chip->ecc.correct(mtd, buf, ecc_code, ecc_calc);
+
+	if (stat < 0) {
+		mtd->ecc_stats.failed++;
+	} else {
+		mtd->ecc_stats.corrected += stat;
+		max_bitflips = max_t(unsigned int, max_bitflips, stat);
+	}
+
+	return max_bitflips;
+}
+
+/**
+ * is_elm_present - checks for presence of ELM module by scanning DT nodes
+ * @info: NAND device structure containing platform data
+ * @elm_node: ELM device tree node
+ */
+static bool is_elm_present(struct omap_nand_info *info,
+			   struct device_node *elm_node)
+{
+	struct platform_device *pdev;
+
+	/* check whether elm-id is passed via DT */
+	if (!elm_node) {
+		dev_err(&info->pdev->dev, "ELM devicetree node not found\n");
+		return false;
+	}
+	pdev = of_find_device_by_node(elm_node);
+	/* check whether ELM device is registered */
+	if (!pdev) {
+		dev_err(&info->pdev->dev, "ELM device not found\n");
+		return false;
+	}
+	/* ELM module available, now configure it */
+	info->elm_dev = &pdev->dev;
+	return true;
+}
+
+static bool omap2_nand_ecc_check(struct omap_nand_info *info,
+				 struct omap_nand_platform_data	*pdata)
+{
+	bool ecc_needs_bch, ecc_needs_omap_bch, ecc_needs_elm;
+
+	switch (info->ecc_opt) {
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+		ecc_needs_omap_bch = false;
+		ecc_needs_bch = true;
+		ecc_needs_elm = false;
+		break;
+	case OMAP_ECC_BCH4_CODE_HW:
+	case OMAP_ECC_BCH8_CODE_HW:
+	case OMAP_ECC_BCH16_CODE_HW:
+		ecc_needs_omap_bch = true;
+		ecc_needs_bch = false;
+		ecc_needs_elm = true;
+		break;
+	default:
+		ecc_needs_omap_bch = false;
+		ecc_needs_bch = false;
+		ecc_needs_elm = false;
+		break;
+	}
+
+	if (ecc_needs_bch && !IS_ENABLED(CONFIG_MTD_NAND_ECC_BCH)) {
+		dev_err(&info->pdev->dev,
+			"CONFIG_MTD_NAND_ECC_BCH not enabled\n");
+		return false;
+	}
+	if (ecc_needs_omap_bch && !IS_ENABLED(CONFIG_MTD_NAND_OMAP_BCH)) {
+		dev_err(&info->pdev->dev,
+			"CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
+		return false;
+	}
+	if (ecc_needs_elm && !is_elm_present(info, info->elm_of_node)) {
+		dev_err(&info->pdev->dev, "ELM not available\n");
+		return false;
+	}
+
+	return true;
+}
+
+static const char * const nand_xfer_types[] = {
+	[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
+	[NAND_OMAP_POLLED] = "polled",
+	[NAND_OMAP_PREFETCH_DMA] = "prefetch-dma",
+	[NAND_OMAP_PREFETCH_IRQ] = "prefetch-irq",
+};
+
+static int omap_get_dt_info(struct device *dev, struct omap_nand_info *info)
+{
+	struct device_node *child = dev->of_node;
+	int i;
+	const char *s;
+	u32 cs;
+
+	if (of_property_read_u32(child, "reg", &cs) < 0) {
+		dev_err(dev, "reg not found in DT\n");
+		return -EINVAL;
+	}
+
+	info->gpmc_cs = cs;
+
+	/* detect availability of ELM module. Won't be present pre-OMAP4 */
+	info->elm_of_node = of_parse_phandle(child, "ti,elm-id", 0);
+	if (!info->elm_of_node) {
+		info->elm_of_node = of_parse_phandle(child, "elm_id", 0);
+		if (!info->elm_of_node)
+			dev_dbg(dev, "ti,elm-id not in DT\n");
+	}
+
+	/* select ecc-scheme for NAND */
+	if (of_property_read_string(child, "ti,nand-ecc-opt", &s)) {
+		dev_err(dev, "ti,nand-ecc-opt not found\n");
+		return -EINVAL;
+	}
+
+	if (!strcmp(s, "sw")) {
+		info->ecc_opt = OMAP_ECC_HAM1_CODE_SW;
+	} else if (!strcmp(s, "ham1") ||
+		   !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) {
+		info->ecc_opt =	OMAP_ECC_HAM1_CODE_HW;
+	} else if (!strcmp(s, "bch4")) {
+		if (info->elm_of_node)
+			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW;
+		else
+			info->ecc_opt = OMAP_ECC_BCH4_CODE_HW_DETECTION_SW;
+	} else if (!strcmp(s, "bch8")) {
+		if (info->elm_of_node)
+			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW;
+		else
+			info->ecc_opt = OMAP_ECC_BCH8_CODE_HW_DETECTION_SW;
+	} else if (!strcmp(s, "bch16")) {
+		info->ecc_opt =	OMAP_ECC_BCH16_CODE_HW;
+	} else {
+		dev_err(dev, "unrecognized value for ti,nand-ecc-opt\n");
+		return -EINVAL;
+	}
+
+	/* select data transfer mode */
+	if (!of_property_read_string(child, "ti,nand-xfer-type", &s)) {
+		for (i = 0; i < ARRAY_SIZE(nand_xfer_types); i++) {
+			if (!strcasecmp(s, nand_xfer_types[i])) {
+				info->xfer_type = i;
+				return 0;
+			}
+		}
+
+		dev_err(dev, "unrecognized value for ti,nand-xfer-type\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
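+
+/*
+ * Illustrative device tree fragment for the properties parsed above.
+ * Only the property names are taken from this driver; the node name,
+ * phandle and reg encoding are assumptions following the usual GPMC
+ * child-node binding:
+ *
+ *	nand@0,0 {
+ *		compatible = "ti,omap2-nand";
+ *		reg = <0 0 4>;
+ *		ti,nand-ecc-opt = "bch8";
+ *		ti,nand-xfer-type = "prefetch-dma";
+ *		ti,elm-id = <&elm>;
+ *	};
+ */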
+
+static int omap_ooblayout_ecc(struct mtd_info *mtd, int section,
+			      struct mtd_oob_region *oobregion)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct nand_chip *chip = &info->nand;
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+	    !(chip->options & NAND_BUSWIDTH_16))
+		off = 1;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = off;
+	oobregion->length = chip->ecc.total;
+
+	return 0;
+}
+
+static int omap_ooblayout_free(struct mtd_info *mtd, int section,
+			       struct mtd_oob_region *oobregion)
+{
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	struct nand_chip *chip = &info->nand;
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_HW &&
+	    !(chip->options & NAND_BUSWIDTH_16))
+		off = 1;
+
+	if (section)
+		return -ERANGE;
+
+	off += chip->ecc.total;
+	if (off >= mtd->oobsize)
+		return -ERANGE;
+
+	oobregion->offset = off;
+	oobregion->length = mtd->oobsize - off;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_ooblayout_ops = {
+	.ecc = omap_ooblayout_ecc,
+	.free = omap_ooblayout_free,
+};
+
+static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	/*
+	 * When SW correction is employed, one OMAP specific marker byte is
+	 * reserved after each ECC step.
+	 */
+	oobregion->offset = off + (section * (chip->ecc.bytes + 1));
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int off = BADBLOCK_MARKER_LENGTH;
+
+	if (section)
+		return -ERANGE;
+
+	/*
+	 * When SW correction is employed, one OMAP specific marker byte is
+	 * reserved after each ECC step.
+	 */
+	off += ((chip->ecc.bytes + 1) * chip->ecc.steps);
+	if (off >= mtd->oobsize)
+		return -ERANGE;
+
+	oobregion->offset = off;
+	oobregion->length = mtd->oobsize - off;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops omap_sw_ooblayout_ops = {
+	.ecc = omap_sw_ooblayout_ecc,
+	.free = omap_sw_ooblayout_free,
+};
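+
+/*
+ * Worked example for the two OOB layouts above, for BCH8 on a 2KiB page
+ * with a 64-byte OOB area and assuming BADBLOCK_MARKER_LENGTH is 2
+ * (illustrative values):
+ *
+ * - omap_ooblayout_ops (HW correction, ecc.bytes = 14, 4 steps): one
+ *   ECC region at offset 2 of length 4 * 14 = 56, and a free region at
+ *   offset 58 of length 6.
+ *
+ * - omap_sw_ooblayout_ops (SW correction, ecc.bytes = 13, 4 steps):
+ *   four ECC regions of 13 bytes at offsets 2, 16, 30 and 44 (one
+ *   marker byte reserved after each step), and a free region at
+ *   offset 58 of length 6.
+ */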
+
+static int omap_nand_probe(struct platform_device *pdev)
+{
+	struct omap_nand_info		*info;
+	struct omap_nand_platform_data	*pdata = NULL;
+	struct mtd_info			*mtd;
+	struct nand_chip		*nand_chip;
+	int				err;
+	dma_cap_mask_t			mask;
+	struct resource			*res;
+	struct device			*dev = &pdev->dev;
+	int				min_oobbytes = BADBLOCK_MARKER_LENGTH;
+	int				oobbytes_per_step;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(struct omap_nand_info),
+				GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->pdev = pdev;
+
+	if (dev->of_node) {
+		if (omap_get_dt_info(dev, info))
+			return -EINVAL;
+	} else {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (!pdata) {
+			dev_err(&pdev->dev, "platform data missing\n");
+			return -EINVAL;
+		}
+
+		info->gpmc_cs = pdata->cs;
+		info->reg = pdata->reg;
+		info->ecc_opt = pdata->ecc_opt;
+		if (pdata->dev_ready)
+			dev_info(&pdev->dev, "pdata->dev_ready is deprecated\n");
+
+		info->xfer_type = pdata->xfer_type;
+		info->devsize = pdata->devsize;
+		info->elm_of_node = pdata->elm_of_node;
+		info->flash_bbt = pdata->flash_bbt;
+	}
+
+	platform_set_drvdata(pdev, info);
+	info->ops = gpmc_omap_get_nand_ops(&info->reg, info->gpmc_cs);
+	if (!info->ops) {
+		dev_err(&pdev->dev, "Failed to get GPMC->NAND interface\n");
+		return -ENODEV;
+	}
+
+	nand_chip		= &info->nand;
+	mtd			= nand_to_mtd(nand_chip);
+	mtd->dev.parent		= &pdev->dev;
+	nand_chip->ecc.priv	= NULL;
+	nand_set_flash_node(nand_chip, dev->of_node);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nand_chip->IO_ADDR_R = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(nand_chip->IO_ADDR_R))
+		return PTR_ERR(nand_chip->IO_ADDR_R);
+
+	info->phys_base = res->start;
+
+	nand_chip->controller = &omap_gpmc_controller;
+
+	nand_chip->IO_ADDR_W = nand_chip->IO_ADDR_R;
+	nand_chip->cmd_ctrl  = omap_hwcontrol;
+
+	info->ready_gpiod = devm_gpiod_get_optional(&pdev->dev, "rb",
+						    GPIOD_IN);
+	if (IS_ERR(info->ready_gpiod)) {
+		dev_err(dev, "failed to get ready gpio\n");
+		return PTR_ERR(info->ready_gpiod);
+	}
+
+	/*
+	 * If the RDY/BSY line is connected to the OMAP then use the omap ready
+	 * function and the generic nand_wait function, which reads the status
+	 * register after monitoring the RDY/BSY line. Otherwise use a standard
+	 * chip delay which is slightly more than tR (AC timing) of the NAND
+	 * device, and read the status register until a failure or success is
+	 * reported.
+	 */
+	if (info->ready_gpiod) {
+		nand_chip->dev_ready = omap_dev_ready;
+		nand_chip->chip_delay = 0;
+	} else {
+		nand_chip->waitfunc = omap_wait;
+		nand_chip->chip_delay = 50;
+	}
+
+	if (info->flash_bbt)
+		nand_chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	/* scan NAND device connected to chip controller */
+	nand_chip->options |= info->devsize & NAND_BUSWIDTH_16;
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		dev_err(&info->pdev->dev,
+			"scan failed, may be bus-width mismatch\n");
+		err = -ENXIO;
+		goto return_error;
+	}
+
+	if (nand_chip->bbt_options & NAND_BBT_USE_FLASH)
+		nand_chip->bbt_options |= NAND_BBT_NO_OOB;
+	else
+		nand_chip->options |= NAND_SKIP_BBTSCAN;
+
+	/* re-populate low-level callbacks based on xfer modes */
+	switch (info->xfer_type) {
+	case NAND_OMAP_PREFETCH_POLLED:
+		nand_chip->read_buf   = omap_read_buf_pref;
+		nand_chip->write_buf  = omap_write_buf_pref;
+		break;
+
+	case NAND_OMAP_POLLED:
+		/* Use nand_base defaults for {read,write}_buf */
+		break;
+
+	case NAND_OMAP_PREFETCH_DMA:
+		dma_cap_zero(mask);
+		dma_cap_set(DMA_SLAVE, mask);
+		info->dma = dma_request_chan(pdev->dev.parent, "rxtx");
+
+		if (IS_ERR(info->dma)) {
+			dev_err(&pdev->dev, "DMA engine request failed\n");
+			err = PTR_ERR(info->dma);
+			goto return_error;
+		} else {
+			struct dma_slave_config cfg;
+
+			memset(&cfg, 0, sizeof(cfg));
+			cfg.src_addr = info->phys_base;
+			cfg.dst_addr = info->phys_base;
+			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+			cfg.src_maxburst = 16;
+			cfg.dst_maxburst = 16;
+			err = dmaengine_slave_config(info->dma, &cfg);
+			if (err) {
+				dev_err(&pdev->dev, "DMA engine slave config failed: %d\n",
+					err);
+				goto return_error;
+			}
+			nand_chip->read_buf   = omap_read_buf_dma_pref;
+			nand_chip->write_buf  = omap_write_buf_dma_pref;
+		}
+		break;
+
+	case NAND_OMAP_PREFETCH_IRQ:
+		info->gpmc_irq_fifo = platform_get_irq(pdev, 0);
+		if (info->gpmc_irq_fifo <= 0) {
+			dev_err(&pdev->dev, "error getting fifo irq\n");
+			err = -ENODEV;
+			goto return_error;
+		}
+		err = devm_request_irq(&pdev->dev, info->gpmc_irq_fifo,
+					omap_nand_irq, IRQF_SHARED,
+					"gpmc-nand-fifo", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+						info->gpmc_irq_fifo, err);
+			info->gpmc_irq_fifo = 0;
+			goto return_error;
+		}
+
+		info->gpmc_irq_count = platform_get_irq(pdev, 1);
+		if (info->gpmc_irq_count <= 0) {
+			dev_err(&pdev->dev, "error getting count irq\n");
+			err = -ENODEV;
+			goto return_error;
+		}
+		err = devm_request_irq(&pdev->dev, info->gpmc_irq_count,
+					omap_nand_irq, IRQF_SHARED,
+					"gpmc-nand-count", info);
+		if (err) {
+			dev_err(&pdev->dev, "requesting irq(%d) error:%d",
+						info->gpmc_irq_count, err);
+			info->gpmc_irq_count = 0;
+			goto return_error;
+		}
+
+		nand_chip->read_buf  = omap_read_buf_irq_pref;
+		nand_chip->write_buf = omap_write_buf_irq_pref;
+
+		break;
+
+	default:
+		dev_err(&pdev->dev,
+			"xfer_type(%d) not supported!\n", info->xfer_type);
+		err = -EINVAL;
+		goto return_error;
+	}
+
+	if (!omap2_nand_ecc_check(info, pdata)) {
+		err = -EINVAL;
+		goto return_error;
+	}
+
+	/*
+	 * Bail out earlier to let NAND_ECC_SOFT code create its own
+	 * ooblayout instead of using ours.
+	 */
+	if (info->ecc_opt == OMAP_ECC_HAM1_CODE_SW) {
+		nand_chip->ecc.mode = NAND_ECC_SOFT;
+		nand_chip->ecc.algo = NAND_ECC_HAMMING;
+		goto scan_tail;
+	}
+
+	/* populate MTD interface based on ECC scheme */
+	switch (info->ecc_opt) {
+	case OMAP_ECC_HAM1_CODE_HW:
+		pr_info("nand: using OMAP_ECC_HAM1_CODE_HW\n");
+		nand_chip->ecc.mode             = NAND_ECC_HW;
+		nand_chip->ecc.bytes            = 3;
+		nand_chip->ecc.size             = 512;
+		nand_chip->ecc.strength         = 1;
+		nand_chip->ecc.calculate        = omap_calculate_ecc;
+		nand_chip->ecc.hwctl            = omap_enable_hwecc;
+		nand_chip->ecc.correct          = omap_correct_data;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
+
+		if (!(nand_chip->options & NAND_BUSWIDTH_16))
+			min_oobbytes		= 1;
+
+		break;
+
+	case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
+		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW_DETECTION_SW\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		nand_chip->ecc.bytes		= 7;
+		nand_chip->ecc.strength		= 4;
+		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
+		nand_chip->ecc.correct		= nand_bch_correct_data;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+		/* Reserve one byte for the OMAP marker */
+		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
+		/* software bch library is used for locating errors */
+		nand_chip->ecc.priv		= nand_bch_init(mtd);
+		if (!nand_chip->ecc.priv) {
+			dev_err(&info->pdev->dev, "unable to use BCH library\n");
+			err = -EINVAL;
+			goto return_error;
+		}
+		break;
+
+	case OMAP_ECC_BCH4_CODE_HW:
+		pr_info("nand: using OMAP_ECC_BCH4_CODE_HW ECC scheme\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		/* 14th bit is kept reserved for ROM-code compatibility */
+		nand_chip->ecc.bytes		= 7 + 1;
+		nand_chip->ecc.strength		= 4;
+		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
+		nand_chip->ecc.correct		= omap_elm_correct_data;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.read_page	= omap_read_page_bch;
+		nand_chip->ecc.write_page	= omap_write_page_bch;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
+
+		err = elm_config(info->elm_dev, BCH4_ECC,
+				 mtd->writesize / nand_chip->ecc.size,
+				 nand_chip->ecc.size, nand_chip->ecc.bytes);
+		if (err < 0)
+			goto return_error;
+		break;
+
+	case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
+		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW_DETECTION_SW\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		nand_chip->ecc.bytes		= 13;
+		nand_chip->ecc.strength		= 8;
+		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
+		nand_chip->ecc.correct		= nand_bch_correct_data;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
+		/* Reserve one byte for the OMAP marker */
+		oobbytes_per_step		= nand_chip->ecc.bytes + 1;
+		/* software bch library is used for locating errors */
+		nand_chip->ecc.priv		= nand_bch_init(mtd);
+		if (!nand_chip->ecc.priv) {
+			dev_err(&info->pdev->dev, "unable to use BCH library\n");
+			err = -EINVAL;
+			goto return_error;
+		}
+		break;
+
+	case OMAP_ECC_BCH8_CODE_HW:
+		pr_info("nand: using OMAP_ECC_BCH8_CODE_HW ECC scheme\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		/* 14th bit is kept reserved for ROM-code compatibility */
+		nand_chip->ecc.bytes		= 13 + 1;
+		nand_chip->ecc.strength		= 8;
+		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
+		nand_chip->ecc.correct		= omap_elm_correct_data;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.read_page	= omap_read_page_bch;
+		nand_chip->ecc.write_page	= omap_write_page_bch;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
+
+		err = elm_config(info->elm_dev, BCH8_ECC,
+				 mtd->writesize / nand_chip->ecc.size,
+				 nand_chip->ecc.size, nand_chip->ecc.bytes);
+		if (err < 0)
+			goto return_error;
+
+		break;
+
+	case OMAP_ECC_BCH16_CODE_HW:
+		pr_info("using OMAP_ECC_BCH16_CODE_HW ECC scheme\n");
+		nand_chip->ecc.mode		= NAND_ECC_HW;
+		nand_chip->ecc.size		= 512;
+		nand_chip->ecc.bytes		= 26;
+		nand_chip->ecc.strength		= 16;
+		nand_chip->ecc.hwctl		= omap_enable_hwecc_bch;
+		nand_chip->ecc.correct		= omap_elm_correct_data;
+		nand_chip->ecc.calculate	= omap_calculate_ecc_bch;
+		nand_chip->ecc.read_page	= omap_read_page_bch;
+		nand_chip->ecc.write_page	= omap_write_page_bch;
+		mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
+		oobbytes_per_step		= nand_chip->ecc.bytes;
+
+		err = elm_config(info->elm_dev, BCH16_ECC,
+				 mtd->writesize / nand_chip->ecc.size,
+				 nand_chip->ecc.size, nand_chip->ecc.bytes);
+		if (err < 0)
+			goto return_error;
+
+		break;
+	default:
+		dev_err(&info->pdev->dev, "invalid or unsupported ECC scheme\n");
+		err = -EINVAL;
+		goto return_error;
+	}
+
+	/* check if NAND device's OOB is enough to store ECC signatures */
+	min_oobbytes += (oobbytes_per_step *
+			 (mtd->writesize / nand_chip->ecc.size));
+	if (mtd->oobsize < min_oobbytes) {
+		dev_err(&info->pdev->dev,
+			"not enough OOB bytes required = %d, available=%d\n",
+			min_oobbytes, mtd->oobsize);
+		err = -EINVAL;
+		goto return_error;
+	}
+
+scan_tail:
+	/* second phase scan */
+	if (nand_scan_tail(mtd)) {
+		err = -ENXIO;
+		goto return_error;
+	}
+
+	if (dev->of_node)
+		mtd_device_register(mtd, NULL, 0);
+	else
+		mtd_device_register(mtd, pdata->parts, pdata->nr_parts);
+
+	platform_set_drvdata(pdev, mtd);
+
+	return 0;
+
+return_error:
+	if (info->dma)
+		dma_release_channel(info->dma);
+	if (nand_chip->ecc.priv) {
+		nand_bch_free(nand_chip->ecc.priv);
+		nand_chip->ecc.priv = NULL;
+	}
+	return err;
+}
+
+static int omap_nand_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct omap_nand_info *info = mtd_to_omap(mtd);
+	if (nand_chip->ecc.priv) {
+		nand_bch_free(nand_chip->ecc.priv);
+		nand_chip->ecc.priv = NULL;
+	}
+	if (info->dma)
+		dma_release_channel(info->dma);
+	nand_release(mtd);
+	return 0;
+}
+
+static const struct of_device_id omap_nand_ids[] = {
+	{ .compatible = "ti,omap2-nand", },
+	{},
+};
+
+static struct platform_driver omap_nand_driver = {
+	.probe		= omap_nand_probe,
+	.remove		= omap_nand_remove,
+	.driver		= {
+		.name	= DRIVER_NAME,
+		.of_match_table = of_match_ptr(omap_nand_ids),
+	},
+};
+
+module_platform_driver(omap_nand_driver);
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/rawnand/omap_elm.c b/drivers/mtd/nand/rawnand/omap_elm.c
new file mode 100644
index 000000000000..a3f32f939cc1
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/omap_elm.c
@@ -0,0 +1,578 @@ 
+/*
+ * Error Location Module
+ *
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define DRIVER_NAME	"omap-elm"
+
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_data/elm.h>
+
+#define ELM_SYSCONFIG			0x010
+#define ELM_IRQSTATUS			0x018
+#define ELM_IRQENABLE			0x01c
+#define ELM_LOCATION_CONFIG		0x020
+#define ELM_PAGE_CTRL			0x080
+#define ELM_SYNDROME_FRAGMENT_0		0x400
+#define ELM_SYNDROME_FRAGMENT_1		0x404
+#define ELM_SYNDROME_FRAGMENT_2		0x408
+#define ELM_SYNDROME_FRAGMENT_3		0x40c
+#define ELM_SYNDROME_FRAGMENT_4		0x410
+#define ELM_SYNDROME_FRAGMENT_5		0x414
+#define ELM_SYNDROME_FRAGMENT_6		0x418
+#define ELM_LOCATION_STATUS		0x800
+#define ELM_ERROR_LOCATION_0		0x880
+
+/* ELM Interrupt Status Register */
+#define INTR_STATUS_PAGE_VALID		BIT(8)
+
+/* ELM Interrupt Enable Register */
+#define INTR_EN_PAGE_MASK		BIT(8)
+
+/* ELM Location Configuration Register */
+#define ECC_BCH_LEVEL_MASK		0x3
+
+/* ELM syndrome */
+#define ELM_SYNDROME_VALID		BIT(16)
+
+/* ELM_LOCATION_STATUS Register */
+#define ECC_CORRECTABLE_MASK		BIT(8)
+#define ECC_NB_ERRORS_MASK		0x1f
+
+/* ELM_ERROR_LOCATION_0-15 Registers */
+#define ECC_ERROR_LOCATION_MASK		0x1fff
+
+#define ELM_ECC_SIZE			0x7ff
+
+#define SYNDROME_FRAGMENT_REG_SIZE	0x40
+#define ERROR_LOCATION_SIZE		0x100
+
+struct elm_registers {
+	u32 elm_irqenable;
+	u32 elm_sysconfig;
+	u32 elm_location_config;
+	u32 elm_page_ctrl;
+	u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
+	u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
+	u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
+	u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
+	u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
+	u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
+	u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
+};
+
+struct elm_info {
+	struct device *dev;
+	void __iomem *elm_base;
+	struct completion elm_completion;
+	struct list_head list;
+	enum bch_ecc bch_type;
+	struct elm_registers elm_regs;
+	int ecc_steps;
+	int ecc_syndrome_size;
+};
+
+static LIST_HEAD(elm_devices);
+
+static void elm_write_reg(struct elm_info *info, int offset, u32 val)
+{
+	writel(val, info->elm_base + offset);
+}
+
+static u32 elm_read_reg(struct elm_info *info, int offset)
+{
+	return readl(info->elm_base + offset);
+}
+
+/**
+ * elm_config - Configure ELM module
+ * @dev:		ELM device
+ * @bch_type:		Type of BCH ECC scheme
+ * @ecc_steps:		Number of ECC steps per page
+ * @ecc_step_size:	Size in bytes of one ECC step
+ * @ecc_syndrome_size:	Number of ECC bytes per step
+ */
+int elm_config(struct device *dev, enum bch_ecc bch_type,
+	int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
+{
+	u32 reg_val;
+	struct elm_info *info = dev_get_drvdata(dev);
+
+	if (!info) {
+		dev_err(dev, "Unable to configure elm - device not probed?\n");
+		return -EPROBE_DEFER;
+	}
+	/* ELM cannot detect ECC errors for chunks > 1KB */
+	if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
+		dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
+		return -EINVAL;
+	}
+	/* The ELM supports processing at most 8 error syndromes at a time */
+	if (ecc_steps > ERROR_VECTOR_MAX) {
+		dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
+		return -EINVAL;
+	}
+
+	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
+	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
+	info->bch_type		= bch_type;
+	info->ecc_steps		= ecc_steps;
+	info->ecc_syndrome_size	= ecc_syndrome_size;
+
+	return 0;
+}
+EXPORT_SYMBOL(elm_config);
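
For readers new to the ELM, the two -EINVAL checks above translate into the following limits, derived from the constants in this file (illustrative note only, not new driver code):

	/*
	 * ELM_ECC_SIZE is 0x7ff, so the largest ecc_step_size accepted is
	 * (0x7ff + 1) / 2 = 1024 bytes, i.e. the "1KB" limit mentioned in
	 * the comment, and ecc_steps is capped at ERROR_VECTOR_MAX (8
	 * syndrome vectors, per the comment above), which covers a full
	 * 4 KB page at 512-byte steps.
	 */
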
+
+/**
+ * elm_configure_page_mode - Enable/Disable page mode
+ * @info:	elm info
+ * @index:	index number of syndrome fragment vector
+ * @enable:	enable/disable flag for page mode
+ *
+ * Enable page mode for syndrome fragment index
+ */
+static void elm_configure_page_mode(struct elm_info *info, int index,
+		bool enable)
+{
+	u32 reg_val;
+
+	reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
+	if (enable)
+		reg_val |= BIT(index);	/* enable page mode */
+	else
+		reg_val &= ~BIT(index);	/* disable page mode */
+
+	elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
+}
+
+/**
+ * elm_load_syndrome - Load ELM syndrome reg
+ * @info:	elm info
+ * @err_vec:	elm error vectors
+ * @ecc:	buffer with calculated ecc
+ *
+ * Load syndrome fragment registers with calculated ecc in reverse order.
+ */
+static void elm_load_syndrome(struct elm_info *info,
+		struct elm_errorvec *err_vec, u8 *ecc)
+{
+	int i, offset;
+	u32 val;
+
+	for (i = 0; i < info->ecc_steps; i++) {
+
+		/* Check error reported */
+		if (err_vec[i].error_reported) {
+			elm_configure_page_mode(info, i, true);
+			offset = ELM_SYNDROME_FRAGMENT_0 +
+				SYNDROME_FRAGMENT_REG_SIZE * i;
+			switch (info->bch_type) {
+			case BCH8_ECC:
+				/* syndrome fragment 0 = ecc[9-12B] */
+				val = cpu_to_be32(*(u32 *) &ecc[9]);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 1 = ecc[5-8B] */
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[5]);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 2 = ecc[1-4B] */
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[1]);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 3 = ecc[0B] */
+				offset += 4;
+				val = ecc[0];
+				elm_write_reg(info, offset, val);
+				break;
+			case BCH4_ECC:
+				/* syndrome fragment 0 = ecc[20-52b] bits */
+				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
+					((ecc[2] & 0xf) << 28);
+				elm_write_reg(info, offset, val);
+
+				/* syndrome fragment 1 = ecc[0-20b] bits */
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
+				elm_write_reg(info, offset, val);
+				break;
+			case BCH16_ECC:
+				val = cpu_to_be32(*(u32 *) &ecc[22]);
+				elm_write_reg(info, offset, val);
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[18]);
+				elm_write_reg(info, offset, val);
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[14]);
+				elm_write_reg(info, offset, val);
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[10]);
+				elm_write_reg(info, offset, val);
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[6]);
+				elm_write_reg(info, offset, val);
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[2]);
+				elm_write_reg(info, offset, val);
+				offset += 4;
+				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
+				elm_write_reg(info, offset, val);
+				break;
+			default:
+				pr_err("invalid config bch_type\n");
+			}
+		}
+
+		/* Update ecc pointer with ecc byte size */
+		ecc += info->ecc_syndrome_size;
+	}
+}
+
+/**
+ * elm_start_processing - start elm syndrome processing
+ * @info:	elm info
+ * @err_vec:	elm error vectors
+ *
+ * Set the syndrome valid bit for the syndrome fragment registers that
+ * were loaded, so that the ELM module starts processing those syndrome
+ * vectors.
+ */
+static void elm_start_processing(struct elm_info *info,
+		struct elm_errorvec *err_vec)
+{
+	int i, offset;
+	u32 reg_val;
+
+	/*
+	 * Set the syndrome vector valid bit, so that the ELM module
+	 * processes the vectors for which an error was reported.
+	 */
+	for (i = 0; i < info->ecc_steps; i++) {
+		if (err_vec[i].error_reported) {
+			offset = ELM_SYNDROME_FRAGMENT_6 +
+				SYNDROME_FRAGMENT_REG_SIZE * i;
+			reg_val = elm_read_reg(info, offset);
+			reg_val |= ELM_SYNDROME_VALID;
+			elm_write_reg(info, offset, reg_val);
+		}
+	}
+}
+
+/**
+ * elm_error_correction - locate correctable error position
+ * @info:	elm info
+ * @err_vec:	elm error vectors
+ *
+ * When the ELM module finishes processing, the error location status
+ * register is updated with correctable/uncorrectable error information.
+ * For correctable errors, the number of errors is read from the ELM
+ * location status register and the error positions are read from the
+ * ELM error location registers.
+ */
+static void elm_error_correction(struct elm_info *info,
+		struct elm_errorvec *err_vec)
+{
+	int i, j, errors = 0;
+	int offset;
+	u32 reg_val;
+
+	for (i = 0; i < info->ecc_steps; i++) {
+
+		/* Check error reported */
+		if (err_vec[i].error_reported) {
+			offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
+			reg_val = elm_read_reg(info, offset);
+
+			/* Check whether the error is correctable */
+			if (reg_val & ECC_CORRECTABLE_MASK) {
+				offset = ELM_ERROR_LOCATION_0 +
+					ERROR_LOCATION_SIZE * i;
+
+				/* Read count of correctable errors */
+				err_vec[i].error_count = reg_val &
+					ECC_NB_ERRORS_MASK;
+
+				/* Update the error locations in error vector */
+				for (j = 0; j < err_vec[i].error_count; j++) {
+
+					reg_val = elm_read_reg(info, offset);
+					err_vec[i].error_loc[j] = reg_val &
+						ECC_ERROR_LOCATION_MASK;
+
+					/* Move to the next error location register */
+					offset += 4;
+				}
+
+				errors += err_vec[i].error_count;
+			} else {
+				err_vec[i].error_uncorrectable = true;
+			}
+
+			/* Clearing interrupts for processed error vectors */
+			elm_write_reg(info, ELM_IRQSTATUS, BIT(i));
+
+			/* Disable page mode */
+			elm_configure_page_mode(info, i, false);
+		}
+	}
+}
+
+/**
+ * elm_decode_bch_error_page - Locate error position
+ * @dev:	device pointer
+ * @ecc_calc:	calculated ECC bytes from GPMC
+ * @err_vec:	elm error vectors
+ *
+ * Called with one or more error vectors; the vectors that have an
+ * error must be flagged in err_vec[].error_reported by the caller.
+ */
+void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
+		struct elm_errorvec *err_vec)
+{
+	struct elm_info *info = dev_get_drvdata(dev);
+	u32 reg_val;
+
+	/* Enable page mode interrupt */
+	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+	elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
+	elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);
+
+	/* Load valid ecc byte to syndrome fragment register */
+	elm_load_syndrome(info, err_vec, ecc_calc);
+
+	/* Enable syndrome processing for which syndrome fragment is updated */
+	elm_start_processing(info, err_vec);
+
+	/* Wait for the ELM module to finish locating the errors */
+	wait_for_completion(&info->elm_completion);
+
+	/* Disable page mode interrupt */
+	reg_val = elm_read_reg(info, ELM_IRQENABLE);
+	elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
+	elm_error_correction(info, err_vec);
+}
+EXPORT_SYMBOL(elm_decode_bch_error_page);
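
To show how the pieces fit together, here is a minimal, hedged sketch of a caller, loosely modelled on the GPMC glue earlier in this patch; syndrome_nonzero() and correct_bit() are assumed helpers, data/ecc_calc/ecc_steps are assumed locals, and the mapping of the reported bit index back into the data buffer is driver specific:

	struct elm_errorvec err_vec[ERROR_VECTOR_MAX] = {};
	int i, j;

	/* Flag the ECC steps whose read/calculated syndromes differ. */
	for (i = 0; i < ecc_steps; i++)
		err_vec[i].error_reported = syndrome_nonzero(i);

	/* Let the ELM locate the error positions for the flagged steps. */
	elm_decode_bch_error_page(info->elm_dev, ecc_calc, err_vec);

	for (i = 0; i < ecc_steps; i++) {
		if (err_vec[i].error_uncorrectable)
			return -EBADMSG;
		for (j = 0; j < err_vec[i].error_count; j++)
			/* remap err_vec[i].error_loc[j] and flip the bit */
			correct_bit(data, i, err_vec[i].error_loc[j]);
	}
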
+
+static irqreturn_t elm_isr(int this_irq, void *dev_id)
+{
+	u32 reg_val;
+	struct elm_info *info = dev_id;
+
+	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
+
+	/* All error vectors processed */
+	if (reg_val & INTR_STATUS_PAGE_VALID) {
+		elm_write_reg(info, ELM_IRQSTATUS,
+				reg_val & INTR_STATUS_PAGE_VALID);
+		complete(&info->elm_completion);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+static int elm_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+	struct resource *res, *irq;
+	struct elm_info *info;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->dev = &pdev->dev;
+
+	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!irq) {
+		dev_err(&pdev->dev, "no irq resource defined\n");
+		return -ENODEV;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	info->elm_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(info->elm_base))
+		return PTR_ERR(info->elm_base);
+
+	ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
+			pdev->name, info);
+	if (ret) {
+		dev_err(&pdev->dev, "failure requesting %pr\n", irq);
+		return ret;
+	}
+
+	pm_runtime_enable(&pdev->dev);
+	if (pm_runtime_get_sync(&pdev->dev) < 0) {
+		ret = -EINVAL;
+		pm_runtime_disable(&pdev->dev);
+		dev_err(&pdev->dev, "can't enable clock\n");
+		return ret;
+	}
+
+	init_completion(&info->elm_completion);
+	INIT_LIST_HEAD(&info->list);
+	list_add(&info->list, &elm_devices);
+	platform_set_drvdata(pdev, info);
+	return ret;
+}
+
+static int elm_remove(struct platform_device *pdev)
+{
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * elm_context_save
+ * saves the ELM configuration registers so they are preserved across hardware power-down
+ */
+static int elm_context_save(struct elm_info *info)
+{
+	struct elm_registers *regs = &info->elm_regs;
+	enum bch_ecc bch_type = info->bch_type;
+	u32 offset = 0, i;
+
+	regs->elm_irqenable       = elm_read_reg(info, ELM_IRQENABLE);
+	regs->elm_sysconfig       = elm_read_reg(info, ELM_SYSCONFIG);
+	regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
+	regs->elm_page_ctrl       = elm_read_reg(info, ELM_PAGE_CTRL);
+	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+		offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+		switch (bch_type) {
+		case BCH16_ECC:
+			regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_6 + offset);
+			regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_5 + offset);
+			regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_4 + offset);
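+			/* fall through */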
+		case BCH8_ECC:
+			regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_3 + offset);
+			regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_2 + offset);
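+			/* fall through */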
+		case BCH4_ECC:
+			regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_1 + offset);
+			regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_0 + offset);
+			break;
+		default:
+			return -EINVAL;
+		}
+		/*
+		 * The ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
+		 * to be saved for all BCH schemes.
+		 */
+		regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
+					ELM_SYNDROME_FRAGMENT_6 + offset);
+	}
+	return 0;
+}
+
+/**
+ * elm_context_restore
+ * writes the configuration saved before power-down back into the ELM registers
+ */
+static int elm_context_restore(struct elm_info *info)
+{
+	struct elm_registers *regs = &info->elm_regs;
+	enum bch_ecc bch_type = info->bch_type;
+	u32 offset = 0, i;
+
+	elm_write_reg(info, ELM_IRQENABLE,	 regs->elm_irqenable);
+	elm_write_reg(info, ELM_SYSCONFIG,	 regs->elm_sysconfig);
+	elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
+	elm_write_reg(info, ELM_PAGE_CTRL,	 regs->elm_page_ctrl);
+	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
+		offset = i * SYNDROME_FRAGMENT_REG_SIZE;
+		switch (bch_type) {
+		case BCH16_ECC:
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+					regs->elm_syndrome_fragment_6[i]);
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
+					regs->elm_syndrome_fragment_5[i]);
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
+					regs->elm_syndrome_fragment_4[i]);
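+			/* fall through */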
+		case BCH8_ECC:
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
+					regs->elm_syndrome_fragment_3[i]);
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
+					regs->elm_syndrome_fragment_2[i]);
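+			/* fall through */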
+		case BCH4_ECC:
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
+					regs->elm_syndrome_fragment_1[i]);
+			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
+					regs->elm_syndrome_fragment_0[i]);
+			break;
+		default:
+			return -EINVAL;
+		}
+		/* The ELM_SYNDROME_VALID bit must be set last to trigger the FSM */
+		elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
+					regs->elm_syndrome_fragment_6[i] &
+							 ELM_SYNDROME_VALID);
+	}
+	return 0;
+}
+
+static int elm_suspend(struct device *dev)
+{
+	struct elm_info *info = dev_get_drvdata(dev);
+	elm_context_save(info);
+	pm_runtime_put_sync(dev);
+	return 0;
+}
+
+static int elm_resume(struct device *dev)
+{
+	struct elm_info *info = dev_get_drvdata(dev);
+	pm_runtime_get_sync(dev);
+	elm_context_restore(info);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);
+
+#ifdef CONFIG_OF
+static const struct of_device_id elm_of_match[] = {
+	{ .compatible = "ti,am3352-elm" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, elm_of_match);
+#endif
+
+static struct platform_driver elm_driver = {
+	.driver	= {
+		.name	= DRIVER_NAME,
+		.of_match_table = of_match_ptr(elm_of_match),
+		.pm	= &elm_pm_ops,
+	},
+	.probe	= elm_probe,
+	.remove	= elm_remove,
+};
+
+module_platform_driver(elm_driver);
+
+MODULE_DESCRIPTION("ELM driver for BCH error correction");
+MODULE_AUTHOR("Texas Instruments");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/rawnand/orion_nand.c b/drivers/mtd/nand/rawnand/orion_nand.c
new file mode 100644
index 000000000000..e68c4231e8b7
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/orion_nand.c
@@ -0,0 +1,218 @@ 
+/*
+ * drivers/mtd/nand/rawnand/orion_nand.c
+ *
+ * NAND support for Marvell Orion SoC platforms
+ *
+ * Tzachi Perelstein <tzachi@marvell.com>
+ *
+ * This file is licensed under  the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <asm/sizes.h>
+#include <linux/platform_data/mtd-orion_nand.h>
+
+static void orion_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	struct nand_chip *nc = mtd_to_nand(mtd);
+	struct orion_nand_data *board = nand_get_controller_data(nc);
+	u32 offs;
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		offs = (1 << board->cle);
+	else if (ctrl & NAND_ALE)
+		offs = (1 << board->ale);
+	else
+		return;
+
+	if (nc->options & NAND_BUSWIDTH_16)
+		offs <<= 1;
+
+	writeb(cmd, nc->IO_ADDR_W + offs);
+}
+
+static void orion_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	void __iomem *io_base = chip->IO_ADDR_R;
+	uint64_t *buf64;
+	int i = 0;
+
+	while (len && (unsigned long)buf & 7) {
+		*buf++ = readb(io_base);
+		len--;
+	}
+	buf64 = (uint64_t *)buf;
+	while (i < len/8) {
+		/*
+		 * Since GCC has no proper constraint (PR 43518)
+		 * force x variable to r2/r3 registers as ldrd instruction
+		 * requires first register to be even.
+		 */
+		register uint64_t x asm ("r2");
+
+		asm volatile ("ldrd\t%0, [%1]" : "=&r" (x) : "r" (io_base));
+		buf64[i++] = x;
+	}
+	i *= 8;
+	while (i < len)
+		buf[i++] = readb(io_base);
+}
+
+static int __init orion_nand_probe(struct platform_device *pdev)
+{
+	struct mtd_info *mtd;
+	struct nand_chip *nc;
+	struct orion_nand_data *board;
+	struct resource *res;
+	struct clk *clk;
+	void __iomem *io_base;
+	int ret = 0;
+	u32 val = 0;
+
+	nc = devm_kzalloc(&pdev->dev,
+			sizeof(struct nand_chip),
+			GFP_KERNEL);
+	if (!nc)
+		return -ENOMEM;
+	mtd = nand_to_mtd(nc);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	io_base = devm_ioremap_resource(&pdev->dev, res);
+
+	if (IS_ERR(io_base))
+		return PTR_ERR(io_base);
+
+	if (pdev->dev.of_node) {
+		board = devm_kzalloc(&pdev->dev, sizeof(struct orion_nand_data),
+					GFP_KERNEL);
+		if (!board)
+			return -ENOMEM;
+		if (!of_property_read_u32(pdev->dev.of_node, "cle", &val))
+			board->cle = (u8)val;
+		else
+			board->cle = 0;
+		if (!of_property_read_u32(pdev->dev.of_node, "ale", &val))
+			board->ale = (u8)val;
+		else
+			board->ale = 1;
+		if (!of_property_read_u32(pdev->dev.of_node,
+						"bank-width", &val))
+			board->width = (u8)val * 8;
+		else
+			board->width = 8;
+		if (!of_property_read_u32(pdev->dev.of_node,
+						"chip-delay", &val))
+			board->chip_delay = (u8)val;
+	} else {
+		board = dev_get_platdata(&pdev->dev);
+	}
+
+	mtd->dev.parent = &pdev->dev;
+
+	nand_set_controller_data(nc, board);
+	nand_set_flash_node(nc, pdev->dev.of_node);
+	nc->IO_ADDR_R = nc->IO_ADDR_W = io_base;
+	nc->cmd_ctrl = orion_nand_cmd_ctrl;
+	nc->read_buf = orion_nand_read_buf;
+	nc->ecc.mode = NAND_ECC_SOFT;
+	nc->ecc.algo = NAND_ECC_HAMMING;
+
+	if (board->chip_delay)
+		nc->chip_delay = board->chip_delay;
+
+	WARN(board->width > 16,
+		"%d bit bus width out of range",
+		board->width);
+
+	if (board->width == 16)
+		nc->options |= NAND_BUSWIDTH_16;
+
+	if (board->dev_ready)
+		nc->dev_ready = board->dev_ready;
+
+	platform_set_drvdata(pdev, mtd);
+
+	/*
+	 * Not all platforms can gate the clock, so it is not an error if
+	 * the clock does not exist.
+	 */
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_prepare_enable(clk);
+		clk_put(clk);
+	}
+
+	if (nand_scan(mtd, 1)) {
+		ret = -ENXIO;
+		goto no_dev;
+	}
+
+	mtd->name = "orion_nand";
+	ret = mtd_device_register(mtd, board->parts, board->nr_parts);
+	if (ret) {
+		nand_release(mtd);
+		goto no_dev;
+	}
+
+	return 0;
+
+no_dev:
+	/* the clock reference was already dropped after clk_prepare_enable() */
+	if (!IS_ERR(clk))
+		clk_disable_unprepare(clk);
+
+	return ret;
+}
+
+static int orion_nand_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct clk *clk;
+
+	nand_release(mtd);
+
+	clk = clk_get(&pdev->dev, NULL);
+	if (!IS_ERR(clk)) {
+		clk_disable_unprepare(clk);
+		clk_put(clk);
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id orion_nand_of_match_table[] = {
+	{ .compatible = "marvell,orion-nand", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, orion_nand_of_match_table);
+#endif
+
+static struct platform_driver orion_nand_driver = {
+	.remove		= orion_nand_remove,
+	.driver		= {
+		.name	= "orion_nand",
+		.of_match_table = of_match_ptr(orion_nand_of_match_table),
+	},
+};
+
+module_platform_driver_probe(orion_nand_driver, orion_nand_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Tzachi Perelstein");
+MODULE_DESCRIPTION("NAND glue for Orion platforms");
+MODULE_ALIAS("platform:orion_nand");
diff --git a/drivers/mtd/nand/rawnand/pasemi_nand.c b/drivers/mtd/nand/rawnand/pasemi_nand.c
new file mode 100644
index 000000000000..372b9736ac02
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/pasemi_nand.c
@@ -0,0 +1,233 @@ 
+/*
+ * Copyright (C) 2006-2007 PA Semi, Inc
+ *
+ * Author: Egor Martovetsky <egor@pasemi.com>
+ * Maintained by: Olof Johansson <olof@lixom.net>
+ *
+ * Driver for the PWRficient onchip NAND flash interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#undef DEBUG
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/io.h>
+
+#define LBICTRL_LPCCTL_NR		0x00004000
+#define CLE_PIN_CTL			15
+#define ALE_PIN_CTL			14
+
+static unsigned int lpcctl;
+static struct mtd_info *pasemi_nand_mtd;
+static const char driver_name[] = "pasemi-nand";
+
+static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	while (len > 0x800) {
+		memcpy_fromio(buf, chip->IO_ADDR_R, 0x800);
+		buf += 0x800;
+		len -= 0x800;
+	}
+	memcpy_fromio(buf, chip->IO_ADDR_R, len);
+}
+
+static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	while (len > 0x800) {
+		memcpy_toio(chip->IO_ADDR_R, buf, 0x800);
+		buf += 0x800;
+		len -= 0x800;
+	}
+	memcpy_toio(chip->IO_ADDR_R, buf, len);
+}
+
+static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd,
+			     unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd);
+	else
+		out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd);
+
+	/* Push out posted writes */
+	eieio();
+	inl(lpcctl);
+}
+
+static int pasemi_device_ready(struct mtd_info *mtd)
+{
+	return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR);
+}
+
+static int pasemi_nand_probe(struct platform_device *ofdev)
+{
+	struct device *dev = &ofdev->dev;
+	struct pci_dev *pdev;
+	struct device_node *np = dev->of_node;
+	struct resource res;
+	struct nand_chip *chip;
+	int err = 0;
+
+	err = of_address_to_resource(np, 0, &res);
+
+	if (err)
+		return -EINVAL;
+
+	/* We only support one device at the moment */
+	if (pasemi_nand_mtd)
+		return -ENODEV;
+
+	dev_dbg(dev, "pasemi_nand at %pR\n", &res);
+
+	/* Allocate memory for MTD device structure and private data */
+	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+	if (!chip) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	pasemi_nand_mtd = nand_to_mtd(chip);
+
+	/* Link the private data with the MTD structure */
+	pasemi_nand_mtd->dev.parent = dev;
+
+	chip->IO_ADDR_R = of_iomap(np, 0);
+	chip->IO_ADDR_W = chip->IO_ADDR_R;
+
+	if (!chip->IO_ADDR_R) {
+		err = -EIO;
+		goto out_mtd;
+	}
+
+	pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL);
+	if (!pdev) {
+		err = -ENODEV;
+		goto out_ior;
+	}
+
+	lpcctl = pci_resource_start(pdev, 0);
+	pci_dev_put(pdev);
+
+	if (!request_region(lpcctl, 4, driver_name)) {
+		err = -EBUSY;
+		goto out_ior;
+	}
+
+	chip->cmd_ctrl = pasemi_hwcontrol;
+	chip->dev_ready = pasemi_device_ready;
+	chip->read_buf = pasemi_read_buf;
+	chip->write_buf = pasemi_write_buf;
+	chip->chip_delay = 0;
+	chip->ecc.mode = NAND_ECC_SOFT;
+	chip->ecc.algo = NAND_ECC_HAMMING;
+
+	/* Enable the following for a flash based bad block table */
+	chip->bbt_options = NAND_BBT_USE_FLASH;
+
+	/* Scan to find existence of the device */
+	if (nand_scan(pasemi_nand_mtd, 1)) {
+		err = -ENXIO;
+		goto out_lpc;
+	}
+
+	if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) {
+		dev_err(dev, "Unable to register MTD device\n");
+		err = -ENODEV;
+		goto out_lpc;
+	}
+
+	dev_info(dev, "PA Semi NAND flash at %pR, control at I/O %x\n", &res,
+		 lpcctl);
+
+	return 0;
+
+ out_lpc:
+	release_region(lpcctl, 4);
+ out_ior:
+	iounmap(chip->IO_ADDR_R);
+ out_mtd:
+	kfree(chip);
+ out:
+	return err;
+}
+
+static int pasemi_nand_remove(struct platform_device *ofdev)
+{
+	struct nand_chip *chip;
+
+	if (!pasemi_nand_mtd)
+		return 0;
+
+	chip = mtd_to_nand(pasemi_nand_mtd);
+
+	/* Release resources, unregister device */
+	nand_release(pasemi_nand_mtd);
+
+	release_region(lpcctl, 4);
+
+	iounmap(chip->IO_ADDR_R);
+
+	/* Free the MTD device structure */
+	kfree(chip);
+
+	pasemi_nand_mtd = NULL;
+
+	return 0;
+}
+
+static const struct of_device_id pasemi_nand_match[] =
+{
+	{
+		.compatible   = "pasemi,localbus-nand",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, pasemi_nand_match);
+
+static struct platform_driver pasemi_nand_driver =
+{
+	.driver = {
+		.name = driver_name,
+		.of_match_table = pasemi_nand_match,
+	},
+	.probe		= pasemi_nand_probe,
+	.remove		= pasemi_nand_remove,
+};
+
+module_platform_driver(pasemi_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
+MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
diff --git a/drivers/mtd/nand/rawnand/plat_nand.c b/drivers/mtd/nand/rawnand/plat_nand.c
new file mode 100644
index 000000000000..d5c3c894c60d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/plat_nand.c
@@ -0,0 +1,145 @@ 
+/*
+ * Generic NAND driver
+ *
+ * Author: Vitaly Wool <vitalywool@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_nand_data {
+	struct nand_chip	chip;
+	void __iomem		*io_base;
+};
+
+/*
+ * Probe for the NAND device.
+ */
+static int plat_nand_probe(struct platform_device *pdev)
+{
+	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+	struct plat_nand_data *data;
+	struct mtd_info *mtd;
+	struct resource *res;
+	const char **part_types;
+	int err = 0;
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "platform_nand_data is missing\n");
+		return -EINVAL;
+	}
+
+	if (pdata->chip.nr_chips < 1) {
+		dev_err(&pdev->dev, "invalid number of chips specified\n");
+		return -EINVAL;
+	}
+
+	/* Allocate memory for the device structure (and zero it) */
+	data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
+			    GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->io_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->io_base))
+		return PTR_ERR(data->io_base);
+
+	nand_set_flash_node(&data->chip, pdev->dev.of_node);
+	mtd = nand_to_mtd(&data->chip);
+	mtd->dev.parent = &pdev->dev;
+
+	data->chip.IO_ADDR_R = data->io_base;
+	data->chip.IO_ADDR_W = data->io_base;
+	data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
+	data->chip.dev_ready = pdata->ctrl.dev_ready;
+	data->chip.select_chip = pdata->ctrl.select_chip;
+	data->chip.write_buf = pdata->ctrl.write_buf;
+	data->chip.read_buf = pdata->ctrl.read_buf;
+	data->chip.read_byte = pdata->ctrl.read_byte;
+	data->chip.chip_delay = pdata->chip.chip_delay;
+	data->chip.options |= pdata->chip.options;
+	data->chip.bbt_options |= pdata->chip.bbt_options;
+
+	data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
+	data->chip.ecc.mode = NAND_ECC_SOFT;
+	data->chip.ecc.algo = NAND_ECC_HAMMING;
+
+	platform_set_drvdata(pdev, data);
+
+	/* Handle any platform specific setup */
+	if (pdata->ctrl.probe) {
+		err = pdata->ctrl.probe(pdev);
+		if (err)
+			goto out;
+	}
+
+	/* Scan to find existence of the device */
+	if (nand_scan(mtd, pdata->chip.nr_chips)) {
+		err = -ENXIO;
+		goto out;
+	}
+
+	part_types = pdata->chip.part_probe_types;
+
+	err = mtd_device_parse_register(mtd, part_types, NULL,
+					pdata->chip.partitions,
+					pdata->chip.nr_partitions);
+
+	if (!err)
+		return err;
+
+	nand_release(mtd);
+out:
+	if (pdata->ctrl.remove)
+		pdata->ctrl.remove(pdev);
+	return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int plat_nand_remove(struct platform_device *pdev)
+{
+	struct plat_nand_data *data = platform_get_drvdata(pdev);
+	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
+
+	nand_release(nand_to_mtd(&data->chip));
+	if (pdata->ctrl.remove)
+		pdata->ctrl.remove(pdev);
+
+	return 0;
+}
+
+static const struct of_device_id plat_nand_match[] = {
+	{ .compatible = "gen_nand" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, plat_nand_match);
+
+static struct platform_driver plat_nand_driver = {
+	.probe	= plat_nand_probe,
+	.remove	= plat_nand_remove,
+	.driver	= {
+		.name		= "gen_nand",
+		.of_match_table = plat_nand_match,
+	},
+};
+
+module_platform_driver(plat_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Vitaly Wool");
+MODULE_DESCRIPTION("Simple generic NAND driver");
+MODULE_ALIAS("platform:gen_nand");
diff --git a/drivers/mtd/nand/rawnand/pxa3xx_nand.c b/drivers/mtd/nand/rawnand/pxa3xx_nand.c
new file mode 100644
index 000000000000..4feec4ea3082
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/pxa3xx_nand.c
@@ -0,0 +1,2067 @@ 
+/*
+ * drivers/mtd/nand/rawnand/pxa3xx_nand.c
+ *
+ * Copyright © 2005 Intel Corporation
+ * Copyright © 2006 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * See Documentation/mtd/nand/pxa3xx-nand.txt for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/pxa-dma.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_data/mtd-nand-pxa3xx.h>
+
+#define	CHIP_DELAY_TIMEOUT	msecs_to_jiffies(200)
+#define NAND_STOP_DELAY		msecs_to_jiffies(40)
+#define PAGE_CHUNK_SIZE		(2048)
+
+/*
+ * Define a buffer size for the initial command that detects the flash device:
+ * STATUS, READID and PARAM.
+ * ONFI param page is 256 bytes, and there are three redundant copies
+ * to be read. JEDEC param page is 512 bytes, and there are also three
+ * redundant copies to be read.
+ * Hence this buffer should be at least 512 x 3. Let's pick 2048.
+ */
+#define INIT_BUFFER_SIZE	2048
+
+/* registers and bit definitions */
+#define NDCR		(0x00) /* Control register */
+#define NDTR0CS0	(0x04) /* Timing Parameter 0 for CS0 */
+#define NDTR1CS0	(0x0C) /* Timing Parameter 1 for CS0 */
+#define NDSR		(0x14) /* Status Register */
+#define NDPCR		(0x18) /* Page Count Register */
+#define NDBDR0		(0x1C) /* Bad Block Register 0 */
+#define NDBDR1		(0x20) /* Bad Block Register 1 */
+#define NDECCCTRL	(0x28) /* ECC control */
+#define NDDB		(0x40) /* Data Buffer */
+#define NDCB0		(0x48) /* Command Buffer0 */
+#define NDCB1		(0x4C) /* Command Buffer1 */
+#define NDCB2		(0x50) /* Command Buffer2 */
+
+#define NDCR_SPARE_EN		(0x1 << 31)
+#define NDCR_ECC_EN		(0x1 << 30)
+#define NDCR_DMA_EN		(0x1 << 29)
+#define NDCR_ND_RUN		(0x1 << 28)
+#define NDCR_DWIDTH_C		(0x1 << 27)
+#define NDCR_DWIDTH_M		(0x1 << 26)
+#define NDCR_PAGE_SZ		(0x1 << 24)
+#define NDCR_NCSX		(0x1 << 23)
+#define NDCR_ND_MODE		(0x3 << 21)
+#define NDCR_NAND_MODE   	(0x0)
+#define NDCR_CLR_PG_CNT		(0x1 << 20)
+#define NFCV1_NDCR_ARB_CNTL	(0x1 << 19)
+#define NFCV2_NDCR_STOP_ON_UNCOR	(0x1 << 19)
+#define NDCR_RD_ID_CNT_MASK	(0x7 << 16)
+#define NDCR_RD_ID_CNT(x)	(((x) << 16) & NDCR_RD_ID_CNT_MASK)
+
+#define NDCR_RA_START		(0x1 << 15)
+#define NDCR_PG_PER_BLK		(0x1 << 14)
+#define NDCR_ND_ARB_EN		(0x1 << 12)
+#define NDCR_INT_MASK           (0xFFF)
+
+#define NDSR_MASK		(0xfff)
+#define NDSR_ERR_CNT_OFF	(16)
+#define NDSR_ERR_CNT_MASK       (0x1f)
+#define NDSR_ERR_CNT(sr)	((sr >> NDSR_ERR_CNT_OFF) & NDSR_ERR_CNT_MASK)
+#define NDSR_RDY                (0x1 << 12)
+#define NDSR_FLASH_RDY          (0x1 << 11)
+#define NDSR_CS0_PAGED		(0x1 << 10)
+#define NDSR_CS1_PAGED		(0x1 << 9)
+#define NDSR_CS0_CMDD		(0x1 << 8)
+#define NDSR_CS1_CMDD		(0x1 << 7)
+#define NDSR_CS0_BBD		(0x1 << 6)
+#define NDSR_CS1_BBD		(0x1 << 5)
+#define NDSR_UNCORERR		(0x1 << 4)
+#define NDSR_CORERR		(0x1 << 3)
+#define NDSR_WRDREQ		(0x1 << 2)
+#define NDSR_RDDREQ		(0x1 << 1)
+#define NDSR_WRCMDREQ		(0x1)
+
+#define NDCB0_LEN_OVRD		(0x1 << 28)
+#define NDCB0_ST_ROW_EN         (0x1 << 26)
+#define NDCB0_AUTO_RS		(0x1 << 25)
+#define NDCB0_CSEL		(0x1 << 24)
+#define NDCB0_EXT_CMD_TYPE_MASK	(0x7 << 29)
+#define NDCB0_EXT_CMD_TYPE(x)	(((x) << 29) & NDCB0_EXT_CMD_TYPE_MASK)
+#define NDCB0_CMD_TYPE_MASK	(0x7 << 21)
+#define NDCB0_CMD_TYPE(x)	(((x) << 21) & NDCB0_CMD_TYPE_MASK)
+#define NDCB0_NC		(0x1 << 20)
+#define NDCB0_DBC		(0x1 << 19)
+#define NDCB0_ADDR_CYC_MASK	(0x7 << 16)
+#define NDCB0_ADDR_CYC(x)	(((x) << 16) & NDCB0_ADDR_CYC_MASK)
+#define NDCB0_CMD2_MASK		(0xff << 8)
+#define NDCB0_CMD1_MASK		(0xff)
+#define NDCB0_ADDR_CYC_SHIFT	(16)
+
+#define EXT_CMD_TYPE_DISPATCH	6 /* Command dispatch */
+#define EXT_CMD_TYPE_NAKED_RW	5 /* Naked read or Naked write */
+#define EXT_CMD_TYPE_READ	4 /* Read */
+#define EXT_CMD_TYPE_DISP_WR	4 /* Command dispatch with write */
+#define EXT_CMD_TYPE_FINAL	3 /* Final command */
+#define EXT_CMD_TYPE_LAST_RW	1 /* Last naked read/write */
+#define EXT_CMD_TYPE_MONO	0 /* Monolithic read/write */
+
+/*
+ * This should be large enough to read 'ONFI' and 'JEDEC'.
+ * Let's use 7 bytes, which is the maximum ID count supported
+ * by the controller (see NDCR_RD_ID_CNT_MASK).
+ */
+#define READ_ID_BYTES		7
+
+/* macros for registers read/write */
+#define nand_writel(info, off, val)					\
+	do {								\
+		dev_vdbg(&info->pdev->dev,				\
+			 "%s():%d nand_writel(0x%x, 0x%04x)\n",		\
+			 __func__, __LINE__, (val), (off));		\
+		writel_relaxed((val), (info)->mmio_base + (off));	\
+	} while (0)
+
+#define nand_readl(info, off)						\
+	({								\
+		unsigned int _v;					\
+		_v = readl_relaxed((info)->mmio_base + (off));		\
+		dev_vdbg(&info->pdev->dev,				\
+			 "%s():%d nand_readl(0x%04x) = 0x%x\n",		\
+			 __func__, __LINE__, (off), _v);		\
+		_v;							\
+	})
+
+/* error code and state */
+enum {
+	ERR_NONE	= 0,
+	ERR_DMABUSERR	= -1,
+	ERR_SENDCMD	= -2,
+	ERR_UNCORERR	= -3,
+	ERR_BBERR	= -4,
+	ERR_CORERR	= -5,
+};
+
+enum {
+	STATE_IDLE = 0,
+	STATE_PREPARED,
+	STATE_CMD_HANDLE,
+	STATE_DMA_READING,
+	STATE_DMA_WRITING,
+	STATE_DMA_DONE,
+	STATE_PIO_READING,
+	STATE_PIO_WRITING,
+	STATE_CMD_DONE,
+	STATE_READY,
+};
+
+enum pxa3xx_nand_variant {
+	PXA3XX_NAND_VARIANT_PXA,
+	PXA3XX_NAND_VARIANT_ARMADA370,
+};
+
+struct pxa3xx_nand_host {
+	struct nand_chip	chip;
+	void			*info_data;
+
+	/* page size of attached chip */
+	int			use_ecc;
+	int			cs;
+
+	/* calculated from pxa3xx_nand_flash data */
+	unsigned int		col_addr_cycles;
+	unsigned int		row_addr_cycles;
+};
+
+struct pxa3xx_nand_info {
+	struct nand_hw_control	controller;
+	struct platform_device	 *pdev;
+
+	struct clk		*clk;
+	void __iomem		*mmio_base;
+	unsigned long		mmio_phys;
+	struct completion	cmd_complete, dev_ready;
+
+	unsigned int 		buf_start;
+	unsigned int		buf_count;
+	unsigned int		buf_size;
+	unsigned int		data_buff_pos;
+	unsigned int		oob_buff_pos;
+
+	/* DMA information */
+	struct scatterlist	sg;
+	enum dma_data_direction	dma_dir;
+	struct dma_chan		*dma_chan;
+	dma_cookie_t		dma_cookie;
+	int			drcmr_dat;
+
+	unsigned char		*data_buff;
+	unsigned char		*oob_buff;
+	dma_addr_t 		data_buff_phys;
+	int 			data_dma_ch;
+
+	struct pxa3xx_nand_host *host[NUM_CHIP_SELECT];
+	unsigned int		state;
+
+	/*
+	 * This driver supports NFCv1 (as found in PXA SoC)
+	 * and NFCv2 (as found in Armada 370/XP SoC).
+	 */
+	enum pxa3xx_nand_variant variant;
+
+	int			cs;
+	int			use_ecc;	/* use HW ECC ? */
+	int			ecc_bch;	/* using BCH ECC? */
+	int			use_dma;	/* use DMA ? */
+	int			use_spare;	/* use spare ? */
+	int			need_wait;
+
+	/* Amount of real data per full chunk */
+	unsigned int		chunk_size;
+
+	/* Amount of spare data per full chunk */
+	unsigned int		spare_size;
+
+	/* Number of full chunks (i.e chunk_size + spare_size) */
+	unsigned int            nfullchunks;
+
+	/*
+	 * Total number of chunks. If equal to nfullchunks, then there
+	 * are only full chunks. Otherwise, there is one last chunk of
+	 * size (last_chunk_size + last_spare_size)
+	 */
+	unsigned int            ntotalchunks;
+
+	/* Amount of real data in the last chunk */
+	unsigned int		last_chunk_size;
+
+	/* Amount of spare data in the last chunk */
+	unsigned int		last_spare_size;
+
+	unsigned int		ecc_size;
+	unsigned int		ecc_err_cnt;
+	unsigned int		max_bitflips;
+	int 			retcode;
+
+	/*
+	 * Variables only valid during command
+	 * execution. step_chunk_size and step_spare_size is the
+	 * amount of real data and spare data in the current
+	 * chunk. cur_chunk is the current chunk being
+	 * read/programmed.
+	 */
+	unsigned int		step_chunk_size;
+	unsigned int		step_spare_size;
+	unsigned int            cur_chunk;
+
+	/* cached register value */
+	uint32_t		reg_ndcr;
+	uint32_t		ndtr0cs0;
+	uint32_t		ndtr1cs0;
+
+	/* generated NDCBx register values */
+	uint32_t		ndcb0;
+	uint32_t		ndcb1;
+	uint32_t		ndcb2;
+	uint32_t		ndcb3;
+};
+
+static bool use_dma = true;
+module_param(use_dma, bool, 0444);
+MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
+
+struct pxa3xx_nand_timing {
+	unsigned int	tCH;  /* Enable signal hold time */
+	unsigned int	tCS;  /* Enable signal setup time */
+	unsigned int	tWH;  /* ND_nWE high duration */
+	unsigned int	tWP;  /* ND_nWE pulse time */
+	unsigned int	tRH;  /* ND_nRE high duration */
+	unsigned int	tRP;  /* ND_nRE pulse width */
+	unsigned int	tR;   /* ND_nWE high to ND_nRE low for read */
+	unsigned int	tWHR; /* ND_nWE high to ND_nRE low for status read */
+	unsigned int	tAR;  /* ND_ALE low to ND_nRE low delay */
+};
+
+struct pxa3xx_nand_flash {
+	uint32_t	chip_id;
+	unsigned int	flash_width;	/* Width of Flash memory (DWIDTH_M) */
+	unsigned int	dfc_width;	/* Width of flash controller(DWIDTH_C) */
+	struct pxa3xx_nand_timing *timing;	/* NAND Flash timing */
+};
+
+static struct pxa3xx_nand_timing timing[] = {
+	{ 40, 80, 60, 100, 80, 100, 90000, 400, 40, },
+	{ 10,  0, 20,  40, 30,  40, 11123, 110, 10, },
+	{ 10, 25, 15,  25, 15,  30, 25000,  60, 10, },
+	{ 10, 35, 15,  25, 15,  25, 25000,  60, 10, },
+};
+
+static struct pxa3xx_nand_flash builtin_flash_types[] = {
+	{ 0x46ec, 16, 16, &timing[1] },
+	{ 0xdaec,  8,  8, &timing[1] },
+	{ 0xd7ec,  8,  8, &timing[1] },
+	{ 0xa12c,  8,  8, &timing[2] },
+	{ 0xb12c, 16, 16, &timing[2] },
+	{ 0xdc2c,  8,  8, &timing[2] },
+	{ 0xcc2c, 16, 16, &timing[2] },
+	{ 0xba20, 16, 16, &timing[3] },
+};
+
+static int pxa3xx_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int nchunks = mtd->writesize / info->chunk_size;
+
+	if (section >= nchunks)
+		return -ERANGE;
+
+	oobregion->offset = ((info->ecc_size + info->spare_size) * section) +
+			    info->spare_size;
+	oobregion->length = info->ecc_size;
+
+	return 0;
+}
+
+static int pxa3xx_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int nchunks = mtd->writesize / info->chunk_size;
+
+	if (section >= nchunks)
+		return -ERANGE;
+
+	if (!info->spare_size)
+		return 0;
+
+	oobregion->offset = section * (info->ecc_size + info->spare_size);
+	oobregion->length = info->spare_size;
+	if (!section) {
+		/*
+		 * Bootrom looks in bytes 0 & 5 for bad blocks for the
+		 * 4KB page / 4bit BCH combination.
+		 */
+		if (mtd->writesize == 4096 && info->chunk_size == 2048) {
+			oobregion->offset += 6;
+			oobregion->length -= 6;
+		} else {
+			oobregion->offset += 2;
+			oobregion->length -= 2;
+		}
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops pxa3xx_ooblayout_ops = {
+	.ecc = pxa3xx_ooblayout_ecc,
+	.free = pxa3xx_ooblayout_free,
+};
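
As a reading aid, here is a minimal sketch of how these callbacks are consumed once mtd_set_ooblayout() has registered them; the spare_size = 32 / ecc_size = 30 numbers are assumptions for illustration, not values taken from this driver:

	struct mtd_oob_region region;
	int ret;

	/*
	 * With the assumed geometry, section 0 reports free bytes at
	 * offset 2, length 30 (the first bytes are skipped as described
	 * in the comment above) and ECC bytes at offset 32, length 30.
	 */
	ret = mtd_ooblayout_free(mtd, 0, &region);
	if (!ret)
		pr_debug("free: off=%d len=%d\n", region.offset, region.length);

	ret = mtd_ooblayout_ecc(mtd, 0, &region);
	if (!ret)
		pr_debug("ecc:  off=%d len=%d\n", region.offset, region.length);
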
+
+static u8 bbt_pattern[] = {'M', 'V', 'B', 'b', 't', '0' };
+static u8 bbt_mirror_pattern[] = {'1', 't', 'b', 'B', 'V', 'M' };
+
+static struct nand_bbt_descr bbt_main_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8,		/* Last 8 blocks in each chip */
+	.pattern = bbt_pattern
+};
+
+static struct nand_bbt_descr bbt_mirror_descr = {
+	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
+		| NAND_BBT_2BIT | NAND_BBT_VERSION,
+	.offs =	8,
+	.len = 6,
+	.veroffs = 14,
+	.maxblocks = 8,		/* Last 8 blocks in each chip */
+	.pattern = bbt_mirror_pattern
+};
+
+#define NDTR0_tCH(c)	(min((c), 7) << 19)
+#define NDTR0_tCS(c)	(min((c), 7) << 16)
+#define NDTR0_tWH(c)	(min((c), 7) << 11)
+#define NDTR0_tWP(c)	(min((c), 7) << 8)
+#define NDTR0_tRH(c)	(min((c), 7) << 3)
+#define NDTR0_tRP(c)	(min((c), 7) << 0)
+
+#define NDTR1_tR(c)	(min((c), 65535) << 16)
+#define NDTR1_tWHR(c)	(min((c), 15) << 4)
+#define NDTR1_tAR(c)	(min((c), 15) << 0)
+
+/* convert nanoseconds to NAND flash controller clock cycles */
+#define ns2cycle(ns, clk)	(int)((ns) * (clk / 1000000) / 1000)
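
A quick worked example of the conversion (the 156 MHz controller clock is an assumed figure, only for illustration):

	/*
	 * ns2cycle(25, 156000000)
	 *   = 25 * (156000000 / 1000000) / 1000
	 *   = 25 * 156 / 1000
	 *   = 3 cycles (integer truncation); NDTR0_tWP() would then clamp
	 *     the result to at most 7.
	 */
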
+
+static const struct of_device_id pxa3xx_nand_dt_ids[] = {
+	{
+		.compatible = "marvell,pxa3xx-nand",
+		.data       = (void *)PXA3XX_NAND_VARIANT_PXA,
+	},
+	{
+		.compatible = "marvell,armada370-nand",
+		.data       = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
+
+static enum pxa3xx_nand_variant
+pxa3xx_nand_get_variant(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id =
+			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+	if (!of_id)
+		return PXA3XX_NAND_VARIANT_PXA;
+	return (enum pxa3xx_nand_variant)of_id->data;
+}
+
+static void pxa3xx_nand_set_timing(struct pxa3xx_nand_host *host,
+				   const struct pxa3xx_nand_timing *t)
+{
+	struct pxa3xx_nand_info *info = host->info_data;
+	unsigned long nand_clk = clk_get_rate(info->clk);
+	uint32_t ndtr0, ndtr1;
+
+	ndtr0 = NDTR0_tCH(ns2cycle(t->tCH, nand_clk)) |
+		NDTR0_tCS(ns2cycle(t->tCS, nand_clk)) |
+		NDTR0_tWH(ns2cycle(t->tWH, nand_clk)) |
+		NDTR0_tWP(ns2cycle(t->tWP, nand_clk)) |
+		NDTR0_tRH(ns2cycle(t->tRH, nand_clk)) |
+		NDTR0_tRP(ns2cycle(t->tRP, nand_clk));
+
+	ndtr1 = NDTR1_tR(ns2cycle(t->tR, nand_clk)) |
+		NDTR1_tWHR(ns2cycle(t->tWHR, nand_clk)) |
+		NDTR1_tAR(ns2cycle(t->tAR, nand_clk));
+
+	info->ndtr0cs0 = ndtr0;
+	info->ndtr1cs0 = ndtr1;
+	nand_writel(info, NDTR0CS0, ndtr0);
+	nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+static void pxa3xx_nand_set_sdr_timing(struct pxa3xx_nand_host *host,
+				       const struct nand_sdr_timings *t)
+{
+	struct pxa3xx_nand_info *info = host->info_data;
+	struct nand_chip *chip = &host->chip;
+	unsigned long nand_clk = clk_get_rate(info->clk);
+	uint32_t ndtr0, ndtr1;
+
+	u32 tCH_min = DIV_ROUND_UP(t->tCH_min, 1000);
+	u32 tCS_min = DIV_ROUND_UP(t->tCS_min, 1000);
+	u32 tWH_min = DIV_ROUND_UP(t->tWH_min, 1000);
+	u32 tWP_min = DIV_ROUND_UP(t->tWC_min - t->tWH_min, 1000);
+	u32 tREH_min = DIV_ROUND_UP(t->tREH_min, 1000);
+	u32 tRP_min = DIV_ROUND_UP(t->tRC_min - t->tREH_min, 1000);
+	u32 tR = chip->chip_delay * 1000;
+	u32 tWHR_min = DIV_ROUND_UP(t->tWHR_min, 1000);
+	u32 tAR_min = DIV_ROUND_UP(t->tAR_min, 1000);
+
+	/* fallback to a default value if tR = 0 */
+	if (!tR)
+		tR = 20000;
+
+	ndtr0 = NDTR0_tCH(ns2cycle(tCH_min, nand_clk)) |
+		NDTR0_tCS(ns2cycle(tCS_min, nand_clk)) |
+		NDTR0_tWH(ns2cycle(tWH_min, nand_clk)) |
+		NDTR0_tWP(ns2cycle(tWP_min, nand_clk)) |
+		NDTR0_tRH(ns2cycle(tREH_min, nand_clk)) |
+		NDTR0_tRP(ns2cycle(tRP_min, nand_clk));
+
+	ndtr1 = NDTR1_tR(ns2cycle(tR, nand_clk)) |
+		NDTR1_tWHR(ns2cycle(tWHR_min, nand_clk)) |
+		NDTR1_tAR(ns2cycle(tAR_min, nand_clk));
+
+	info->ndtr0cs0 = ndtr0;
+	info->ndtr1cs0 = ndtr1;
+	nand_writel(info, NDTR0CS0, ndtr0);
+	nand_writel(info, NDTR1CS0, ndtr1);
+}
+
+static int pxa3xx_nand_init_timings_compat(struct pxa3xx_nand_host *host,
+					   unsigned int *flash_width,
+					   unsigned int *dfc_width)
+{
+	struct nand_chip *chip = &host->chip;
+	struct pxa3xx_nand_info *info = host->info_data;
+	const struct pxa3xx_nand_flash *f = NULL;
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+	int i, id, ntypes;
+
+	ntypes = ARRAY_SIZE(builtin_flash_types);
+
+	chip->cmdfunc(mtd, NAND_CMD_READID, 0x00, -1);
+
+	id = chip->read_byte(mtd);
+	id |= chip->read_byte(mtd) << 0x8;
+
+	for (i = 0; i < ntypes; i++) {
+		f = &builtin_flash_types[i];
+
+		if (f->chip_id == id)
+			break;
+	}
+
+	if (i == ntypes) {
+		dev_err(&info->pdev->dev, "Error: timings not found\n");
+		return -EINVAL;
+	}
+
+	pxa3xx_nand_set_timing(host, f->timing);
+
+	*flash_width = f->flash_width;
+	*dfc_width = f->dfc_width;
+
+	return 0;
+}
+
+static int pxa3xx_nand_init_timings_onfi(struct pxa3xx_nand_host *host,
+					 int mode)
+{
+	const struct nand_sdr_timings *timings;
+
+	mode = fls(mode) - 1;
+	if (mode < 0)
+		mode = 0;
+
+	timings = onfi_async_timing_mode_to_sdr_timings(mode);
+	if (IS_ERR(timings))
+		return PTR_ERR(timings);
+
+	pxa3xx_nand_set_sdr_timing(host, timings);
+
+	return 0;
+}
+
+static int pxa3xx_nand_init(struct pxa3xx_nand_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	struct pxa3xx_nand_info *info = host->info_data;
+	unsigned int flash_width = 0, dfc_width = 0;
+	int mode, err;
+
+	mode = onfi_get_async_timing_mode(chip);
+	if (mode == ONFI_TIMING_MODE_UNKNOWN) {
+		err = pxa3xx_nand_init_timings_compat(host, &flash_width,
+						      &dfc_width);
+		if (err)
+			return err;
+
+		if (flash_width == 16) {
+			info->reg_ndcr |= NDCR_DWIDTH_M;
+			chip->options |= NAND_BUSWIDTH_16;
+		}
+
+		info->reg_ndcr |= (dfc_width == 16) ? NDCR_DWIDTH_C : 0;
+	} else {
+		err = pxa3xx_nand_init_timings_onfi(host, mode);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * NOTE: ND_RUN must be set first, and only then may the command buffer
+ * be written; otherwise the controller does not work.
+ * We enable all interrupts at the same time and let pxa3xx_nand_irq()
+ * handle the logic.
+ */
+static void pxa3xx_nand_start(struct pxa3xx_nand_info *info)
+{
+	uint32_t ndcr;
+
+	ndcr = info->reg_ndcr;
+
+	if (info->use_ecc) {
+		ndcr |= NDCR_ECC_EN;
+		if (info->ecc_bch)
+			nand_writel(info, NDECCCTRL, 0x1);
+	} else {
+		ndcr &= ~NDCR_ECC_EN;
+		if (info->ecc_bch)
+			nand_writel(info, NDECCCTRL, 0x0);
+	}
+
+	if (info->use_dma)
+		ndcr |= NDCR_DMA_EN;
+	else
+		ndcr &= ~NDCR_DMA_EN;
+
+	if (info->use_spare)
+		ndcr |= NDCR_SPARE_EN;
+	else
+		ndcr &= ~NDCR_SPARE_EN;
+
+	ndcr |= NDCR_ND_RUN;
+
+	/* clear status bits and run */
+	nand_writel(info, NDSR, NDSR_MASK);
+	nand_writel(info, NDCR, 0);
+	nand_writel(info, NDCR, ndcr);
+}
+
+static void pxa3xx_nand_stop(struct pxa3xx_nand_info *info)
+{
+	uint32_t ndcr;
+	int timeout = NAND_STOP_DELAY;
+
+	/* wait for the RUN bit in NDCR to become 0 */
+	ndcr = nand_readl(info, NDCR);
+	while ((ndcr & NDCR_ND_RUN) && (timeout-- > 0)) {
+		ndcr = nand_readl(info, NDCR);
+		udelay(1);
+	}
+
+	if (timeout <= 0) {
+		ndcr &= ~NDCR_ND_RUN;
+		nand_writel(info, NDCR, ndcr);
+	}
+	if (info->dma_chan)
+		dmaengine_terminate_all(info->dma_chan);
+
+	/* clear status bits */
+	nand_writel(info, NDSR, NDSR_MASK);
+}
+
+static void __maybe_unused
+enable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+	uint32_t ndcr;
+
+	ndcr = nand_readl(info, NDCR);
+	nand_writel(info, NDCR, ndcr & ~int_mask);
+}
+
+static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask)
+{
+	uint32_t ndcr;
+
+	ndcr = nand_readl(info, NDCR);
+	nand_writel(info, NDCR, ndcr | int_mask);
+}
+
+static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len)
+{
+	if (info->ecc_bch) {
+		u32 val;
+		int ret;
+
+		/*
+		 * According to the datasheet, when reading from NDDB
+		 * with BCH enabled, after each 32 bytes reads, we
+		 * have to make sure that the NDSR.RDDREQ bit is set.
+		 *
+		 * Drain the FIFO 8 32 bits reads at a time, and skip
+		 * the polling on the last read.
+		 */
+		while (len > 8) {
+			ioread32_rep(info->mmio_base + NDDB, data, 8);
+
+			ret = readl_relaxed_poll_timeout(info->mmio_base + NDSR, val,
+							 val & NDSR_RDDREQ, 1000, 5000);
+			if (ret) {
+				dev_err(&info->pdev->dev,
+					"Timeout on RDDREQ while draining the FIFO\n");
+				return;
+			}
+
+			data += 32;
+			len -= 8;
+		}
+	}
+
+	ioread32_rep(info->mmio_base + NDDB, data, len);
+}
+
+static void handle_data_pio(struct pxa3xx_nand_info *info)
+{
+	switch (info->state) {
+	case STATE_PIO_WRITING:
+		if (info->step_chunk_size)
+			writesl(info->mmio_base + NDDB,
+				info->data_buff + info->data_buff_pos,
+				DIV_ROUND_UP(info->step_chunk_size, 4));
+
+		if (info->step_spare_size)
+			writesl(info->mmio_base + NDDB,
+				info->oob_buff + info->oob_buff_pos,
+				DIV_ROUND_UP(info->step_spare_size, 4));
+		break;
+	case STATE_PIO_READING:
+		if (info->step_chunk_size)
+			drain_fifo(info,
+				   info->data_buff + info->data_buff_pos,
+				   DIV_ROUND_UP(info->step_chunk_size, 4));
+
+		if (info->step_spare_size)
+			drain_fifo(info,
+				   info->oob_buff + info->oob_buff_pos,
+				   DIV_ROUND_UP(info->step_spare_size, 4));
+		break;
+	default:
+		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+				info->state);
+		BUG();
+	}
+
+	/* Update buffer pointers for multi-page read/write */
+	info->data_buff_pos += info->step_chunk_size;
+	info->oob_buff_pos += info->step_spare_size;
+}
+
+static void pxa3xx_nand_data_dma_irq(void *data)
+{
+	struct pxa3xx_nand_info *info = data;
+	struct dma_tx_state state;
+	enum dma_status status;
+
+	status = dmaengine_tx_status(info->dma_chan, info->dma_cookie, &state);
+	if (likely(status == DMA_COMPLETE)) {
+		info->state = STATE_DMA_DONE;
+	} else {
+		dev_err(&info->pdev->dev, "DMA error on data channel\n");
+		info->retcode = ERR_DMABUSERR;
+	}
+	dma_unmap_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
+
+	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+	enable_int(info, NDCR_INT_MASK);
+}
+
+static void start_data_dma(struct pxa3xx_nand_info *info)
+{
+	enum dma_transfer_direction direction;
+	struct dma_async_tx_descriptor *tx;
+
+	switch (info->state) {
+	case STATE_DMA_WRITING:
+		info->dma_dir = DMA_TO_DEVICE;
+		direction = DMA_MEM_TO_DEV;
+		break;
+	case STATE_DMA_READING:
+		info->dma_dir = DMA_FROM_DEVICE;
+		direction = DMA_DEV_TO_MEM;
+		break;
+	default:
+		dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__,
+				info->state);
+		BUG();
+	}
+	info->sg.length = info->chunk_size;
+	if (info->use_spare)
+		info->sg.length += info->spare_size + info->ecc_size;
+	dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir);
+
+	tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction,
+				     DMA_PREP_INTERRUPT);
+	if (!tx) {
+		dev_err(&info->pdev->dev, "prep_slave_sg() failed\n");
+		return;
+	}
+	tx->callback = pxa3xx_nand_data_dma_irq;
+	tx->callback_param = info;
+	info->dma_cookie = dmaengine_submit(tx);
+	dma_async_issue_pending(info->dma_chan);
+	dev_dbg(&info->pdev->dev, "%s(dir=%d cookie=%x size=%u)\n",
+		__func__, direction, info->dma_cookie, info->sg.length);
+}
+
+static irqreturn_t pxa3xx_nand_irq_thread(int irq, void *data)
+{
+	struct pxa3xx_nand_info *info = data;
+
+	handle_data_pio(info);
+
+	info->state = STATE_CMD_DONE;
+	nand_writel(info, NDSR, NDSR_WRDREQ | NDSR_RDDREQ);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t pxa3xx_nand_irq(int irq, void *devid)
+{
+	struct pxa3xx_nand_info *info = devid;
+	unsigned int status, is_completed = 0, is_ready = 0;
+	unsigned int ready, cmd_done;
+	irqreturn_t ret = IRQ_HANDLED;
+
+	if (info->cs == 0) {
+		ready           = NDSR_FLASH_RDY;
+		cmd_done        = NDSR_CS0_CMDD;
+	} else {
+		ready           = NDSR_RDY;
+		cmd_done        = NDSR_CS1_CMDD;
+	}
+
+	status = nand_readl(info, NDSR);
+
+	if (status & NDSR_UNCORERR)
+		info->retcode = ERR_UNCORERR;
+	if (status & NDSR_CORERR) {
+		info->retcode = ERR_CORERR;
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370 &&
+		    info->ecc_bch)
+			info->ecc_err_cnt = NDSR_ERR_CNT(status);
+		else
+			info->ecc_err_cnt = 1;
+
+		/*
+		 * Each chunk composing a page is corrected independently,
+		 * and we need to store the maximum number of corrected
+		 * bitflips to return to the MTD layer from ecc.read_page().
+		 */
+		info->max_bitflips = max_t(unsigned int,
+					   info->max_bitflips,
+					   info->ecc_err_cnt);
+	}
+	if (status & (NDSR_RDDREQ | NDSR_WRDREQ)) {
+		/* decide whether to use DMA or PIO to transfer the data */
+		if (info->use_dma) {
+			disable_int(info, NDCR_INT_MASK);
+			info->state = (status & NDSR_RDDREQ) ?
+				      STATE_DMA_READING : STATE_DMA_WRITING;
+			start_data_dma(info);
+			goto NORMAL_IRQ_EXIT;
+		} else {
+			info->state = (status & NDSR_RDDREQ) ?
+				      STATE_PIO_READING : STATE_PIO_WRITING;
+			ret = IRQ_WAKE_THREAD;
+			goto NORMAL_IRQ_EXIT;
+		}
+	}
+	if (status & cmd_done) {
+		info->state = STATE_CMD_DONE;
+		is_completed = 1;
+	}
+	if (status & ready) {
+		info->state = STATE_READY;
+		is_ready = 1;
+	}
+
+	/*
+	 * Clear all status bits before issuing the next command, which
+	 * can and will alter the status bits and raise a new interrupt
+	 * of its own. This lets the controller exit the IRQ.
+	 */
+	nand_writel(info, NDSR, status);
+
+	if (status & NDSR_WRCMDREQ) {
+		status &= ~NDSR_WRCMDREQ;
+		info->state = STATE_CMD_HANDLE;
+
+		/*
+		 * Command buffer registers NDCB{0-2} (and optionally NDCB3)
+		 * must be loaded by writing either 12 or 16 bytes directly
+		 * to NDCB0, four bytes at a time.
+		 *
+		 * Direct write access to NDCB1, NDCB2 and NDCB3 is ignored
+		 * but each NDCBx register can be read.
+		 */
+		nand_writel(info, NDCB0, info->ndcb0);
+		nand_writel(info, NDCB0, info->ndcb1);
+		nand_writel(info, NDCB0, info->ndcb2);
+
+		/* NDCB3 register is available in NFCv2 (Armada 370/XP SoC) */
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+			nand_writel(info, NDCB0, info->ndcb3);
+	}
+
+	if (is_completed)
+		complete(&info->cmd_complete);
+	if (is_ready)
+		complete(&info->dev_ready);
+NORMAL_IRQ_EXIT:
+	return ret;
+}
+
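+/*
+ * Check whether a buffer only contains 0xff bytes. This is used both to
+ * skip programming blank pages and to ignore spurious uncorrectable ECC
+ * errors reported on erased pages.
+ */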
+static inline int is_buf_blank(uint8_t *buf, size_t len)
+{
+	for (; len > 0; len--)
+		if (*buf++ != 0xff)
+			return 0;
+	return 1;
+}
+
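+/*
+ * Pack the column and page address into the NDCB1/NDCB2 command buffer
+ * values: small-page devices take a one-byte column and up to three row
+ * address bytes in NDCB1, while large-page devices take a two-byte column
+ * plus two row bytes in NDCB1 and the third row byte in NDCB2.
+ */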
+static void set_command_address(struct pxa3xx_nand_info *info,
+		unsigned int page_size, uint16_t column, int page_addr)
+{
+	/* small page addr setting */
+	if (page_size < PAGE_CHUNK_SIZE) {
+		info->ndcb1 = ((page_addr & 0xFFFFFF) << 8)
+				| (column & 0xFF);
+
+		info->ndcb2 = 0;
+	} else {
+		info->ndcb1 = ((page_addr & 0xFFFF) << 16)
+				| (column & 0xFFFF);
+
+		if (page_addr & 0xFF0000)
+			info->ndcb2 = (page_addr & 0xFF0000) >> 16;
+		else
+			info->ndcb2 = 0;
+	}
+}
+
+static void prepare_start_command(struct pxa3xx_nand_info *info, int command)
+{
+	struct pxa3xx_nand_host *host = info->host[info->cs];
+	struct mtd_info *mtd = nand_to_mtd(&host->chip);
+
+	/* reset the data and OOB column pointers */
+	info->buf_start		= 0;
+	info->buf_count		= 0;
+	info->data_buff_pos	= 0;
+	info->oob_buff_pos	= 0;
+	info->step_chunk_size   = 0;
+	info->step_spare_size   = 0;
+	info->cur_chunk         = 0;
+	info->use_ecc		= 0;
+	info->use_spare		= 1;
+	info->retcode		= ERR_NONE;
+	info->ecc_err_cnt	= 0;
+	info->ndcb3		= 0;
+	info->need_wait		= 0;
+
+	switch (command) {
+	case NAND_CMD_READ0:
+	case NAND_CMD_PAGEPROG:
+		info->use_ecc = 1;
+		break;
+	case NAND_CMD_PARAM:
+		info->use_spare = 0;
+		break;
+	default:
+		info->ndcb1 = 0;
+		info->ndcb2 = 0;
+		break;
+	}
+
+	/*
+	 * If we are about to issue a read command, or about to set
+	 * the write address, then clean the data buffer.
+	 */
+	if (command == NAND_CMD_READ0 ||
+	    command == NAND_CMD_READOOB ||
+	    command == NAND_CMD_SEQIN) {
+
+		info->buf_count = mtd->writesize + mtd->oobsize;
+		memset(info->data_buff, 0xFF, info->buf_count);
+	}
+
+}
+
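+/*
+ * Fill the NDCB0-3 values for the requested NAND command. Returns 1 if the
+ * command must actually be issued to the controller, 0 if it can be skipped
+ * (e.g. NAND_CMD_ERASE2, or programming a blank page).
+ */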
+static int prepare_set_command(struct pxa3xx_nand_info *info, int command,
+		int ext_cmd_type, uint16_t column, int page_addr)
+{
+	int addr_cycle, exec_cmd;
+	struct pxa3xx_nand_host *host;
+	struct mtd_info *mtd;
+
+	host = info->host[info->cs];
+	mtd = nand_to_mtd(&host->chip);
+	addr_cycle = 0;
+	exec_cmd = 1;
+
+	if (info->cs != 0)
+		info->ndcb0 = NDCB0_CSEL;
+	else
+		info->ndcb0 = 0;
+
+	if (command == NAND_CMD_SEQIN)
+		exec_cmd = 0;
+
+	addr_cycle = NDCB0_ADDR_CYC(host->row_addr_cycles
+				    + host->col_addr_cycles);
+
+	switch (command) {
+	case NAND_CMD_READOOB:
+	case NAND_CMD_READ0:
+		info->buf_start = column;
+		info->ndcb0 |= NDCB0_CMD_TYPE(0)
+				| addr_cycle
+				| NAND_CMD_READ0;
+
+		if (command == NAND_CMD_READOOB)
+			info->buf_start += mtd->writesize;
+
+		if (info->cur_chunk < info->nfullchunks) {
+			info->step_chunk_size = info->chunk_size;
+			info->step_spare_size = info->spare_size;
+		} else {
+			info->step_chunk_size = info->last_chunk_size;
+			info->step_spare_size = info->last_spare_size;
+		}
+
+		/*
+		 * Multiple page read needs an 'extended command type' field,
+		 * which is either naked-read or last-read according to the
+		 * state.
+		 */
+		if (mtd->writesize == PAGE_CHUNK_SIZE) {
+			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8);
+		} else if (mtd->writesize > PAGE_CHUNK_SIZE) {
+			info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8)
+					| NDCB0_LEN_OVRD
+					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+			info->ndcb3 = info->step_chunk_size +
+				info->step_spare_size;
+		}
+
+		set_command_address(info, mtd->writesize, column, page_addr);
+		break;
+
+	case NAND_CMD_SEQIN:
+
+		info->buf_start = column;
+		set_command_address(info, mtd->writesize, 0, page_addr);
+
+		/*
+		 * Multiple page programming needs to execute the initial
+		 * SEQIN command that sets the page address.
+		 */
+		if (mtd->writesize > PAGE_CHUNK_SIZE) {
+			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+				| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+				| addr_cycle
+				| command;
+			exec_cmd = 1;
+		}
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		if (is_buf_blank(info->data_buff,
+					(mtd->writesize + mtd->oobsize))) {
+			exec_cmd = 0;
+			break;
+		}
+
+		if (info->cur_chunk < info->nfullchunks) {
+			info->step_chunk_size = info->chunk_size;
+			info->step_spare_size = info->spare_size;
+		} else {
+			info->step_chunk_size = info->last_chunk_size;
+			info->step_spare_size = info->last_spare_size;
+		}
+
+		/* Second command setting for large pages */
+		if (mtd->writesize > PAGE_CHUNK_SIZE) {
+			/*
+			 * Multiple page write uses the 'extended command'
+			 * field. This can be used to issue a command dispatch
+			 * or a naked-write depending on the current stage.
+			 */
+			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+					| NDCB0_LEN_OVRD
+					| NDCB0_EXT_CMD_TYPE(ext_cmd_type);
+			info->ndcb3 = info->step_chunk_size +
+				      info->step_spare_size;
+
+			/*
+			 * This is the command dispatch that completes a chunked
+			 * page program operation.
+			 */
+			if (info->cur_chunk == info->ntotalchunks) {
+				info->ndcb0 = NDCB0_CMD_TYPE(0x1)
+					| NDCB0_EXT_CMD_TYPE(ext_cmd_type)
+					| command;
+				info->ndcb1 = 0;
+				info->ndcb2 = 0;
+				info->ndcb3 = 0;
+			}
+		} else {
+			info->ndcb0 |= NDCB0_CMD_TYPE(0x1)
+					| NDCB0_AUTO_RS
+					| NDCB0_ST_ROW_EN
+					| NDCB0_DBC
+					| (NAND_CMD_PAGEPROG << 8)
+					| NAND_CMD_SEQIN
+					| addr_cycle;
+		}
+		break;
+
+	case NAND_CMD_PARAM:
+		info->buf_count = INIT_BUFFER_SIZE;
+		info->ndcb0 |= NDCB0_CMD_TYPE(0)
+				| NDCB0_ADDR_CYC(1)
+				| NDCB0_LEN_OVRD
+				| command;
+		info->ndcb1 = (column & 0xFF);
+		info->ndcb3 = INIT_BUFFER_SIZE;
+		info->step_chunk_size = INIT_BUFFER_SIZE;
+		break;
+
+	case NAND_CMD_READID:
+		info->buf_count = READ_ID_BYTES;
+		info->ndcb0 |= NDCB0_CMD_TYPE(3)
+				| NDCB0_ADDR_CYC(1)
+				| command;
+		info->ndcb1 = (column & 0xFF);
+
+		info->step_chunk_size = 8;
+		break;
+	case NAND_CMD_STATUS:
+		info->buf_count = 1;
+		info->ndcb0 |= NDCB0_CMD_TYPE(4)
+				| NDCB0_ADDR_CYC(1)
+				| command;
+
+		info->step_chunk_size = 8;
+		break;
+
+	case NAND_CMD_ERASE1:
+		info->ndcb0 |= NDCB0_CMD_TYPE(2)
+				| NDCB0_AUTO_RS
+				| NDCB0_ADDR_CYC(3)
+				| NDCB0_DBC
+				| (NAND_CMD_ERASE2 << 8)
+				| NAND_CMD_ERASE1;
+		info->ndcb1 = page_addr;
+		info->ndcb2 = 0;
+
+		break;
+	case NAND_CMD_RESET:
+		info->ndcb0 |= NDCB0_CMD_TYPE(5)
+				| command;
+
+		break;
+
+	case NAND_CMD_ERASE2:
+		exec_cmd = 0;
+		break;
+
+	default:
+		exec_cmd = 0;
+		dev_err(&info->pdev->dev, "non-supported command %x\n",
+				command);
+		break;
+	}
+
+	return exec_cmd;
+}
+
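+/*
+ * Standard (non-chunked) command path: one controller command per
+ * NAND_CMD_* request, used when the page fits in the controller FIFO.
+ */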
+static void nand_cmdfunc(struct mtd_info *mtd, unsigned command,
+			 int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int exec_cmd;
+
+	/*
+	 * If this is an x16 device, then convert the input
+	 * "byte" address into a "word" address appropriate
+	 * for indexing a word-oriented device.
+	 */
+	if (info->reg_ndcr & NDCR_DWIDTH_M)
+		column /= 2;
+
+	/*
+	 * Different NAND chips may be hooked to different chip
+	 * selects, so check whether the chip select has changed;
+	 * if so, reset the timings.
+	 */
+	if (info->cs != host->cs) {
+		info->cs = host->cs;
+		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+	}
+
+	prepare_start_command(info, command);
+
+	info->state = STATE_PREPARED;
+	exec_cmd = prepare_set_command(info, command, 0, column, page_addr);
+
+	if (exec_cmd) {
+		init_completion(&info->cmd_complete);
+		init_completion(&info->dev_ready);
+		info->need_wait = 1;
+		pxa3xx_nand_start(info);
+
+		if (!wait_for_completion_timeout(&info->cmd_complete,
+		    CHIP_DELAY_TIMEOUT)) {
+			dev_err(&info->pdev->dev, "Wait time out!!!\n");
+			/* Stop State Machine for next command cycle */
+			pxa3xx_nand_stop(info);
+		}
+	}
+	info->state = STATE_IDLE;
+}
+
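+/*
+ * Extended (chunked) command path used on the Armada 370/XP variant when
+ * the page is larger than the controller FIFO: read and program operations
+ * are split into several naked read/write steps, one per chunk.
+ */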
+static void nand_cmdfunc_extended(struct mtd_info *mtd,
+				  const unsigned command,
+				  int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int exec_cmd, ext_cmd_type;
+
+	/*
+	 * If this is an x16 device, then convert the input
+	 * "byte" address into a "word" address appropriate
+	 * for indexing a word-oriented device.
+	 */
+	if (info->reg_ndcr & NDCR_DWIDTH_M)
+		column /= 2;
+
+	/*
+	 * Different NAND chips may be hooked to different chip
+	 * selects, so check whether the chip select has changed;
+	 * if so, reset the timings.
+	 */
+	if (info->cs != host->cs) {
+		info->cs = host->cs;
+		nand_writel(info, NDTR0CS0, info->ndtr0cs0);
+		nand_writel(info, NDTR1CS0, info->ndtr1cs0);
+	}
+
+	/* Select the extended command for the first command */
+	switch (command) {
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+		ext_cmd_type = EXT_CMD_TYPE_MONO;
+		break;
+	case NAND_CMD_SEQIN:
+		ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+		break;
+	case NAND_CMD_PAGEPROG:
+		ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+		break;
+	default:
+		ext_cmd_type = 0;
+		break;
+	}
+
+	prepare_start_command(info, command);
+
+	/*
+	 * Prepare the "is ready" completion before starting a command
+	 * transaction sequence. If the command is not executed, the
+	 * completion is completed right away; see below.
+	 *
+	 * We can do that inside the loop because the command variable
+	 * is invariant and thus so is exec_cmd.
+	 */
+	info->need_wait = 1;
+	init_completion(&info->dev_ready);
+	do {
+		info->state = STATE_PREPARED;
+
+		exec_cmd = prepare_set_command(info, command, ext_cmd_type,
+					       column, page_addr);
+		if (!exec_cmd) {
+			info->need_wait = 0;
+			complete(&info->dev_ready);
+			break;
+		}
+
+		init_completion(&info->cmd_complete);
+		pxa3xx_nand_start(info);
+
+		if (!wait_for_completion_timeout(&info->cmd_complete,
+		    CHIP_DELAY_TIMEOUT)) {
+			dev_err(&info->pdev->dev, "Wait time out!!!\n");
+			/* Stop State Machine for next command cycle */
+			pxa3xx_nand_stop(info);
+			break;
+		}
+
+		/* Only a few commands need several steps */
+		if (command != NAND_CMD_PAGEPROG &&
+		    command != NAND_CMD_READ0    &&
+		    command != NAND_CMD_READOOB)
+			break;
+
+		info->cur_chunk++;
+
+		/* Check if the sequence is complete */
+		if (info->cur_chunk == info->ntotalchunks &&
+		    command != NAND_CMD_PAGEPROG)
+			break;
+
+		/*
+		 * After a split program command sequence has issued
+		 * the command dispatch, the command sequence is complete.
+		 */
+		if (info->cur_chunk == (info->ntotalchunks + 1) &&
+		    command == NAND_CMD_PAGEPROG &&
+		    ext_cmd_type == EXT_CMD_TYPE_DISPATCH)
+			break;
+
+		if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) {
+			/* Last read: issue a 'last naked read' */
+			if (info->cur_chunk == info->ntotalchunks - 1)
+				ext_cmd_type = EXT_CMD_TYPE_LAST_RW;
+			else
+				ext_cmd_type = EXT_CMD_TYPE_NAKED_RW;
+
+		/*
+		 * If a split program command has no more data to transfer,
+		 * the command dispatch must be issued to complete it.
+		 */
+		} else if (command == NAND_CMD_PAGEPROG &&
+			   info->cur_chunk == info->ntotalchunks) {
+			ext_cmd_type = EXT_CMD_TYPE_DISPATCH;
+		}
+	} while (1);
+
+	info->state = STATE_IDLE;
+}
+
+static int pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
+		struct nand_chip *chip, const uint8_t *buf, int oob_required,
+		int page)
+{
+	chip->write_buf(mtd, buf, mtd->writesize);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	return 0;
+}
+
+static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
+		struct nand_chip *chip, uint8_t *buf, int oob_required,
+		int page)
+{
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+
+	chip->read_buf(mtd, buf, mtd->writesize);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	if (info->retcode == ERR_CORERR && info->use_ecc) {
+		mtd->ecc_stats.corrected += info->ecc_err_cnt;
+
+	} else if (info->retcode == ERR_UNCORERR) {
+		/*
+		 * For a blank page (all 0xff), the HW will calculate its
+		 * ECC as 0, which differs from the ECC information within
+		 * the OOB area; ignore such uncorrectable errors.
+		 */
+		if (is_buf_blank(buf, mtd->writesize))
+			info->retcode = ERR_NONE;
+		else
+			mtd->ecc_stats.failed++;
+	}
+
+	return info->max_bitflips;
+}
+
+static uint8_t pxa3xx_nand_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	char retval = 0xFF;
+
+	if (info->buf_start < info->buf_count)
+		/* Has a new command just been sent? */
+		retval = info->data_buff[info->buf_start++];
+
+	return retval;
+}
+
+static u16 pxa3xx_nand_read_word(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	u16 retval = 0xFFFF;
+
+	if (!(info->buf_start & 0x01) && info->buf_start < info->buf_count) {
+		retval = *((u16 *)(info->data_buff+info->buf_start));
+		info->buf_start += 2;
+	}
+	return retval;
+}
+
+static void pxa3xx_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+	memcpy(buf, info->data_buff + info->buf_start, real_len);
+	info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_write_buf(struct mtd_info *mtd,
+		const uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	int real_len = min_t(size_t, len, info->buf_count - info->buf_start);
+
+	memcpy(info->data_buff + info->buf_start, buf, real_len);
+	info->buf_start += real_len;
+}
+
+static void pxa3xx_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+	return;
+}
+
+static int pxa3xx_nand_waitfunc(struct mtd_info *mtd, struct nand_chip *this)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+
+	if (info->need_wait) {
+		info->need_wait = 0;
+		if (!wait_for_completion_timeout(&info->dev_ready,
+		    CHIP_DELAY_TIMEOUT)) {
+			dev_err(&info->pdev->dev, "Ready time out!!!\n");
+			return NAND_STATUS_FAIL;
+		}
+	}
+
+	/* pxa3xx_nand_send_command has waited for command complete */
+	if (this->state == FL_WRITING || this->state == FL_ERASING) {
+		if (info->retcode == ERR_NONE)
+			return 0;
+		else
+			return NAND_STATUS_FAIL;
+	}
+
+	return NAND_STATUS_READY;
+}
+
+static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info)
+{
+	struct pxa3xx_nand_host *host = info->host[info->cs];
+	struct platform_device *pdev = info->pdev;
+	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	const struct nand_sdr_timings *timings;
+
+	/* Configure default flash values */
+	info->chunk_size = PAGE_CHUNK_SIZE;
+	info->reg_ndcr = 0x0; /* enable all interrupts */
+	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+	info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES);
+	info->reg_ndcr |= NDCR_SPARE_EN;
+
+	/* use the slowest ONFI timing mode (mode 0) as a first try */
+	timings = onfi_async_timing_mode_to_sdr_timings(0);
+	if (IS_ERR(timings))
+		return PTR_ERR(timings);
+
+	pxa3xx_nand_set_sdr_timing(host, timings);
+	return 0;
+}
+
+static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info)
+{
+	struct pxa3xx_nand_host *host = info->host[info->cs];
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+
+	info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0;
+	info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0;
+	info->reg_ndcr |= (mtd->writesize == 2048) ? NDCR_PAGE_SZ : 0;
+}
+
+static void pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info)
+{
+	struct platform_device *pdev = info->pdev;
+	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	uint32_t ndcr = nand_readl(info, NDCR);
+
+	/* Set an initial chunk size */
+	info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512;
+	info->reg_ndcr = ndcr &
+		~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL);
+	info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0;
+	info->ndtr0cs0 = nand_readl(info, NDTR0CS0);
+	info->ndtr1cs0 = nand_readl(info, NDTR1CS0);
+}
+
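+/*
+ * Allocate the data buffer and, when DMA is enabled, request and configure
+ * the DMA channel used to transfer data to/from the controller FIFO (NDDB).
+ */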
+static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
+{
+	struct platform_device *pdev = info->pdev;
+	struct dma_slave_config	config;
+	dma_cap_mask_t mask;
+	struct pxad_param param;
+	int ret;
+
+	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+	if (info->data_buff == NULL)
+		return -ENOMEM;
+	if (use_dma == 0)
+		return 0;
+
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+	if (ret)
+		return ret;
+
+	sg_init_one(&info->sg, info->data_buff, info->buf_size);
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	param.prio = PXAD_PRIO_LOWEST;
+	param.drcmr = info->drcmr_dat;
+	info->dma_chan = dma_request_slave_channel_compat(mask, pxad_filter_fn,
+							  &param, &pdev->dev,
+							  "data");
+	if (!info->dma_chan) {
+		dev_err(&pdev->dev, "unable to request data dma channel\n");
+		return -ENODEV;
+	}
+
+	memset(&config, 0, sizeof(config));
+	config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+	config.src_addr = info->mmio_phys + NDDB;
+	config.dst_addr = info->mmio_phys + NDDB;
+	config.src_maxburst = 32;
+	config.dst_maxburst = 32;
+	ret = dmaengine_slave_config(info->dma_chan, &config);
+	if (ret < 0) {
+		dev_err(&info->pdev->dev,
+			"dma channel configuration failed: %d\n",
+			ret);
+		return ret;
+	}
+
+	/*
+	 * Now that DMA buffers are allocated we turn on
+	 * DMA proper for I/O operations.
+	 */
+	info->use_dma = 1;
+	return 0;
+}
+
+static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
+{
+	if (info->use_dma) {
+		dmaengine_terminate_all(info->dma_chan);
+		dma_release_channel(info->dma_chan);
+	}
+	kfree(info->data_buff);
+}
+
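+/*
+ * Select one of the fixed ECC layouts supported by the controller (chunk
+ * count/size, spare and ECC bytes, BCH usage) that satisfies the strength
+ * and step size required by the NAND device. The selected strength may be
+ * higher than the requested one.
+ */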
+static int pxa_ecc_init(struct pxa3xx_nand_info *info,
+			struct mtd_info *mtd,
+			int strength, int ecc_stepsize, int page_size)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) {
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
+		info->chunk_size = 2048;
+		info->spare_size = 40;
+		info->ecc_size = 24;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = 512;
+		ecc->strength = 1;
+
+	} else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) {
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
+		info->chunk_size = 512;
+		info->spare_size = 8;
+		info->ecc_size = 8;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = 512;
+		ecc->strength = 1;
+
+	/*
+	 * Required ECC: 4-bit correction per 512 bytes
+	 * Select: 16-bit correction per 2048 bytes
+	 */
+	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) {
+		info->ecc_bch = 1;
+		info->nfullchunks = 1;
+		info->ntotalchunks = 1;
+		info->chunk_size = 2048;
+		info->spare_size = 32;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
+		ecc->strength = 16;
+
+	} else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) {
+		info->ecc_bch = 1;
+		info->nfullchunks = 2;
+		info->ntotalchunks = 2;
+		info->chunk_size = 2048;
+		info->spare_size = 32;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
+		ecc->strength = 16;
+
+	/*
+	 * Required ECC: 8-bit correction per 512 bytes
+	 * Select: 16-bit correction per 1024 bytes
+	 */
+	} else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) {
+		info->ecc_bch = 1;
+		info->nfullchunks = 4;
+		info->ntotalchunks = 5;
+		info->chunk_size = 1024;
+		info->spare_size = 0;
+		info->last_chunk_size = 0;
+		info->last_spare_size = 64;
+		info->ecc_size = 32;
+		ecc->mode = NAND_ECC_HW;
+		ecc->size = info->chunk_size;
+		mtd_set_ooblayout(mtd, &pxa3xx_ooblayout_ops);
+		ecc->strength = 16;
+	} else {
+		dev_err(&info->pdev->dev,
+			"ECC strength %d at page size %d is not supported\n",
+			strength, page_size);
+		return -ENODEV;
+	}
+
+	dev_info(&info->pdev->dev, "ECC strength %d, ECC step size %d\n",
+		 ecc->strength, ecc->size);
+	return 0;
+}
+
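+/*
+ * Probe-time scan: detect or configure the controller, identify the NAND
+ * device, select the ECC layout, switch to the extended command handler
+ * for large pages and allocate the final data + OOB buffer before calling
+ * nand_scan_tail().
+ */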
+static int pxa3xx_nand_scan(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct pxa3xx_nand_host *host = nand_get_controller_data(chip);
+	struct pxa3xx_nand_info *info = host->info_data;
+	struct platform_device *pdev = info->pdev;
+	struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	int ret;
+	uint16_t ecc_strength, ecc_step;
+
+	if (pdata->keep_config) {
+		pxa3xx_nand_detect_config(info);
+	} else {
+		ret = pxa3xx_nand_config_ident(info);
+		if (ret)
+			return ret;
+	}
+
+	if (info->reg_ndcr & NDCR_DWIDTH_M)
+		chip->options |= NAND_BUSWIDTH_16;
+
+	/* Device detection must be done with ECC disabled */
+	if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370)
+		nand_writel(info, NDECCCTRL, 0x0);
+
+	if (pdata->flash_bbt)
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+
+	chip->ecc.strength = pdata->ecc_strength;
+	chip->ecc.size = pdata->ecc_step_size;
+
+	if (nand_scan_ident(mtd, 1, NULL))
+		return -ENODEV;
+
+	if (!pdata->keep_config) {
+		ret = pxa3xx_nand_init(host);
+		if (ret) {
+			dev_err(&info->pdev->dev, "Failed to init nand: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	if (chip->bbt_options & NAND_BBT_USE_FLASH) {
+		/*
+		 * We'll use a bad block table stored in-flash and don't
+		 * allow writing the bad block marker to the flash.
+		 */
+		chip->bbt_options |= NAND_BBT_NO_OOB_BBM;
+		chip->bbt_td = &bbt_main_descr;
+		chip->bbt_md = &bbt_mirror_descr;
+	}
+
+	/*
+	 * If the page size is bigger than the FIFO size, let's check
+	 * we were given the right variant and then switch to the extended
+	 * (aka split) command handling.
+	 */
+	if (mtd->writesize > PAGE_CHUNK_SIZE) {
+		if (info->variant == PXA3XX_NAND_VARIANT_ARMADA370) {
+			chip->cmdfunc = nand_cmdfunc_extended;
+		} else {
+			dev_err(&info->pdev->dev,
+				"unsupported page size on this variant\n");
+			return -ENODEV;
+		}
+	}
+
+	ecc_strength = chip->ecc.strength;
+	ecc_step = chip->ecc.size;
+	if (!ecc_strength || !ecc_step) {
+		ecc_strength = chip->ecc_strength_ds;
+		ecc_step = chip->ecc_step_ds;
+	}
+
+	/* Set default ECC strength requirements on non-ONFI devices */
+	if (ecc_strength < 1 && ecc_step < 1) {
+		ecc_strength = 1;
+		ecc_step = 512;
+	}
+
+	ret = pxa_ecc_init(info, mtd, ecc_strength,
+			   ecc_step, mtd->writesize);
+	if (ret)
+		return ret;
+
+	/* calculate addressing information */
+	if (mtd->writesize >= 2048)
+		host->col_addr_cycles = 2;
+	else
+		host->col_addr_cycles = 1;
+
+	/* release the initial buffer */
+	kfree(info->data_buff);
+
+	/* allocate the real data + oob buffer */
+	info->buf_size = mtd->writesize + mtd->oobsize;
+	ret = pxa3xx_nand_init_buff(info);
+	if (ret)
+		return ret;
+	info->oob_buff = info->data_buff + mtd->writesize;
+
+	if ((mtd->size >> chip->page_shift) > 65536)
+		host->row_addr_cycles = 3;
+	else
+		host->row_addr_cycles = 2;
+
+	if (!pdata->keep_config)
+		pxa3xx_nand_config_tail(info);
+
+	return nand_scan_tail(mtd);
+}
+
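+/*
+ * Allocate the controller state and one host per chip select, then grab
+ * the clock, optional DMA resource, MMIO region and IRQ, and install the
+ * threaded interrupt handler.
+ */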
+static int alloc_nand_resource(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct pxa3xx_nand_platform_data *pdata;
+	struct pxa3xx_nand_info *info;
+	struct pxa3xx_nand_host *host;
+	struct nand_chip *chip = NULL;
+	struct mtd_info *mtd;
+	struct resource *r;
+	int ret, irq, cs;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (pdata->num_cs <= 0)
+		return -ENODEV;
+	info = devm_kzalloc(&pdev->dev,
+			    sizeof(*info) + sizeof(*host) * pdata->num_cs,
+			    GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	info->pdev = pdev;
+	info->variant = pxa3xx_nand_get_variant(pdev);
+	for (cs = 0; cs < pdata->num_cs; cs++) {
+		host = (void *)&info[1] + sizeof(*host) * cs;
+		chip = &host->chip;
+		nand_set_controller_data(chip, host);
+		mtd = nand_to_mtd(chip);
+		info->host[cs] = host;
+		host->cs = cs;
+		host->info_data = info;
+		mtd->dev.parent = &pdev->dev;
+		/* FIXME: all chips use the same device tree partitions */
+		nand_set_flash_node(chip, np);
+
+		nand_set_controller_data(chip, host);
+		chip->ecc.read_page	= pxa3xx_nand_read_page_hwecc;
+		chip->ecc.write_page	= pxa3xx_nand_write_page_hwecc;
+		chip->controller        = &info->controller;
+		chip->waitfunc		= pxa3xx_nand_waitfunc;
+		chip->select_chip	= pxa3xx_nand_select_chip;
+		chip->read_word		= pxa3xx_nand_read_word;
+		chip->read_byte		= pxa3xx_nand_read_byte;
+		chip->read_buf		= pxa3xx_nand_read_buf;
+		chip->write_buf		= pxa3xx_nand_write_buf;
+		chip->options		|= NAND_NO_SUBPAGE_WRITE;
+		chip->cmdfunc		= nand_cmdfunc;
+	}
+
+	nand_hw_control_init(chip->controller);
+	info->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(info->clk)) {
+		dev_err(&pdev->dev, "failed to get nand clock\n");
+		return PTR_ERR(info->clk);
+	}
+	ret = clk_prepare_enable(info->clk);
+	if (ret < 0)
+		return ret;
+
+	if (!np && use_dma) {
+		r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+		if (r == NULL) {
+			dev_err(&pdev->dev,
+				"no resource defined for data DMA\n");
+			ret = -ENXIO;
+			goto fail_disable_clk;
+		}
+		info->drcmr_dat = r->start;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "no IRQ resource defined\n");
+		ret = -ENXIO;
+		goto fail_disable_clk;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(info->mmio_base)) {
+		ret = PTR_ERR(info->mmio_base);
+		goto fail_disable_clk;
+	}
+	info->mmio_phys = r->start;
+
+	/* Allocate a buffer to allow flash detection */
+	info->buf_size = INIT_BUFFER_SIZE;
+	info->data_buff = kmalloc(info->buf_size, GFP_KERNEL);
+	if (info->data_buff == NULL) {
+		ret = -ENOMEM;
+		goto fail_disable_clk;
+	}
+
+	/* initialize all interrupts to be disabled */
+	disable_int(info, NDSR_MASK);
+
+	ret = request_threaded_irq(irq, pxa3xx_nand_irq,
+				   pxa3xx_nand_irq_thread, IRQF_ONESHOT,
+				   pdev->name, info);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to request IRQ\n");
+		goto fail_free_buf;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	return 0;
+
+fail_free_buf:
+	free_irq(irq, info);
+	kfree(info->data_buff);
+fail_disable_clk:
+	clk_disable_unprepare(info->clk);
+	return ret;
+}
+
+static int pxa3xx_nand_remove(struct platform_device *pdev)
+{
+	struct pxa3xx_nand_info *info = platform_get_drvdata(pdev);
+	struct pxa3xx_nand_platform_data *pdata;
+	int irq, cs;
+
+	if (!info)
+		return 0;
+
+	pdata = dev_get_platdata(&pdev->dev);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq >= 0)
+		free_irq(irq, info);
+	pxa3xx_nand_free_buff(info);
+
+	/*
+	 * In the pxa3xx case, the DFI bus is shared between the SMC and NFC.
+	 * In order to prevent a lockup of the system bus, the DFI bus
+	 * arbitration is granted to SMC upon driver removal. This is done by
+	 * setting the x_ARB_CNTL bit, which also prevents the NAND from
+	 * accessing the bus anymore.
+	 */
+	nand_writel(info, NDCR,
+		    (nand_readl(info, NDCR) & ~NDCR_ND_ARB_EN) |
+		    NFCV1_NDCR_ARB_CNTL);
+	clk_disable_unprepare(info->clk);
+
+	for (cs = 0; cs < pdata->num_cs; cs++)
+		nand_release(nand_to_mtd(&info->host[cs]->chip));
+	return 0;
+}
+
+static int pxa3xx_nand_probe_dt(struct platform_device *pdev)
+{
+	struct pxa3xx_nand_platform_data *pdata;
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *of_id =
+			of_match_device(pxa3xx_nand_dt_ids, &pdev->dev);
+
+	if (!of_id)
+		return 0;
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return -ENOMEM;
+
+	if (of_get_property(np, "marvell,nand-enable-arbiter", NULL))
+		pdata->enable_arbiter = 1;
+	if (of_get_property(np, "marvell,nand-keep-config", NULL))
+		pdata->keep_config = 1;
+	of_property_read_u32(np, "num-cs", &pdata->num_cs);
+
+	pdev->dev.platform_data = pdata;
+
+	return 0;
+}
+
+static int pxa3xx_nand_probe(struct platform_device *pdev)
+{
+	struct pxa3xx_nand_platform_data *pdata;
+	struct pxa3xx_nand_info *info;
+	int ret, cs, probe_success, dma_available;
+
+	dma_available = IS_ENABLED(CONFIG_ARM) &&
+		(IS_ENABLED(CONFIG_ARCH_PXA) || IS_ENABLED(CONFIG_ARCH_MMP));
+	if (use_dma && !dma_available) {
+		use_dma = 0;
+		dev_warn(&pdev->dev,
+			 "This platform can't do DMA on this device\n");
+	}
+
+	ret = pxa3xx_nand_probe_dt(pdev);
+	if (ret)
+		return ret;
+
+	pdata = dev_get_platdata(&pdev->dev);
+	if (!pdata) {
+		dev_err(&pdev->dev, "no platform data defined\n");
+		return -ENODEV;
+	}
+
+	ret = alloc_nand_resource(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "alloc nand resource failed\n");
+		return ret;
+	}
+
+	info = platform_get_drvdata(pdev);
+	probe_success = 0;
+	for (cs = 0; cs < pdata->num_cs; cs++) {
+		struct mtd_info *mtd = nand_to_mtd(&info->host[cs]->chip);
+
+		/*
+		 * The mtd name matches the one used in the 'mtdparts' kernel
+		 * parameter. This name cannot be changed, otherwise the
+		 * user's mtd partition configuration would break.
+		 */
+		mtd->name = "pxa3xx_nand-0";
+		info->cs = cs;
+		ret = pxa3xx_nand_scan(mtd);
+		if (ret) {
+			dev_warn(&pdev->dev, "failed to scan nand at cs %d\n",
+				cs);
+			continue;
+		}
+
+		ret = mtd_device_register(mtd, pdata->parts[cs],
+					  pdata->nr_parts[cs]);
+		if (!ret)
+			probe_success = 1;
+	}
+
+	if (!probe_success) {
+		pxa3xx_nand_remove(pdev);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int pxa3xx_nand_suspend(struct device *dev)
+{
+	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
+
+	if (info->state) {
+		dev_err(dev, "driver busy, state = %d\n", info->state);
+		return -EAGAIN;
+	}
+
+	clk_disable(info->clk);
+	return 0;
+}
+
+static int pxa3xx_nand_resume(struct device *dev)
+{
+	struct pxa3xx_nand_info *info = dev_get_drvdata(dev);
+	int ret;
+
+	ret = clk_enable(info->clk);
+	if (ret < 0)
+		return ret;
+
+	/* We don't want to handle interrupts without calling mtd routines */
+	disable_int(info, NDCR_INT_MASK);
+
+	/*
+	 * Directly set the chip select to an invalid value, so that
+	 * the driver resets the timings according to the current chip
+	 * select at the beginning of cmdfunc.
+	 */
+	info->cs = 0xff;
+
+	/*
+	 * As the spec says, NDSR is updated to 0x1800 when the nand_clk
+	 * is disabled/enabled.
+	 * To prevent this from corrupting the driver's state machine,
+	 * clear all status bits before resuming.
+	 */
+	nand_writel(info, NDSR, NDSR_MASK);
+
+	return 0;
+}
+#else
+#define pxa3xx_nand_suspend	NULL
+#define pxa3xx_nand_resume	NULL
+#endif
+
+static const struct dev_pm_ops pxa3xx_nand_pm_ops = {
+	.suspend	= pxa3xx_nand_suspend,
+	.resume		= pxa3xx_nand_resume,
+};
+
+static struct platform_driver pxa3xx_nand_driver = {
+	.driver = {
+		.name	= "pxa3xx-nand",
+		.of_match_table = pxa3xx_nand_dt_ids,
+		.pm	= &pxa3xx_nand_pm_ops,
+	},
+	.probe		= pxa3xx_nand_probe,
+	.remove		= pxa3xx_nand_remove,
+};
+
+module_platform_driver(pxa3xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/rawnand/qcom_nandc.c b/drivers/mtd/nand/rawnand/qcom_nandc.c
new file mode 100644
index 000000000000..a77c66f4d8bc
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/qcom_nandc.c
@@ -0,0 +1,2208 @@ 
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/module.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+
+/* NANDc reg offsets */
+#define	NAND_FLASH_CMD			0x00
+#define	NAND_ADDR0			0x04
+#define	NAND_ADDR1			0x08
+#define	NAND_FLASH_CHIP_SELECT		0x0c
+#define	NAND_EXEC_CMD			0x10
+#define	NAND_FLASH_STATUS		0x14
+#define	NAND_BUFFER_STATUS		0x18
+#define	NAND_DEV0_CFG0			0x20
+#define	NAND_DEV0_CFG1			0x24
+#define	NAND_DEV0_ECC_CFG		0x28
+#define	NAND_DEV1_ECC_CFG		0x2c
+#define	NAND_DEV1_CFG0			0x30
+#define	NAND_DEV1_CFG1			0x34
+#define	NAND_READ_ID			0x40
+#define	NAND_READ_STATUS		0x44
+#define	NAND_DEV_CMD0			0xa0
+#define	NAND_DEV_CMD1			0xa4
+#define	NAND_DEV_CMD2			0xa8
+#define	NAND_DEV_CMD_VLD		0xac
+#define	SFLASHC_BURST_CFG		0xe0
+#define	NAND_ERASED_CW_DETECT_CFG	0xe8
+#define	NAND_ERASED_CW_DETECT_STATUS	0xec
+#define	NAND_EBI2_ECC_BUF_CFG		0xf0
+#define	FLASH_BUF_ACC			0x100
+
+#define	NAND_CTRL			0xf00
+#define	NAND_VERSION			0xf08
+#define	NAND_READ_LOCATION_0		0xf20
+#define	NAND_READ_LOCATION_1		0xf24
+
+/* dummy register offsets, used by write_reg_dma */
+#define	NAND_DEV_CMD1_RESTORE		0xdead
+#define	NAND_DEV_CMD_VLD_RESTORE	0xbeef
+
+/* NAND_FLASH_CMD bits */
+#define	PAGE_ACC			BIT(4)
+#define	LAST_PAGE			BIT(5)
+
+/* NAND_FLASH_CHIP_SELECT bits */
+#define	NAND_DEV_SEL			0
+#define	DM_EN				BIT(2)
+
+/* NAND_FLASH_STATUS bits */
+#define	FS_OP_ERR			BIT(4)
+#define	FS_READY_BSY_N			BIT(5)
+#define	FS_MPU_ERR			BIT(8)
+#define	FS_DEVICE_STS_ERR		BIT(16)
+#define	FS_DEVICE_WP			BIT(23)
+
+/* NAND_BUFFER_STATUS bits */
+#define	BS_UNCORRECTABLE_BIT		BIT(8)
+#define	BS_CORRECTABLE_ERR_MSK		0x1f
+
+/* NAND_DEVn_CFG0 bits */
+#define	DISABLE_STATUS_AFTER_WRITE	4
+#define	CW_PER_PAGE			6
+#define	UD_SIZE_BYTES			9
+#define	ECC_PARITY_SIZE_BYTES_RS	19
+#define	SPARE_SIZE_BYTES		23
+#define	NUM_ADDR_CYCLES			27
+#define	STATUS_BFR_READ			30
+#define	SET_RD_MODE_AFTER_STATUS	31
+
+/* NAND_DEVn_CFG0 bits */
+#define	DEV0_CFG1_ECC_DISABLE		0
+#define	WIDE_FLASH			1
+#define	NAND_RECOVERY_CYCLES		2
+#define	CS_ACTIVE_BSY			5
+#define	BAD_BLOCK_BYTE_NUM		6
+#define	BAD_BLOCK_IN_SPARE_AREA		16
+#define	WR_RD_BSY_GAP			17
+#define	ENABLE_BCH_ECC			27
+
+/* NAND_DEV0_ECC_CFG bits */
+#define	ECC_CFG_ECC_DISABLE		0
+#define	ECC_SW_RESET			1
+#define	ECC_MODE			4
+#define	ECC_PARITY_SIZE_BYTES_BCH	8
+#define	ECC_NUM_DATA_BYTES		16
+#define	ECC_FORCE_CLK_OPEN		30
+
+/* NAND_DEV_CMD1 bits */
+#define	READ_ADDR			0
+
+/* NAND_DEV_CMD_VLD bits */
+#define	READ_START_VLD			0
+
+/* NAND_EBI2_ECC_BUF_CFG bits */
+#define	NUM_STEPS			0
+
+/* NAND_ERASED_CW_DETECT_CFG bits */
+#define	ERASED_CW_ECC_MASK		1
+#define	AUTO_DETECT_RES			0
+#define	MASK_ECC			(1 << ERASED_CW_ECC_MASK)
+#define	RESET_ERASED_DET		(1 << AUTO_DETECT_RES)
+#define	ACTIVE_ERASED_DET		(0 << AUTO_DETECT_RES)
+#define	CLR_ERASED_PAGE_DET		(RESET_ERASED_DET | MASK_ECC)
+#define	SET_ERASED_PAGE_DET		(ACTIVE_ERASED_DET | MASK_ECC)
+
+/* NAND_ERASED_CW_DETECT_STATUS bits */
+#define	PAGE_ALL_ERASED			BIT(7)
+#define	CODEWORD_ALL_ERASED		BIT(6)
+#define	PAGE_ERASED			BIT(5)
+#define	CODEWORD_ERASED			BIT(4)
+#define	ERASED_PAGE			(PAGE_ALL_ERASED | PAGE_ERASED)
+#define	ERASED_CW			(CODEWORD_ALL_ERASED | CODEWORD_ERASED)
+
+/* Version Mask */
+#define	NAND_VERSION_MAJOR_MASK		0xf0000000
+#define	NAND_VERSION_MAJOR_SHIFT	28
+#define	NAND_VERSION_MINOR_MASK		0x0fff0000
+#define	NAND_VERSION_MINOR_SHIFT	16
+
+/* NAND OP_CMDs */
+#define	PAGE_READ			0x2
+#define	PAGE_READ_WITH_ECC		0x3
+#define	PAGE_READ_WITH_ECC_SPARE	0x4
+#define	PROGRAM_PAGE			0x6
+#define	PAGE_PROGRAM_WITH_ECC		0x7
+#define	PROGRAM_PAGE_SPARE		0x9
+#define	BLOCK_ERASE			0xa
+#define	FETCH_ID			0xb
+#define	RESET_DEVICE			0xd
+
+/*
+ * the NAND controller performs reads/writes with ECC in 516 byte chunks.
+ * the driver calls the chunks 'step' or 'codeword' interchangeably
+ */
+#define	NANDC_STEP_SIZE			512
+
+/*
+ * the largest page size we support is 8K, this will have 16 steps/codewords
+ * of 512 bytes each
+ */
+#define	MAX_NUM_STEPS			(SZ_8K / NANDC_STEP_SIZE)
+
+/* we read at most 3 registers per codeword scan */
+#define	MAX_REG_RD			(3 * MAX_NUM_STEPS)
+
+/* ECC modes supported by the controller */
+#define	ECC_NONE	BIT(0)
+#define	ECC_RS_4BIT	BIT(1)
+#define	ECC_BCH_4BIT	BIT(2)
+#define	ECC_BCH_8BIT	BIT(3)
+
+struct desc_info {
+	struct list_head node;
+
+	enum dma_data_direction dir;
+	struct scatterlist sgl;
+	struct dma_async_tx_descriptor *dma_desc;
+};
+
+/*
+ * holds the current register values that we want to write. acts as a contiguous
+ * chunk of memory which we use to write the controller registers through DMA.
+ */
+struct nandc_regs {
+	__le32 cmd;
+	__le32 addr0;
+	__le32 addr1;
+	__le32 chip_sel;
+	__le32 exec;
+
+	__le32 cfg0;
+	__le32 cfg1;
+	__le32 ecc_bch_cfg;
+
+	__le32 clrflashstatus;
+	__le32 clrreadstatus;
+
+	__le32 cmd1;
+	__le32 vld;
+
+	__le32 orig_cmd1;
+	__le32 orig_vld;
+
+	__le32 ecc_buf_cfg;
+};
+
+/*
+ * NAND controller data struct
+ *
+ * @controller:			base controller structure
+ * @host_list:			list containing all the chips attached to the
+ *				controller
+ * @dev:			parent device
+ * @base:			MMIO base
+ * @base_dma:			physical base address of controller registers
+ * @core_clk:			controller clock
+ * @aon_clk:			another controller clock
+ *
+ * @chan:			dma channel
+ * @cmd_crci:			ADM DMA CRCI for command flow control
+ * @data_crci:			ADM DMA CRCI for data flow control
+ * @desc_list:			DMA descriptor list (list of desc_infos)
+ *
+ * @data_buffer:		our local DMA buffer for page read/writes,
+ *				used when we can't use the buffer provided
+ *				by upper layers directly
+ * @buf_size/count/start:	markers for chip->read_buf/write_buf functions
+ * @reg_read_buf:		local buffer for reading back registers via DMA
+ * @reg_read_pos:		marker for data read in reg_read_buf
+ *
+ * @regs:			a contiguous chunk of memory for DMA register
+ *				writes. contains the register values to be
+ *				written to controller
+ * @cmd1/vld:			some fixed controller register values
+ * @ecc_modes:			supported ECC modes by the current controller,
+ *				initialized via DT match data
+ */
+struct qcom_nand_controller {
+	struct nand_hw_control controller;
+	struct list_head host_list;
+
+	struct device *dev;
+
+	void __iomem *base;
+	dma_addr_t base_dma;
+
+	struct clk *core_clk;
+	struct clk *aon_clk;
+
+	struct dma_chan *chan;
+	unsigned int cmd_crci;
+	unsigned int data_crci;
+	struct list_head desc_list;
+
+	u8		*data_buffer;
+	int		buf_size;
+	int		buf_count;
+	int		buf_start;
+
+	__le32 *reg_read_buf;
+	int reg_read_pos;
+
+	struct nandc_regs *regs;
+
+	u32 cmd1, vld;
+	u32 ecc_modes;
+};
+
+/*
+ * NAND chip structure
+ *
+ * @chip:			base NAND chip structure
+ * @node:			list node to add itself to host_list in
+ *				qcom_nand_controller
+ *
+ * @cs:				chip select value for this chip
+ * @cw_size:			the number of bytes in a single step/codeword
+ *				of a page, consisting of all data, ecc, spare
+ *				and reserved bytes
+ * @cw_data:			the number of bytes within a codeword protected
+ *				by ECC
+ * @use_ecc:			request the controller to use ECC for the
+ *				upcoming read/write
+ * @bch_enabled:		flag to tell whether BCH ECC mode is used
+ * @ecc_bytes_hw:		ECC bytes used by controller hardware for this
+ *				chip
+ * @status:			value to be returned if NAND_CMD_STATUS command
+ *				is executed
+ * @last_command:		keeps track of last command on this chip. used
+ *				for reading correct status
+ *
+ * @cfg0, cfg1, cfg0_raw..:	NANDc register configurations needed for
+ *				ecc/non-ecc mode for the current nand flash
+ *				device
+ */
+struct qcom_nand_host {
+	struct nand_chip chip;
+	struct list_head node;
+
+	int cs;
+	int cw_size;
+	int cw_data;
+	bool use_ecc;
+	bool bch_enabled;
+	int ecc_bytes_hw;
+	int spare_bytes;
+	int bbm_size;
+	u8 status;
+	int last_command;
+
+	u32 cfg0, cfg1;
+	u32 cfg0_raw, cfg1_raw;
+	u32 ecc_buf_cfg;
+	u32 ecc_bch_cfg;
+	u32 clrflashstatus;
+	u32 clrreadstatus;
+};
+
+static inline struct qcom_nand_host *to_qcom_nand_host(struct nand_chip *chip)
+{
+	return container_of(chip, struct qcom_nand_host, chip);
+}
+
+static inline struct qcom_nand_controller *
+get_qcom_nand_controller(struct nand_chip *chip)
+{
+	return container_of(chip->controller, struct qcom_nand_controller,
+			    controller);
+}
+
+static inline u32 nandc_read(struct qcom_nand_controller *nandc, int offset)
+{
+	return ioread32(nandc->base + offset);
+}
+
+static inline void nandc_write(struct qcom_nand_controller *nandc, int offset,
+			       u32 val)
+{
+	iowrite32(val, nandc->base + offset);
+}
+
+static __le32 *offset_to_nandc_reg(struct nandc_regs *regs, int offset)
+{
+	switch (offset) {
+	case NAND_FLASH_CMD:
+		return &regs->cmd;
+	case NAND_ADDR0:
+		return &regs->addr0;
+	case NAND_ADDR1:
+		return &regs->addr1;
+	case NAND_FLASH_CHIP_SELECT:
+		return &regs->chip_sel;
+	case NAND_EXEC_CMD:
+		return &regs->exec;
+	case NAND_FLASH_STATUS:
+		return &regs->clrflashstatus;
+	case NAND_DEV0_CFG0:
+		return &regs->cfg0;
+	case NAND_DEV0_CFG1:
+		return &regs->cfg1;
+	case NAND_DEV0_ECC_CFG:
+		return &regs->ecc_bch_cfg;
+	case NAND_READ_STATUS:
+		return &regs->clrreadstatus;
+	case NAND_DEV_CMD1:
+		return &regs->cmd1;
+	case NAND_DEV_CMD1_RESTORE:
+		return &regs->orig_cmd1;
+	case NAND_DEV_CMD_VLD:
+		return &regs->vld;
+	case NAND_DEV_CMD_VLD_RESTORE:
+		return &regs->orig_vld;
+	case NAND_EBI2_ECC_BUF_CFG:
+		return &regs->ecc_buf_cfg;
+	default:
+		return NULL;
+	}
+}
+
+static void nandc_set_reg(struct qcom_nand_controller *nandc, int offset,
+			  u32 val)
+{
+	struct nandc_regs *regs = nandc->regs;
+	__le32 *reg;
+
+	reg = offset_to_nandc_reg(regs, offset);
+
+	if (reg)
+		*reg = cpu_to_le32(val);
+}
+
+/* helper to configure address register values */
+static void set_address(struct qcom_nand_host *host, u16 column, int page)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	if (chip->options & NAND_BUSWIDTH_16)
+		column >>= 1;
+
+	nandc_set_reg(nandc, NAND_ADDR0, page << 16 | column);
+	nandc_set_reg(nandc, NAND_ADDR1, page >> 16 & 0xff);
+}
+
+/*
+ * update_rw_regs:	set up read/write register values; these will be
+ *			written to the NAND controller registers via DMA
+ *
+ * @num_cw:		number of steps for the read/write operation
+ * @read:		read or write operation
+ */
+static void update_rw_regs(struct qcom_nand_host *host, int num_cw, bool read)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	u32 cmd, cfg0, cfg1, ecc_bch_cfg;
+
+	if (read) {
+		if (host->use_ecc)
+			cmd = PAGE_READ_WITH_ECC | PAGE_ACC | LAST_PAGE;
+		else
+			cmd = PAGE_READ | PAGE_ACC | LAST_PAGE;
+	} else {
+		cmd = PROGRAM_PAGE | PAGE_ACC | LAST_PAGE;
+	}
+
+	if (host->use_ecc) {
+		cfg0 = (host->cfg0 & ~(7U << CW_PER_PAGE)) |
+				(num_cw - 1) << CW_PER_PAGE;
+
+		cfg1 = host->cfg1;
+		ecc_bch_cfg = host->ecc_bch_cfg;
+	} else {
+		cfg0 = (host->cfg0_raw & ~(7U << CW_PER_PAGE)) |
+				(num_cw - 1) << CW_PER_PAGE;
+
+		cfg1 = host->cfg1_raw;
+		ecc_bch_cfg = 1 << ECC_CFG_ECC_DISABLE;
+	}
+
+	nandc_set_reg(nandc, NAND_FLASH_CMD, cmd);
+	nandc_set_reg(nandc, NAND_DEV0_CFG0, cfg0);
+	nandc_set_reg(nandc, NAND_DEV0_CFG1, cfg1);
+	nandc_set_reg(nandc, NAND_DEV0_ECC_CFG, ecc_bch_cfg);
+	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, host->ecc_buf_cfg);
+	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+}
+
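+/*
+ * prep_dma_desc:	allocates and maps a scatterlist for 'vaddr', prepares
+ *			a slave DMA descriptor for the transfer and queues it
+ *			on the controller's desc_list. the descriptors are only
+ *			submitted later by submit_descs()
+ */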
+static int prep_dma_desc(struct qcom_nand_controller *nandc, bool read,
+			 int reg_off, const void *vaddr, int size,
+			 bool flow_control)
+{
+	struct desc_info *desc;
+	struct dma_async_tx_descriptor *dma_desc;
+	struct scatterlist *sgl;
+	struct dma_slave_config slave_conf;
+	enum dma_transfer_direction dir_eng;
+	int ret;
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	sgl = &desc->sgl;
+
+	sg_init_one(sgl, vaddr, size);
+
+	if (read) {
+		dir_eng = DMA_DEV_TO_MEM;
+		desc->dir = DMA_FROM_DEVICE;
+	} else {
+		dir_eng = DMA_MEM_TO_DEV;
+		desc->dir = DMA_TO_DEVICE;
+	}
+
+	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
+	if (ret == 0) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	memset(&slave_conf, 0x00, sizeof(slave_conf));
+
+	slave_conf.device_fc = flow_control;
+	if (read) {
+		slave_conf.src_maxburst = 16;
+		slave_conf.src_addr = nandc->base_dma + reg_off;
+		slave_conf.slave_id = nandc->data_crci;
+	} else {
+		slave_conf.dst_maxburst = 16;
+		slave_conf.dst_addr = nandc->base_dma + reg_off;
+		slave_conf.slave_id = nandc->cmd_crci;
+	}
+
+	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
+	if (ret) {
+		dev_err(nandc->dev, "failed to configure dma channel\n");
+		goto err;
+	}
+
+	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
+	if (!dma_desc) {
+		dev_err(nandc->dev, "failed to prepare desc\n");
+		ret = -EINVAL;
+		goto err;
+	}
+
+	desc->dma_desc = dma_desc;
+
+	list_add_tail(&desc->node, &nandc->desc_list);
+
+	return 0;
+err:
+	kfree(desc);
+
+	return ret;
+}
+
+/*
+ * read_reg_dma:	prepares a descriptor to read a given number of
+ *			contiguous registers to the reg_read_buf pointer
+ *
+ * @first:		offset of the first register in the contiguous block
+ * @num_regs:		number of registers to read
+ */
+static int read_reg_dma(struct qcom_nand_controller *nandc, int first,
+			int num_regs)
+{
+	bool flow_control = false;
+	void *vaddr;
+	int size;
+
+	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
+		flow_control = true;
+
+	size = num_regs * sizeof(u32);
+	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
+	nandc->reg_read_pos += num_regs;
+
+	return prep_dma_desc(nandc, true, first, vaddr, size, flow_control);
+}
+
+/*
+ * write_reg_dma:	prepares a descriptor to write a given number of
+ *			contiguous registers
+ *
+ * @first:		offset of the first register in the contiguous block
+ * @num_regs:		number of registers to write
+ */
+static int write_reg_dma(struct qcom_nand_controller *nandc, int first,
+			 int num_regs)
+{
+	bool flow_control = false;
+	struct nandc_regs *regs = nandc->regs;
+	void *vaddr;
+	int size;
+
+	vaddr = offset_to_nandc_reg(regs, first);
+
+	if (first == NAND_FLASH_CMD)
+		flow_control = true;
+
+	if (first == NAND_DEV_CMD1_RESTORE)
+		first = NAND_DEV_CMD1;
+
+	if (first == NAND_DEV_CMD_VLD_RESTORE)
+		first = NAND_DEV_CMD_VLD;
+
+	size = num_regs * sizeof(u32);
+
+	return prep_dma_desc(nandc, false, first, vaddr, size, flow_control);
+}
+
+/*
+ * read_data_dma:	prepares a DMA descriptor to transfer data from the
+ *			controller's internal buffer to the buffer 'vaddr'
+ *
+ * @reg_off:		offset within the controller's data buffer
+ * @vaddr:		virtual address of the buffer we want to write to
+ * @size:		DMA transaction size in bytes
+ */
+static int read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+			 const u8 *vaddr, int size)
+{
+	return prep_dma_desc(nandc, true, reg_off, vaddr, size, false);
+}
+
+/*
+ * write_data_dma:	prepares a DMA descriptor to transfer data from
+ *			'vaddr' to the controller's internal buffer
+ *
+ * @reg_off:		offset within the controller's data buffer
+ * @vaddr:		virtual address of the buffer we want to read from
+ * @size:		DMA transaction size in bytes
+ */
+static int write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
+			  const u8 *vaddr, int size)
+{
+	return prep_dma_desc(nandc, false, reg_off, vaddr, size, false);
+}
+
+/*
+ * helper to prepare dma descriptors to configure registers needed for reading a
+ * codeword/step in a page
+ */
+static void config_cw_read(struct qcom_nand_controller *nandc)
+{
+	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+
+	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+	read_reg_dma(nandc, NAND_FLASH_STATUS, 2);
+	read_reg_dma(nandc, NAND_ERASED_CW_DETECT_STATUS, 1);
+}
+
+/*
+ * helpers to prepare dma descriptors used to configure registers needed for
+ * writing a codeword/step in a page
+ */
+static void config_cw_write_pre(struct qcom_nand_controller *nandc)
+{
+	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+	write_reg_dma(nandc, NAND_DEV0_CFG0, 3);
+	write_reg_dma(nandc, NAND_EBI2_ECC_BUF_CFG, 1);
+}
+
+static void config_cw_write_post(struct qcom_nand_controller *nandc)
+{
+	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+	write_reg_dma(nandc, NAND_READ_STATUS, 1);
+}
+
+/*
+ * the following functions are used within chip->cmdfunc() to perform different
+ * NAND_CMD_* commands
+ */
+
+/* sets up descriptors for NAND_CMD_PARAM */
+static int nandc_param(struct qcom_nand_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	/*
+	 * NAND_CMD_PARAM is called before we know much about the FLASH chip
+	 * in use. we configure the controller to perform a raw read of 512
+	 * bytes to read onfi params
+	 */
+	nandc_set_reg(nandc, NAND_FLASH_CMD, PAGE_READ | PAGE_ACC | LAST_PAGE);
+	nandc_set_reg(nandc, NAND_ADDR0, 0);
+	nandc_set_reg(nandc, NAND_ADDR1, 0);
+	nandc_set_reg(nandc, NAND_DEV0_CFG0, 0 << CW_PER_PAGE
+					| 512 << UD_SIZE_BYTES
+					| 5 << NUM_ADDR_CYCLES
+					| 0 << SPARE_SIZE_BYTES);
+	nandc_set_reg(nandc, NAND_DEV0_CFG1, 7 << NAND_RECOVERY_CYCLES
+					| 0 << CS_ACTIVE_BSY
+					| 17 << BAD_BLOCK_BYTE_NUM
+					| 1 << BAD_BLOCK_IN_SPARE_AREA
+					| 2 << WR_RD_BSY_GAP
+					| 0 << WIDE_FLASH
+					| 1 << DEV0_CFG1_ECC_DISABLE);
+	nandc_set_reg(nandc, NAND_EBI2_ECC_BUF_CFG, 1 << ECC_CFG_ECC_DISABLE);
+
+	/* configure CMD1 and VLD for ONFI param probing */
+	nandc_set_reg(nandc, NAND_DEV_CMD_VLD,
+		      (nandc->vld & ~(1 << READ_START_VLD))
+		      | 0 << READ_START_VLD);
+	nandc_set_reg(nandc, NAND_DEV_CMD1,
+		      (nandc->cmd1 & ~(0xFF << READ_ADDR))
+		      | NAND_CMD_PARAM << READ_ADDR);
+
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+	nandc_set_reg(nandc, NAND_DEV_CMD1_RESTORE, nandc->cmd1);
+	nandc_set_reg(nandc, NAND_DEV_CMD_VLD_RESTORE, nandc->vld);
+
+	write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1);
+	write_reg_dma(nandc, NAND_DEV_CMD1, 1);
+
+	nandc->buf_count = 512;
+	memset(nandc->data_buffer, 0xff, nandc->buf_count);
+
+	config_cw_read(nandc);
+
+	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+		      nandc->buf_count);
+
+	/* restore CMD1 and VLD regs */
+	write_reg_dma(nandc, NAND_DEV_CMD1_RESTORE, 1);
+	write_reg_dma(nandc, NAND_DEV_CMD_VLD_RESTORE, 1);
+
+	return 0;
+}
+
+/* sets up descriptors for NAND_CMD_ERASE1 */
+static int erase_block(struct qcom_nand_host *host, int page_addr)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	nandc_set_reg(nandc, NAND_FLASH_CMD,
+		      BLOCK_ERASE | PAGE_ACC | LAST_PAGE);
+	nandc_set_reg(nandc, NAND_ADDR0, page_addr);
+	nandc_set_reg(nandc, NAND_ADDR1, 0);
+	nandc_set_reg(nandc, NAND_DEV0_CFG0,
+		      host->cfg0_raw & ~(7 << CW_PER_PAGE));
+	nandc_set_reg(nandc, NAND_DEV0_CFG1, host->cfg1_raw);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+	nandc_set_reg(nandc, NAND_FLASH_STATUS, host->clrflashstatus);
+	nandc_set_reg(nandc, NAND_READ_STATUS, host->clrreadstatus);
+
+	write_reg_dma(nandc, NAND_FLASH_CMD, 3);
+	write_reg_dma(nandc, NAND_DEV0_CFG0, 2);
+	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+	write_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+	write_reg_dma(nandc, NAND_READ_STATUS, 1);
+
+	return 0;
+}
+
+/* sets up descriptors for NAND_CMD_READID */
+static int read_id(struct qcom_nand_host *host, int column)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	if (column == -1)
+		return 0;
+
+	nandc_set_reg(nandc, NAND_FLASH_CMD, FETCH_ID);
+	nandc_set_reg(nandc, NAND_ADDR0, column);
+	nandc_set_reg(nandc, NAND_ADDR1, 0);
+	nandc_set_reg(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+	write_reg_dma(nandc, NAND_FLASH_CMD, 4);
+	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+	read_reg_dma(nandc, NAND_READ_ID, 1);
+
+	return 0;
+}
+
+/* sets up descriptors for NAND_CMD_RESET */
+static int reset(struct qcom_nand_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	nandc_set_reg(nandc, NAND_FLASH_CMD, RESET_DEVICE);
+	nandc_set_reg(nandc, NAND_EXEC_CMD, 1);
+
+	write_reg_dma(nandc, NAND_FLASH_CMD, 1);
+	write_reg_dma(nandc, NAND_EXEC_CMD, 1);
+
+	read_reg_dma(nandc, NAND_FLASH_STATUS, 1);
+
+	return 0;
+}
+
+/* helpers to submit/free our list of dma descriptors */
+static int submit_descs(struct qcom_nand_controller *nandc)
+{
+	struct desc_info *desc;
+	dma_cookie_t cookie = 0;
+
+	list_for_each_entry(desc, &nandc->desc_list, node)
+		cookie = dmaengine_submit(desc->dma_desc);
+
+	if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void free_descs(struct qcom_nand_controller *nandc)
+{
+	struct desc_info *desc, *n;
+
+	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
+		list_del(&desc->node);
+		dma_unmap_sg(nandc->dev, &desc->sgl, 1, desc->dir);
+		kfree(desc);
+	}
+}
+
+/* reset the register read buffer for next NAND operation */
+static void clear_read_regs(struct qcom_nand_controller *nandc)
+{
+	nandc->reg_read_pos = 0;
+	memset(nandc->reg_read_buf, 0,
+	       MAX_REG_RD * sizeof(*nandc->reg_read_buf));
+}
+
+static void pre_command(struct qcom_nand_host *host, int command)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	nandc->buf_count = 0;
+	nandc->buf_start = 0;
+	host->use_ecc = false;
+	host->last_command = command;
+
+	clear_read_regs(nandc);
+}
+
+/*
+ * this is called after NAND_CMD_PAGEPROG and NAND_CMD_ERASE1 to set our
+ * privately maintained status byte; this status byte can be read back
+ * after NAND_CMD_STATUS is called
+ */
+static void parse_erase_write_errors(struct qcom_nand_host *host, int command)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int num_cw;
+	int i;
+
+	num_cw = command == NAND_CMD_PAGEPROG ? ecc->steps : 1;
+
+	for (i = 0; i < num_cw; i++) {
+		u32 flash_status = le32_to_cpu(nandc->reg_read_buf[i]);
+
+		if (flash_status & FS_MPU_ERR)
+			host->status &= ~NAND_STATUS_WP;
+
+		if (flash_status & FS_OP_ERR || (i == (num_cw - 1) &&
+						 (flash_status &
+						  FS_DEVICE_STS_ERR)))
+			host->status |= NAND_STATUS_FAIL;
+	}
+}
+
+static void post_command(struct qcom_nand_host *host, int command)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	switch (command) {
+	case NAND_CMD_READID:
+		memcpy(nandc->data_buffer, nandc->reg_read_buf,
+		       nandc->buf_count);
+		break;
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_ERASE1:
+		parse_erase_write_errors(host, command);
+		break;
+	default:
+		break;
+	}
+}
+
+/*
+ * Implements chip->cmdfunc. It's only used for a limited set of commands.
+ * The rest of the commands wouldn't be called by upper layers. For example,
+ * NAND_CMD_READOOB would never be called because we have our own versions
+ * of read_oob ops for nand_ecc_ctrl.
+ */
+static void qcom_nandc_command(struct mtd_info *mtd, unsigned int command,
+			       int column, int page_addr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	bool wait = false;
+	int ret = 0;
+
+	pre_command(host, command);
+
+	switch (command) {
+	case NAND_CMD_RESET:
+		ret = reset(host);
+		wait = true;
+		break;
+
+	case NAND_CMD_READID:
+		nandc->buf_count = 4;
+		ret = read_id(host, column);
+		wait = true;
+		break;
+
+	case NAND_CMD_PARAM:
+		ret = nandc_param(host);
+		wait = true;
+		break;
+
+	case NAND_CMD_ERASE1:
+		ret = erase_block(host, page_addr);
+		wait = true;
+		break;
+
+	case NAND_CMD_READ0:
+		/* we read the entire page for now */
+		WARN_ON(column != 0);
+
+		host->use_ecc = true;
+		set_address(host, 0, page_addr);
+		update_rw_regs(host, ecc->steps, true);
+		break;
+
+	case NAND_CMD_SEQIN:
+		WARN_ON(column != 0);
+		set_address(host, 0, page_addr);
+		break;
+
+	case NAND_CMD_PAGEPROG:
+	case NAND_CMD_STATUS:
+	case NAND_CMD_NONE:
+	default:
+		break;
+	}
+
+	if (ret) {
+		dev_err(nandc->dev, "failure executing command %d\n",
+			command);
+		free_descs(nandc);
+		return;
+	}
+
+	if (wait) {
+		ret = submit_descs(nandc);
+		if (ret)
+			dev_err(nandc->dev,
+				"failure submitting descs for command %d\n",
+				command);
+	}
+
+	free_descs(nandc);
+
+	post_command(host, command);
+}
+
+/*
+ * when using BCH ECC, the HW flags an error in NAND_FLASH_STATUS if it read
+ * an erased CW, and reports an erased CW in NAND_ERASED_CW_DETECT_STATUS.
+ *
+ * when using RS ECC, the HW reports the same errors when reading an erased CW,
+ * but it notifies that it is an erased CW by placing special characters at
+ * certain offsets in the buffer.
+ *
+ * verify if the page is erased or not, and fix up the page for RS ECC by
+ * replacing the special characters with 0xff.
+ */
+static bool erased_chunk_check_and_fixup(u8 *data_buf, int data_len)
+{
+	u8 empty1, empty2;
+
+	/*
+	 * an erased page flags an error in NAND_FLASH_STATUS; check whether
+	 * the page is actually erased by looking for 0x54 at offsets 3 and
+	 * 175 from the beginning of each codeword
+	 */
+
+	empty1 = data_buf[3];
+	empty2 = data_buf[175];
+
+	/*
+	 * if the erased codeword markers are present, override them with
+	 * 0xffs
+	 */
+	if ((empty1 == 0x54 && empty2 == 0xff) ||
+	    (empty1 == 0xff && empty2 == 0x54)) {
+		data_buf[3] = 0xff;
+		data_buf[175] = 0xff;
+	}
+
+	/*
+	 * check if the entire chunk contains 0xffs or not. if it doesn't, then
+	 * restore the original values at the special offsets
+	 */
+	if (memchr_inv(data_buf, 0xff, data_len)) {
+		data_buf[3] = empty1;
+		data_buf[175] = empty2;
+
+		return false;
+	}
+
+	return true;
+}
+
+struct read_stats {
+	__le32 flash;
+	__le32 buffer;
+	__le32 erased_cw;
+};
+
+/*
+ * reads back status registers set by the controller to notify page read
+ * errors. this is equivalent to what 'ecc->correct()' would do.
+ */
+static int parse_read_errors(struct qcom_nand_host *host, u8 *data_buf,
+			     u8 *oob_buf)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	unsigned int max_bitflips = 0;
+	struct read_stats *buf;
+	int i;
+
+	buf = (struct read_stats *)nandc->reg_read_buf;
+
+	for (i = 0; i < ecc->steps; i++, buf++) {
+		u32 flash, buffer, erased_cw;
+		int data_len, oob_len;
+
+		if (i == (ecc->steps - 1)) {
+			data_len = ecc->size - ((ecc->steps - 1) << 2);
+			oob_len = ecc->steps << 2;
+		} else {
+			data_len = host->cw_data;
+			oob_len = 0;
+		}
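+		/*
+		 * worked example (illustrative): for a 2K page with
+		 * ecc->size = 512 and ecc->steps = 4, the last codeword
+		 * covers 512 - (3 << 2) = 500 data bytes plus 4 << 2 = 16
+		 * free oob bytes, while the other codewords cover the full
+		 * host->cw_data (516) bytes
+		 */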
+
+		flash = le32_to_cpu(buf->flash);
+		buffer = le32_to_cpu(buf->buffer);
+		erased_cw = le32_to_cpu(buf->erased_cw);
+
+		if (flash & (FS_OP_ERR | FS_MPU_ERR)) {
+			bool erased;
+
+			/* ignore erased codeword errors */
+			if (host->bch_enabled) {
+				erased = (erased_cw & ERASED_CW) == ERASED_CW ?
+					 true : false;
+			} else {
+				erased = erased_chunk_check_and_fixup(data_buf,
+								      data_len);
+			}
+
+			if (erased) {
+				data_buf += data_len;
+				if (oob_buf)
+					oob_buf += oob_len + ecc->bytes;
+				continue;
+			}
+
+			if (buffer & BS_UNCORRECTABLE_BIT) {
+				int ret, ecclen, extraooblen;
+				void *eccbuf;
+
+				eccbuf = oob_buf ? oob_buf + oob_len : NULL;
+				ecclen = oob_buf ? host->ecc_bytes_hw : 0;
+				extraooblen = oob_buf ? oob_len : 0;
+
+				/*
+				 * make sure it isn't an erased page reported
+				 * as not-erased by HW because of a few bitflips
+				 */
+				ret = nand_check_erased_ecc_chunk(data_buf,
+					data_len, eccbuf, ecclen, oob_buf,
+					extraooblen, ecc->strength);
+				if (ret < 0) {
+					mtd->ecc_stats.failed++;
+				} else {
+					mtd->ecc_stats.corrected += ret;
+					max_bitflips =
+						max_t(unsigned int, max_bitflips, ret);
+				}
+			}
+		} else {
+			unsigned int stat;
+
+			stat = buffer & BS_CORRECTABLE_ERR_MSK;
+			mtd->ecc_stats.corrected += stat;
+			max_bitflips = max(max_bitflips, stat);
+		}
+
+		data_buf += data_len;
+		if (oob_buf)
+			oob_buf += oob_len + ecc->bytes;
+	}
+
+	return max_bitflips;
+}
+
+/*
+ * helper to perform the actual page read operation, used by ecc->read_page()
+ * and ecc->read_oob()
+ */
+static int read_page_ecc(struct qcom_nand_host *host, u8 *data_buf,
+			 u8 *oob_buf)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int i, ret;
+
+	/* queue cmd descs for each codeword */
+	for (i = 0; i < ecc->steps; i++) {
+		int data_size, oob_size;
+
+		if (i == (ecc->steps - 1)) {
+			data_size = ecc->size - ((ecc->steps - 1) << 2);
+			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+				   host->spare_bytes;
+		} else {
+			data_size = host->cw_data;
+			oob_size = host->ecc_bytes_hw + host->spare_bytes;
+		}
+
+		config_cw_read(nandc);
+
+		if (data_buf)
+			read_data_dma(nandc, FLASH_BUF_ACC, data_buf,
+				      data_size);
+
+		/*
+		 * when ecc is enabled, the controller doesn't read the real
+		 * or dummy bad block markers in each chunk. To maintain a
+		 * consistent layout across RAW and ECC reads, we just
+		 * leave the real/dummy BBM offsets empty (i.e., filled with
+		 * 0xffs)
+		 */
+		if (oob_buf) {
+			int j;
+
+			for (j = 0; j < host->bbm_size; j++)
+				*oob_buf++ = 0xff;
+
+			read_data_dma(nandc, FLASH_BUF_ACC + data_size,
+				      oob_buf, oob_size);
+		}
+
+		if (data_buf)
+			data_buf += data_size;
+		if (oob_buf)
+			oob_buf += oob_size;
+	}
+
+	ret = submit_descs(nandc);
+	if (ret)
+		dev_err(nandc->dev, "failure to read page/oob\n");
+
+	free_descs(nandc);
+
+	return ret;
+}
+
+/*
+ * a helper that copies the last step/codeword of a page (containing free oob)
+ * into our local buffer
+ */
+static int copy_last_cw(struct qcom_nand_host *host, int page)
+{
+	struct nand_chip *chip = &host->chip;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int size;
+	int ret;
+
+	clear_read_regs(nandc);
+
+	size = host->use_ecc ? host->cw_data : host->cw_size;
+
+	/* prepare a clean read buffer */
+	memset(nandc->data_buffer, 0xff, size);
+
+	set_address(host, host->cw_size * (ecc->steps - 1), page);
+	update_rw_regs(host, 1, true);
+
+	config_cw_read(nandc);
+
+	read_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, size);
+
+	ret = submit_descs(nandc);
+	if (ret)
+		dev_err(nandc->dev, "failed to copy last codeword\n");
+
+	free_descs(nandc);
+
+	return ret;
+}
+
+/* implements ecc->read_page() */
+static int qcom_nandc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	u8 *data_buf, *oob_buf = NULL;
+	int ret;
+
+	data_buf = buf;
+	oob_buf = oob_required ? chip->oob_poi : NULL;
+
+	ret = read_page_ecc(host, data_buf, oob_buf);
+	if (ret) {
+		dev_err(nandc->dev, "failure to read page\n");
+		return ret;
+	}
+
+	return parse_read_errors(host, data_buf, oob_buf);
+}
+
+/* implements ecc->read_page_raw() */
+static int qcom_nandc_read_page_raw(struct mtd_info *mtd,
+				    struct nand_chip *chip, uint8_t *buf,
+				    int oob_required, int page)
+{
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	u8 *data_buf, *oob_buf;
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int i, ret;
+
+	data_buf = buf;
+	oob_buf = chip->oob_poi;
+
+	host->use_ecc = false;
+	update_rw_regs(host, ecc->steps, true);
+
+	for (i = 0; i < ecc->steps; i++) {
+		int data_size1, data_size2, oob_size1, oob_size2;
+		int reg_off = FLASH_BUF_ACC;
+
+		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+		oob_size1 = host->bbm_size;
+
+		if (i == (ecc->steps - 1)) {
+			data_size2 = ecc->size - data_size1 -
+				     ((ecc->steps - 1) << 2);
+			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+				    host->spare_bytes;
+		} else {
+			data_size2 = host->cw_data - data_size1;
+			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+		}
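+		/*
+		 * worked example (illustrative): for a 2K page with four
+		 * 528 byte codewords, data_size1 = 2048 - 3 * 528 = 464, i.e.
+		 * the data bytes that come before the dummy/real BBM byte(s)
+		 * in each raw codeword; data_size2 covers the data bytes that
+		 * follow it
+		 */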
+
+		config_cw_read(nandc);
+
+		read_data_dma(nandc, reg_off, data_buf, data_size1);
+		reg_off += data_size1;
+		data_buf += data_size1;
+
+		read_data_dma(nandc, reg_off, oob_buf, oob_size1);
+		reg_off += oob_size1;
+		oob_buf += oob_size1;
+
+		read_data_dma(nandc, reg_off, data_buf, data_size2);
+		reg_off += data_size2;
+		data_buf += data_size2;
+
+		read_data_dma(nandc, reg_off, oob_buf, oob_size2);
+		oob_buf += oob_size2;
+	}
+
+	ret = submit_descs(nandc);
+	if (ret)
+		dev_err(nandc->dev, "failure to read raw page\n");
+
+	free_descs(nandc);
+
+	return 0;
+}
+
+/* implements ecc->read_oob() */
+static int qcom_nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			       int page)
+{
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ret;
+
+	clear_read_regs(nandc);
+
+	host->use_ecc = true;
+	set_address(host, 0, page);
+	update_rw_regs(host, ecc->steps, true);
+
+	ret = read_page_ecc(host, NULL, chip->oob_poi);
+	if (ret)
+		dev_err(nandc->dev, "failure to read oob\n");
+
+	return ret;
+}
+
+/* implements ecc->write_page() */
+static int qcom_nandc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				 const uint8_t *buf, int oob_required, int page)
+{
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	u8 *data_buf, *oob_buf;
+	int i, ret;
+
+	clear_read_regs(nandc);
+
+	data_buf = (u8 *)buf;
+	oob_buf = chip->oob_poi;
+
+	host->use_ecc = true;
+	update_rw_regs(host, ecc->steps, false);
+
+	for (i = 0; i < ecc->steps; i++) {
+		int data_size, oob_size;
+
+		if (i == (ecc->steps - 1)) {
+			data_size = ecc->size - ((ecc->steps - 1) << 2);
+			oob_size = (ecc->steps << 2) + host->ecc_bytes_hw +
+				   host->spare_bytes;
+		} else {
+			data_size = host->cw_data;
+			oob_size = ecc->bytes;
+		}
+
+		config_cw_write_pre(nandc);
+
+		write_data_dma(nandc, FLASH_BUF_ACC, data_buf, data_size);
+
+		/*
+		 * when ECC is enabled, we don't really need to write anything
+		 * to oob for the first n - 1 codewords since these oob regions
+		 * just contain ECC bytes that are written by the controller
+		 * itself. For the last codeword, we skip the bbm positions and
+		 * write to the free oob area.
+		 */
+		if (i == (ecc->steps - 1)) {
+			oob_buf += host->bbm_size;
+
+			write_data_dma(nandc, FLASH_BUF_ACC + data_size,
+				       oob_buf, oob_size);
+		}
+
+		config_cw_write_post(nandc);
+
+		data_buf += data_size;
+		oob_buf += oob_size;
+	}
+
+	ret = submit_descs(nandc);
+	if (ret)
+		dev_err(nandc->dev, "failure to write page\n");
+
+	free_descs(nandc);
+
+	return ret;
+}
+
+/* implements ecc->write_page_raw() */
+static int qcom_nandc_write_page_raw(struct mtd_info *mtd,
+				     struct nand_chip *chip, const uint8_t *buf,
+				     int oob_required, int page)
+{
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	u8 *data_buf, *oob_buf;
+	int i, ret;
+
+	clear_read_regs(nandc);
+
+	data_buf = (u8 *)buf;
+	oob_buf = chip->oob_poi;
+
+	host->use_ecc = false;
+	update_rw_regs(host, ecc->steps, false);
+
+	for (i = 0; i < ecc->steps; i++) {
+		int data_size1, data_size2, oob_size1, oob_size2;
+		int reg_off = FLASH_BUF_ACC;
+
+		data_size1 = mtd->writesize - host->cw_size * (ecc->steps - 1);
+		oob_size1 = host->bbm_size;
+
+		if (i == (ecc->steps - 1)) {
+			data_size2 = ecc->size - data_size1 -
+				     ((ecc->steps - 1) << 2);
+			oob_size2 = (ecc->steps << 2) + host->ecc_bytes_hw +
+				    host->spare_bytes;
+		} else {
+			data_size2 = host->cw_data - data_size1;
+			oob_size2 = host->ecc_bytes_hw + host->spare_bytes;
+		}
+
+		config_cw_write_pre(nandc);
+
+		write_data_dma(nandc, reg_off, data_buf, data_size1);
+		reg_off += data_size1;
+		data_buf += data_size1;
+
+		write_data_dma(nandc, reg_off, oob_buf, oob_size1);
+		reg_off += oob_size1;
+		oob_buf += oob_size1;
+
+		write_data_dma(nandc, reg_off, data_buf, data_size2);
+		reg_off += data_size2;
+		data_buf += data_size2;
+
+		write_data_dma(nandc, reg_off, oob_buf, oob_size2);
+		oob_buf += oob_size2;
+
+		config_cw_write_post(nandc);
+	}
+
+	ret = submit_descs(nandc);
+	if (ret)
+		dev_err(nandc->dev, "failure to write raw page\n");
+
+	free_descs(nandc);
+
+	return ret;
+}
+
+/*
+ * implements ecc->write_oob()
+ *
+ * the NAND controller cannot write only data or only oob within a codeword,
+ * since ecc is calculated for the combined codeword. we first copy the
+ * entire contents of the last codeword (data + oob), replace the old oob
+ * with the new one in chip->oob_poi, and then write the entire codeword.
+ * this read-copy-write operation results in a slight performance loss.
+ */
+static int qcom_nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
+				int page)
+{
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	u8 *oob = chip->oob_poi;
+	int data_size, oob_size;
+	int ret, status = 0;
+
+	host->use_ecc = true;
+
+	ret = copy_last_cw(host, page);
+	if (ret)
+		return ret;
+
+	clear_read_regs(nandc);
+
+	/* calculate the data and oob size for the last codeword/step */
+	data_size = ecc->size - ((ecc->steps - 1) << 2);
+	oob_size = mtd->oobavail;
+
+	/* overwrite the last codeword's oob area with the new oob content */
+	mtd_ooblayout_get_databytes(mtd, nandc->data_buffer + data_size, oob,
+				    0, mtd->oobavail);
+
+	set_address(host, host->cw_size * (ecc->steps - 1), page);
+	update_rw_regs(host, 1, false);
+
+	config_cw_write_pre(nandc);
+	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer,
+		       data_size + oob_size);
+	config_cw_write_post(nandc);
+
+	ret = submit_descs(nandc);
+
+	free_descs(nandc);
+
+	if (ret) {
+		dev_err(nandc->dev, "failure to write oob\n");
+		return -EIO;
+	}
+
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static int qcom_nandc_block_bad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int page, ret, bbpos, bad = 0;
+	u32 flash_status;
+
+	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+	/*
+	 * configure registers for a raw sub page read. the address is set to
+	 * the beginning of the last codeword; we don't care about reading the
+	 * ecc portion of the oob, we just want the first few bytes of this
+	 * codeword that contain the BBM
+	 */
+	host->use_ecc = false;
+
+	ret = copy_last_cw(host, page);
+	if (ret)
+		goto err;
+
+	flash_status = le32_to_cpu(nandc->reg_read_buf[0]);
+
+	if (flash_status & (FS_OP_ERR | FS_MPU_ERR)) {
+		dev_warn(nandc->dev, "error when trying to read BBM\n");
+		goto err;
+	}
+
+	bbpos = mtd->writesize - host->cw_size * (ecc->steps - 1);
+
+	bad = nandc->data_buffer[bbpos] != 0xff;
+
+	if (chip->options & NAND_BUSWIDTH_16)
+		bad = bad || (nandc->data_buffer[bbpos + 1] != 0xff);
+err:
+	return bad;
+}
+
+static int qcom_nandc_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int page, ret, status = 0;
+
+	clear_read_regs(nandc);
+
+	/*
+	 * to mark the block as bad, we flash the entire last codeword with 0s.
+	 * we don't care about the rest of the content in the codeword since
+	 * we aren't going to use this block again
+	 */
+	memset(nandc->data_buffer, 0x00, host->cw_size);
+
+	page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+
+	/* prepare write */
+	host->use_ecc = false;
+	set_address(host, host->cw_size * (ecc->steps - 1), page);
+	update_rw_regs(host, 1, false);
+
+	config_cw_write_pre(nandc);
+	write_data_dma(nandc, FLASH_BUF_ACC, nandc->data_buffer, host->cw_size);
+	config_cw_write_post(nandc);
+
+	ret = submit_descs(nandc);
+
+	free_descs(nandc);
+
+	if (ret) {
+		dev_err(nandc->dev, "failure to update BBM\n");
+		return -EIO;
+	}
+
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+/*
+ * the three functions below implement chip->read_byte(), chip->read_buf()
+ * and chip->write_buf() respectively. these aren't used for
+ * reading/writing page data; they are used for smaller data such as
+ * reading the id, status, etc.
+ */
+static uint8_t qcom_nandc_read_byte(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	u8 *buf = nandc->data_buffer;
+	u8 ret = 0x0;
+
+	if (host->last_command == NAND_CMD_STATUS) {
+		ret = host->status;
+
+		host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+		return ret;
+	}
+
+	if (nandc->buf_start < nandc->buf_count)
+		ret = buf[nandc->buf_start++];
+
+	return ret;
+}
+
+static void qcom_nandc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+	memcpy(buf, nandc->data_buffer + nandc->buf_start, real_len);
+	nandc->buf_start += real_len;
+}
+
+static void qcom_nandc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+				 int len)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	int real_len = min_t(size_t, len, nandc->buf_count - nandc->buf_start);
+
+	memcpy(nandc->data_buffer + nandc->buf_start, buf, real_len);
+
+	nandc->buf_start += real_len;
+}
+
+/* we support only one external chip for now */
+static void qcom_nandc_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+
+	if (chipnr <= 0)
+		return;
+
+	dev_warn(nandc->dev, "invalid chip select\n");
+}
+
+/*
+ * NAND controller page layout info
+ *
+ * Layout with ECC enabled:
+ *
+ * |----------------------|  |---------------------------------|
+ * |           xx.......yy|  |             *********xx.......yy|
+ * |    DATA   xx..ECC..yy|  |    DATA     **SPARE**xx..ECC..yy|
+ * |   (516)   xx.......yy|  |  (516-n*4)  **(n*4)**xx.......yy|
+ * |           xx.......yy|  |             *********xx.......yy|
+ * |----------------------|  |---------------------------------|
+ *     codeword 1,2..n-1                  codeword n
+ *  <---(528/532 Bytes)-->    <-------(528/532 Bytes)--------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Reserved byte(s)
+ *
+ * 2K page: n = 4, spare = 16 bytes
+ * 4K page: n = 8, spare = 32 bytes
+ * 8K page: n = 16, spare = 64 bytes
+ *
+ * the qcom nand controller operates at a sub page/codeword level. each
+ * codeword is 528 and 532 bytes for 4 bit and 8 bit ECC modes respectively.
+ * the number of ECC bytes varies based on the ECC strength and the bus width.
+ *
+ * the first n - 1 codewords contain 516 bytes of user data, the remaining
+ * 12/16 bytes consist of ECC and reserved data. The nth codeword contains
+ * both user data and spare (oobavail) bytes that sum up to 516 bytes.
+ *
+ * When we access a page with ECC enabled, the reserved byte(s) are not
+ * accessible at all. When reading, we fill up these unreadable positions
+ * with 0xffs. When writing, the controller skips writing the inaccessible
+ * bytes.
+ *
+ * Layout with ECC disabled:
+ *
+ * |------------------------------|  |---------------------------------------|
+ * |         yy          xx.......|  |         bb          *********xx.......|
+ * |  DATA1  yy  DATA2   xx..ECC..|  |  DATA1  bb  DATA2   **SPARE**xx..ECC..|
+ * | (size1) yy (size2)  xx.......|  | (size1) bb (size2)  **(n*4)**xx.......|
+ * |         yy          xx.......|  |         bb          *********xx.......|
+ * |------------------------------|  |---------------------------------------|
+ *         codeword 1,2..n-1                        codeword n
+ *  <-------(528/532 Bytes)------>    <-----------(528/532 Bytes)----------->
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = Spare/free bytes
+ * x = Unused byte(s)
+ * y = Dummy Bad Block byte(s)
+ * b = Real Bad Block byte(s)
+ * size1/size2 = function of codeword size and 'n'
+ *
+ * when the ECC block is disabled, one reserved byte (or two for 16 bit bus
+ * width) is now accessible. For the first n - 1 codewords, these are dummy Bad
+ * Block Markers. In the last codeword, this position contains the real BBM.
+ *
+ * In order to have a consistent layout between RAW and ECC modes, we assume
+ * the following OOB layout arrangement:
+ *
+ * |-----------|  |--------------------|
+ * |yyxx.......|  |bb*********xx.......|
+ * |yyxx..ECC..|  |bb*FREEOOB*xx..ECC..|
+ * |yyxx.......|  |bb*********xx.......|
+ * |yyxx.......|  |bb*********xx.......|
+ * |-----------|  |--------------------|
+ *  first n - 1       nth OOB region
+ *  OOB regions
+ *
+ * n = Number of codewords in the page
+ * . = ECC bytes
+ * * = FREE OOB bytes
+ * y = Dummy bad block byte(s) (inaccessible when ECC enabled)
+ * x = Unused byte(s)
+ * b = Real bad block byte(s) (inaccessible when ECC enabled)
+ *
+ * This layout is read as is when ECC is disabled. When ECC is enabled, the
+ * inaccessible Bad Block byte(s) are ignored when we write to a page/oob,
+ * and assumed as 0xffs when we read a page/oob. The ECC, unused and
+ * dummy/real bad block bytes are grouped as ecc bytes (i.e, ecc->bytes is
+ * the sum of the three).
+ */
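+/*
+ * Worked example (illustrative, derived from the layout above): for a 2K
+ * page with 4 bit RS ECC on an 8-bit bus, n = 4 codewords of 528 bytes each.
+ * Codewords 1..3 carry 516 bytes of user data plus 12 non-data bytes
+ * (10 ECC, 1 spare, 1 dummy BBM); codeword 4 carries 500 bytes of user data,
+ * the 16 spare/free oob bytes and its own 12 non-data bytes. The user data
+ * adds up to 3 * 516 + 500 = 2048 bytes.
+ */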
+static int qcom_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				   struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section > 1)
+		return -ERANGE;
+
+	if (!section) {
+		oobregion->length = (ecc->bytes * (ecc->steps - 1)) +
+				    host->bbm_size;
+		oobregion->offset = 0;
+	} else {
+		oobregion->length = host->ecc_bytes_hw + host->spare_bytes;
+		oobregion->offset = mtd->oobsize - oobregion->length;
+	}
+
+	return 0;
+}
+
+static int qcom_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct qcom_nand_host *host = to_qcom_nand_host(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = ecc->steps * 4;
+	oobregion->offset = ((ecc->steps - 1) * ecc->bytes) + host->bbm_size;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops qcom_nand_ooblayout_ops = {
+	.ecc = qcom_nand_ooblayout_ecc,
+	.free = qcom_nand_ooblayout_free,
+};
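+
+/*
+ * Worked example (illustrative, assuming mtd->oobsize = 64 for a 2K page
+ * with 4 bit RS ECC on an 8-bit bus, i.e. ecc->bytes = 12, ecc->steps = 4,
+ * host->bbm_size = 1): the first ECC region spans 12 * 3 + 1 = 37 bytes at
+ * offset 0, the free region spans 4 * 4 = 16 bytes at offset 37, and the
+ * second ECC region spans 10 + 1 = 11 bytes at offset 53, which adds up to
+ * the 64 byte OOB area.
+ */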
+
+static int qcom_nand_host_setup(struct qcom_nand_host *host)
+{
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	struct qcom_nand_controller *nandc = get_qcom_nand_controller(chip);
+	int cwperpage, bad_block_byte;
+	bool wide_bus;
+	int ecc_mode = 1;
+
+	/*
+	 * the controller requires each step to consist of 512 bytes of data.
+	 * bail out if DT has populated a wrong step size.
+	 */
+	if (ecc->size != NANDC_STEP_SIZE) {
+		dev_err(nandc->dev, "invalid ecc size\n");
+		return -EINVAL;
+	}
+
+	wide_bus = chip->options & NAND_BUSWIDTH_16 ? true : false;
+
+	if (ecc->strength >= 8) {
+		/* 8 bit ECC defaults to BCH ECC on all platforms */
+		host->bch_enabled = true;
+		ecc_mode = 1;
+
+		if (wide_bus) {
+			host->ecc_bytes_hw = 14;
+			host->spare_bytes = 0;
+			host->bbm_size = 2;
+		} else {
+			host->ecc_bytes_hw = 13;
+			host->spare_bytes = 2;
+			host->bbm_size = 1;
+		}
+	} else {
+		/*
+		 * if the controller supports BCH for 4 bit ECC, the controller
+		 * uses fewer bytes for ECC. If RS is used, the ECC data is
+		 * always 10 bytes
+		 */
+		if (nandc->ecc_modes & ECC_BCH_4BIT) {
+			/* BCH */
+			host->bch_enabled = true;
+			ecc_mode = 0;
+
+			if (wide_bus) {
+				host->ecc_bytes_hw = 8;
+				host->spare_bytes = 2;
+				host->bbm_size = 2;
+			} else {
+				host->ecc_bytes_hw = 7;
+				host->spare_bytes = 4;
+				host->bbm_size = 1;
+			}
+		} else {
+			/* RS */
+			host->ecc_bytes_hw = 10;
+
+			if (wide_bus) {
+				host->spare_bytes = 0;
+				host->bbm_size = 2;
+			} else {
+				host->spare_bytes = 1;
+				host->bbm_size = 1;
+			}
+		}
+	}
+
+	/*
+	 * we consider ecc->bytes as the sum of all the non-data content in a
+	 * step. It gives us a clean representation of the oob area (even if
+	 * all the bytes aren't used for ECC). It is always 16 bytes for 8 bit
+	 * ECC and 12 bytes for 4 bit ECC
+	 */
+	ecc->bytes = host->ecc_bytes_hw + host->spare_bytes + host->bbm_size;
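+	/*
+	 * worked examples (illustrative): 8 bit ECC gives 13 + 2 + 1 = 16 on
+	 * an 8-bit bus and 14 + 0 + 2 = 16 on a 16-bit bus; 4 bit BCH gives
+	 * 7 + 4 + 1 = 12 on an 8-bit bus, and 4 bit RS gives 10 + 1 + 1 = 12
+	 * (10 + 0 + 2 = 12 on a 16-bit bus)
+	 */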
+
+	ecc->read_page		= qcom_nandc_read_page;
+	ecc->read_page_raw	= qcom_nandc_read_page_raw;
+	ecc->read_oob		= qcom_nandc_read_oob;
+	ecc->write_page		= qcom_nandc_write_page;
+	ecc->write_page_raw	= qcom_nandc_write_page_raw;
+	ecc->write_oob		= qcom_nandc_write_oob;
+
+	ecc->mode = NAND_ECC_HW;
+
+	mtd_set_ooblayout(mtd, &qcom_nand_ooblayout_ops);
+
+	cwperpage = mtd->writesize / ecc->size;
+
+	/*
+	 * DATA_UD_BYTES varies based on whether the read/write command protects
+	 * spare data with ECC too. We protect spare data by default, so we set
+	 * it to main + spare data, which are 512 and 4 bytes respectively.
+	 */
+	host->cw_data = 516;
+
+	/*
+	 * total bytes in a step, either 528 bytes for 4 bit ECC, or 532 bytes
+	 * for 8 bit ECC
+	 */
+	host->cw_size = host->cw_data + ecc->bytes;
+
+	if (ecc->bytes * (mtd->writesize / ecc->size) > mtd->oobsize) {
+		dev_err(nandc->dev, "ecc data doesn't fit in OOB area\n");
+		return -EINVAL;
+	}
+
+	bad_block_byte = mtd->writesize - host->cw_size * (cwperpage - 1) + 1;
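+	/*
+	 * illustrative arithmetic: for a 2K page with 528 byte codewords and
+	 * cwperpage = 4, this is 2048 - 3 * 528 + 1 = 465, one past the bbpos
+	 * value (464) used in qcom_nandc_block_bad()
+	 */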
+
+	host->cfg0 = (cwperpage - 1) << CW_PER_PAGE
+				| host->cw_data << UD_SIZE_BYTES
+				| 0 << DISABLE_STATUS_AFTER_WRITE
+				| 5 << NUM_ADDR_CYCLES
+				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_RS
+				| 0 << STATUS_BFR_READ
+				| 1 << SET_RD_MODE_AFTER_STATUS
+				| host->spare_bytes << SPARE_SIZE_BYTES;
+
+	host->cfg1 = 7 << NAND_RECOVERY_CYCLES
+				| 0 <<  CS_ACTIVE_BSY
+				| bad_block_byte << BAD_BLOCK_BYTE_NUM
+				| 0 << BAD_BLOCK_IN_SPARE_AREA
+				| 2 << WR_RD_BSY_GAP
+				| wide_bus << WIDE_FLASH
+				| host->bch_enabled << ENABLE_BCH_ECC;
+
+	host->cfg0_raw = (cwperpage - 1) << CW_PER_PAGE
+				| host->cw_size << UD_SIZE_BYTES
+				| 5 << NUM_ADDR_CYCLES
+				| 0 << SPARE_SIZE_BYTES;
+
+	host->cfg1_raw = 7 << NAND_RECOVERY_CYCLES
+				| 0 << CS_ACTIVE_BSY
+				| 17 << BAD_BLOCK_BYTE_NUM
+				| 1 << BAD_BLOCK_IN_SPARE_AREA
+				| 2 << WR_RD_BSY_GAP
+				| wide_bus << WIDE_FLASH
+				| 1 << DEV0_CFG1_ECC_DISABLE;
+
+	host->ecc_bch_cfg = host->bch_enabled << ECC_CFG_ECC_DISABLE
+				| 0 << ECC_SW_RESET
+				| host->cw_data << ECC_NUM_DATA_BYTES
+				| 1 << ECC_FORCE_CLK_OPEN
+				| ecc_mode << ECC_MODE
+				| host->ecc_bytes_hw << ECC_PARITY_SIZE_BYTES_BCH;
+
+	host->ecc_buf_cfg = 0x203 << NUM_STEPS;
+
+	host->clrflashstatus = FS_READY_BSY_N;
+	host->clrreadstatus = 0xc0;
+
+	dev_dbg(nandc->dev,
+		"cfg0 %x cfg1 %x ecc_buf_cfg %x ecc_bch cfg %x cw_size %d cw_data %d strength %d parity_bytes %d steps %d\n",
+		host->cfg0, host->cfg1, host->ecc_buf_cfg, host->ecc_bch_cfg,
+		host->cw_size, host->cw_data, ecc->strength, ecc->bytes,
+		cwperpage);
+
+	return 0;
+}
+
+static int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
+{
+	int ret;
+
+	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
+	if (ret) {
+		dev_err(nandc->dev, "failed to set DMA mask\n");
+		return ret;
+	}
+
+	/*
+	 * we use the internal buffer for reading ONFI params, reading small
+	 * data like ID and status, and performing read-copy-write operations
+	 * when partially writing to a codeword. 532 is the maximum possible
+	 * size of a codeword for our nand controller
+	 */
+	nandc->buf_size = 532;
+
+	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size,
+					GFP_KERNEL);
+	if (!nandc->data_buffer)
+		return -ENOMEM;
+
+	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs),
+					GFP_KERNEL);
+	if (!nandc->regs)
+		return -ENOMEM;
+
+	nandc->reg_read_buf = devm_kzalloc(nandc->dev,
+				MAX_REG_RD * sizeof(*nandc->reg_read_buf),
+				GFP_KERNEL);
+	if (!nandc->reg_read_buf)
+		return -ENOMEM;
+
+	nandc->chan = dma_request_slave_channel(nandc->dev, "rxtx");
+	if (!nandc->chan) {
+		dev_err(nandc->dev, "failed to request slave channel\n");
+		return -ENODEV;
+	}
+
+	INIT_LIST_HEAD(&nandc->desc_list);
+	INIT_LIST_HEAD(&nandc->host_list);
+
+	nand_hw_control_init(&nandc->controller);
+
+	return 0;
+}
+
+static void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
+{
+	dma_release_channel(nandc->chan);
+}
+
+/* one time setup of a few nand controller registers */
+static int qcom_nandc_setup(struct qcom_nand_controller *nandc)
+{
+	/* kill onenand */
+	nandc_write(nandc, SFLASHC_BURST_CFG, 0);
+
+	/* enable ADM DMA */
+	nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN);
+
+	/* save the original values of these registers */
+	nandc->cmd1 = nandc_read(nandc, NAND_DEV_CMD1);
+	nandc->vld = nandc_read(nandc, NAND_DEV_CMD_VLD);
+
+	return 0;
+}
+
+static int qcom_nand_host_init(struct qcom_nand_controller *nandc,
+			       struct qcom_nand_host *host,
+			       struct device_node *dn)
+{
+	struct nand_chip *chip = &host->chip;
+	struct mtd_info *mtd = nand_to_mtd(chip);
+	struct device *dev = nandc->dev;
+	int ret;
+
+	ret = of_property_read_u32(dn, "reg", &host->cs);
+	if (ret) {
+		dev_err(dev, "can't get chip-select\n");
+		return -ENXIO;
+	}
+
+	nand_set_flash_node(chip, dn);
+	mtd->name = devm_kasprintf(dev, GFP_KERNEL, "qcom_nand.%d", host->cs);
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = dev;
+
+	chip->cmdfunc		= qcom_nandc_command;
+	chip->select_chip	= qcom_nandc_select_chip;
+	chip->read_byte		= qcom_nandc_read_byte;
+	chip->read_buf		= qcom_nandc_read_buf;
+	chip->write_buf		= qcom_nandc_write_buf;
+
+	/*
+	 * the bad block marker is readable only when we read the last codeword
+	 * of a page with ECC disabled. currently, the nand_base and nand_bbt
+	 * helpers don't allow us to read BB from a nand chip with ECC
+	 * disabled (MTD_OPS_PLACE_OOB is set by default). use the block_bad
+	 * and block_markbad helpers until we permanently switch to using
+	 * MTD_OPS_RAW for all drivers (with the help of badblockbits)
+	 */
+	chip->block_bad		= qcom_nandc_block_bad;
+	chip->block_markbad	= qcom_nandc_block_markbad;
+
+	chip->controller = &nandc->controller;
+	chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_USE_BOUNCE_BUFFER |
+			 NAND_SKIP_BBTSCAN;
+
+	/* set up initial status value */
+	host->status = NAND_STATUS_READY | NAND_STATUS_WP;
+
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (ret)
+		return ret;
+
+	ret = qcom_nand_host_setup(host);
+	if (ret)
+		return ret;
+
+	ret = nand_scan_tail(mtd);
+	if (ret)
+		return ret;
+
+	return mtd_device_register(mtd, NULL, 0);
+}
+
+/* parse custom DT properties here */
+static int qcom_nandc_parse_dt(struct platform_device *pdev)
+{
+	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+	struct device_node *np = nandc->dev->of_node;
+	int ret;
+
+	ret = of_property_read_u32(np, "qcom,cmd-crci", &nandc->cmd_crci);
+	if (ret) {
+		dev_err(nandc->dev, "command CRCI unspecified\n");
+		return ret;
+	}
+
+	ret = of_property_read_u32(np, "qcom,data-crci", &nandc->data_crci);
+	if (ret) {
+		dev_err(nandc->dev, "data CRCI unspecified\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qcom_nandc_probe(struct platform_device *pdev)
+{
+	struct qcom_nand_controller *nandc;
+	struct qcom_nand_host *host;
+	const void *dev_data;
+	struct device *dev = &pdev->dev;
+	struct device_node *dn = dev->of_node, *child;
+	struct resource *res;
+	int ret;
+
+	nandc = devm_kzalloc(&pdev->dev, sizeof(*nandc), GFP_KERNEL);
+	if (!nandc)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, nandc);
+	nandc->dev = dev;
+
+	dev_data = of_device_get_match_data(dev);
+	if (!dev_data) {
+		dev_err(&pdev->dev, "failed to get device data\n");
+		return -ENODEV;
+	}
+
+	nandc->ecc_modes = (unsigned long)dev_data;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nandc->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(nandc->base))
+		return PTR_ERR(nandc->base);
+
+	nandc->base_dma = phys_to_dma(dev, (phys_addr_t)res->start);
+
+	nandc->core_clk = devm_clk_get(dev, "core");
+	if (IS_ERR(nandc->core_clk))
+		return PTR_ERR(nandc->core_clk);
+
+	nandc->aon_clk = devm_clk_get(dev, "aon");
+	if (IS_ERR(nandc->aon_clk))
+		return PTR_ERR(nandc->aon_clk);
+
+	ret = qcom_nandc_parse_dt(pdev);
+	if (ret)
+		return ret;
+
+	ret = qcom_nandc_alloc(nandc);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(nandc->core_clk);
+	if (ret)
+		goto err_core_clk;
+
+	ret = clk_prepare_enable(nandc->aon_clk);
+	if (ret)
+		goto err_aon_clk;
+
+	ret = qcom_nandc_setup(nandc);
+	if (ret)
+		goto err_setup;
+
+	for_each_available_child_of_node(dn, child) {
+		if (of_device_is_compatible(child, "qcom,nandcs")) {
+			host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
+			if (!host) {
+				of_node_put(child);
+				ret = -ENOMEM;
+				goto err_cs_init;
+			}
+
+			ret = qcom_nand_host_init(nandc, host, child);
+			if (ret) {
+				devm_kfree(dev, host);
+				continue;
+			}
+
+			list_add_tail(&host->node, &nandc->host_list);
+		}
+	}
+
+	if (list_empty(&nandc->host_list)) {
+		ret = -ENODEV;
+		goto err_cs_init;
+	}
+
+	return 0;
+
+err_cs_init:
+	list_for_each_entry(host, &nandc->host_list, node)
+		nand_release(nand_to_mtd(&host->chip));
+err_setup:
+	clk_disable_unprepare(nandc->aon_clk);
+err_aon_clk:
+	clk_disable_unprepare(nandc->core_clk);
+err_core_clk:
+	qcom_nandc_unalloc(nandc);
+
+	return ret;
+}
+
+static int qcom_nandc_remove(struct platform_device *pdev)
+{
+	struct qcom_nand_controller *nandc = platform_get_drvdata(pdev);
+	struct qcom_nand_host *host;
+
+	list_for_each_entry(host, &nandc->host_list, node)
+		nand_release(nand_to_mtd(&host->chip));
+
+	qcom_nandc_unalloc(nandc);
+
+	clk_disable_unprepare(nandc->aon_clk);
+	clk_disable_unprepare(nandc->core_clk);
+
+	return 0;
+}
+
+#define EBI2_NANDC_ECC_MODES	(ECC_RS_4BIT | ECC_BCH_8BIT)
+
+/*
+ * data will hold a struct pointer containing more differences once we support
+ * more controller variants
+ */
+static const struct of_device_id qcom_nandc_of_match[] = {
+	{	.compatible = "qcom,ipq806x-nand",
+		.data = (void *)EBI2_NANDC_ECC_MODES,
+	},
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_nandc_of_match);
+
+static struct platform_driver qcom_nandc_driver = {
+	.driver = {
+		.name = "qcom-nandc",
+		.of_match_table = qcom_nandc_of_match,
+	},
+	.probe   = qcom_nandc_probe,
+	.remove  = qcom_nandc_remove,
+};
+module_platform_driver(qcom_nandc_driver);
+
+MODULE_AUTHOR("Archit Taneja <architt@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm NAND Controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/rawnand/r852.c b/drivers/mtd/nand/rawnand/r852.c
new file mode 100644
index 000000000000..fc9287af4614
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/r852.c
@@ -0,0 +1,1082 @@ 
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <asm/byteorder.h>
+#include <linux/sched.h>
+#include "sm_common.h"
+#include "r852.h"
+
+
+static bool r852_enable_dma = 1;
+module_param(r852_enable_dma, bool, S_IRUGO);
+MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
+
+static int debug;
+module_param(debug, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+/* read register */
+static inline uint8_t r852_read_reg(struct r852_device *dev, int address)
+{
+	uint8_t reg = readb(dev->mmio + address);
+	return reg;
+}
+
+/* write register */
+static inline void r852_write_reg(struct r852_device *dev,
+						int address, uint8_t value)
+{
+	writeb(value, dev->mmio + address);
+	mmiowb();
+}
+
+
+/* read dword sized register */
+static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address)
+{
+	uint32_t reg = le32_to_cpu(readl(dev->mmio + address));
+	return reg;
+}
+
+/* write dword sized register */
+static inline void r852_write_reg_dword(struct r852_device *dev,
+							int address, uint32_t value)
+{
+	writel(cpu_to_le32(value), dev->mmio + address);
+	mmiowb();
+}
+
+/* returns pointer to our private structure */
+static inline struct r852_device *r852_get_dev(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	return nand_get_controller_data(chip);
+}
+
+
+/* check if controller supports dma */
+static void r852_dma_test(struct r852_device *dev)
+{
+	dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) &
+		(R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2);
+
+	if (!dev->dma_usable)
+		message("Non dma capable device detected, dma disabled");
+
+	if (!r852_enable_dma) {
+		message("disabling dma on user request");
+		dev->dma_usable = 0;
+	}
+}
+
+/*
+ * Enable dma. Enables either the first or the second stage of the DMA.
+ * Expects dev->dma_dir and dev->dma_state to be set
+ */
+static void r852_dma_enable(struct r852_device *dev)
+{
+	uint8_t dma_reg, dma_irq_reg;
+
+	/* Set up dma settings */
+	dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS);
+	dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY);
+
+	if (dev->dma_dir)
+		dma_reg |= R852_DMA_READ;
+
+	if (dev->dma_state == DMA_INTERNAL) {
+		dma_reg |= R852_DMA_INTERNAL;
+		/*
+		 * Precaution to make sure HW doesn't write to random
+		 * kernel memory
+		 */
+		r852_write_reg_dword(dev, R852_DMA_ADDR,
+			cpu_to_le32(dev->phys_bounce_buffer));
+	} else {
+		dma_reg |= R852_DMA_MEMORY;
+		r852_write_reg_dword(dev, R852_DMA_ADDR,
+			cpu_to_le32(dev->phys_dma_addr));
+	}
+
+	/* Precaution: make sure write reached the device */
+	r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+	r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg);
+
+	/* Set dma irq */
+	dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+		dma_irq_reg |
+		R852_DMA_IRQ_INTERNAL |
+		R852_DMA_IRQ_ERROR |
+		R852_DMA_IRQ_MEMORY);
+}
+
+/*
+ * Disable dma, called from the interrupt handler, which specifies
+ * success of the operation via 'error' argument
+ */
+static void r852_dma_done(struct r852_device *dev, int error)
+{
+	WARN_ON(dev->dma_stage == 0);
+
+	r852_write_reg_dword(dev, R852_DMA_IRQ_STA,
+			r852_read_reg_dword(dev, R852_DMA_IRQ_STA));
+
+	r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0);
+	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0);
+
+	/* Precaution to make sure HW doesn't write to random kernel memory */
+	r852_write_reg_dword(dev, R852_DMA_ADDR,
+		cpu_to_le32(dev->phys_bounce_buffer));
+	r852_read_reg_dword(dev, R852_DMA_ADDR);
+
+	dev->dma_error = error;
+	dev->dma_stage = 0;
+
+	if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer)
+		pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN,
+			dev->dma_dir ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+}
+
+/*
+ * Wait till DMA is done, which includes both phases of it
+ */
+static int r852_dma_wait(struct r852_device *dev)
+{
+	long timeout = wait_for_completion_timeout(&dev->dma_done,
+				msecs_to_jiffies(1000));
+	if (!timeout) {
+		dbg("timeout waiting for DMA interrupt");
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * Read/Write one page using DMA. Only whole pages (512 bytes) can be
+ * transferred.
+ */
+static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read)
+{
+	int bounce = 0;
+	unsigned long flags;
+	int error;
+
+	dev->dma_error = 0;
+
+	/* Set dma direction */
+	dev->dma_dir = do_read;
+	dev->dma_stage = 1;
+	reinit_completion(&dev->dma_done);
+
+	dbg_verbose("doing dma %s ", do_read ? "read" : "write");
+
+	/*
+	 * Set initial dma state: for reads, first fill the on-board buffer
+	 * from the device; for writes, first fill the buffer from memory
+	 */
+	dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY;
+
+	/* if incoming buffer is not page aligned, we should do bounce */
+	if ((unsigned long)buf & (R852_DMA_LEN-1))
+		bounce = 1;
+
+	if (!bounce) {
+		dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf,
+			R852_DMA_LEN,
+			(do_read ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE));
+
+		if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr))
+			bounce = 1;
+	}
+
+	if (bounce) {
+		dbg_verbose("dma: using bounce buffer");
+		dev->phys_dma_addr = dev->phys_bounce_buffer;
+		if (!do_read)
+			memcpy(dev->bounce_buffer, buf, R852_DMA_LEN);
+	}
+
+	/* Enable DMA */
+	spin_lock_irqsave(&dev->irqlock, flags);
+	r852_dma_enable(dev);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+
+	/* Wait till complete */
+	error = r852_dma_wait(dev);
+
+	if (error) {
+		r852_dma_done(dev, error);
+		return;
+	}
+
+	if (do_read && bounce)
+		memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN);
+}
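+
+/*
+ * Summary of the two stage DMA flow above (illustrative, as inferred from
+ * r852_do_dma() and r852_irq()): a read first transfers the sector from the
+ * card into the controller's internal buffer (DMA_INTERNAL) and then from
+ * the internal buffer into host memory (DMA_MEMORY); a write runs the two
+ * stages in the opposite order. The interrupt handler advances
+ * dev->dma_stage on each stage-complete IRQ, re-arms the engine for the
+ * second stage, and completes dev->dma_done once both stages are finished.
+ */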
+
+/*
+ * Program data lines of the nand chip to send data to it
+ */
+static void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+	uint32_t reg;
+
+	/* Don't allow any access to hardware if we suspect card removal */
+	if (dev->card_unstable)
+		return;
+
+	/* Special case for whole sector read */
+	if (len == R852_DMA_LEN && dev->dma_usable) {
+		r852_do_dma(dev, (uint8_t *)buf, 0);
+		return;
+	}
+
+	/* write DWORD chunks - faster */
+	while (len >= 4) {
+		reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24;
+		r852_write_reg_dword(dev, R852_DATALINE, reg);
+		buf += 4;
+		len -= 4;
+	}
+
+	/* write rest */
+	while (len > 0) {
+		r852_write_reg(dev, R852_DATALINE, *buf++);
+		len--;
+	}
+}
+
+/*
+ * Read data lines of the nand chip to retrieve data
+ */
+static void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+	uint32_t reg;
+
+	if (dev->card_unstable) {
+		/*
+		 * since we can't signal an error here, at least return a
+		 * predictable buffer
+		 */
+		memset(buf, 0, len);
+		return;
+	}
+
+	/* special case for whole sector read */
+	if (len == R852_DMA_LEN && dev->dma_usable) {
+		r852_do_dma(dev, buf, 1);
+		return;
+	}
+
+	/* read in dword sized chunks */
+	while (len >= 4) {
+
+		reg = r852_read_reg_dword(dev, R852_DATALINE);
+		*buf++ = reg & 0xFF;
+		*buf++ = (reg >> 8) & 0xFF;
+		*buf++ = (reg >> 16) & 0xFF;
+		*buf++ = (reg >> 24) & 0xFF;
+		len -= 4;
+	}
+
+	/* read the rest byte by byte */
+	while (len--)
+		*buf++ = r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Read one byte from nand chip
+ */
+static uint8_t r852_read_byte(struct mtd_info *mtd)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+
+	/* Same problem as in r852_read_buf.... */
+	if (dev->card_unstable)
+		return 0;
+
+	return r852_read_reg(dev, R852_DATALINE);
+}
+
+/*
+ * Control several chip lines & send commands
+ */
+static void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+
+	if (dev->card_unstable)
+		return;
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+
+		dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND |
+				 R852_CTL_ON | R852_CTL_CARDENABLE);
+
+		if (ctrl & NAND_ALE)
+			dev->ctlreg |= R852_CTL_DATA;
+
+		if (ctrl & NAND_CLE)
+			dev->ctlreg |= R852_CTL_COMMAND;
+
+		if (ctrl & NAND_NCE)
+			dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON);
+		else
+			dev->ctlreg &= ~R852_CTL_WRITE;
+
+		/* when write is started, enable write access */
+		if (dat == NAND_CMD_ERASE1)
+			dev->ctlreg |= R852_CTL_WRITE;
+
+		r852_write_reg(dev, R852_CTL, dev->ctlreg);
+	}
+
+	/*
+	 * HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we
+	 * need to set write mode
+	 */
+	if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) {
+		dev->ctlreg |= R852_CTL_WRITE;
+		r852_write_reg(dev, R852_CTL, dev->ctlreg);
+	}
+
+	if (dat != NAND_CMD_NONE)
+		r852_write_reg(dev, R852_DATALINE, dat);
+}
+
+/*
+ * Wait till the card is ready.
+ * Based on nand_wait, but reports a failure status on DMA errors
+ */
+static int r852_wait(struct mtd_info *mtd, struct nand_chip *chip)
+{
+	struct r852_device *dev = nand_get_controller_data(chip);
+
+	unsigned long timeout;
+	int status;
+
+	timeout = jiffies + (chip->state == FL_ERASING ?
+		msecs_to_jiffies(400) : msecs_to_jiffies(20));
+
+	while (time_before(jiffies, timeout))
+		if (chip->dev_ready(mtd))
+			break;
+
+	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+	status = (int)chip->read_byte(mtd);
+
+	/* Unfortunately, there is no way to report a detailed error status... */
+	if (dev->dma_error) {
+		status |= NAND_STATUS_FAIL;
+		dev->dma_error = 0;
+	}
+	return status;
+}
+
+/*
+ * Check if card is ready
+ */
+
+static int r852_ready(struct mtd_info *mtd)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+	return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY);
+}
+
+
+/*
+ * Set ECC engine mode
+ */
+
+static void r852_ecc_hwctl(struct mtd_info *mtd, int mode)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+
+	if (dev->card_unstable)
+		return;
+
+	switch (mode) {
+	case NAND_ECC_READ:
+	case NAND_ECC_WRITE:
+		/* enable ecc generation/check*/
+		dev->ctlreg |= R852_CTL_ECC_ENABLE;
+
+		/* flush ecc buffer */
+		r852_write_reg(dev, R852_CTL,
+			dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+		r852_read_reg_dword(dev, R852_DATALINE);
+		r852_write_reg(dev, R852_CTL, dev->ctlreg);
+		return;
+
+	case NAND_ECC_READSYN:
+		/* disable ecc generation */
+		dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+		r852_write_reg(dev, R852_CTL, dev->ctlreg);
+	}
+}
+
+/*
+ * Calculate ECC, only used for writes
+ */
+
+static int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat,
+							uint8_t *ecc_code)
+{
+	struct r852_device *dev = r852_get_dev(mtd);
+	struct sm_oob *oob = (struct sm_oob *)ecc_code;
+	uint32_t ecc1, ecc2;
+
+	if (dev->card_unstable)
+		return 0;
+
+	dev->ctlreg &= ~R852_CTL_ECC_ENABLE;
+	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+
+	ecc1 = r852_read_reg_dword(dev, R852_DATALINE);
+	ecc2 = r852_read_reg_dword(dev, R852_DATALINE);
+
+	oob->ecc1[0] = (ecc1) & 0xFF;
+	oob->ecc1[1] = (ecc1 >> 8) & 0xFF;
+	oob->ecc1[2] = (ecc1 >> 16) & 0xFF;
+
+	oob->ecc2[0] = (ecc2) & 0xFF;
+	oob->ecc2[1] = (ecc2 >> 8) & 0xFF;
+	oob->ecc2[2] = (ecc2 >> 16) & 0xFF;
+
+	r852_write_reg(dev, R852_CTL, dev->ctlreg);
+	return 0;
+}
+
+/*
+ * Correct the data using ECC; the hw did almost everything for us
+ */
+
+static int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat,
+				uint8_t *read_ecc, uint8_t *calc_ecc)
+{
+	uint32_t ecc_reg;
+	uint8_t ecc_status, err_byte;
+	int i, error = 0;
+
+	struct r852_device *dev = r852_get_dev(mtd);
+
+	if (dev->card_unstable)
+		return 0;
+
+	if (dev->dma_error) {
+		dev->dma_error = 0;
+		return -EIO;
+	}
+
+	r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS);
+	ecc_reg = r852_read_reg_dword(dev, R852_DATALINE);
+	r852_write_reg(dev, R852_CTL, dev->ctlreg);
+
+	for (i = 0 ; i <= 1 ; i++) {
+
+		ecc_status = (ecc_reg >> 8) & 0xFF;
+
+		/* ecc uncorrectable error */
+		if (ecc_status & R852_ECC_FAIL) {
+			dbg("ecc: unrecoverable error, in half %d", i);
+			error = -EBADMSG;
+			goto exit;
+		}
+
+		/* correctable error */
+		if (ecc_status & R852_ECC_CORRECTABLE) {
+
+			err_byte = ecc_reg & 0xFF;
+			dbg("ecc: recoverable error, "
+				"in half %d, byte %d, bit %d", i,
+				err_byte, ecc_status & R852_ECC_ERR_BIT_MSK);
+
+			dat[err_byte] ^=
+				1 << (ecc_status & R852_ECC_ERR_BIT_MSK);
+			error++;
+		}
+
+		dat += 256;
+		ecc_reg >>= 16;
+	}
+exit:
+	return error;
+}
+
+/*
+ * This is a copy of nand_read_oob_std
+ * nand_read_oob_syndrome assumes we can send column address - we can't
+ */
+static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
+			     int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
+	chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+/*
+ * Start the nand engine
+ */
+
+static void r852_engine_enable(struct r852_device *dev)
+{
+	if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) {
+		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+	} else {
+		r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED);
+		r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON);
+	}
+	msleep(300);
+	r852_write_reg(dev, R852_CTL, 0);
+}
+
+
+/*
+ * Stop the nand engine
+ */
+
+static void r852_engine_disable(struct r852_device *dev)
+{
+	r852_write_reg_dword(dev, R852_HW, 0);
+	r852_write_reg(dev, R852_CTL, R852_CTL_RESET);
+}
+
+/*
+ * Test if card is present
+ */
+
+static void r852_card_update_present(struct r852_device *dev)
+{
+	unsigned long flags;
+	uint8_t reg;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	reg = r852_read_reg(dev, R852_CARD_STA);
+	dev->card_detected = !!(reg & R852_CARD_STA_PRESENT);
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Update card detection IRQ state according to current card state
+ * which is read in r852_card_update_present
+ */
+static void r852_update_card_detect(struct r852_device *dev)
+{
+	int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+	dev->card_unstable = 0;
+
+	card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT);
+	card_detect_reg |= R852_CARD_IRQ_GENABLE;
+
+	card_detect_reg |= dev->card_detected ?
+		R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT;
+
+	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg);
+}
+
+static ssize_t r852_media_type_show(struct device *sys_dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev);
+	struct r852_device *dev = r852_get_dev(mtd);
+	char *data = dev->sm ? "smartmedia" : "xd";
+
+	strcpy(buf, data);
+	return strlen(data);
+}
+
+static DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL);
+
+
+/* Detect properties of card in slot */
+static void r852_update_media_status(struct r852_device *dev)
+{
+	uint8_t reg;
+	unsigned long flags;
+	int readonly;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+	if (!dev->card_detected) {
+		message("card removed");
+		spin_unlock_irqrestore(&dev->irqlock, flags);
+		return ;
+	}
+
+	readonly  = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO;
+	reg = r852_read_reg(dev, R852_DMA_CAP);
+	dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT);
+
+	message("detected %s %s card in slot",
+		dev->sm ? "SmartMedia" : "xD",
+		readonly ? "readonly" : "writeable");
+
+	dev->readonly = readonly;
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+}
+
+/*
+ * Register the nand device
+ * Called when the card is detected
+ */
+static int r852_register_nand_device(struct r852_device *dev)
+{
+	struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+	WARN_ON(dev->card_registred);
+
+	mtd->dev.parent = &dev->pci_dev->dev;
+
+	if (dev->readonly)
+		dev->chip->options |= NAND_ROM;
+
+	r852_engine_enable(dev);
+
+	if (sm_register_device(mtd, dev->sm))
+		goto error1;
+
+	if (device_create_file(&mtd->dev, &dev_attr_media_type)) {
+		message("can't create media type sysfs attribute");
+		goto error3;
+	}
+
+	dev->card_registred = 1;
+	return 0;
+error3:
+	nand_release(mtd);
+error1:
+	/* Force card redetect */
+	dev->card_detected = 0;
+	return -1;
+}
+
+/*
+ * Unregister the card
+ */
+
+static void r852_unregister_nand_device(struct r852_device *dev)
+{
+	struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+	if (!dev->card_registred)
+		return;
+
+	device_remove_file(&mtd->dev, &dev_attr_media_type);
+	nand_release(mtd);
+	r852_engine_disable(dev);
+	dev->card_registred = 0;
+}
+
+/* Card state updater */
+static void r852_card_detect_work(struct work_struct *work)
+{
+	struct r852_device *dev =
+		container_of(work, struct r852_device, card_detect_work.work);
+
+	r852_card_update_present(dev);
+	r852_update_card_detect(dev);
+	dev->card_unstable = 0;
+
+	/* False alarm */
+	if (dev->card_detected == dev->card_registred)
+		goto exit;
+
+	/* Read media properties */
+	r852_update_media_status(dev);
+
+	/* Register the card */
+	if (dev->card_detected)
+		r852_register_nand_device(dev);
+	else
+		r852_unregister_nand_device(dev);
+exit:
+	r852_update_card_detect(dev);
+}
+
+/* Ack + disable IRQ generation */
+static void r852_disable_irqs(struct r852_device *dev)
+{
+	uint8_t reg;
+	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
+	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);
+
+	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
+	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE,
+					reg & ~R852_DMA_IRQ_MASK);
+
+	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
+	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
+}
+
+/* Interrupt handler */
+static irqreturn_t r852_irq(int irq, void *data)
+{
+	struct r852_device *dev = (struct r852_device *)data;
+
+	uint8_t card_status, dma_status;
+	unsigned long flags;
+	irqreturn_t ret = IRQ_NONE;
+
+	spin_lock_irqsave(&dev->irqlock, flags);
+
+	/* handle card detection interrupts first */
+	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
+	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);
+
+	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {
+
+		ret = IRQ_HANDLED;
+		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);
+
+		/*
+		 * we shouldn't receive any interrupts while we wait for the
+		 * card to settle
+		 */
+		WARN_ON(dev->card_unstable);
+
+		/* disable irqs while card is unstable */
+		/* this will time out DMA if active, but better than garbage */
+		r852_disable_irqs(dev);
+
+		if (dev->card_unstable)
+			goto out;
+
+		/* let the card state settle a bit, and then do the work */
+		dev->card_unstable = 1;
+		queue_delayed_work(dev->card_workqueue,
+			&dev->card_detect_work, msecs_to_jiffies(100));
+		goto out;
+	}
+
+
+	/* Handle dma interrupts */
+	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
+	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);
+
+	if (dma_status & R852_DMA_IRQ_MASK) {
+
+		ret = IRQ_HANDLED;
+
+		if (dma_status & R852_DMA_IRQ_ERROR) {
+			dbg("received dma error IRQ");
+			r852_dma_done(dev, -EIO);
+			complete(&dev->dma_done);
+			goto out;
+		}
+
+		/* received DMA interrupt out of nowhere? */
+		WARN_ON_ONCE(dev->dma_stage == 0);
+
+		if (dev->dma_stage == 0)
+			goto out;
+
+		/* done device access */
+		if (dev->dma_state == DMA_INTERNAL &&
+				(dma_status & R852_DMA_IRQ_INTERNAL)) {
+
+			dev->dma_state = DMA_MEMORY;
+			dev->dma_stage++;
+		}
+
+		/* done memory DMA */
+		if (dev->dma_state == DMA_MEMORY &&
+				(dma_status & R852_DMA_IRQ_MEMORY)) {
+			dev->dma_state = DMA_INTERNAL;
+			dev->dma_stage++;
+		}
+
+		/* Enable 2nd half of dma dance */
+		if (dev->dma_stage == 2)
+			r852_dma_enable(dev);
+
+		/* Operation done */
+		if (dev->dma_stage == 3) {
+			r852_dma_done(dev, 0);
+			complete(&dev->dma_done);
+		}
+		goto out;
+	}
+
+	/* Handle unknown interrupts */
+	if (dma_status)
+		dbg("bad dma IRQ status = %x", dma_status);
+
+	if (card_status & ~R852_CARD_STA_CD)
+		dbg("strange card status = %x", card_status);
+
+out:
+	spin_unlock_irqrestore(&dev->irqlock, flags);
+	return ret;
+}
+
+static int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+	int error;
+	struct nand_chip *chip;
+	struct r852_device *dev;
+
+	/* pci initialization */
+	error = pci_enable_device(pci_dev);
+
+	if (error)
+		goto error1;
+
+	pci_set_master(pci_dev);
+
+	error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+	if (error)
+		goto error2;
+
+	error = pci_request_regions(pci_dev, DRV_NAME);
+
+	if (error)
+		goto error3;
+
+	error = -ENOMEM;
+
+	/* init nand chip, but register it only on card insert */
+	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
+
+	if (!chip)
+		goto error4;
+
+	/* commands */
+	chip->cmd_ctrl = r852_cmdctl;
+	chip->waitfunc = r852_wait;
+	chip->dev_ready = r852_ready;
+
+	/* I/O */
+	chip->read_byte = r852_read_byte;
+	chip->read_buf = r852_read_buf;
+	chip->write_buf = r852_write_buf;
+
+	/* ecc */
+	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
+	chip->ecc.size = R852_DMA_LEN;
+	chip->ecc.bytes = SM_OOB_SIZE;
+	chip->ecc.strength = 2;
+	chip->ecc.hwctl = r852_ecc_hwctl;
+	chip->ecc.calculate = r852_ecc_calculate;
+	chip->ecc.correct = r852_ecc_correct;
+
+	/* TODO: hack */
+	chip->ecc.read_oob = r852_read_oob;
+
+	/* init our device structure */
+	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);
+
+	if (!dev)
+		goto error5;
+
+	nand_set_controller_data(chip, dev);
+	dev->chip = chip;
+	dev->pci_dev = pci_dev;
+	pci_set_drvdata(pci_dev, dev);
+
+	dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
+		&dev->phys_bounce_buffer);
+
+	if (!dev->bounce_buffer)
+		goto error6;
+
+
+	error = -ENODEV;
+	dev->mmio = pci_ioremap_bar(pci_dev, 0);
+
+	if (!dev->mmio)
+		goto error7;
+
+	error = -ENOMEM;
+	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);
+
+	if (!dev->tmp_buffer)
+		goto error8;
+
+	init_completion(&dev->dma_done);
+
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
+
+	if (!dev->card_workqueue)
+		goto error9;
+
+	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);
+
+	/* shut down everything - precaution */
+	r852_engine_disable(dev);
+	r852_disable_irqs(dev);
+
+	r852_dma_test(dev);
+
+	dev->irq = pci_dev->irq;
+	spin_lock_init(&dev->irqlock);
+
+	dev->card_detected = 0;
+	r852_card_update_present(dev);
+
+	/* register irq handler */
+	error = -ENODEV;
+	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
+			  DRV_NAME, dev))
+		goto error10;
+
+	/* kick initial present test */
+	queue_delayed_work(dev->card_workqueue,
+		&dev->card_detect_work, 0);
+
+
+	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
+	return 0;
+
+error10:
+	destroy_workqueue(dev->card_workqueue);
+error9:
+	kfree(dev->tmp_buffer);
+error8:
+	pci_iounmap(pci_dev, dev->mmio);
+error7:
+	pci_free_consistent(pci_dev, R852_DMA_LEN,
+		dev->bounce_buffer, dev->phys_bounce_buffer);
+error6:
+	kfree(dev);
+error5:
+	kfree(chip);
+error4:
+	pci_release_regions(pci_dev);
+error3:
+error2:
+	pci_disable_device(pci_dev);
+error1:
+	return error;
+}
+
+static void r852_remove(struct pci_dev *pci_dev)
+{
+	struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+	/* Stop the card detect workqueue -
+	   we are going to unregister the device anyway */
+	cancel_delayed_work_sync(&dev->card_detect_work);
+	destroy_workqueue(dev->card_workqueue);
+
+	/* Unregister the device; this might trigger more IO */
+	r852_unregister_nand_device(dev);
+
+	/* Stop interrupts */
+	r852_disable_irqs(dev);
+	free_irq(dev->irq, dev);
+
+	/* Cleanup */
+	kfree(dev->tmp_buffer);
+	pci_iounmap(pci_dev, dev->mmio);
+	pci_free_consistent(pci_dev, R852_DMA_LEN,
+		dev->bounce_buffer, dev->phys_bounce_buffer);
+
+	kfree(dev->chip);
+	kfree(dev);
+
+	/* Shutdown the PCI device */
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+}
+
+static void r852_shutdown(struct pci_dev *pci_dev)
+{
+	struct r852_device *dev = pci_get_drvdata(pci_dev);
+
+	cancel_delayed_work_sync(&dev->card_detect_work);
+	r852_disable_irqs(dev);
+	synchronize_irq(dev->irq);
+	pci_disable_device(pci_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int r852_suspend(struct device *device)
+{
+	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+
+	if (dev->ctlreg & R852_CTL_CARDENABLE)
+		return -EBUSY;
+
+	/* First make sure the detect work is gone */
+	cancel_delayed_work_sync(&dev->card_detect_work);
+
+	/* Turn off the interrupts and stop the device */
+	r852_disable_irqs(dev);
+	r852_engine_disable(dev);
+
+	/* If the card was pulled out right during the suspend, which is very
+	   unlikely, we will remove it on resume; it is too late now
+	   anyway... */
+	dev->card_unstable = 0;
+	return 0;
+}
+
+static int r852_resume(struct device *device)
+{
+	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));
+	struct mtd_info *mtd = nand_to_mtd(dev->chip);
+
+	r852_disable_irqs(dev);
+	r852_card_update_present(dev);
+	r852_engine_disable(dev);
+
+
+	/* If card status changed, just do the work */
+	if (dev->card_detected != dev->card_registred) {
+		dbg("card was %s during low power state",
+			dev->card_detected ? "added" : "removed");
+
+		queue_delayed_work(dev->card_workqueue,
+		&dev->card_detect_work, msecs_to_jiffies(1000));
+		return 0;
+	}
+
+	/* Otherwise, initialize the card */
+	if (dev->card_registred) {
+		r852_engine_enable(dev);
+		dev->chip->select_chip(mtd, 0);
+		dev->chip->cmdfunc(mtd, NAND_CMD_RESET, -1, -1);
+		dev->chip->select_chip(mtd, -1);
+	}
+
+	/* Program card detection IRQ */
+	r852_update_card_detect(dev);
+	return 0;
+}
+#endif
+
+static const struct pci_device_id r852_pci_id_tbl[] = {
+
+	{ PCI_VDEVICE(RICOH, 0x0852), },
+	{ },
+};
+
+MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);
+
+static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);
+
+static struct pci_driver r852_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= r852_pci_id_tbl,
+	.probe		= r852_probe,
+	.remove		= r852_remove,
+	.shutdown	= r852_shutdown,
+	.driver.pm	= &r852_pm_ops,
+};
+
+module_pci_driver(r852_pci_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
diff --git a/drivers/mtd/nand/rawnand/r852.h b/drivers/mtd/nand/rawnand/r852.h
new file mode 100644
index 000000000000..8713c57f6207
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/r852.h
@@ -0,0 +1,160 @@ 
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * driver for Ricoh xD readers
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/spinlock.h>
+
+
+/* nand interface + ecc
+   a byte write/read does one cycle on the nand data lines,
+   a dword write/read does 4 cycles.
+   If R852_CTL_ECC_ACCESS is set in R852_CTL, a dword read returns the
+   results of the ecc correction, provided a DMA read was done before.
+   If a write was done, two dword reads return the generated ecc checksums.
+*/
+#define	R852_DATALINE		0x00
+
+/* control register */
+#define R852_CTL		0x04
+#define R852_CTL_COMMAND 	0x01	/* send command (#CLE)*/
+#define R852_CTL_DATA		0x02	/* read/write data (#ALE)*/
+#define R852_CTL_ON		0x04	/* only seems to control the hd led, */
+					/* but has to be set on start...*/
+#define R852_CTL_RESET		0x08	/* unknown, set only on start once*/
+#define R852_CTL_CARDENABLE	0x10	/* probably (#CE) - always set*/
+#define R852_CTL_ECC_ENABLE	0x20	/* enable ecc engine */
+#define R852_CTL_ECC_ACCESS	0x40	/* read/write ecc via reg #0*/
+#define R852_CTL_WRITE		0x80	/* set when performing writes (#WP) */
+
+/* card detection status */
+#define R852_CARD_STA		0x05
+
+#define R852_CARD_STA_CD	0x01	/* state of #CD line, same as 0x04 */
+#define R852_CARD_STA_RO	0x02	/* card is readonly */
+#define R852_CARD_STA_PRESENT	0x04	/* card is present (#CD) */
+#define R852_CARD_STA_ABSENT	0x08	/* card is absent */
+#define R852_CARD_STA_BUSY	0x80	/* card is busy - (#R/B) */
+
+/* card detection irq status & enable*/
+#define R852_CARD_IRQ_STA	0x06	/* IRQ status */
+#define R852_CARD_IRQ_ENABLE	0x07	/* IRQ enable */
+
+#define R852_CARD_IRQ_CD	0x01	/* fire when #CD lights, same as 0x04*/
+#define R852_CARD_IRQ_REMOVE	0x04	/* detect card removal */
+#define R852_CARD_IRQ_INSERT	0x08	/* detect card insert */
+#define R852_CARD_IRQ_UNK1	0x10	/* unknown */
+#define R852_CARD_IRQ_GENABLE	0x80	/* general enable */
+#define R852_CARD_IRQ_MASK	0x1D
+
+
+
+/* hardware enable */
+#define R852_HW			0x08
+#define R852_HW_ENABLED		0x01	/* hw enabled */
+#define R852_HW_UNKNOWN		0x80
+
+
+/* dma capabilities */
+#define R852_DMA_CAP		0x09
+#define R852_SMBIT		0x20	/* if set with bit #6 or bit #7, then */
+					/* hw is smartmedia */
+#define R852_DMA1		0x40	/* if set w/bit #7, dma is supported */
+#define R852_DMA2		0x80	/* if set w/bit #6, dma is supported */
+
+
+/* physical DMA address - 32 bit value*/
+#define R852_DMA_ADDR		0x0C
+
+
+/* dma settings */
+#define R852_DMA_SETTINGS	0x10
+#define R852_DMA_MEMORY		0x01	/* (memory <-> internal hw buffer) */
+#define R852_DMA_READ		0x02	/* 0 = write, 1 = read */
+#define R852_DMA_INTERNAL	0x04	/* (internal hw buffer <-> card) */
+
+/* dma IRQ status */
+#define R852_DMA_IRQ_STA		0x14
+
+/* dma IRQ enable */
+#define R852_DMA_IRQ_ENABLE	0x18
+
+#define R852_DMA_IRQ_MEMORY	0x01	/* (memory <-> internal hw buffer) */
+#define R852_DMA_IRQ_ERROR	0x02	/* error did happen */
+#define R852_DMA_IRQ_INTERNAL	0x04	/* (internal hw buffer <-> card) */
+#define R852_DMA_IRQ_MASK	0x07	/* mask of all IRQ bits */
+
+
+/* ECC syndrome format - read from reg #0 will return two copies of these for
+   each half of the page.
+   first byte is error byte location, and second, bit location + flags */
+#define R852_ECC_ERR_BIT_MSK	0x07	/* error bit location */
+#define R852_ECC_CORRECT		0x10	/* no errors - (guessed) */
+#define R852_ECC_CORRECTABLE	0x20	/* correctable error exist */
+#define R852_ECC_FAIL		0x40	/* non correctable error detected */
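+/*
+ * Worked example (illustrative values only): a syndrome pair of 0x1A 0x23
+ * would describe a correctable error (0x23 has R852_ECC_CORRECTABLE set)
+ * at byte 0x1A, bit 0x23 & R852_ECC_ERR_BIT_MSK = 3, so the repair would
+ * flip bit 3 of byte 0x1A in the data buffer.
+ */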
+
+#define R852_DMA_LEN		512
+
+#define DMA_INTERNAL	0
+#define DMA_MEMORY	1
+
+struct r852_device {
+	void __iomem *mmio;		/* mmio */
+	struct nand_chip *chip;		/* nand chip backpointer */
+	struct pci_dev *pci_dev;	/* pci backpointer */
+
+	/* dma area */
+	dma_addr_t phys_dma_addr;	/* bus address of buffer*/
+	struct completion dma_done;	/* data transfer done */
+
+	dma_addr_t phys_bounce_buffer;	/* bus address of bounce buffer */
+	uint8_t *bounce_buffer;		/* virtual address of bounce buffer */
+
+	int dma_dir;			/* 1 = read, 0 = write */
+	int dma_stage;			/* 0 - idle, 1 - first step,
+					   2 - second step */
+
+	int dma_state;			/* 0 = internal, 1 = memory */
+	int dma_error;			/* dma errors */
+	int dma_usable;			/* is it possible to use dma */
+
+	/* card status area */
+	struct delayed_work card_detect_work;
+	struct workqueue_struct *card_workqueue;
+	int card_registred;		/* card registered with mtd */
+	int card_detected;		/* card detected in slot */
+	int card_unstable;		/* the card state is changing and
+					   is not known yet */
+	int readonly;			/* card is readonly */
+	int sm;				/* Is card smartmedia */
+
+	/* interrupt handling */
+	spinlock_t irqlock;		/* IRQ protecting lock */
+	int irq;			/* irq num */
+	/* misc */
+	void *tmp_buffer;		/* temporary buffer */
+	uint8_t ctlreg;			/* cached contents of control reg */
+};
+
+#define DRV_NAME "r852"
+
+
+#define dbg(format, ...) \
+	if (debug) \
+		printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+#define dbg_verbose(format, ...) \
+	if (debug > 1) \
+		printk(KERN_DEBUG DRV_NAME ": " format "\n", ## __VA_ARGS__)
+
+
+#define message(format, ...) \
+	printk(KERN_INFO DRV_NAME ": " format "\n", ## __VA_ARGS__)
diff --git a/drivers/mtd/nand/rawnand/s3c2410.c b/drivers/mtd/nand/rawnand/s3c2410.c
new file mode 100644
index 000000000000..6ce9f867a123
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/s3c2410.c
@@ -0,0 +1,1165 @@ 
+/* linux/drivers/mtd/nand/rawnand/s3c2410.c
+ *
+ * Copyright © 2004-2008 Simtec Electronics
+ *	http://armlinux.simtec.co.uk/
+ *	Ben Dooks <ben@simtec.co.uk>
+ *
+ * Samsung S3C2410/S3C2440/S3C2412 NAND driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#define pr_fmt(fmt) "nand-s3c2410: " fmt
+
+#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
+#define DEBUG
+#endif
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/cpufreq.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+
+#include <linux/platform_data/mtd-nand-s3c2410.h>
+
+#define S3C2410_NFREG(x) (x)
+
+#define S3C2410_NFCONF		S3C2410_NFREG(0x00)
+#define S3C2410_NFCMD		S3C2410_NFREG(0x04)
+#define S3C2410_NFADDR		S3C2410_NFREG(0x08)
+#define S3C2410_NFDATA		S3C2410_NFREG(0x0C)
+#define S3C2410_NFSTAT		S3C2410_NFREG(0x10)
+#define S3C2410_NFECC		S3C2410_NFREG(0x14)
+#define S3C2440_NFCONT		S3C2410_NFREG(0x04)
+#define S3C2440_NFCMD		S3C2410_NFREG(0x08)
+#define S3C2440_NFADDR		S3C2410_NFREG(0x0C)
+#define S3C2440_NFDATA		S3C2410_NFREG(0x10)
+#define S3C2440_NFSTAT		S3C2410_NFREG(0x20)
+#define S3C2440_NFMECC0		S3C2410_NFREG(0x2C)
+#define S3C2412_NFSTAT		S3C2410_NFREG(0x28)
+#define S3C2412_NFMECC0		S3C2410_NFREG(0x34)
+#define S3C2410_NFCONF_EN		(1<<15)
+#define S3C2410_NFCONF_INITECC		(1<<12)
+#define S3C2410_NFCONF_nFCE		(1<<11)
+#define S3C2410_NFCONF_TACLS(x)		((x)<<8)
+#define S3C2410_NFCONF_TWRPH0(x)	((x)<<4)
+#define S3C2410_NFCONF_TWRPH1(x)	((x)<<0)
+#define S3C2410_NFSTAT_BUSY		(1<<0)
+#define S3C2440_NFCONF_TACLS(x)		((x)<<12)
+#define S3C2440_NFCONF_TWRPH0(x)	((x)<<8)
+#define S3C2440_NFCONF_TWRPH1(x)	((x)<<4)
+#define S3C2440_NFCONT_INITECC		(1<<4)
+#define S3C2440_NFCONT_nFCE		(1<<1)
+#define S3C2440_NFCONT_ENABLE		(1<<0)
+#define S3C2440_NFSTAT_READY		(1<<0)
+#define S3C2412_NFCONF_NANDBOOT		(1<<31)
+#define S3C2412_NFCONT_INIT_MAIN_ECC	(1<<5)
+#define S3C2412_NFCONT_nFCE0		(1<<1)
+#define S3C2412_NFSTAT_READY		(1<<0)
+
+/* new oob placement block for use with hardware ecc generation
+ */
+static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = 3;
+
+	return 0;
+}
+
+static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
+				  struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 8;
+	oobregion->length = 8;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
+	.ecc = s3c2410_ooblayout_ecc,
+	.free = s3c2410_ooblayout_free,
+};
+
+/* controller and mtd information */
+
+struct s3c2410_nand_info;
+
+/**
+ * struct s3c2410_nand_mtd - driver MTD structure
+ * @chip: The NAND chip information.
+ * @set: The platform information supplied for this set of NAND chips.
+ * @info: Link back to the hardware information.
+ * @scan_res: The result from calling nand_scan_ident().
+*/
+struct s3c2410_nand_mtd {
+	struct nand_chip		chip;
+	struct s3c2410_nand_set		*set;
+	struct s3c2410_nand_info	*info;
+	int				scan_res;
+};
+
+enum s3c_cpu_type {
+	TYPE_S3C2410,
+	TYPE_S3C2412,
+	TYPE_S3C2440,
+};
+
+enum s3c_nand_clk_state {
+	CLOCK_DISABLE	= 0,
+	CLOCK_ENABLE,
+	CLOCK_SUSPEND,
+};
+
+/* overview of the s3c2410 nand state */
+
+/**
+ * struct s3c2410_nand_info - NAND controller state.
+ * @mtds: An array of MTD instances on this controller.
+ * @platform: The platform data for this board.
+ * @device: The platform device we bound to.
+ * @clk: The clock resource for this controller.
+ * @regs: The area mapped for the hardware registers.
+ * @sel_reg: Pointer to the register controlling the NAND selection.
+ * @sel_bit: The bit in @sel_reg to select the NAND chip.
+ * @mtd_count: The number of MTDs created from this controller.
+ * @save_sel: The contents of @sel_reg to be saved over suspend.
+ * @clk_rate: The clock rate from @clk.
+ * @clk_state: The current clock state.
+ * @cpu_type: The exact type of this controller.
+ */
+struct s3c2410_nand_info {
+	/* mtd info */
+	struct nand_hw_control		controller;
+	struct s3c2410_nand_mtd		*mtds;
+	struct s3c2410_platform_nand	*platform;
+
+	/* device info */
+	struct device			*device;
+	struct clk			*clk;
+	void __iomem			*regs;
+	void __iomem			*sel_reg;
+	int				sel_bit;
+	int				mtd_count;
+	unsigned long			save_sel;
+	unsigned long			clk_rate;
+	enum s3c_nand_clk_state		clk_state;
+
+	enum s3c_cpu_type		cpu_type;
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+	struct notifier_block	freq_transition;
+#endif
+};
+
+/* conversion functions */
+
+static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
+			    chip);
+}
+
+static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
+{
+	return s3c2410_nand_mtd_toours(mtd)->info;
+}
+
+static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
+{
+	return platform_get_drvdata(dev);
+}
+
+static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
+{
+	return dev_get_platdata(&dev->dev);
+}
+
+static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
+{
+#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
+	return 1;
+#else
+	return 0;
+#endif
+}
+
+/**
+ * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
+ * @info: The controller instance.
+ * @new_state: State to which clock should be set.
+ */
+static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
+		enum s3c_nand_clk_state new_state)
+{
+	if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
+		return;
+
+	if (info->clk_state == CLOCK_ENABLE) {
+		if (new_state != CLOCK_ENABLE)
+			clk_disable_unprepare(info->clk);
+	} else {
+		if (new_state == CLOCK_ENABLE)
+			clk_prepare_enable(info->clk);
+	}
+
+	info->clk_state = new_state;
+}
+
+/* timing calculations */
+
+#define NS_IN_KHZ 1000000
+
+/**
+ * s3c_nand_calc_rate - calculate timing data.
+ * @wanted: The cycle time in nanoseconds.
+ * @clk: The clock rate in kHz.
+ * @max: The maximum divider value.
+ *
+ * Calculate the timing value from the given parameters.
+ */
+static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
+{
+	int result;
+
+	result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
+
+	pr_debug("result %d from %ld, %d\n", result, clk, wanted);
+
+	if (result > max) {
+		pr_err("%d ns is too big for current clock rate %ld\n",
+			wanted, clk);
+		return -1;
+	}
+
+	if (result < 1)
+		result = 1;
+
+	return result;
+}
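+
+/*
+ * Worked example, assuming a hypothetical 133000 kHz (133 MHz) clock and a
+ * wanted cycle time of 30 ns: DIV_ROUND_UP(30 * 133000, 1000000) = 4 ticks,
+ * which to_ns() below maps back to roughly 30 ns (4 * 1000000 / 133000).
+ */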
+
+#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
+
+/* controller setup */
+
+/**
+ * s3c2410_nand_setrate - setup controller timing information.
+ * @info: The controller instance.
+ *
+ * Given the information supplied by the platform, calculate and set
+ * the necessary timing registers in the hardware to generate the
+ * necessary timing cycles to the hardware.
+ */
+static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
+{
+	struct s3c2410_platform_nand *plat = info->platform;
+	int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
+	int tacls, twrph0, twrph1;
+	unsigned long clkrate = clk_get_rate(info->clk);
+	unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
+	unsigned long flags;
+
+	/* calculate the timing information for the controller */
+
+	info->clk_rate = clkrate;
+	clkrate /= 1000;	/* turn clock into kHz for ease of use */
+
+	if (plat != NULL) {
+		tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
+		twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
+		twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
+	} else {
+		/* default timings */
+		tacls = tacls_max;
+		twrph0 = 8;
+		twrph1 = 8;
+	}
+
+	if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
+		dev_err(info->device, "cannot get suitable timings\n");
+		return -EINVAL;
+	}
+
+	dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
+		tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
+						twrph1, to_ns(twrph1, clkrate));
+
+	switch (info->cpu_type) {
+	case TYPE_S3C2410:
+		mask = (S3C2410_NFCONF_TACLS(3) |
+			S3C2410_NFCONF_TWRPH0(7) |
+			S3C2410_NFCONF_TWRPH1(7));
+		set = S3C2410_NFCONF_EN;
+		set |= S3C2410_NFCONF_TACLS(tacls - 1);
+		set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
+		set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
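+		/*
+		 * Worked example (hypothetical timings): tacls = 1,
+		 * twrph0 = 3, twrph1 = 1 yields set = 0x8020, i.e.
+		 * S3C2410_NFCONF_EN | S3C2410_NFCONF_TWRPH0(2), with the
+		 * TACLS and TWRPH1 fields left at zero.
+		 */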
+		break;
+
+	case TYPE_S3C2440:
+	case TYPE_S3C2412:
+		mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
+			S3C2440_NFCONF_TWRPH0(7) |
+			S3C2440_NFCONF_TWRPH1(7));
+
+		set = S3C2440_NFCONF_TACLS(tacls - 1);
+		set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
+		set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
+		break;
+
+	default:
+		BUG();
+	}
+
+	local_irq_save(flags);
+
+	cfg = readl(info->regs + S3C2410_NFCONF);
+	cfg &= ~mask;
+	cfg |= set;
+	writel(cfg, info->regs + S3C2410_NFCONF);
+
+	local_irq_restore(flags);
+
+	dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
+
+	return 0;
+}
+
+/**
+ * s3c2410_nand_inithw - basic hardware initialisation
+ * @info: The hardware state.
+ *
+ * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
+ * to setup the hardware access speeds and set the controller to be enabled.
+*/
+static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
+{
+	int ret;
+
+	ret = s3c2410_nand_setrate(info);
+	if (ret < 0)
+		return ret;
+
+	switch (info->cpu_type) {
+	case TYPE_S3C2410:
+	default:
+		break;
+
+	case TYPE_S3C2440:
+	case TYPE_S3C2412:
+		/* enable the controller and de-assert nFCE */
+
+		writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
+	}
+
+	return 0;
+}
+
+/**
+ * s3c2410_nand_select_chip - select the given nand chip
+ * @mtd: The MTD instance for this chip.
+ * @chip: The chip number.
+ *
+ * This is called by the MTD layer to either select a given chip for the
+ * @mtd instance, or to indicate that the access has finished and the
+ * chip can be de-selected.
+ *
+ * The routine ensures that the nFCE line is correctly setup, and any
+ * platform specific selection code is called to route nFCE to the specific
+ * chip.
+ */
+static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct s3c2410_nand_info *info;
+	struct s3c2410_nand_mtd *nmtd;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	unsigned long cur;
+
+	nmtd = nand_get_controller_data(this);
+	info = nmtd->info;
+
+	if (chip != -1)
+		s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+	cur = readl(info->sel_reg);
+
+	if (chip == -1) {
+		cur |= info->sel_bit;
+	} else {
+		if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
+			dev_err(info->device, "invalid chip %d\n", chip);
+			return;
+		}
+
+		if (info->platform != NULL) {
+			if (info->platform->select_chip != NULL)
+				(info->platform->select_chip) (nmtd->set, chip);
+		}
+
+		cur &= ~info->sel_bit;
+	}
+
+	writel(cur, info->sel_reg);
+
+	if (chip == -1)
+		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+}
+
+/* s3c2410_nand_hwcontrol
+ *
+ * Issue command and address cycles to the chip
+*/
+
+static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+				   unsigned int ctrl)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		writeb(cmd, info->regs + S3C2410_NFCMD);
+	else
+		writeb(cmd, info->regs + S3C2410_NFADDR);
+}
+
+/* command and control functions */
+
+static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+				   unsigned int ctrl)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		writeb(cmd, info->regs + S3C2440_NFCMD);
+	else
+		writeb(cmd, info->regs + S3C2440_NFADDR);
+}
+
+/* s3c2410_nand_devready()
+ *
+ * returns 0 if the nand is busy, 1 if it is ready
+*/
+
+static int s3c2410_nand_devready(struct mtd_info *mtd)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
+}
+
+static int s3c2440_nand_devready(struct mtd_info *mtd)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
+}
+
+static int s3c2412_nand_devready(struct mtd_info *mtd)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
+}
+
+/* ECC handling functions */
+
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
+				     u_char *read_ecc, u_char *calc_ecc)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	unsigned int diff0, diff1, diff2;
+	unsigned int bit, byte;
+
+	pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
+
+	diff0 = read_ecc[0] ^ calc_ecc[0];
+	diff1 = read_ecc[1] ^ calc_ecc[1];
+	diff2 = read_ecc[2] ^ calc_ecc[2];
+
+	pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
+		 __func__, 3, read_ecc, 3, calc_ecc,
+		 diff0, diff1, diff2);
+
+	if (diff0 == 0 && diff1 == 0 && diff2 == 0)
+		return 0;		/* ECC is ok */
+
+	/* sometimes people do not think about using the ECC, so check
+	 * to see if we have a 0xff,0xff,0xff read ECC and then ignore
+	 * the error, on the assumption that this is an un-eccd page.
+	 */
+	if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
+	    && info->platform->ignore_unset_ecc)
+		return 0;
+
+	/* Can we correct this ECC (i.e. one row and column change)?
+	 * Note, this is similar to the 256 error code on smartmedia */
+
+	if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
+	    ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
+	    ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
+		/* calculate the bit position of the error */
+
+		bit  = ((diff2 >> 3) & 1) |
+		       ((diff2 >> 4) & 2) |
+		       ((diff2 >> 5) & 4);
+
+		/* calculate the byte position of the error */
+
+		byte = ((diff2 << 7) & 0x100) |
+		       ((diff1 << 0) & 0x80)  |
+		       ((diff1 << 1) & 0x40)  |
+		       ((diff1 << 2) & 0x20)  |
+		       ((diff1 << 3) & 0x10)  |
+		       ((diff0 >> 4) & 0x08)  |
+		       ((diff0 >> 3) & 0x04)  |
+		       ((diff0 >> 2) & 0x02)  |
+		       ((diff0 >> 1) & 0x01);
+
+		dev_dbg(info->device, "correcting error bit %d, byte %d\n",
+			bit, byte);
+
+		dat[byte] ^= (1 << bit);
+		return 1;
+	}
+
+	/* if there is only one bit difference in the ECC, then
+	 * one of only a row or column parity has changed, which
+	 * means the error is most probably in the ECC itself */
+
+	diff0 |= (diff1 << 8);
+	diff0 |= (diff2 << 16);
+
+	/* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
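+	/*
+	 * e.g. (illustrative): read_ecc = {0x00, 0x04, 0x00} against an
+	 * all-zero calc_ecc gives a combined diff0 of 0x000400, a power of
+	 * two, so the single flipped bit lies in the ECC bytes themselves
+	 * and the data needs no repair.
+	 */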
+	if ((diff0 & (diff0 - 1)) == 0)
+		return 1;
+
+	return -1;
+}
+
+/* ECC functions
+ *
+ * These allow the s3c2410 and s3c2440 to use the controller's ECC
+ * generator block to ECC the data as it passes through.
+*/
+
+static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	unsigned long ctrl;
+
+	ctrl = readl(info->regs + S3C2410_NFCONF);
+	ctrl |= S3C2410_NFCONF_INITECC;
+	writel(ctrl, info->regs + S3C2410_NFCONF);
+}
+
+static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	unsigned long ctrl;
+
+	ctrl = readl(info->regs + S3C2440_NFCONT);
+	writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
+	       info->regs + S3C2440_NFCONT);
+}
+
+static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	unsigned long ctrl;
+
+	ctrl = readl(info->regs + S3C2440_NFCONT);
+	writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
+}
+
+static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+				      u_char *ecc_code)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+	ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
+	ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
+	ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
+
+	pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+	return 0;
+}
+
+static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+				      u_char *ecc_code)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
+
+	ecc_code[0] = ecc;
+	ecc_code[1] = ecc >> 8;
+	ecc_code[2] = ecc >> 16;
+
+	pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
+
+	return 0;
+}
+
+static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+				      u_char *ecc_code)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+	unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
+
+	ecc_code[0] = ecc;
+	ecc_code[1] = ecc >> 8;
+	ecc_code[2] = ecc >> 16;
+
+	pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
+
+	return 0;
+}
+#endif
+
+/* override the standard functions for a little more speed. We can
+ * use block read/write to move the data buffers to/from the controller
+*/
+
+static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	readsb(this->IO_ADDR_R, buf, len);
+}
+
+static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+	readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+	/* cleanup if we've got less than a word to do */
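+	/* e.g. (illustrative) len = 7: readsl() above covers bytes 0-3 and
+	   the byte loop below picks up bytes 4-6 */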
+	if (len & 3) {
+		buf += len & ~3;
+
+		for (; len & 3; len--)
+			*buf++ = readb(info->regs + S3C2440_NFDATA);
+	}
+}
+
+static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+				   int len)
+{
+	struct nand_chip *this = mtd_to_nand(mtd);
+	writesb(this->IO_ADDR_W, buf, len);
+}
+
+static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
+				   int len)
+{
+	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
+
+	writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
+
+	/* cleanup any fractional write */
+	if (len & 3) {
+		buf += len & ~3;
+
+		for (; len & 3; len--, buf++)
+			writeb(*buf, info->regs + S3C2440_NFDATA);
+	}
+}
+
+/* cpufreq driver support */
+
+#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
+
+static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
+					  unsigned long val, void *data)
+{
+	struct s3c2410_nand_info *info;
+	unsigned long newclk;
+
+	info = container_of(nb, struct s3c2410_nand_info, freq_transition);
+	newclk = clk_get_rate(info->clk);
+
+	if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
+	    (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
+		s3c2410_nand_setrate(info);
+	}
+
+	return 0;
+}
+
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+	info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
+
+	return cpufreq_register_notifier(&info->freq_transition,
+					 CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+	cpufreq_unregister_notifier(&info->freq_transition,
+				    CPUFREQ_TRANSITION_NOTIFIER);
+}
+
+#else
+static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
+{
+	return 0;
+}
+
+static inline void
+s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
+{
+}
+#endif
+
+/* device management functions */
+
+static int s3c24xx_nand_remove(struct platform_device *pdev)
+{
+	struct s3c2410_nand_info *info = to_nand_info(pdev);
+
+	if (info == NULL)
+		return 0;
+
+	s3c2410_nand_cpufreq_deregister(info);
+
+	/* Release all our mtds and their partitions, then go through
+	 * freeing the resources used
+	 */
+
+	if (info->mtds != NULL) {
+		struct s3c2410_nand_mtd *ptr = info->mtds;
+		int mtdno;
+
+		for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
+			pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
+			nand_release(nand_to_mtd(&ptr->chip));
+		}
+	}
+
+	/* free the common resources */
+
+	if (!IS_ERR(info->clk))
+		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+
+	return 0;
+}
+
+static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
+				      struct s3c2410_nand_mtd *mtd,
+				      struct s3c2410_nand_set *set)
+{
+	if (set) {
+		struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
+
+		mtdinfo->name = set->name;
+
+		return mtd_device_parse_register(mtdinfo, NULL, NULL,
+					 set->partitions, set->nr_partitions);
+	}
+
+	return -ENODEV;
+}
+
+/**
+ * s3c2410_nand_init_chip - initialise a single instance of a chip
+ * @info: The base NAND controller the chip is on.
+ * @nmtd: The new controller MTD instance to fill in.
+ * @set: The information passed from the board specific platform data.
+ *
+ * Initialise the given @nmtd from the information in @info and @set. This
+ * readies the structure for use with the MTD layer functions by ensuring
+ * all pointers are setup and the necessary control routines selected.
+ */
+static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
+				   struct s3c2410_nand_mtd *nmtd,
+				   struct s3c2410_nand_set *set)
+{
+	struct nand_chip *chip = &nmtd->chip;
+	void __iomem *regs = info->regs;
+
+	chip->write_buf    = s3c2410_nand_write_buf;
+	chip->read_buf     = s3c2410_nand_read_buf;
+	chip->select_chip  = s3c2410_nand_select_chip;
+	chip->chip_delay   = 50;
+	nand_set_controller_data(chip, nmtd);
+	chip->options	   = set->options;
+	chip->controller   = &info->controller;
+
+	switch (info->cpu_type) {
+	case TYPE_S3C2410:
+		chip->IO_ADDR_W = regs + S3C2410_NFDATA;
+		info->sel_reg   = regs + S3C2410_NFCONF;
+		info->sel_bit	= S3C2410_NFCONF_nFCE;
+		chip->cmd_ctrl  = s3c2410_nand_hwcontrol;
+		chip->dev_ready = s3c2410_nand_devready;
+		break;
+
+	case TYPE_S3C2440:
+		chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+		info->sel_reg   = regs + S3C2440_NFCONT;
+		info->sel_bit	= S3C2440_NFCONT_nFCE;
+		chip->cmd_ctrl  = s3c2440_nand_hwcontrol;
+		chip->dev_ready = s3c2440_nand_devready;
+		chip->read_buf  = s3c2440_nand_read_buf;
+		chip->write_buf	= s3c2440_nand_write_buf;
+		break;
+
+	case TYPE_S3C2412:
+		chip->IO_ADDR_W = regs + S3C2440_NFDATA;
+		info->sel_reg   = regs + S3C2440_NFCONT;
+		info->sel_bit	= S3C2412_NFCONT_nFCE0;
+		chip->cmd_ctrl  = s3c2440_nand_hwcontrol;
+		chip->dev_ready = s3c2412_nand_devready;
+
+		if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
+			dev_info(info->device, "System booted from NAND\n");
+
+		break;
+	}
+
+	chip->IO_ADDR_R = chip->IO_ADDR_W;
+
+	nmtd->info	   = info;
+	nmtd->set	   = set;
+
+#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
+	chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+	chip->ecc.correct   = s3c2410_nand_correct_data;
+	chip->ecc.mode	    = NAND_ECC_HW;
+	chip->ecc.strength  = 1;
+
+	switch (info->cpu_type) {
+	case TYPE_S3C2410:
+		chip->ecc.hwctl	    = s3c2410_nand_enable_hwecc;
+		chip->ecc.calculate = s3c2410_nand_calculate_ecc;
+		break;
+
+	case TYPE_S3C2412:
+		chip->ecc.hwctl     = s3c2412_nand_enable_hwecc;
+		chip->ecc.calculate = s3c2412_nand_calculate_ecc;
+		break;
+
+	case TYPE_S3C2440:
+		chip->ecc.hwctl     = s3c2440_nand_enable_hwecc;
+		chip->ecc.calculate = s3c2440_nand_calculate_ecc;
+		break;
+	}
+#else
+	chip->ecc.mode	    = NAND_ECC_SOFT;
+	chip->ecc.algo	= NAND_ECC_HAMMING;
+#endif
+
+	if (set->disable_ecc)
+		chip->ecc.mode	= NAND_ECC_NONE;
+
+	switch (chip->ecc.mode) {
+	case NAND_ECC_NONE:
+		dev_info(info->device, "NAND ECC disabled\n");
+		break;
+	case NAND_ECC_SOFT:
+		dev_info(info->device, "NAND soft ECC\n");
+		break;
+	case NAND_ECC_HW:
+		dev_info(info->device, "NAND hardware ECC\n");
+		break;
+	default:
+		dev_info(info->device, "NAND ECC UNKNOWN\n");
+		break;
+	}
+
+	/* If you use u-boot BBT creation code, specifying this flag will
+	 * let the kernel fish out the BBT from the NAND, and also skip the
+	 * full NAND scan that can take 1/2s or so. Little things... */
+	if (set->flash_bbt) {
+		chip->bbt_options |= NAND_BBT_USE_FLASH;
+		chip->options |= NAND_SKIP_BBTSCAN;
+	}
+}
+
+/**
+ * s3c2410_nand_update_chip - post probe update
+ * @info: The controller instance.
+ * @nmtd: The driver version of the MTD instance.
+ *
+ * This routine is called after the chip probe has successfully completed
+ * and the relevant per-chip information updated. This call ensures that
+ * we update the internal state accordingly.
+ *
+ * The internal state is currently limited to the ECC state information.
+*/
+static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
+				     struct s3c2410_nand_mtd *nmtd)
+{
+	struct nand_chip *chip = &nmtd->chip;
+
+	dev_dbg(info->device, "chip %p => page shift %d\n",
+		chip, chip->page_shift);
+
+	if (chip->ecc.mode != NAND_ECC_HW)
+		return;
+
+	/* change the behaviour depending on whether we are using
+	 * the large or small page nand device */
+
+	if (chip->page_shift > 10) {
+		chip->ecc.size	    = 256;
+		chip->ecc.bytes	    = 3;
+	} else {
+		chip->ecc.size	    = 512;
+		chip->ecc.bytes	    = 3;
+		mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
+	}
+}
+
+/* s3c24xx_nand_probe
+ *
+ * called by device layer when it finds a device matching
+ * one our driver can handle. This code checks to see if
+ * it can allocate all necessary resources, then calls the
+ * nand layer to look for devices.
+*/
+static int s3c24xx_nand_probe(struct platform_device *pdev)
+{
+	struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
+	enum s3c_cpu_type cpu_type;
+	struct s3c2410_nand_info *info;
+	struct s3c2410_nand_mtd *nmtd;
+	struct s3c2410_nand_set *sets;
+	struct resource *res;
+	int err = 0;
+	int size;
+	int nr_sets;
+	int setno;
+
+	cpu_type = platform_get_device_id(pdev)->driver_data;
+
+	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+	if (info == NULL) {
+		err = -ENOMEM;
+		goto exit_error;
+	}
+
+	platform_set_drvdata(pdev, info);
+
+	nand_hw_control_init(&info->controller);
+
+	/* get the clock source and enable it */
+
+	info->clk = devm_clk_get(&pdev->dev, "nand");
+	if (IS_ERR(info->clk)) {
+		dev_err(&pdev->dev, "failed to get clock\n");
+		err = -ENOENT;
+		goto exit_error;
+	}
+
+	s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+
+	/* allocate and map the resource */
+
+	/* currently we assume we have the one resource */
+	res = pdev->resource;
+	size = resource_size(res);
+
+	info->device	= &pdev->dev;
+	info->platform	= plat;
+	info->cpu_type	= cpu_type;
+
+	info->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(info->regs)) {
+		err = PTR_ERR(info->regs);
+		goto exit_error;
+	}
+
+	dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
+
+	/* initialise the hardware */
+
+	err = s3c2410_nand_inithw(info);
+	if (err != 0)
+		goto exit_error;
+
+	sets = (plat != NULL) ? plat->sets : NULL;
+	nr_sets = (plat != NULL) ? plat->nr_sets : 1;
+
+	info->mtd_count = nr_sets;
+
+	/* allocate our information */
+
+	size = nr_sets * sizeof(*info->mtds);
+	info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (info->mtds == NULL) {
+		err = -ENOMEM;
+		goto exit_error;
+	}
+
+	/* initialise all possible chips */
+
+	nmtd = info->mtds;
+
+	for (setno = 0; setno < nr_sets; setno++, nmtd++) {
+		struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
+
+		pr_debug("initialising set %d (%p, info %p)\n",
+			 setno, nmtd, info);
+
+		mtd->dev.parent = &pdev->dev;
+		s3c2410_nand_init_chip(info, nmtd, sets);
+
+		nmtd->scan_res = nand_scan_ident(mtd,
+						 (sets) ? sets->nr_chips : 1,
+						 NULL);
+
+		if (nmtd->scan_res == 0) {
+			s3c2410_nand_update_chip(info, nmtd);
+			nand_scan_tail(mtd);
+			s3c2410_nand_add_partition(info, nmtd, sets);
+		}
+
+		if (sets != NULL)
+			sets++;
+	}
+
+	err = s3c2410_nand_cpufreq_register(info);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to init cpufreq support\n");
+		goto exit_error;
+	}
+
+	if (allow_clk_suspend(info)) {
+		dev_info(&pdev->dev, "clock idle support enabled\n");
+		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+	}
+
+	return 0;
+
+ exit_error:
+	s3c24xx_nand_remove(pdev);
+
+	if (err == 0)
+		err = -EINVAL;
+	return err;
+}
+
+/* PM Support */
+#ifdef CONFIG_PM
+
+static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
+{
+	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+
+	if (info) {
+		info->save_sel = readl(info->sel_reg);
+
+		/* For the moment, we must ensure nFCE is high during
+		 * the time we are suspended. This really should be
+		 * handled by suspending the MTDs we are using, but
+		 * that is currently not the case. */
+
+		writel(info->save_sel | info->sel_bit, info->sel_reg);
+
+		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
+	}
+
+	return 0;
+}
+
+static int s3c24xx_nand_resume(struct platform_device *dev)
+{
+	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
+	unsigned long sel;
+
+	if (info) {
+		s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
+		s3c2410_nand_inithw(info);
+
+		/* Restore the state of the nFCE line. */
+
+		sel = readl(info->sel_reg);
+		sel &= ~info->sel_bit;
+		sel |= info->save_sel & info->sel_bit;
+		writel(sel, info->sel_reg);
+
+		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
+	}
+
+	return 0;
+}
+
+#else
+#define s3c24xx_nand_suspend NULL
+#define s3c24xx_nand_resume NULL
+#endif
+
+/* driver device registration */
+
+static const struct platform_device_id s3c24xx_driver_ids[] = {
+	{
+		.name		= "s3c2410-nand",
+		.driver_data	= TYPE_S3C2410,
+	}, {
+		.name		= "s3c2440-nand",
+		.driver_data	= TYPE_S3C2440,
+	}, {
+		.name		= "s3c2412-nand",
+		.driver_data	= TYPE_S3C2412,
+	}, {
+		.name		= "s3c6400-nand",
+		.driver_data	= TYPE_S3C2412, /* compatible with 2412 */
+	},
+	{ }
+};
+
+MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
+
+static struct platform_driver s3c24xx_nand_driver = {
+	.probe		= s3c24xx_nand_probe,
+	.remove		= s3c24xx_nand_remove,
+	.suspend	= s3c24xx_nand_suspend,
+	.resume		= s3c24xx_nand_resume,
+	.id_table	= s3c24xx_driver_ids,
+	.driver		= {
+		.name	= "s3c24xx-nand",
+	},
+};
+
+module_platform_driver(s3c24xx_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/rawnand/sh_flctl.c b/drivers/mtd/nand/rawnand/sh_flctl.c
new file mode 100644
index 000000000000..492705fb23f2
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/sh_flctl.c
@@ -0,0 +1,1251 @@ 
+/*
+ * SuperH FLCTL nand controller
+ *
+ * Copyright (c) 2008 Renesas Solutions Corp.
+ * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
+ *
+ * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sh_flctl.h>
+
+static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 0;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->offset = 12;
+	oobregion->length = 4;
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
+	.ecc = flctl_4secc_ooblayout_sp_ecc,
+	.free = flctl_4secc_ooblayout_sp_free,
+};
+
+static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
+					struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = (section * 16) + 6;
+	oobregion->length = chip->ecc.bytes;
+
+	return 0;
+}
+
+static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
+					 struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (section >= chip->ecc.steps)
+		return -ERANGE;
+
+	oobregion->offset = section * 16;
+	oobregion->length = 6;
+
+	if (!section) {
+		oobregion->offset += 2;
+		oobregion->length -= 2;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
+	.ecc = flctl_4secc_ooblayout_lp_ecc,
+	.free = flctl_4secc_ooblayout_lp_free,
+};
+
+static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
+
+static struct nand_bbt_descr flctl_4secc_smallpage = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 11,
+	.len = 1,
+	.pattern = scan_ff_pattern,
+};
+
+static struct nand_bbt_descr flctl_4secc_largepage = {
+	.options = NAND_BBT_SCAN2NDPAGE,
+	.offs = 0,
+	.len = 2,
+	.pattern = scan_ff_pattern,
+};
+
+static void empty_fifo(struct sh_flctl *flctl)
+{
+	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
+	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+}
+
+static void start_translation(struct sh_flctl *flctl)
+{
+	writeb(TRSTRT, FLTRCR(flctl));
+}
+
+static void timeout_error(struct sh_flctl *flctl, const char *str)
+{
+	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
+}
+
+static void wait_completion(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+	while (timeout--) {
+		if (readb(FLTRCR(flctl)) & TREND) {
+			writeb(0x0, FLTRCR(flctl));
+			return;
+		}
+		udelay(1);
+	}
+
+	timeout_error(flctl, __func__);
+	writeb(0x0, FLTRCR(flctl));
+}
+
+static void flctl_dma_complete(void *param)
+{
+	struct sh_flctl *flctl = param;
+
+	complete(&flctl->dma_complete);
+}
+
+static void flctl_release_dma(struct sh_flctl *flctl)
+{
+	if (flctl->chan_fifo0_rx) {
+		dma_release_channel(flctl->chan_fifo0_rx);
+		flctl->chan_fifo0_rx = NULL;
+	}
+	if (flctl->chan_fifo0_tx) {
+		dma_release_channel(flctl->chan_fifo0_tx);
+		flctl->chan_fifo0_tx = NULL;
+	}
+}
+
+static void flctl_setup_dma(struct sh_flctl *flctl)
+{
+	dma_cap_mask_t mask;
+	struct dma_slave_config cfg;
+	struct platform_device *pdev = flctl->pdev;
+	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
+	int ret;
+
+	if (!pdata)
+		return;
+
+	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
+		return;
+
+	/* We can either use DMA for both Tx and Rx or not use it at all */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+
+	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
+				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
+	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
+		flctl->chan_fifo0_tx);
+
+	if (!flctl->chan_fifo0_tx)
+		return;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.direction = DMA_MEM_TO_DEV;
+	cfg.dst_addr = flctl->fifo;
+	cfg.src_addr = 0;
+	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
+	if (ret < 0)
+		goto err;
+
+	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
+				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
+	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
+		flctl->chan_fifo0_rx);
+
+	if (!flctl->chan_fifo0_rx)
+		goto err;
+
+	cfg.direction = DMA_DEV_TO_MEM;
+	cfg.dst_addr = 0;
+	cfg.src_addr = flctl->fifo;
+	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
+	if (ret < 0)
+		goto err;
+
+	init_completion(&flctl->dma_complete);
+
+	return;
+
+err:
+	flctl_release_dma(flctl);
+}
+
+static void set_addr(struct mtd_info *mtd, int column, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t addr = 0;
+
+	if (column == -1) {
+		addr = page_addr;	/* ERASE1 */
+	} else if (page_addr != -1) {
+		/* SEQIN, READ0, etc.. */
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column >>= 1;
+		if (flctl->page_size) {
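+			/*
+			 * Example packing (hypothetical page_addr 0x12345,
+			 * column 0x10): FLADR gets 0x23450010, and when
+			 * rw_ADRCNT is ADRCNT2_E the remaining high byte
+			 * 0x01 goes to FLADR2 below.
+			 */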
+			addr = column & 0x0FFF;
+			addr |= (page_addr & 0xff) << 16;
+			addr |= ((page_addr >> 8) & 0xff) << 24;
+			/* bigger than 128MB */
+			if (flctl->rw_ADRCNT == ADRCNT2_E) {
+				uint32_t addr2;
+				addr2 = (page_addr >> 16) & 0xff;
+				writel(addr2, FLADR2(flctl));
+			}
+		} else {
+			addr = column;
+			addr |= (page_addr & 0xff) << 8;
+			addr |= ((page_addr >> 8) & 0xff) << 16;
+			addr |= ((page_addr >> 16) & 0xff) << 24;
+		}
+	}
+	writel(addr, FLADR(flctl));
+}
+
+static void wait_rfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+
+	while (timeout--) {
+		uint32_t val;
+		/* check FIFO */
+		val = readl(FLDTCNTR(flctl)) >> 16;
+		if (val & 0xFF)
+			return;
+		udelay(1);
+	}
+	timeout_error(flctl, __func__);
+}
+
+static void wait_wfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t len, timeout = LOOP_TIMEOUT_MAX;
+
+	while (timeout--) {
+		/* check FIFO */
+		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
+		if (len >= 4)
+			return;
+		udelay(1);
+	}
+	timeout_error(flctl, __func__);
+}
+
+static enum flctl_ecc_res_t wait_recfifo_ready
+		(struct sh_flctl *flctl, int sector_number)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+	void __iomem *ecc_reg[4];
+	int i;
+	int state = FL_SUCCESS;
+	uint32_t data, size;
+
+	/*
+	 * First this loop checks in FLDTCNTR if we are ready to read out the
+	 * oob data. This is the case if either all went fine without errors or
+	 * if the bottom part of the loop corrected the errors or marked them as
+	 * uncorrectable and the controller is given time to push the data into
+	 * the FIFO.
+	 */
+	while (timeout--) {
+		/* check if all is ok and we can read out the OOB */
+		size = readl(FLDTCNTR(flctl)) >> 24;
+		if ((size & 0xFF) == 4)
+			return state;
+
+		/* check if a correction code has been calculated */
+		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
+			/*
+			 * either we wait for the fifo to be filled or a
+			 * correction pattern is being generated
+			 */
+			udelay(1);
+			continue;
+		}
+
+		/* check for an uncorrectable error */
+		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
+			/* check if we face a non-empty page */
+			for (i = 0; i < 512; i++) {
+				if (flctl->done_buff[i] != 0xff) {
+					state = FL_ERROR; /* can't correct */
+					break;
+				}
+			}
+
+			if (state == FL_SUCCESS)
+				dev_dbg(&flctl->pdev->dev,
+				"reading empty sector %d, ecc error ignored\n",
+				sector_number);
+
+			writel(0, FL4ECCCR(flctl));
+			continue;
+		}
+
+		/* start error correction */
+		ecc_reg[0] = FL4ECCRESULT0(flctl);
+		ecc_reg[1] = FL4ECCRESULT1(flctl);
+		ecc_reg[2] = FL4ECCRESULT2(flctl);
+		ecc_reg[3] = FL4ECCRESULT3(flctl);
+
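+		/*
+		 * Each FL4ECCRESULTx word packs an offset and an XOR mask:
+		 * e.g. (illustrative) a value of 0x00AB0004 flips bit 2 of
+		 * byte 0xAB within the current 512 byte sector.
+		 */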
+		for (i = 0; i < 3; i++) {
+			uint8_t org;
+			unsigned int index;
+
+			data = readl(ecc_reg[i]);
+
+			if (flctl->page_size)
+				index = (512 * sector_number) +
+					(data >> 16);
+			else
+				index = data >> 16;
+
+			org = flctl->done_buff[index];
+			flctl->done_buff[index] = org ^ (data & 0xFF);
+		}
+		state = FL_REPAIRABLE;
+		writel(0, FL4ECCCR(flctl));
+	}
+
+	timeout_error(flctl, __func__);
+	return FL_TIMEOUT;	/* timeout */
+}
+
+static void wait_wecfifo_ready(struct sh_flctl *flctl)
+{
+	uint32_t timeout = LOOP_TIMEOUT_MAX;
+	uint32_t len;
+
+	while (timeout--) {
+		/* check FLECFIFO */
+		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
+		if (len >= 4)
+			return;
+		udelay(1);
+	}
+	timeout_error(flctl, __func__);
+}
+
+static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
+					int len, enum dma_data_direction dir)
+{
+	struct dma_async_tx_descriptor *desc = NULL;
+	struct dma_chan *chan;
+	enum dma_transfer_direction tr_dir;
+	dma_addr_t dma_addr;
+	dma_cookie_t cookie;
+	uint32_t reg;
+	int ret;
+
+	if (dir == DMA_FROM_DEVICE) {
+		chan = flctl->chan_fifo0_rx;
+		tr_dir = DMA_DEV_TO_MEM;
+	} else {
+		chan = flctl->chan_fifo0_tx;
+		tr_dir = DMA_MEM_TO_DEV;
+	}
+
+	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
+
+	if (dma_addr)
+		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
+			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+	if (desc) {
+		reg = readl(FLINTDMACR(flctl));
+		reg |= DREQ0EN;
+		writel(reg, FLINTDMACR(flctl));
+
+		desc->callback = flctl_dma_complete;
+		desc->callback_param = flctl;
+		cookie = dmaengine_submit(desc);
+		if (dma_submit_error(cookie)) {
+			ret = dma_submit_error(cookie);
+			dev_warn(&flctl->pdev->dev,
+				 "DMA submit failed, falling back to PIO\n");
+			goto out;
+		}
+
+		dma_async_issue_pending(chan);
+	} else {
+		/* DMA failed, fall back to PIO */
+		flctl_release_dma(flctl);
+		dev_warn(&flctl->pdev->dev,
+			 "DMA failed, falling back to PIO\n");
+		ret = -EIO;
+		goto out;
+	}
+
+	ret = wait_for_completion_timeout(&flctl->dma_complete,
+					  msecs_to_jiffies(3000));
+
+	if (ret <= 0) {
+		dmaengine_terminate_all(chan);
+		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
+	}
+
+out:
+	reg = readl(FLINTDMACR(flctl));
+	reg &= ~DREQ0EN;
+	writel(reg, FLINTDMACR(flctl));
+
+	dma_unmap_single(chan->device->dev, dma_addr, len, dir);
+
+	/* ret > 0 is success */
+	return ret;
+}
+
+static void read_datareg(struct sh_flctl *flctl, int offset)
+{
+	unsigned long data;
+	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+	wait_completion(flctl);
+
+	data = readl(FLDATAR(flctl));
+	*buf = le32_to_cpu(data);
+}
+
+static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
+{
+	int i, len_4align;
+	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+	len_4align = (rlen + 3) / 4;
+
+	/* initiate DMA transfer */
+	if (flctl->chan_fifo0_rx && rlen >= 32 &&
+		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
+			goto convert;	/* DMA success */
+
+	/* do polling transfer */
+	for (i = 0; i < len_4align; i++) {
+		wait_rfifo_ready(flctl);
+		buf[i] = readl(FLDTFIFO(flctl));
+	}
+
+convert:
+	for (i = 0; i < len_4align; i++)
+		buf[i] = be32_to_cpu(buf[i]);
+}
+
+static enum flctl_ecc_res_t read_ecfiforeg
+		(struct sh_flctl *flctl, uint8_t *buff, int sector)
+{
+	int i;
+	enum flctl_ecc_res_t res;
+	unsigned long *ecc_buf = (unsigned long *)buff;
+
+	res = wait_recfifo_ready(flctl, sector);
+
+	if (res != FL_ERROR) {
+		for (i = 0; i < 4; i++) {
+			ecc_buf[i] = readl(FLECFIFO(flctl));
+			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
+		}
+	}
+
+	return res;
+}
+
+static void write_fiforeg(struct sh_flctl *flctl, int rlen,
+						unsigned int offset)
+{
+	int i, len_4align;
+	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+	len_4align = (rlen + 3) / 4;
+	for (i = 0; i < len_4align; i++) {
+		wait_wfifo_ready(flctl);
+		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
+	}
+}
+
+static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
+						unsigned int offset)
+{
+	int i, len_4align;
+	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
+
+	len_4align = (rlen + 3) / 4;
+
+	for (i = 0; i < len_4align; i++)
+		buf[i] = cpu_to_be32(buf[i]);
+
+	/* initiate DMA transfer */
+	if (flctl->chan_fifo0_tx && rlen >= 32 &&
+		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
+			return;	/* DMA success */
+
+	/* do polling transfer */
+	for (i = 0; i < len_4align; i++) {
+		wait_wecfifo_ready(flctl);
+		writel(buf[i], FLECFIFO(flctl));
+	}
+}
+
+static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
+	uint32_t flcmdcr_val, addr_len_bytes = 0;
+
+	/* Set SNAND bit if page size is 2048byte */
+	if (flctl->page_size)
+		flcmncr_val |= SNAND_E;
+	else
+		flcmncr_val &= ~SNAND_E;
+
+	/* default FLCMDCR val */
+	flcmdcr_val = DOCMD1_E | DOADR_E;
+
+	/* Set for FLCMDCR */
+	switch (cmd) {
+	case NAND_CMD_ERASE1:
+		addr_len_bytes = flctl->erase_ADRCNT;
+		flcmdcr_val |= DOCMD2_E;
+		break;
+	case NAND_CMD_READ0:
+	case NAND_CMD_READOOB:
+	case NAND_CMD_RNDOUT:
+		addr_len_bytes = flctl->rw_ADRCNT;
+		flcmdcr_val |= CDSRC_E;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			flcmncr_val |= SEL_16BIT;
+		break;
+	case NAND_CMD_SEQIN:
+		/* FLCMCDR holds the pointer cmd (READ0/READ1/READOOB) for SEQIN */
+		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
+		break;
+	case NAND_CMD_PAGEPROG:
+		addr_len_bytes = flctl->rw_ADRCNT;
+		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			flcmncr_val |= SEL_16BIT;
+		break;
+	case NAND_CMD_READID:
+		flcmncr_val &= ~SNAND_E;
+		flcmdcr_val |= CDSRC_E;
+		addr_len_bytes = ADRCNT_1;
+		break;
+	case NAND_CMD_STATUS:
+	case NAND_CMD_RESET:
+		flcmncr_val &= ~SNAND_E;
+		flcmdcr_val &= ~(DOADR_E | DOSR_E);
+		break;
+	default:
+		break;
+	}
+
+	/* Set address bytes parameter */
+	flcmdcr_val |= addr_len_bytes;
+
+	/* Now actually write */
+	writel(flcmncr_val, FLCMNCR(flctl));
+	writel(flcmdcr_val, FLCMDCR(flctl));
+	writel(flcmcdr_val, FLCMCDR(flctl));
+}
+
+static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	chip->read_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
+				  const uint8_t *buf, int oob_required,
+				  int page)
+{
+	chip->write_buf(mtd, buf, mtd->writesize);
+	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
+	return 0;
+}
+
+static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int sector, page_sectors;
+	enum flctl_ecc_res_t ecc_result;
+
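+	/* a large (2048-byte) page is transferred as four 512-byte ECC sectors */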
+	page_sectors = flctl->page_size ? 4 : 1;
+
+	set_cmd_regs(mtd, NAND_CMD_READ0,
+		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
+		 FLCMNCR(flctl));
+	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+	writel(page_addr << 2, FLADR(flctl));
+
+	empty_fifo(flctl);
+	start_translation(flctl);
+
+	for (sector = 0; sector < page_sectors; sector++) {
+		read_fiforeg(flctl, 512, 512 * sector);
+
+		ecc_result = read_ecfiforeg(flctl,
+			&flctl->done_buff[mtd->writesize + 16 * sector],
+			sector);
+
+		switch (ecc_result) {
+		case FL_REPAIRABLE:
+			dev_info(&flctl->pdev->dev,
+				"applied ecc on page 0x%x", page_addr);
+			mtd->ecc_stats.corrected++;
+			break;
+		case FL_ERROR:
+			dev_warn(&flctl->pdev->dev,
+				"page 0x%x contains corrupted data\n",
+				page_addr);
+			mtd->ecc_stats.failed++;
+			break;
+		default:
+			;
+		}
+	}
+
+	wait_completion(flctl);
+
+	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
+			FLCMNCR(flctl));
+}
+
+static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int page_sectors = flctl->page_size ? 4 : 1;
+	int i;
+
+	set_cmd_regs(mtd, NAND_CMD_READ0,
+		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
+
+	empty_fifo(flctl);
+
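+	/* each 528-byte sector holds 512 data bytes followed by 16 OOB bytes */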
+	for (i = 0; i < page_sectors; i++) {
+		set_addr(mtd, (512 + 16) * i + 512 , page_addr);
+		writel(16, FLDTCNTR(flctl));
+
+		start_translation(flctl);
+		read_fiforeg(flctl, 16, 16 * i);
+		wait_completion(flctl);
+	}
+}
+
+static void execmd_write_page_sector(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int page_addr = flctl->seqin_page_addr;
+	int sector, page_sectors;
+
+	page_sectors = flctl->page_size ? 4 : 1;
+
+	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+	empty_fifo(flctl);
+	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
+	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
+	writel(page_addr << 2, FLADR(flctl));
+	start_translation(flctl);
+
+	for (sector = 0; sector < page_sectors; sector++) {
+		write_fiforeg(flctl, 512, 512 * sector);
+		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
+	}
+
+	wait_completion(flctl);
+	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
+}
+
+static void execmd_write_oob(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int page_addr = flctl->seqin_page_addr;
+	int sector, page_sectors;
+
+	page_sectors = flctl->page_size ? 4 : 1;
+
+	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
+			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
+
+	for (sector = 0; sector < page_sectors; sector++) {
+		empty_fifo(flctl);
+		set_addr(mtd, sector * 528 + 512, page_addr);
+		writel(16, FLDTCNTR(flctl));	/* set read size */
+
+		start_translation(flctl);
+		write_fiforeg(flctl, 16, 16 * sector);
+		wait_completion(flctl);
+	}
+}
+
+static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
+			int column, int page_addr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint32_t read_cmd = 0;
+
+	pm_runtime_get_sync(&flctl->pdev->dev);
+
+	flctl->read_bytes = 0;
+	if (command != NAND_CMD_PAGEPROG)
+		flctl->index = 0;
+
+	switch (command) {
+	case NAND_CMD_READ1:
+	case NAND_CMD_READ0:
+		if (flctl->hwecc) {
+			/* read page with hwecc */
+			execmd_read_page_sector(mtd, page_addr);
+			break;
+		}
+		if (flctl->page_size)
+			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+				| command);
+		else
+			set_cmd_regs(mtd, command, command);
+
+		set_addr(mtd, 0, page_addr);
+
+		flctl->read_bytes = mtd->writesize + mtd->oobsize;
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column >>= 1;
+		flctl->index += column;
+		goto read_normal_exit;
+
+	case NAND_CMD_READOOB:
+		if (flctl->hwecc) {
+			/* read page with hwecc */
+			execmd_read_oob(mtd, page_addr);
+			break;
+		}
+
+		if (flctl->page_size) {
+			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
+				| NAND_CMD_READ0);
+			set_addr(mtd, mtd->writesize, page_addr);
+		} else {
+			set_cmd_regs(mtd, command, command);
+			set_addr(mtd, 0, page_addr);
+		}
+		flctl->read_bytes = mtd->oobsize;
+		goto read_normal_exit;
+
+	case NAND_CMD_RNDOUT:
+		if (flctl->hwecc)
+			break;
+
+		if (flctl->page_size)
+			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
+				| command);
+		else
+			set_cmd_regs(mtd, command, command);
+
+		set_addr(mtd, column, 0);
+
+		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
+		goto read_normal_exit;
+
+	case NAND_CMD_READID:
+		set_cmd_regs(mtd, command, command);
+
+		/* READID is always performed using an 8-bit bus */
+		if (flctl->chip.options & NAND_BUSWIDTH_16)
+			column <<= 1;
+		set_addr(mtd, column, 0);
+
+		flctl->read_bytes = 8;
+		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+		empty_fifo(flctl);
+		start_translation(flctl);
+		read_fiforeg(flctl, flctl->read_bytes, 0);
+		wait_completion(flctl);
+		break;
+
+	case NAND_CMD_ERASE1:
+		flctl->erase1_page_addr = page_addr;
+		break;
+
+	case NAND_CMD_ERASE2:
+		set_cmd_regs(mtd, NAND_CMD_ERASE1,
+			(command << 8) | NAND_CMD_ERASE1);
+		set_addr(mtd, -1, flctl->erase1_page_addr);
+		start_translation(flctl);
+		wait_completion(flctl);
+		break;
+
+	case NAND_CMD_SEQIN:
+		if (!flctl->page_size) {
+			/* output read command */
+			if (column >= mtd->writesize) {
+				column -= mtd->writesize;
+				read_cmd = NAND_CMD_READOOB;
+			} else if (column < 256) {
+				read_cmd = NAND_CMD_READ0;
+			} else {
+				column -= 256;
+				read_cmd = NAND_CMD_READ1;
+			}
+		}
+		flctl->seqin_column = column;
+		flctl->seqin_page_addr = page_addr;
+		flctl->seqin_read_cmd = read_cmd;
+		break;
+
+	case NAND_CMD_PAGEPROG:
+		empty_fifo(flctl);
+		if (!flctl->page_size) {
+			set_cmd_regs(mtd, NAND_CMD_SEQIN,
+					flctl->seqin_read_cmd);
+			set_addr(mtd, -1, -1);
+			writel(0, FLDTCNTR(flctl));	/* set 0 size */
+			start_translation(flctl);
+			wait_completion(flctl);
+		}
+		if (flctl->hwecc) {
+			/* write page with hwecc */
+			if (flctl->seqin_column == mtd->writesize)
+				execmd_write_oob(mtd);
+			else if (!flctl->seqin_column)
+				execmd_write_page_sector(mtd);
+			else
+				printk(KERN_ERR "Invalid address !?\n");
+			break;
+		}
+		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
+		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
+		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
+		start_translation(flctl);
+		write_fiforeg(flctl, flctl->index, 0);
+		wait_completion(flctl);
+		break;
+
+	case NAND_CMD_STATUS:
+		set_cmd_regs(mtd, command, command);
+		set_addr(mtd, -1, -1);
+
+		flctl->read_bytes = 1;
+		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
+		start_translation(flctl);
+		read_datareg(flctl, 0); /* read and end */
+		break;
+
+	case NAND_CMD_RESET:
+		set_cmd_regs(mtd, command, command);
+		set_addr(mtd, -1, -1);
+
+		writel(0, FLDTCNTR(flctl));	/* set 0 size */
+		start_translation(flctl);
+		wait_completion(flctl);
+		break;
+
+	default:
+		break;
+	}
+	goto runtime_exit;
+
+read_normal_exit:
+	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
+	empty_fifo(flctl);
+	start_translation(flctl);
+	read_fiforeg(flctl, flctl->read_bytes, 0);
+	wait_completion(flctl);
+runtime_exit:
+	pm_runtime_put_sync(&flctl->pdev->dev);
+	return;
+}
+
+static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	int ret;
+
+	switch (chipnr) {
+	case -1:
+		flctl->flcmncr_base &= ~CE0_ENABLE;
+
+		pm_runtime_get_sync(&flctl->pdev->dev);
+		writel(flctl->flcmncr_base, FLCMNCR(flctl));
+
+		if (flctl->qos_request) {
+			dev_pm_qos_remove_request(&flctl->pm_qos);
+			flctl->qos_request = 0;
+		}
+
+		pm_runtime_put_sync(&flctl->pdev->dev);
+		break;
+	case 0:
+		flctl->flcmncr_base |= CE0_ENABLE;
+
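+		/* bound PM resume latency (us) while this chip is selected */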
+		if (!flctl->qos_request) {
+			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
+							&flctl->pm_qos,
+							DEV_PM_QOS_RESUME_LATENCY,
+							100);
+			if (ret < 0)
+				dev_err(&flctl->pdev->dev,
+					"PM QoS request failed: %d\n", ret);
+			flctl->qos_request = 1;
+		}
+
+		if (flctl->holden) {
+			pm_runtime_get_sync(&flctl->pdev->dev);
+			writel(HOLDEN, FLHOLDCR(flctl));
+			pm_runtime_put_sync(&flctl->pdev->dev);
+		}
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+	memcpy(&flctl->done_buff[flctl->index], buf, len);
+	flctl->index += len;
+}
+
+static uint8_t flctl_read_byte(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint8_t data;
+
+	data = flctl->done_buff[flctl->index];
+	flctl->index++;
+	return data;
+}
+
+static uint16_t flctl_read_word(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
+
+	flctl->index += 2;
+	return *buf;
+}
+
+static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+
+	memcpy(buf, &flctl->done_buff[flctl->index], len);
+	flctl->index += len;
+}
+
+static int flctl_chip_init_tail(struct mtd_info *mtd)
+{
+	struct sh_flctl *flctl = mtd_to_flctl(mtd);
+	struct nand_chip *chip = &flctl->chip;
+
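+	/* derive the FLCTL address cycle counts from page size and chip size */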
+	if (mtd->writesize == 512) {
+		flctl->page_size = 0;
+		if (chip->chipsize > (32 << 20)) {
+			/* larger than 32MB */
+			flctl->rw_ADRCNT = ADRCNT_4;
+			flctl->erase_ADRCNT = ADRCNT_3;
+		} else if (chip->chipsize > (2 << 16)) {
+			/* larger than 128KB */
+			flctl->rw_ADRCNT = ADRCNT_3;
+			flctl->erase_ADRCNT = ADRCNT_2;
+		} else {
+			flctl->rw_ADRCNT = ADRCNT_2;
+			flctl->erase_ADRCNT = ADRCNT_1;
+		}
+	} else {
+		flctl->page_size = 1;
+		if (chip->chipsize > (128 << 20)) {
+			/* larger than 128MB */
+			flctl->rw_ADRCNT = ADRCNT2_E;
+			flctl->erase_ADRCNT = ADRCNT_3;
+		} else if (chip->chipsize > (8 << 16)) {
+			/* larger than 512KB */
+			flctl->rw_ADRCNT = ADRCNT_4;
+			flctl->erase_ADRCNT = ADRCNT_2;
+		} else {
+			flctl->rw_ADRCNT = ADRCNT_3;
+			flctl->erase_ADRCNT = ADRCNT_1;
+		}
+	}
+
+	if (flctl->hwecc) {
+		if (mtd->writesize == 512) {
+			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
+			chip->badblock_pattern = &flctl_4secc_smallpage;
+		} else {
+			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
+			chip->badblock_pattern = &flctl_4secc_largepage;
+		}
+
+		chip->ecc.size = 512;
+		chip->ecc.bytes = 10;
+		chip->ecc.strength = 4;
+		chip->ecc.read_page = flctl_read_page_hwecc;
+		chip->ecc.write_page = flctl_write_page_hwecc;
+		chip->ecc.mode = NAND_ECC_HW;
+
+		/* 4 symbols ECC enabled */
+		flctl->flcmncr_base |= _4ECCEN;
+	} else {
+		chip->ecc.mode = NAND_ECC_SOFT;
+		chip->ecc.algo = NAND_ECC_HAMMING;
+	}
+
+	return 0;
+}
+
+static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
+{
+	struct sh_flctl *flctl = dev_id;
+
+	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
+	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
+
+	return IRQ_HANDLED;
+}
+
+struct flctl_soc_config {
+	unsigned long flcmncr_val;
+	unsigned has_hwecc:1;
+	unsigned use_holden:1;
+};
+
+static struct flctl_soc_config flctl_sh7372_config = {
+	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
+	.has_hwecc = 1,
+	.use_holden = 1,
+};
+
+static const struct of_device_id of_flctl_match[] = {
+	{ .compatible = "renesas,shmobile-flctl-sh7372",
+				.data = &flctl_sh7372_config },
+	{},
+};
+MODULE_DEVICE_TABLE(of, of_flctl_match);
+
+static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
+{
+	const struct of_device_id *match;
+	struct flctl_soc_config *config;
+	struct sh_flctl_platform_data *pdata;
+
+	match = of_match_device(of_flctl_match, dev);
+	if (match)
+		config = (struct flctl_soc_config *)match->data;
+	else {
+		dev_err(dev, "%s: no OF configuration attached\n", __func__);
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
+								GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	/* set SoC specific options */
+	pdata->flcmncr_val = config->flcmncr_val;
+	pdata->has_hwecc = config->has_hwecc;
+	pdata->use_holden = config->use_holden;
+
+	return pdata;
+}
+
+static int flctl_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct sh_flctl *flctl;
+	struct mtd_info *flctl_mtd;
+	struct nand_chip *nand;
+	struct sh_flctl_platform_data *pdata;
+	int ret;
+	int irq;
+
+	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
+	if (!flctl)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(flctl->reg))
+		return PTR_ERR(flctl->reg);
+	flctl->fifo = res->start + 0x24; /* FLDTFIFO */
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(&pdev->dev, "failed to get flste irq data\n");
+		return -ENXIO;
+	}
+
+	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
+			       "flste", flctl);
+	if (ret) {
+		dev_err(&pdev->dev, "request interrupt failed.\n");
+		return ret;
+	}
+
+	if (pdev->dev.of_node)
+		pdata = flctl_parse_dt(&pdev->dev);
+	else
+		pdata = dev_get_platdata(&pdev->dev);
+
+	if (!pdata) {
+		dev_err(&pdev->dev, "no setup data defined\n");
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, flctl);
+	nand = &flctl->chip;
+	flctl_mtd = nand_to_mtd(nand);
+	nand_set_flash_node(nand, pdev->dev.of_node);
+	flctl_mtd->dev.parent = &pdev->dev;
+	flctl->pdev = pdev;
+	flctl->hwecc = pdata->has_hwecc;
+	flctl->holden = pdata->use_holden;
+	flctl->flcmncr_base = pdata->flcmncr_val;
+	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
+
+	/* Set address of hardware control function */
+	/* 20 us command delay time */
+	nand->chip_delay = 20;
+
+	nand->read_byte = flctl_read_byte;
+	nand->read_word = flctl_read_word;
+	nand->write_buf = flctl_write_buf;
+	nand->read_buf = flctl_read_buf;
+	nand->select_chip = flctl_select_chip;
+	nand->cmdfunc = flctl_cmdfunc;
+
+	if (pdata->flcmncr_val & SEL_16BIT)
+		nand->options |= NAND_BUSWIDTH_16;
+
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_resume(&pdev->dev);
+
+	flctl_setup_dma(flctl);
+
+	ret = nand_scan_ident(flctl_mtd, 1, NULL);
+	if (ret)
+		goto err_chip;
+
+	if (nand->options & NAND_BUSWIDTH_16) {
+		/*
+		 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
+		 * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
+		 * flctl->flcmncr_base to pdata->flcmncr_val.
+		 */
+		pdata->flcmncr_val |= SEL_16BIT;
+		flctl->flcmncr_base = pdata->flcmncr_val;
+	}
+
+	ret = flctl_chip_init_tail(flctl_mtd);
+	if (ret)
+		goto err_chip;
+
+	ret = nand_scan_tail(flctl_mtd);
+	if (ret)
+		goto err_chip;
+
+	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
+	if (ret)
+		goto err_chip;
+
+	return 0;
+
+err_chip:
+	flctl_release_dma(flctl);
+	pm_runtime_disable(&pdev->dev);
+	return ret;
+}
+
+static int flctl_remove(struct platform_device *pdev)
+{
+	struct sh_flctl *flctl = platform_get_drvdata(pdev);
+
+	flctl_release_dma(flctl);
+	nand_release(nand_to_mtd(&flctl->chip));
+	pm_runtime_disable(&pdev->dev);
+
+	return 0;
+}
+
+static struct platform_driver flctl_driver = {
+	.remove		= flctl_remove,
+	.driver = {
+		.name	= "sh_flctl",
+		.of_match_table = of_match_ptr(of_flctl_match),
+	},
+};
+
+module_platform_driver_probe(flctl_driver, flctl_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_DESCRIPTION("SuperH FLCTL driver");
+MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/rawnand/sharpsl.c b/drivers/mtd/nand/rawnand/sharpsl.c
new file mode 100644
index 000000000000..737efe83cd36
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/sharpsl.c
@@ -0,0 +1,235 @@ 
+/*
+ * drivers/mtd/nand/rawnand/sharpsl.c
+ *
+ *  Copyright (C) 2004 Richard Purdie
+ *  Copyright (C) 2008 Dmitry Baryshkov
+ *
+ *  Based on Sharp's NAND driver sharp_sl.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/genhd.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/mtd/sharpsl.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+
+#include <asm/io.h>
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
+struct sharpsl_nand {
+	struct nand_chip	chip;
+
+	void __iomem		*io;
+};
+
+static inline struct sharpsl_nand *mtd_to_sharpsl(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct sharpsl_nand, chip);
+}
+
+/* register offset */
+#define ECCLPLB		0x00	/* line parity 7 - 0 bit */
+#define ECCLPUB		0x04	/* line parity 15 - 8 bit */
+#define ECCCP		0x08	/* column parity 5 - 0 bit */
+#define ECCCNTR		0x0C	/* ECC byte counter */
+#define ECCCLRR		0x10	/* clear ECC */
+#define FLASHIO		0x14	/* Flash I/O */
+#define FLASHCTL	0x18	/* Flash Control */
+
+/* Flash control bit */
+#define FLRYBY		(1 << 5)
+#define FLCE1		(1 << 4)
+#define FLWP		(1 << 3)
+#define FLALE		(1 << 2)
+#define FLCLE		(1 << 1)
+#define FLCE0		(1 << 0)
+
+/*
+ *	hardware specific access to control-lines
+ *	ctrl:
+ *	NAND_NCE: bit 0 -> ! bit 0 & 4
+ *	NAND_CLE: bit 1 -> bit 1
+ *	NAND_ALE: bit 2 -> bit 2
+ *
+ */
+static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+				   unsigned int ctrl)
+{
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		unsigned char bits = ctrl & 0x07;
+
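+		/* NCE drives FLCE0 and FLCE1; XOR inverts the active-low CE bits */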
+		bits |= (ctrl & 0x01) << 4;
+
+		bits ^= 0x11;
+
+		writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
+	}
+
+	if (cmd != NAND_CMD_NONE)
+		writeb(cmd, chip->IO_ADDR_W);
+}
+
+static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
+{
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	return !!(readb(sharpsl->io + FLASHCTL) & FLRYBY);
+}
+
+static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	writeb(0, sharpsl->io + ECCCLRR);
+}
+
+static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
+{
+	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
+	ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
+	ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
+	ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
+	return readb(sharpsl->io + ECCCNTR) != 0;
+}
+
+/*
+ * Main initialization routine
+ */
+static int sharpsl_nand_probe(struct platform_device *pdev)
+{
+	struct nand_chip *this;
+	struct mtd_info *mtd;
+	struct resource *r;
+	int err = 0;
+	struct sharpsl_nand *sharpsl;
+	struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
+
+	if (!data) {
+		dev_err(&pdev->dev, "no platform data!\n");
+		return -EINVAL;
+	}
+
+	/* Allocate memory for MTD device structure and private data */
+	sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
+	if (!sharpsl)
+		return -ENOMEM;
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		dev_err(&pdev->dev, "no io memory resource defined!\n");
+		err = -ENODEV;
+		goto err_get_res;
+	}
+
+	/* map physical address */
+	sharpsl->io = ioremap(r->start, resource_size(r));
+	if (!sharpsl->io) {
+		dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	/* Get pointer to private data */
+	this = (struct nand_chip *)(&sharpsl->chip);
+
+	/* Link the private data with the MTD structure */
+	mtd = nand_to_mtd(this);
+	mtd->dev.parent = &pdev->dev;
+	mtd_set_ooblayout(mtd, data->ecc_layout);
+
+	platform_set_drvdata(pdev, sharpsl);
+
+	/*
+	 * PXA initialize
+	 */
+	writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
+
+	/* Set address of NAND IO lines */
+	this->IO_ADDR_R = sharpsl->io + FLASHIO;
+	this->IO_ADDR_W = sharpsl->io + FLASHIO;
+	/* Set address of hardware control function */
+	this->cmd_ctrl = sharpsl_nand_hwcontrol;
+	this->dev_ready = sharpsl_nand_dev_ready;
+	/* 15 us command delay time */
+	this->chip_delay = 15;
+	/* set eccmode using hardware ECC */
+	this->ecc.mode = NAND_ECC_HW;
+	this->ecc.size = 256;
+	this->ecc.bytes = 3;
+	this->ecc.strength = 1;
+	this->badblock_pattern = data->badblock_pattern;
+	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
+	this->ecc.calculate = sharpsl_nand_calculate_ecc;
+	this->ecc.correct = nand_correct_data;
+
+	/* Scan to find existence of the device */
+	err = nand_scan(mtd, 1);
+	if (err)
+		goto err_scan;
+
+	/* Register the partitions */
+	mtd->name = "sharpsl-nand";
+
+	err = mtd_device_parse_register(mtd, NULL, NULL,
+					data->partitions, data->nr_partitions);
+	if (err)
+		goto err_add;
+
+	/* Return happy */
+	return 0;
+
+err_add:
+	nand_release(mtd);
+
+err_scan:
+	iounmap(sharpsl->io);
+err_ioremap:
+err_get_res:
+	kfree(sharpsl);
+	return err;
+}
+
+/*
+ * Clean up routine
+ */
+static int sharpsl_nand_remove(struct platform_device *pdev)
+{
+	struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
+
+	/* Release resources, unregister device */
+	nand_release(nand_to_mtd(&sharpsl->chip));
+
+	iounmap(sharpsl->io);
+
+	/* Free the MTD device structure */
+	kfree(sharpsl);
+
+	return 0;
+}
+
+static struct platform_driver sharpsl_nand_driver = {
+	.driver = {
+		.name	= "sharpsl-nand",
+	},
+	.probe		= sharpsl_nand_probe,
+	.remove		= sharpsl_nand_remove,
+};
+
+module_platform_driver(sharpsl_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
+MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/drivers/mtd/nand/rawnand/sm_common.c b/drivers/mtd/nand/rawnand/sm_common.c
new file mode 100644
index 000000000000..c378705c6e2b
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/sm_common.c
@@ -0,0 +1,202 @@ 
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/module.h>
+#include <linux/sizes.h>
+#include "sm_common.h"
+
+static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
+				struct mtd_oob_region *oobregion)
+{
+	if (section > 1)
+		return -ERANGE;
+
+	oobregion->length = 3;
+	oobregion->offset = ((section + 1) * 8) - 3;
+
+	return 0;
+}
+
+static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
+				 struct mtd_oob_region *oobregion)
+{
+	switch (section) {
+	case 0:
+		/* reserved */
+		oobregion->offset = 0;
+		oobregion->length = 4;
+		break;
+	case 1:
+		/* LBA1 */
+		oobregion->offset = 6;
+		oobregion->length = 2;
+		break;
+	case 2:
+		/* LBA2 */
+		oobregion->offset = 11;
+		oobregion->length = 2;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_ops = {
+	.ecc = oob_sm_ooblayout_ecc,
+	.free = oob_sm_ooblayout_free,
+};
+
+/*
+ * NOTE: This layout is not compatible with SmartMedia, because the 256 byte
+ * devices have a page dependent oob layout. However it does preserve the bad
+ * block markers. If you use smftl, it will bypass this and work correctly;
+ * if you do not, then you break SmartMedia compliance anyway.
+ */
+
+static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
+				      struct mtd_oob_region *oobregion)
+{
+	if (section)
+		return -ERANGE;
+
+	oobregion->length = 3;
+	oobregion->offset = 0;
+
+	return 0;
+}
+
+static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
+				       struct mtd_oob_region *oobregion)
+{
+	switch (section) {
+	case 0:
+		/* reserved */
+		oobregion->offset = 3;
+		oobregion->length = 2;
+		break;
+	case 1:
+		/* LBA1 */
+		oobregion->offset = 6;
+		oobregion->length = 2;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	return 0;
+}
+
+static const struct mtd_ooblayout_ops oob_sm_small_ops = {
+	.ecc = oob_sm_small_ooblayout_ecc,
+	.free = oob_sm_small_ooblayout_free,
+};
+
+static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+	struct mtd_oob_ops ops;
+	struct sm_oob oob;
+	int ret;
+
+	memset(&oob, -1, SM_OOB_SIZE);
+	oob.block_status = 0x0F;
+
+	/*
+	 * As long as this function is called on erase block boundaries
+	 * it will work correctly for 256 byte nand.
+	 */
+	ops.mode = MTD_OPS_PLACE_OOB;
+	ops.ooboffs = 0;
+	ops.ooblen = mtd->oobsize;
+	ops.oobbuf = (void *)&oob;
+	ops.datbuf = NULL;
+
+
+	ret = mtd_write_oob(mtd, ofs, &ops);
+	if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
+		printk(KERN_NOTICE
+			"sm_common: can't mark sector at %i as bad\n",
+								(int)ofs);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
+	LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM",   0x5d, 2,   SZ_8K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 4MiB 3,3V",       0xe3, 4,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V",     0xe5, 4,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 4MiB 5V",         0x6b, 4,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM",   0xd5, 4,   SZ_8K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 8MiB 3,3V",       0xe6, 8,   SZ_8K, 0),
+	LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM",   0xd6, 8,   SZ_8K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 16MiB 3,3V",      0x73, 16,  SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM",  0x57, 16,  SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 32MiB 3,3V",      0x75, 32,  SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM",  0x58, 32,  SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 64MiB 3,3V",      0x76, 64,  SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM",  0xd9, 64,  SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 128MiB 3,3V",     0x79, 128, SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
+	LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V",    0x71, 256, SZ_16K, 0),
+	LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
+	{NULL}
+};
+
+static struct nand_flash_dev nand_xd_flash_ids[] = {
+	LEGACY_ID_NAND("xD 16MiB 3,3V",  0x73, 16,   SZ_16K, 0),
+	LEGACY_ID_NAND("xD 32MiB 3,3V",  0x75, 32,   SZ_16K, 0),
+	LEGACY_ID_NAND("xD 64MiB 3,3V",  0x76, 64,   SZ_16K, 0),
+	LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128,  SZ_16K, 0),
+	LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256,  SZ_16K, NAND_BROKEN_XD),
+	LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512,  SZ_16K, NAND_BROKEN_XD),
+	LEGACY_ID_NAND("xD 1GiB 3,3V",   0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
+	LEGACY_ID_NAND("xD 2GiB 3,3V",   0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
+	{NULL}
+};
+
+int sm_register_device(struct mtd_info *mtd, int smartmedia)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
+
+	chip->options |= NAND_SKIP_BBTSCAN;
+
+	/* Scan for card properties */
+	ret = nand_scan_ident(mtd, 1, smartmedia ?
+		nand_smartmedia_flash_ids : nand_xd_flash_ids);
+
+	if (ret)
+		return ret;
+
+	/* Bad block marker position */
+	chip->badblockpos = 0x05;
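+	/* at least 7 of the 8 marker bits must be set for a good block */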
+	chip->badblockbits = 7;
+	chip->block_markbad = sm_block_markbad;
+
+	/* ECC layout */
+	if (mtd->writesize == SM_SECTOR_SIZE)
+		mtd_set_ooblayout(mtd, &oob_sm_ops);
+	else if (mtd->writesize == SM_SMALL_PAGE)
+		mtd_set_ooblayout(mtd, &oob_sm_small_ops);
+	else
+		return -ENODEV;
+
+	ret = nand_scan_tail(mtd);
+
+	if (ret)
+		return ret;
+
+	return mtd_device_register(mtd, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(sm_register_device);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
+MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/rawnand/sm_common.h b/drivers/mtd/nand/rawnand/sm_common.h
new file mode 100644
index 000000000000..d3e028e58b0f
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/sm_common.h
@@ -0,0 +1,61 @@ 
+/*
+ * Copyright © 2009 - Maxim Levitsky
+ * Common routines & support for SmartMedia/xD format
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bitops.h>
+#include <linux/mtd/mtd.h>
+
+/* Full oob structure as written on the flash */
+struct sm_oob {
+	uint32_t reserved;
+	uint8_t data_status;
+	uint8_t block_status;
+	uint8_t lba_copy1[2];
+	uint8_t ecc2[3];
+	uint8_t lba_copy2[2];
+	uint8_t ecc1[3];
+} __packed;
+
+
+/* one sector is always 512 bytes, but it can consist of two nand pages */
+#define SM_SECTOR_SIZE		512
+
+/* oob area is also 16 bytes, but might be from two pages */
+#define SM_OOB_SIZE		16
+
+/*
+ * This is the maximum zone size, and all devices that have more than one
+ * zone have this size.
+ */
+#define SM_MAX_ZONE_SIZE	1024
+
+/* support for small page nand */
+#define SM_SMALL_PAGE 		256
+#define SM_SMALL_OOB_SIZE	8
+
+
+extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
+
+
+static inline int sm_sector_valid(struct sm_oob *oob)
+{
+	return hweight16(oob->data_status) >= 5;
+}
+
+static inline int sm_block_valid(struct sm_oob *oob)
+{
+	return hweight16(oob->block_status) >= 7;
+}
+
+static inline int sm_block_erased(struct sm_oob *oob)
+{
+	static const uint32_t erased_pattern[4] = {
+		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
+
+	/* First test for erased block */
+	if (!memcmp(oob, erased_pattern, sizeof(*oob)))
+		return 1;
+	return 0;
+}
diff --git a/drivers/mtd/nand/rawnand/socrates_nand.c b/drivers/mtd/nand/rawnand/socrates_nand.c
new file mode 100644
index 000000000000..f5a3e7252b82
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/socrates_nand.c
@@ -0,0 +1,251 @@ 
+/*
+ * drivers/mtd/nand/rawnand/socrates_nand.c
+ *
+ *  Copyright © 2008 Ilya Yanok, Emcraft Systems
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#define FPGA_NAND_CMD_MASK		(0x7 << 28)
+#define FPGA_NAND_CMD_COMMAND		(0x0 << 28)
+#define FPGA_NAND_CMD_ADDR		(0x1 << 28)
+#define FPGA_NAND_CMD_READ		(0x2 << 28)
+#define FPGA_NAND_CMD_WRITE		(0x3 << 28)
+#define FPGA_NAND_BUSY			(0x1 << 15)
+#define FPGA_NAND_ENABLE		(0x1 << 31)
+#define FPGA_NAND_DATA_SHIFT		16
+
+struct socrates_nand_host {
+	struct nand_chip	nand_chip;
+	void __iomem		*io_base;
+	struct device		*dev;
+};
+
+/**
+ * socrates_nand_write_buf -  write buffer to chip
+ * @mtd:	MTD device structure
+ * @buf:	data buffer
+ * @len:	number of bytes to write
+ */
+static void socrates_nand_write_buf(struct mtd_info *mtd,
+		const uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct socrates_nand_host *host = nand_get_controller_data(this);
+
+	for (i = 0; i < len; i++) {
+		out_be32(host->io_base, FPGA_NAND_ENABLE |
+				FPGA_NAND_CMD_WRITE |
+				(buf[i] << FPGA_NAND_DATA_SHIFT));
+	}
+}
+
+/**
+ * socrates_nand_read_buf -  read chip data into buffer
+ * @mtd:	MTD device structure
+ * @buf:	buffer to store data
+ * @len:	number of bytes to read
+ */
+static void socrates_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	int i;
+	struct nand_chip *this = mtd_to_nand(mtd);
+	struct socrates_nand_host *host = nand_get_controller_data(this);
+	uint32_t val;
+
+	val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
+
+	out_be32(host->io_base, val);
+	for (i = 0; i < len; i++) {
+		buf[i] = (in_be32(host->io_base) >>
+				FPGA_NAND_DATA_SHIFT) & 0xff;
+	}
+}
+
+/**
+ * socrates_nand_read_byte -  read one byte from the chip
+ * @mtd:	MTD device structure
+ */
+static uint8_t socrates_nand_read_byte(struct mtd_info *mtd)
+{
+	uint8_t byte;
+	socrates_nand_read_buf(mtd, &byte, sizeof(byte));
+	return byte;
+}
+
+/**
+ * socrates_nand_read_word -  read one word from the chip
+ * @mtd:	MTD device structure
+ */
+static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
+{
+	uint16_t word;
+	socrates_nand_read_buf(mtd, (uint8_t *)&word, sizeof(word));
+	return word;
+}
+
+/*
+ * Hardware specific access to control-lines
+ */
+static void socrates_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
+		unsigned int ctrl)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
+	uint32_t val;
+
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		val = FPGA_NAND_CMD_COMMAND;
+	else
+		val = FPGA_NAND_CMD_ADDR;
+
+	if (ctrl & NAND_NCE)
+		val |= FPGA_NAND_ENABLE;
+
+	val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
+
+	out_be32(host->io_base, val);
+}
+
+/*
+ * Read the Device Ready pin.
+ */
+static int socrates_nand_device_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand_chip = mtd_to_nand(mtd);
+	struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
+
+	if (in_be32(host->io_base) & FPGA_NAND_BUSY)
+		return 0; /* busy */
+	return 1;
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int socrates_nand_probe(struct platform_device *ofdev)
+{
+	struct socrates_nand_host *host;
+	struct mtd_info *mtd;
+	struct nand_chip *nand_chip;
+	int res;
+
+	/* Allocate memory for the device structure (and zero it) */
+	host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
+	if (!host)
+		return -ENOMEM;
+
+	host->io_base = of_iomap(ofdev->dev.of_node, 0);
+	if (host->io_base == NULL) {
+		dev_err(&ofdev->dev, "ioremap failed\n");
+		return -EIO;
+	}
+
+	nand_chip = &host->nand_chip;
+	mtd = nand_to_mtd(nand_chip);
+	host->dev = &ofdev->dev;
+
+	/* link the private data structures */
+	nand_set_controller_data(nand_chip, host);
+	nand_set_flash_node(nand_chip, ofdev->dev.of_node);
+	mtd->name = "socrates_nand";
+	mtd->dev.parent = &ofdev->dev;
+
+	/* should never be accessed directly */
+	nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
+	nand_chip->IO_ADDR_W = (void *)0xdeadbeef;
+
+	nand_chip->cmd_ctrl = socrates_nand_cmd_ctrl;
+	nand_chip->read_byte = socrates_nand_read_byte;
+	nand_chip->read_word = socrates_nand_read_word;
+	nand_chip->write_buf = socrates_nand_write_buf;
+	nand_chip->read_buf = socrates_nand_read_buf;
+	nand_chip->dev_ready = socrates_nand_device_ready;
+
+	nand_chip->ecc.mode = NAND_ECC_SOFT;	/* enable ECC */
+	nand_chip->ecc.algo = NAND_ECC_HAMMING;
+
+	/* TODO: I have no idea what real delay is. */
+	nand_chip->chip_delay = 20;		/* 20us command delay time */
+
+	dev_set_drvdata(&ofdev->dev, host);
+
+	/* first scan to find the device and get the page size */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		res = -ENXIO;
+		goto out;
+	}
+
+	/* second phase scan */
+	if (nand_scan_tail(mtd)) {
+		res = -ENXIO;
+		goto out;
+	}
+
+	res = mtd_device_register(mtd, NULL, 0);
+	if (!res)
+		return res;
+
+	nand_release(mtd);
+
+out:
+	iounmap(host->io_base);
+	return res;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int socrates_nand_remove(struct platform_device *ofdev)
+{
+	struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
+	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
+
+	nand_release(mtd);
+
+	iounmap(host->io_base);
+
+	return 0;
+}
+
+static const struct of_device_id socrates_nand_match[] =
+{
+	{
+		.compatible   = "abb,socrates-nand",
+	},
+	{},
+};
+
+MODULE_DEVICE_TABLE(of, socrates_nand_match);
+
+static struct platform_driver socrates_nand_driver = {
+	.driver = {
+		.name = "socrates_nand",
+		.of_match_table = socrates_nand_match,
+	},
+	.probe		= socrates_nand_probe,
+	.remove		= socrates_nand_remove,
+};
+
+module_platform_driver(socrates_nand_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Ilya Yanok");
+MODULE_DESCRIPTION("NAND driver for Socrates board");
diff --git a/drivers/mtd/nand/rawnand/sunxi_nand.c b/drivers/mtd/nand/rawnand/sunxi_nand.c
new file mode 100644
index 000000000000..ccccc7ab9023
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/sunxi_nand.c
@@ -0,0 +1,2291 @@ 
+/*
+ * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
+ *
+ * Derived from:
+ *	https://github.com/yuq/sunxi-nfc-mtd
+ *	Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
+ *
+ *	https://github.com/hno/Allwinner-Info
+ *	Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
+ *
+ *	Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
+ *	Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/iopoll.h>
+#include <linux/reset.h>
+
+#define NFC_REG_CTL		0x0000
+#define NFC_REG_ST		0x0004
+#define NFC_REG_INT		0x0008
+#define NFC_REG_TIMING_CTL	0x000C
+#define NFC_REG_TIMING_CFG	0x0010
+#define NFC_REG_ADDR_LOW	0x0014
+#define NFC_REG_ADDR_HIGH	0x0018
+#define NFC_REG_SECTOR_NUM	0x001C
+#define NFC_REG_CNT		0x0020
+#define NFC_REG_CMD		0x0024
+#define NFC_REG_RCMD_SET	0x0028
+#define NFC_REG_WCMD_SET	0x002C
+#define NFC_REG_IO_DATA		0x0030
+#define NFC_REG_ECC_CTL		0x0034
+#define NFC_REG_ECC_ST		0x0038
+#define NFC_REG_DEBUG		0x003C
+#define NFC_REG_ECC_ERR_CNT(x)	((0x0040 + (x)) & ~0x3)
+#define NFC_REG_USER_DATA(x)	(0x0050 + ((x) * 4))
+#define NFC_REG_SPARE_AREA	0x00A0
+#define NFC_REG_PAT_ID		0x00A4
+#define NFC_RAM0_BASE		0x0400
+#define NFC_RAM1_BASE		0x0800
+
+/* define bit use in NFC_CTL */
+#define NFC_EN			BIT(0)
+#define NFC_RESET		BIT(1)
+#define NFC_BUS_WIDTH_MSK	BIT(2)
+#define NFC_BUS_WIDTH_8		(0 << 2)
+#define NFC_BUS_WIDTH_16	(1 << 2)
+#define NFC_RB_SEL_MSK		BIT(3)
+#define NFC_RB_SEL(x)		((x) << 3)
+#define NFC_CE_SEL_MSK		GENMASK(26, 24)
+#define NFC_CE_SEL(x)		((x) << 24)
+#define NFC_CE_CTL		BIT(6)
+#define NFC_PAGE_SHIFT_MSK	GENMASK(11, 8)
+#define NFC_PAGE_SHIFT(x)	(((x) < 10 ? 0 : (x) - 10) << 8)
+#define NFC_SAM			BIT(12)
+#define NFC_RAM_METHOD		BIT(14)
+#define NFC_DEBUG_CTL		BIT(31)
+
+/* define bit use in NFC_ST */
+#define NFC_RB_B2R		BIT(0)
+#define NFC_CMD_INT_FLAG	BIT(1)
+#define NFC_DMA_INT_FLAG	BIT(2)
+#define NFC_CMD_FIFO_STATUS	BIT(3)
+#define NFC_STA			BIT(4)
+#define NFC_NATCH_INT_FLAG	BIT(5)
+#define NFC_RB_STATE(x)		BIT(x + 8)
+
+/* define bit use in NFC_INT */
+#define NFC_B2R_INT_ENABLE	BIT(0)
+#define NFC_CMD_INT_ENABLE	BIT(1)
+#define NFC_DMA_INT_ENABLE	BIT(2)
+#define NFC_INT_MASK		(NFC_B2R_INT_ENABLE | \
+				 NFC_CMD_INT_ENABLE | \
+				 NFC_DMA_INT_ENABLE)
+
+/* define bit use in NFC_TIMING_CTL */
+#define NFC_TIMING_CTL_EDO	BIT(8)
+
+/* define NFC_TIMING_CFG register layout */
+#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD)		\
+	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) |		\
+	(((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) |		\
+	(((tCAD) & 0x7) << 8))
+
+/* define bit use in NFC_CMD */
+#define NFC_CMD_LOW_BYTE_MSK	GENMASK(7, 0)
+#define NFC_CMD_HIGH_BYTE_MSK	GENMASK(15, 8)
+#define NFC_CMD(x)		(x)
+#define NFC_ADR_NUM_MSK		GENMASK(18, 16)
+#define NFC_ADR_NUM(x)		(((x) - 1) << 16)
+#define NFC_SEND_ADR		BIT(19)
+#define NFC_ACCESS_DIR		BIT(20)
+#define NFC_DATA_TRANS		BIT(21)
+#define NFC_SEND_CMD1		BIT(22)
+#define NFC_WAIT_FLAG		BIT(23)
+#define NFC_SEND_CMD2		BIT(24)
+#define NFC_SEQ			BIT(25)
+#define NFC_DATA_SWAP_METHOD	BIT(26)
+#define NFC_ROW_AUTO_INC	BIT(27)
+#define NFC_SEND_CMD3		BIT(28)
+#define NFC_SEND_CMD4		BIT(29)
+#define NFC_CMD_TYPE_MSK	GENMASK(31, 30)
+#define NFC_NORMAL_OP		(0 << 30)
+#define NFC_ECC_OP		(1 << 30)
+#define NFC_PAGE_OP		(2 << 30)
+
+/* define bit use in NFC_RCMD_SET */
+#define NFC_READ_CMD_MSK	GENMASK(7, 0)
+#define NFC_RND_READ_CMD0_MSK	GENMASK(15, 8)
+#define NFC_RND_READ_CMD1_MSK	GENMASK(23, 16)
+
+/* define bit use in NFC_WCMD_SET */
+#define NFC_PROGRAM_CMD_MSK	GENMASK(7, 0)
+#define NFC_RND_WRITE_CMD_MSK	GENMASK(15, 8)
+#define NFC_READ_CMD0_MSK	GENMASK(23, 16)
+#define NFC_READ_CMD1_MSK	GENMASK(31, 24)
+
+/* define bit use in NFC_ECC_CTL */
+#define NFC_ECC_EN		BIT(0)
+#define NFC_ECC_PIPELINE	BIT(3)
+#define NFC_ECC_EXCEPTION	BIT(4)
+#define NFC_ECC_BLOCK_SIZE_MSK	BIT(5)
+#define NFC_RANDOM_EN		BIT(9)
+#define NFC_RANDOM_DIRECTION	BIT(10)
+#define NFC_ECC_MODE_MSK	GENMASK(15, 12)
+#define NFC_ECC_MODE(x)		((x) << 12)
+#define NFC_RANDOM_SEED_MSK	GENMASK(30, 16)
+#define NFC_RANDOM_SEED(x)	((x) << 16)
+
+/* define bit use in NFC_ECC_ST */
+#define NFC_ECC_ERR(x)		BIT(x)
+#define NFC_ECC_ERR_MSK		GENMASK(15, 0)
+#define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
+#define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)
+
+#define NFC_DEFAULT_TIMEOUT_MS	1000
+
+#define NFC_SRAM_SIZE		1024
+
+#define NFC_MAX_CS		7
+
+/*
+ * Ready/Busy detection type: describes the Ready/Busy detection modes
+ *
+ * @RB_NONE:	no external detection available, rely on STATUS command
+ *		and software timeouts
+ * @RB_NATIVE:	use sunxi NAND controller Ready/Busy support. The Ready/Busy
+ *		pin of the NAND flash chip must be connected to one of the
+ *		native NAND R/B pins (those which can be muxed to the NAND
+ *		Controller)
+ * @RB_GPIO:	use a simple GPIO to handle Ready/Busy status. The Ready/Busy
+ *		pin of the NAND flash chip must be connected to a GPIO capable
+ *		pin.
+ */
+enum sunxi_nand_rb_type {
+	RB_NONE,
+	RB_NATIVE,
+	RB_GPIO,
+};
+
+/*
+ * Ready/Busy structure: stores information related to Ready/Busy detection
+ *
+ * @type:	the Ready/Busy detection mode
+ * @info:	information related to the R/B detection mode. Either a gpio
+ *		id or a native R/B id (those supported by the NAND controller).
+ */
+struct sunxi_nand_rb {
+	enum sunxi_nand_rb_type type;
+	union {
+		int gpio;
+		int nativeid;
+	} info;
+};
+
+/*
+ * Chip Select structure: stores information related to NAND Chip Select
+ *
+ * @cs:		the NAND CS id used to communicate with a NAND Chip
+ * @rb:		the Ready/Busy description
+ */
+struct sunxi_nand_chip_sel {
+	u8 cs;
+	struct sunxi_nand_rb rb;
+};
+
+/*
+ * sunxi HW ECC infos: stores information related to HW ECC support
+ *
+ * @mode:	the sunxi ECC mode field deduced from ECC requirements
+ */
+struct sunxi_nand_hw_ecc {
+	int mode;
+};
+
+/*
+ * NAND chip structure: stores NAND chip device related information
+ *
+ * @node:		used to store NAND chips into a list
+ * @nand:		base NAND chip structure
+ * @clk_rate:		clk_rate required for this NAND chip
+ * @timing_cfg:		TIMING_CFG register value for this NAND chip
+ * @selected:		current active CS
+ * @nsels:		number of CS lines required by the NAND chip
+ * @sels:		array of CS lines descriptions
+ */
+struct sunxi_nand_chip {
+	struct list_head node;
+	struct nand_chip nand;
+	unsigned long clk_rate;
+	u32 timing_cfg;
+	u32 timing_ctl;
+	int selected;
+	int addr_cycles;
+	u32 addr[2];
+	int cmd_cycles;
+	u8 cmd[2];
+	int nsels;
+	struct sunxi_nand_chip_sel sels[0];
+};
+
+static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
+{
+	return container_of(nand, struct sunxi_nand_chip, nand);
+}
+
+/*
+ * NAND Controller structure: stores sunxi NAND controller information
+ *
+ * @controller:		base controller structure
+ * @dev:		parent device (used to print error messages)
+ * @regs:		NAND controller registers
+ * @ahb_clk:		NAND Controller AHB clock
+ * @mod_clk:		NAND Controller mod clock
+ * @assigned_cs:	bitmask describing already assigned CS lines
+ * @clk_rate:		NAND controller current clock rate
+ * @chips:		a list containing all the NAND chips attached to
+ *			this NAND controller
+ * @complete:		a completion object used to wait for NAND
+ *			controller events
+ */
+struct sunxi_nfc {
+	struct nand_hw_control controller;
+	struct device *dev;
+	void __iomem *regs;
+	struct clk *ahb_clk;
+	struct clk *mod_clk;
+	struct reset_control *reset;
+	unsigned long assigned_cs;
+	unsigned long clk_rate;
+	struct list_head chips;
+	struct completion complete;
+	struct dma_chan *dmac;
+};
+
+static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
+{
+	return container_of(ctrl, struct sunxi_nfc, controller);
+}
+
+static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
+{
+	struct sunxi_nfc *nfc = dev_id;
+	u32 st = readl(nfc->regs + NFC_REG_ST);
+	u32 ien = readl(nfc->regs + NFC_REG_INT);
+
+	if (!(ien & st))
+		return IRQ_NONE;
+
+	if ((ien & st) == ien)
+		complete(&nfc->complete);
+
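+	/* ack the status bits that fired and drop them from the enable mask */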
+	writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+	writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
+
+	return IRQ_HANDLED;
+}
+
+static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
+				 bool use_polling, unsigned int timeout_ms)
+{
+	int ret;
+
+	if (events & ~NFC_INT_MASK)
+		return -EINVAL;
+
+	if (!timeout_ms)
+		timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
+
+	if (!use_polling) {
+		init_completion(&nfc->complete);
+
+		writel(events, nfc->regs + NFC_REG_INT);
+
+		ret = wait_for_completion_timeout(&nfc->complete,
+						msecs_to_jiffies(timeout_ms));
+
+		writel(0, nfc->regs + NFC_REG_INT);
+	} else {
+		u32 status;
+
+		ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+					 (status & events) == events, 1,
+					 timeout_ms * 1000);
+	}
+
+	writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
+
+	if (ret)
+		dev_err(nfc->dev, "wait interrupt timedout\n");
+
+	return ret;
+}
+
+static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
+{
+	u32 status;
+	int ret;
+
+	ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
+				 !(status & NFC_CMD_FIFO_STATUS), 1,
+				 NFC_DEFAULT_TIMEOUT_MS * 1000);
+	if (ret)
+		dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
+
+	return ret;
+}
+
+static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
+{
+	u32 ctl;
+	int ret;
+
+	writel(0, nfc->regs + NFC_REG_ECC_CTL);
+	writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
+
+	ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
+				 !(ctl & NFC_RESET), 1,
+				 NFC_DEFAULT_TIMEOUT_MS * 1000);
+	if (ret)
+		dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
+
+	return ret;
+}
+
+static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
+				    int chunksize, int nchunks,
+				    enum dma_data_direction ddir,
+				    struct scatterlist *sg)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct dma_async_tx_descriptor *dmad;
+	enum dma_transfer_direction tdir;
+	dma_cookie_t dmat;
+	int ret;
+
+	if (ddir == DMA_FROM_DEVICE)
+		tdir = DMA_DEV_TO_MEM;
+	else
+		tdir = DMA_MEM_TO_DEV;
+
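+	/* map the whole transfer as a single contiguous scatterlist entry */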
+	sg_init_one(sg, buf, nchunks * chunksize);
+	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
+	if (!ret)
+		return -ENOMEM;
+
+	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
+	if (!dmad) {
+		ret = -EINVAL;
+		goto err_unmap_buf;
+	}
+
+	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
+	       nfc->regs + NFC_REG_CTL);
+	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
+	writel(chunksize, nfc->regs + NFC_REG_CNT);
+	dmat = dmaengine_submit(dmad);
+
+	ret = dma_submit_error(dmat);
+	if (ret)
+		goto err_clr_dma_flag;
+
+	return 0;
+
+err_clr_dma_flag:
+	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+	       nfc->regs + NFC_REG_CTL);
+
+err_unmap_buf:
+	dma_unmap_sg(nfc->dev, sg, 1, ddir);
+	return ret;
+}
+
+static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
+				     enum dma_data_direction ddir,
+				     struct scatterlist *sg)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	dma_unmap_sg(nfc->dev, sg, 1, ddir);
+	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
+	       nfc->regs + NFC_REG_CTL);
+}
+
+static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	struct sunxi_nand_rb *rb;
+	int ret;
+
+	if (sunxi_nand->selected < 0)
+		return 0;
+
+	rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
+
+	switch (rb->type) {
+	case RB_NATIVE:
+		ret = !!(readl(nfc->regs + NFC_REG_ST) &
+			 NFC_RB_STATE(rb->info.nativeid));
+		break;
+	case RB_GPIO:
+		ret = gpio_get_value(rb->info.gpio);
+		break;
+	case RB_NONE:
+	default:
+		ret = 0;
+		dev_err(nfc->dev, "cannot check R/B NAND status!\n");
+		break;
+	}
+
+	return ret;
+}
+
+static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	struct sunxi_nand_chip_sel *sel;
+	u32 ctl;
+
+	if (chip > 0 && chip >= sunxi_nand->nsels)
+		return;
+
+	if (chip == sunxi_nand->selected)
+		return;
+
+	ctl = readl(nfc->regs + NFC_REG_CTL) &
+	      ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
+
+	if (chip >= 0) {
+		sel = &sunxi_nand->sels[chip];
+
+		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
+		       NFC_PAGE_SHIFT(nand->page_shift);
+		if (sel->rb.type == RB_NONE) {
+			nand->dev_ready = NULL;
+		} else {
+			nand->dev_ready = sunxi_nfc_dev_ready;
+			if (sel->rb.type == RB_NATIVE)
+				ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
+		}
+
+		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
+
+		if (nfc->clk_rate != sunxi_nand->clk_rate) {
+			clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
+			nfc->clk_rate = sunxi_nand->clk_rate;
+		}
+	}
+
+	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
+	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
+	writel(ctl, nfc->regs + NFC_REG_CTL);
+
+	sunxi_nand->selected = chip;
+}
+
+static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	int ret;
+	int cnt;
+	int offs = 0;
+	u32 tmp;
+
+	while (len > offs) {
+		cnt = min(len - offs, NFC_SRAM_SIZE);
+
+		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+		if (ret)
+			break;
+
+		writel(cnt, nfc->regs + NFC_REG_CNT);
+		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
+		writel(tmp, nfc->regs + NFC_REG_CMD);
+
+		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+		if (ret)
+			break;
+
+		if (buf)
+			memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
+				      cnt);
+		offs += cnt;
+	}
+}
+
+static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+				int len)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	int ret;
+	int cnt;
+	int offs = 0;
+	u32 tmp;
+
+	while (len > offs) {
+		cnt = min(len - offs, NFC_SRAM_SIZE);
+
+		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+		if (ret)
+			break;
+
+		writel(cnt, nfc->regs + NFC_REG_CNT);
+		memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
+		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+		      NFC_ACCESS_DIR;
+		writel(tmp, nfc->regs + NFC_REG_CMD);
+
+		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+		if (ret)
+			break;
+
+		offs += cnt;
+	}
+}
+
+static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
+{
+	uint8_t ret;
+
+	sunxi_nfc_read_buf(mtd, &ret, 1);
+
+	return ret;
+}
+
+static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
+			       unsigned int ctrl)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	int ret;
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return;
+
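+	/* NCE with neither CLE nor ALE: flush queued command/address cycles */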
+	if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
+	    !(ctrl & (NAND_CLE | NAND_ALE))) {
+		u32 cmd = 0;
+
+		if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
+			return;
+
+		if (sunxi_nand->cmd_cycles--)
+			cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
+
+		if (sunxi_nand->cmd_cycles--) {
+			cmd |= NFC_SEND_CMD2;
+			writel(sunxi_nand->cmd[1],
+			       nfc->regs + NFC_REG_RCMD_SET);
+		}
+
+		sunxi_nand->cmd_cycles = 0;
+
+		if (sunxi_nand->addr_cycles) {
+			cmd |= NFC_SEND_ADR |
+			       NFC_ADR_NUM(sunxi_nand->addr_cycles);
+			writel(sunxi_nand->addr[0],
+			       nfc->regs + NFC_REG_ADDR_LOW);
+		}
+
+		if (sunxi_nand->addr_cycles > 4)
+			writel(sunxi_nand->addr[1],
+			       nfc->regs + NFC_REG_ADDR_HIGH);
+
+		writel(cmd, nfc->regs + NFC_REG_CMD);
+		sunxi_nand->addr[0] = 0;
+		sunxi_nand->addr[1] = 0;
+		sunxi_nand->addr_cycles = 0;
+		sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	}
+
+	if (ctrl & NAND_CLE) {
+		sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
+	} else if (ctrl & NAND_ALE) {
+		sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
+				dat << ((sunxi_nand->addr_cycles % 4) * 8);
+		sunxi_nand->addr_cycles++;
+	}
+}
+
+/* These seed values have been extracted from Allwinner's BSP */
+static const u16 sunxi_nfc_randomizer_page_seeds[] = {
+	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
+	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
+	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
+	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
+	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
+	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
+	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
+	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
+	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
+	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
+	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
+	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
+	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
+	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
+	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
+	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
+};
+
+/*
+ * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
+ * have been generated using
+ * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
+ * the randomizer engine does internally before de/scrambling OOB data.
+ *
+ * Those tables are statically defined to avoid calculating randomizer state
+ * at runtime.
+ */
+static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
+	0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
+	0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
+	0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
+	0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
+	0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
+	0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
+	0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
+	0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
+	0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
+	0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
+	0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
+	0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
+	0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
+	0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
+	0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
+	0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
+};
+
+static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
+	0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
+	0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
+	0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
+	0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
+	0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
+	0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
+	0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
+	0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
+	0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
+	0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
+	0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
+	0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
+	0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
+	0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
+	0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
+	0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
+};
+
+static u16 sunxi_nfc_randomizer_step(u16 state, int count)
+{
+	state &= 0x7fff;
+
+	/*
+	 * This loop is just a simple implementation of a Fibonacci LFSR using
+	 * the x16 + x15 + 1 polynomial.
+	 */
+	while (count--)
+		state = ((state >> 1) |
+			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
+
+	return state;
+}
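+
+/*
+ * Illustration, derived from the comment above the ECC seed tables: each
+ * entry is expected to satisfy
+ *
+ *   sunxi_nfc_randomizer_ecc512_seeds[i] ==
+ *	sunxi_nfc_randomizer_step(sunxi_nfc_randomizer_page_seeds[i],
+ *				  (512 * 8) + 15);
+ *
+ * (and likewise with 1024 for the ecc1024 table), which is how the tables
+ * could be regenerated if the step size ever changed.
+ */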
+
+static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
+{
+	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
+	int mod = mtd_div_by_ws(mtd->erasesize, mtd);
+
+	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
+		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
+
+	if (ecc) {
+		if (mtd->ecc_step_size == 512)
+			seeds = sunxi_nfc_randomizer_ecc512_seeds;
+		else
+			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
+	}
+
+	return seeds[page % mod];
+}
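+
+/*
+ * Example for sunxi_nfc_randomizer_state() above: with a hypothetical 2 MiB
+ * erase block of 8 KiB pages, mtd_div_by_ws() yields 256 pages per block,
+ * clamped to the 128 entries of the seed table, so the seed sequence repeats
+ * every 128 pages and pages at the same offset in different blocks share a
+ * seed.
+ */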
+
+static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
+					int page, bool ecc)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u32 ecc_ctl;
+	u16 state;
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	state = sunxi_nfc_randomizer_state(mtd, page, ecc);
+	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
+	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
+	       nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	if (!(nand->options & NAND_NEED_SCRAMBLING))
+		return;
+
+	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
+	       nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
+{
+	u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
+
+	bbm[0] ^= state;
+	bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
+}
+
+static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
+					   const uint8_t *buf, int len,
+					   bool ecc, int page)
+{
+	sunxi_nfc_randomizer_config(mtd, page, ecc);
+	sunxi_nfc_randomizer_enable(mtd);
+	sunxi_nfc_write_buf(mtd, buf, len);
+	sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
+					  int len, bool ecc, int page)
+{
+	sunxi_nfc_randomizer_config(mtd, page, ecc);
+	sunxi_nfc_randomizer_enable(mtd);
+	sunxi_nfc_read_buf(mtd, buf, len);
+	sunxi_nfc_randomizer_disable(mtd);
+}
+
+static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
+	u32 ecc_ctl;
+
+	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
+	ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
+		     NFC_ECC_BLOCK_SIZE_MSK);
+	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
+		   NFC_ECC_PIPELINE;
+
+	writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
+	       nfc->regs + NFC_REG_ECC_CTL);
+}
+
+static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
+{
+	buf[0] = user_data;
+	buf[1] = user_data >> 8;
+	buf[2] = user_data >> 16;
+	buf[3] = user_data >> 24;
+}
+
+static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
+{
+	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
+}
+
+static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
+						int step, bool bbm, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+
+	sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
+				   oob);
+
+	/* De-randomize the Bad Block Marker. */
+	if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
+		sunxi_nfc_randomize_bbm(mtd, page, oob);
+}
+
+static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
+						const u8 *oob, int step,
+						bool bbm, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	u8 user_data[4];
+
+	/* Randomize the Bad Block Marker. */
+	if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
+		memcpy(user_data, oob, sizeof(user_data));
+		sunxi_nfc_randomize_bbm(mtd, page, user_data);
+		oob = user_data;
+	}
+
+	writel(sunxi_nfc_buf_to_user_data(oob),
+	       nfc->regs + NFC_REG_USER_DATA(step));
+}
+
+static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
+					  unsigned int *max_bitflips, int ret)
+{
+	if (ret < 0) {
+		mtd->ecc_stats.failed++;
+	} else {
+		mtd->ecc_stats.corrected += ret;
+		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
+	}
+}
+
+static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
+				    int step, u32 status, bool *erased)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	u32 tmp;
+
+	*erased = false;
+
+	if (status & NFC_ECC_ERR(step))
+		return -EBADMSG;
+
+	if (status & NFC_ECC_PAT_FOUND(step)) {
+		u8 pattern;
+
+		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
+			pattern = 0x0;
+		} else {
+			pattern = 0xff;
+			*erased = true;
+		}
+
+		if (data)
+			memset(data, pattern, ecc->size);
+
+		if (oob)
+			memset(oob, pattern, ecc->bytes + 4);
+
+		return 0;
+	}
+
+	tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
+
+	return NFC_ECC_ERR_CNT(step, tmp);
+}
+
+static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
+				       u8 *data, int data_off,
+				       u8 *oob, int oob_off,
+				       int *cur_off,
+				       unsigned int *max_bitflips,
+				       bool bbm, bool oob_required, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	int raw_mode = 0;
+	bool erased;
+	int ret;
+
+	if (*cur_off != data_off)
+		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+
+	sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
+
+	if (data_off + ecc->size != oob_off)
+		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return ret;
+
+	sunxi_nfc_randomizer_enable(mtd);
+	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
+	       nfc->regs + NFC_REG_CMD);
+
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	sunxi_nfc_randomizer_disable(mtd);
+	if (ret)
+		return ret;
+
+	*cur_off = oob_off + ecc->bytes + 4;
+
+	ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
+				       readl(nfc->regs + NFC_REG_ECC_ST),
+				       &erased);
+	if (erased)
+		return 1;
+
+	if (ret < 0) {
+		/*
+		 * Re-read the data with the randomizer disabled to identify
+		 * bitflips in erased pages.
+		 */
+		if (nand->options & NAND_NEED_SCRAMBLING) {
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
+			nand->read_buf(mtd, data, ecc->size);
+		} else {
+			memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
+				      ecc->size);
+		}
+
+		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+		nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+		ret = nand_check_erased_ecc_chunk(data, ecc->size,
+						  oob, ecc->bytes + 4,
+						  NULL, 0, ecc->strength);
+		if (ret >= 0)
+			raw_mode = 1;
+	} else {
+		memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
+
+		if (oob_required) {
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
+			sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
+						      true, page);
+
+			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
+							    bbm, page);
+		}
+	}
+
+	sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
+
+	return raw_mode;
+}
+
+static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
+					    u8 *oob, int *cur_off,
+					    bool randomize, int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	int offset = ((ecc->bytes + 4) * ecc->steps);
+	int len = mtd->oobsize - offset;
+
+	if (len <= 0)
+		return;
+
+	if (!cur_off || *cur_off != offset)
+		nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+			      offset + mtd->writesize, -1);
+
+	if (!randomize)
+		sunxi_nfc_read_buf(mtd, oob + offset, len);
+	else
+		sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
+					      false, page);
+
+	if (cur_off)
+		*cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
+					    int oob_required, int page,
+					    int nchunks)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	bool randomized = nand->options & NAND_NEED_SCRAMBLING;
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	unsigned int max_bitflips = 0;
+	int ret, i, raw_mode = 0;
+	struct scatterlist sg;
+	u32 status;
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return ret;
+
+	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
+				       DMA_FROM_DEVICE, &sg);
+	if (ret)
+		return ret;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+	sunxi_nfc_randomizer_config(mtd, page, false);
+	sunxi_nfc_randomizer_enable(mtd);
+
+	writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
+	       NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
+
+	dma_async_issue_pending(nfc->dmac);
+
+	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
+	       nfc->regs + NFC_REG_CMD);
+
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	if (ret)
+		dmaengine_terminate_all(nfc->dmac);
+
+	sunxi_nfc_randomizer_disable(mtd);
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
+
+	if (ret)
+		return ret;
+
+	status = readl(nfc->regs + NFC_REG_ECC_ST);
+
+	for (i = 0; i < nchunks; i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		u8 *data = buf + data_off;
+		u8 *oob = nand->oob_poi + oob_off;
+		bool erased;
+
+		ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
+					       oob_required ? oob : NULL,
+					       i, status, &erased);
+
+		/* ECC errors are handled in the second loop. */
+		if (ret < 0)
+			continue;
+
+		if (oob_required && !erased) {
+			/* TODO: use DMA to retrieve OOB */
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+				      mtd->writesize + oob_off, -1);
+			nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
+							    !i, page);
+		}
+
+		if (erased)
+			raw_mode = 1;
+
+		sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+	}
+
+	if (status & NFC_ECC_ERR_MSK) {
+		for (i = 0; i < nchunks; i++) {
+			int data_off = i * ecc->size;
+			int oob_off = i * (ecc->bytes + 4);
+			u8 *data = buf + data_off;
+			u8 *oob = nand->oob_poi + oob_off;
+
+			if (!(status & NFC_ECC_ERR(i)))
+				continue;
+
+			/*
+			 * Re-read the data with the randomizer disabled to
+			 * identify bitflips in erased pages.
+			 */
+			if (randomized) {
+				/* TODO: use DMA to read page in raw mode */
+				nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+					      data_off, -1);
+				nand->read_buf(mtd, data, ecc->size);
+			}
+
+			/* TODO: use DMA to retrieve OOB */
+			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
+				      mtd->writesize + oob_off, -1);
+			nand->read_buf(mtd, oob, ecc->bytes + 4);
+
+			ret = nand_check_erased_ecc_chunk(data, ecc->size,
+							  oob, ecc->bytes + 4,
+							  NULL, 0,
+							  ecc->strength);
+			if (ret >= 0)
+				raw_mode = 1;
+
+			sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
+		}
+	}
+
+	if (oob_required)
+		sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
+						NULL, !raw_mode,
+						page);
+
+	return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
+					const u8 *data, int data_off,
+					const u8 *oob, int oob_off,
+					int *cur_off, bool bbm,
+					int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	int ret;
+
+	if (data_off != *cur_off)
+		nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
+
+	sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
+
+	if (data_off + ecc->size != oob_off)
+		nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return ret;
+
+	sunxi_nfc_randomizer_enable(mtd);
+	sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
+
+	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
+	       NFC_ACCESS_DIR | NFC_ECC_OP,
+	       nfc->regs + NFC_REG_CMD);
+
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	sunxi_nfc_randomizer_disable(mtd);
+	if (ret)
+		return ret;
+
+	*cur_off = oob_off + ecc->bytes + 4;
+
+	return 0;
+}
+
+static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
+					     u8 *oob, int *cur_off,
+					     int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	int offset = ((ecc->bytes + 4) * ecc->steps);
+	int len = mtd->oobsize - offset;
+
+	if (len <= 0)
+		return;
+
+	if (!cur_off || *cur_off != offset)
+		nand->cmdfunc(mtd, NAND_CMD_RNDIN,
+			      offset + mtd->writesize, -1);
+
+	sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
+
+	if (cur_off)
+		*cur_off = mtd->oobsize + mtd->writesize;
+}
+
+static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
+				      struct nand_chip *chip, uint8_t *buf,
+				      int oob_required, int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	unsigned int max_bitflips = 0;
+	int ret, i, cur_off = 0;
+	bool raw_mode = false;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
+	for (i = 0; i < ecc->steps; i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		u8 *data = buf + data_off;
+		u8 *oob = chip->oob_poi + oob_off;
+
+		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+						  oob_off + mtd->writesize,
+						  &cur_off, &max_bitflips,
+						  !i, oob_required, page);
+		if (ret < 0)
+			return ret;
+		else if (ret)
+			raw_mode = true;
+	}
+
+	if (oob_required)
+		sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+						!raw_mode, page);
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
+					  struct nand_chip *chip, u8 *buf,
+					  int oob_required, int page)
+{
+	int ret;
+
+	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
+					       chip->ecc.steps);
+	if (ret >= 0)
+		return ret;
+
+	/* Fallback to PIO mode */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+	return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
+					 struct nand_chip *chip,
+					 u32 data_offs, u32 readlen,
+					 u8 *bufpoi, int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ret, i, cur_off = 0;
+	unsigned int max_bitflips = 0;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+	for (i = data_offs / ecc->size;
+	     i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		u8 *data = bufpoi + data_off;
+		u8 *oob = chip->oob_poi + oob_off;
+
+		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
+						  oob,
+						  oob_off + mtd->writesize,
+						  &cur_off, &max_bitflips, !i,
+						  false, page);
+		if (ret < 0)
+			return ret;
+	}
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return max_bitflips;
+}
+
+static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
+					     struct nand_chip *chip,
+					     u32 data_offs, u32 readlen,
+					     u8 *buf, int page)
+{
+	int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
+	int ret;
+
+	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
+	if (ret >= 0)
+		return ret;
+
+	/* Fallback to PIO mode */
+	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
+
+	return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
+					     buf, page);
+}
+
+static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
+				       struct nand_chip *chip,
+				       const uint8_t *buf, int oob_required,
+				       int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ret, i, cur_off = 0;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
+	for (i = 0; i < ecc->steps; i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		const u8 *data = buf + data_off;
+		const u8 *oob = chip->oob_poi + oob_off;
+
+		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+						   oob_off + mtd->writesize,
+						   &cur_off, !i, page);
+		if (ret)
+			return ret;
+	}
+
+	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+						 &cur_off, page);
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return 0;
+}
+
+static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
+					  struct nand_chip *chip,
+					  u32 data_offs, u32 data_len,
+					  const u8 *buf, int oob_required,
+					  int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ret, i, cur_off = 0;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
+	for (i = data_offs / ecc->size;
+	     i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
+		int data_off = i * ecc->size;
+		int oob_off = i * (ecc->bytes + 4);
+		const u8 *data = buf + data_off;
+		const u8 *oob = chip->oob_poi + oob_off;
+
+		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
+						   oob_off + mtd->writesize,
+						   &cur_off, !i, page);
+		if (ret)
+			return ret;
+	}
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return 0;
+}
+
+static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
+					   struct nand_chip *chip,
+					   const u8 *buf,
+					   int oob_required,
+					   int page)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+	struct scatterlist sg;
+	int ret, i;
+
+	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
+	if (ret)
+		return ret;
+
+	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
+				       DMA_TO_DEVICE, &sg);
+	if (ret)
+		goto pio_fallback;
+
+	for (i = 0; i < ecc->steps; i++) {
+		const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
+
+		sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
+	}
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+	sunxi_nfc_randomizer_config(mtd, page, false);
+	sunxi_nfc_randomizer_enable(mtd);
+
+	writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
+	       nfc->regs + NFC_REG_RCMD_SET);
+
+	dma_async_issue_pending(nfc->dmac);
+
+	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
+	       NFC_DATA_TRANS | NFC_ACCESS_DIR,
+	       nfc->regs + NFC_REG_CMD);
+
+	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
+	if (ret)
+		dmaengine_terminate_all(nfc->dmac);
+
+	sunxi_nfc_randomizer_disable(mtd);
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
+
+	if (ret)
+		return ret;
+
+	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+		/* TODO: use DMA to transfer extra OOB bytes ? */
+		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+						 NULL, page);
+
+	return 0;
+
+pio_fallback:
+	return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
+					       struct nand_chip *chip,
+					       uint8_t *buf, int oob_required,
+					       int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	unsigned int max_bitflips = 0;
+	int ret, i, cur_off = 0;
+	bool raw_mode = false;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
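+	/*
+	 * In syndrome mode, data, protected OOB and ECC bytes are interleaved
+	 * on the flash: chunk i starts at i * (ecc->size + ecc->bytes + 4)
+	 * instead of i * ecc->size (e.g. a 1098-byte stride for hypothetical
+	 * 1024-byte steps with 70 ECC bytes).
+	 */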
+	for (i = 0; i < ecc->steps; i++) {
+		int data_off = i * (ecc->size + ecc->bytes + 4);
+		int oob_off = data_off + ecc->size;
+		u8 *data = buf + (i * ecc->size);
+		u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
+
+		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
+						  oob_off, &cur_off,
+						  &max_bitflips, !i,
+						  oob_required,
+						  page);
+		if (ret < 0)
+			return ret;
+		else if (ret)
+			raw_mode = true;
+	}
+
+	if (oob_required)
+		sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
+						!raw_mode, page);
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return max_bitflips;
+}
+
+static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
+						struct nand_chip *chip,
+						const uint8_t *buf,
+						int oob_required, int page)
+{
+	struct nand_ecc_ctrl *ecc = &chip->ecc;
+	int ret, i, cur_off = 0;
+
+	sunxi_nfc_hw_ecc_enable(mtd);
+
+	for (i = 0; i < ecc->steps; i++) {
+		int data_off = i * (ecc->size + ecc->bytes + 4);
+		int oob_off = data_off + ecc->size;
+		const u8 *data = buf + (i * ecc->size);
+		const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
+
+		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
+						   oob, oob_off, &cur_off,
+						   false, page);
+		if (ret)
+			return ret;
+	}
+
+	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
+		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
+						 &cur_off, page);
+
+	sunxi_nfc_hw_ecc_disable(mtd);
+
+	return 0;
+}
+
+static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
+					    struct nand_chip *chip,
+					    int page)
+{
+	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
+
+	chip->pagebuf = -1;
+
+	return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
+}
+
+static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
+					     struct nand_chip *chip,
+					     int page)
+{
+	int ret, status;
+
+	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
+
+	chip->pagebuf = -1;
+
+	memset(chip->buffers->databuf, 0xff, mtd->writesize);
+	ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
+	if (ret)
+		return ret;
+
+	/* Send command to program the OOB data */
+	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
+
+	status = chip->waitfunc(mtd, chip);
+
+	return status & NAND_STATUS_FAIL ? -EIO : 0;
+}
+
+static const s32 tWB_lut[] = {6, 12, 16, 20};
+static const s32 tRHW_lut[] = {4, 8, 12, 20};
+
+static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
+		u32 clk_period)
+{
+	u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
+	int i;
+
+	for (i = 0; i < lut_size; i++) {
+		if (clk_cycles <= lut[i])
+			return i;
+	}
+
+	/* Doesn't fit */
+	return -EINVAL;
+}
+
+#define sunxi_nand_lookup_timing(l, p, c) \
+			_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
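+
+/*
+ * Lookup example, with hypothetical numbers: a tWB_max of 100 ns and a 20 ns
+ * clock period give DIV_ROUND_UP() = 5 cycles, which fits the first tWB_lut
+ * entry (6), so the lookup returns index 0, i.e. the 6-cycle setting.
+ */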
+
+static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
+					const struct nand_data_interface *conf,
+					bool check_only)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
+	const struct nand_sdr_timings *timings;
+	u32 min_clk_period = 0;
+	s32 tWB, tADL, tWHR, tRHW, tCAD;
+	long real_clk_rate;
+
+	timings = nand_get_sdr_timings(conf);
+	if (IS_ERR(timings))
+		return -ENOTSUPP;
+
+	/* T1 <=> tCLS */
+	if (timings->tCLS_min > min_clk_period)
+		min_clk_period = timings->tCLS_min;
+
+	/* T2 <=> tCLH */
+	if (timings->tCLH_min > min_clk_period)
+		min_clk_period = timings->tCLH_min;
+
+	/* T3 <=> tCS */
+	if (timings->tCS_min > min_clk_period)
+		min_clk_period = timings->tCS_min;
+
+	/* T4 <=> tCH */
+	if (timings->tCH_min > min_clk_period)
+		min_clk_period = timings->tCH_min;
+
+	/* T5 <=> tWP */
+	if (timings->tWP_min > min_clk_period)
+		min_clk_period = timings->tWP_min;
+
+	/* T6 <=> tWH */
+	if (timings->tWH_min > min_clk_period)
+		min_clk_period = timings->tWH_min;
+
+	/* T7 <=> tALS */
+	if (timings->tALS_min > min_clk_period)
+		min_clk_period = timings->tALS_min;
+
+	/* T8 <=> tDS */
+	if (timings->tDS_min > min_clk_period)
+		min_clk_period = timings->tDS_min;
+
+	/* T9 <=> tDH */
+	if (timings->tDH_min > min_clk_period)
+		min_clk_period = timings->tDH_min;
+
+	/* T10 <=> tRR */
+	if (timings->tRR_min > (min_clk_period * 3))
+		min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
+
+	/* T11 <=> tALH */
+	if (timings->tALH_min > min_clk_period)
+		min_clk_period = timings->tALH_min;
+
+	/* T12 <=> tRP */
+	if (timings->tRP_min > min_clk_period)
+		min_clk_period = timings->tRP_min;
+
+	/* T13 <=> tREH */
+	if (timings->tREH_min > min_clk_period)
+		min_clk_period = timings->tREH_min;
+
+	/* T14 <=> tRC */
+	if (timings->tRC_min > (min_clk_period * 2))
+		min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
+
+	/* T15 <=> tWC */
+	if (timings->tWC_min > (min_clk_period * 2))
+		min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
+
+	/* T16 - T19 + tCAD */
+	if (timings->tWB_max > (min_clk_period * 20))
+		min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
+
+	if (timings->tADL_min > (min_clk_period * 32))
+		min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
+
+	if (timings->tWHR_min > (min_clk_period * 32))
+		min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
+
+	if (timings->tRHW_min > (min_clk_period * 20))
+		min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
+
+	tWB  = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
+					min_clk_period);
+	if (tWB < 0) {
+		dev_err(nfc->dev, "unsupported tWB\n");
+		return tWB;
+	}
+
+	tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
+	if (tADL > 3) {
+		dev_err(nfc->dev, "unsupported tADL\n");
+		return -EINVAL;
+	}
+
+	tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
+	if (tWHR > 3) {
+		dev_err(nfc->dev, "unsupported tWHR\n");
+		return -EINVAL;
+	}
+
+	tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
+					min_clk_period);
+	if (tRHW < 0) {
+		dev_err(nfc->dev, "unsupported tRHW\n");
+		return tRHW;
+	}
+
+	if (check_only)
+		return 0;
+
+	/*
+	 * TODO: according to the ONFI specs this value only applies to DDR
+	 * NAND, but Allwinner seems to set this to 0x7. Mimic them for now.
+	 */
+	tCAD = 0x7;
+
+	/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
+	chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
+
+	/* Convert min_clk_period from picoseconds to nanoseconds */
+	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
+
+	/*
+	 * Unlike what is stated in the Allwinner datasheet, the clk_rate
+	 * should be set to (1 / min_clk_period), and not (2 / min_clk_period).
+	 * This new formula was verified with a scope and validated by
+	 * Allwinner engineers.
+	 */
+	chip->clk_rate = NSEC_PER_SEC / min_clk_period;
+	real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
+
+	/*
+	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
+	 * output cycle timings shall be used if the host drives tRC less than
+	 * 30 ns.
+	 */
+	min_clk_period = NSEC_PER_SEC / real_clk_rate;
+	chip->timing_ctl = ((min_clk_period * 2) < 30) ?
+			   NFC_TIMING_CTL_EDO : 0;
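+	/*
+	 * Example, with hypothetical numbers: a 10 ns min_clk_period requests
+	 * a 100 MHz mod clock; assuming the rounded rate matches, tRC is
+	 * 2 * 10 ns = 20 ns < 30 ns and EDO timings are selected.
+	 */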
+
+	return 0;
+}
+
+static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
+				    struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+	if (section >= ecc->steps)
+		return -ERANGE;
+
+	oobregion->offset = section * (ecc->bytes + 4) + 4;
+	oobregion->length = ecc->bytes;
+
+	return 0;
+}
+
+static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
+				     struct mtd_oob_region *oobregion)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct nand_ecc_ctrl *ecc = &nand->ecc;
+
+	if (section > ecc->steps)
+		return -ERANGE;
+
+	/*
+	 * The first 2 bytes are used for BB markers, hence we
+	 * only have 2 bytes available in the first user data
+	 * section.
+	 */
+	if (!section && ecc->mode == NAND_ECC_HW) {
+		oobregion->offset = 2;
+		oobregion->length = 2;
+
+		return 0;
+	}
+
+	oobregion->offset = section * (ecc->bytes + 4);
+
+	if (section < ecc->steps)
+		oobregion->length = 4;
+	else
+		oobregion->length = mtd->oobsize - oobregion->offset;
+
+	return 0;
+}
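+
+/*
+ * Resulting layout, for a hypothetical 8-step setup with 84 ECC bytes per
+ * step on a 744-byte OOB: ECC region n covers offset n * 88 + 4, length 84;
+ * free region 0 is offset 2, length 2 (right after the BBM), free regions
+ * 1-7 are 4 bytes each at offset n * 88, and free region 8 is the 40 bytes
+ * left past the last ECC block.
+ */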
+
+static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
+	.ecc = sunxi_nand_ooblayout_ecc,
+	.free = sunxi_nand_ooblayout_free,
+};
+
+static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
+					      struct nand_ecc_ctrl *ecc,
+					      struct device_node *np)
+{
+	static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	struct sunxi_nand_hw_ecc *data;
+	int nsectors;
+	int ret;
+	int i;
+
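+	/*
+	 * Example of the NAND_ECC_MAXIMIZE path below, with a hypothetical
+	 * 8 KiB page and 744-byte OOB: 8 sectors of 1024 bytes leave
+	 * (744 - 2) / 8 - 4 = 88 bytes per sector (already even), giving
+	 * 88 * 8 / fls(8192) = 50, which is then capped to the closest
+	 * supported strength, 48.
+	 */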
+	if (ecc->options & NAND_ECC_MAXIMIZE) {
+		int bytes;
+
+		ecc->size = 1024;
+		nsectors = mtd->writesize / ecc->size;
+
+		/* Reserve 2 bytes for the BBM */
+		bytes = (mtd->oobsize - 2) / nsectors;
+
+		/* 4 non-ECC bytes are added before each ECC bytes section */
+		bytes -= 4;
+
+		/* and bytes has to be even. */
+		if (bytes % 2)
+			bytes--;
+
+		ecc->strength = bytes * 8 / fls(8 * ecc->size);
+
+		for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+			if (strengths[i] > ecc->strength)
+				break;
+		}
+
+		if (!i)
+			ecc->strength = 0;
+		else
+			ecc->strength = strengths[i - 1];
+	}
+
+	if (ecc->size != 512 && ecc->size != 1024)
+		return -EINVAL;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	/* Prefer 1k ECC chunks over 512-byte ones */
+	if (ecc->size == 512 && mtd->writesize > 512) {
+		ecc->size = 1024;
+		ecc->strength *= 2;
+	}
+
+	/* Add ECC info retrieval from DT */
+	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
+		if (ecc->strength <= strengths[i])
+			break;
+	}
+
+	if (i >= ARRAY_SIZE(strengths)) {
+		dev_err(nfc->dev, "unsupported strength\n");
+		ret = -ENOTSUPP;
+		goto err;
+	}
+
+	data->mode = i;
+
+	/* HW ECC always requests ECC bytes for 1024-byte blocks */
+	ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
+
+	/* HW ECC always works with an even number of ECC bytes */
+	ecc->bytes = ALIGN(ecc->bytes, 2);
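+	/*
+	 * e.g. a strength of 40 gives DIV_ROUND_UP(40 * 14, 8) = 70 bytes
+	 * (fls(8 * 1024) == 14), which is already even.
+	 */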
+
+	nsectors = mtd->writesize / ecc->size;
+
+	if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
+	ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
+	mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
+	ecc->priv = data;
+
+	return 0;
+
+err:
+	kfree(data);
+
+	return ret;
+}
+
+static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
+{
+	kfree(ecc->priv);
+}
+
+static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
+				       struct nand_ecc_ctrl *ecc,
+				       struct device_node *np)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
+	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
+	int ret;
+
+	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+	if (ret)
+		return ret;
+
+	if (nfc->dmac) {
+		ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
+		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
+		ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
+		nand->options |= NAND_USE_BOUNCE_BUFFER;
+	} else {
+		ecc->read_page = sunxi_nfc_hw_ecc_read_page;
+		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
+		ecc->write_page = sunxi_nfc_hw_ecc_write_page;
+	}
+
+	/* TODO: support DMA for raw accesses and subpage write */
+	ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
+	ecc->read_oob_raw = nand_read_oob_std;
+	ecc->write_oob_raw = nand_write_oob_std;
+
+	return 0;
+}
+
+static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
+						struct nand_ecc_ctrl *ecc,
+						struct device_node *np)
+{
+	int ret;
+
+	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
+	if (ret)
+		return ret;
+
+	ecc->prepad = 4;
+	ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
+	ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
+	ecc->read_oob_raw = nand_read_oob_syndrome;
+	ecc->write_oob_raw = nand_write_oob_syndrome;
+
+	return 0;
+}
+
+static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
+{
+	switch (ecc->mode) {
+	case NAND_ECC_HW:
+	case NAND_ECC_HW_SYNDROME:
+		sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
+		break;
+	case NAND_ECC_NONE:
+	default:
+		break;
+	}
+}
+
+static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
+			       struct device_node *np)
+{
+	struct nand_chip *nand = mtd_to_nand(mtd);
+	int ret;
+
+	if (!ecc->size) {
+		ecc->size = nand->ecc_step_ds;
+		ecc->strength = nand->ecc_strength_ds;
+	}
+
+	if (!ecc->size || !ecc->strength)
+		return -EINVAL;
+
+	switch (ecc->mode) {
+	case NAND_ECC_HW:
+		ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
+		if (ret)
+			return ret;
+		break;
+	case NAND_ECC_HW_SYNDROME:
+		ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
+		if (ret)
+			return ret;
+		break;
+	case NAND_ECC_NONE:
+	case NAND_ECC_SOFT:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
+				struct device_node *np)
+{
+	struct sunxi_nand_chip *chip;
+	struct mtd_info *mtd;
+	struct nand_chip *nand;
+	int nsels;
+	int ret;
+	int i;
+	u32 tmp;
+
+	if (!of_get_property(np, "reg", &nsels))
+		return -EINVAL;
+
+	nsels /= sizeof(u32);
+	if (!nsels) {
+		dev_err(dev, "invalid reg property size\n");
+		return -EINVAL;
+	}
+
+	chip = devm_kzalloc(dev,
+			    sizeof(*chip) +
+			    (nsels * sizeof(struct sunxi_nand_chip_sel)),
+			    GFP_KERNEL);
+	if (!chip) {
+		dev_err(dev, "could not allocate chip\n");
+		return -ENOMEM;
+	}
+
+	chip->nsels = nsels;
+	chip->selected = -1;
+
+	for (i = 0; i < nsels; i++) {
+		ret = of_property_read_u32_index(np, "reg", i, &tmp);
+		if (ret) {
+			dev_err(dev, "could not retrieve reg property: %d\n",
+				ret);
+			return ret;
+		}
+
+		if (tmp > NFC_MAX_CS) {
+			dev_err(dev,
+				"invalid reg value: %u (max CS = 7)\n",
+				tmp);
+			return -EINVAL;
+		}
+
+		if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
+			dev_err(dev, "CS %d already assigned\n", tmp);
+			return -EINVAL;
+		}
+
+		chip->sels[i].cs = tmp;
+
+		if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
+		    tmp < 2) {
+			chip->sels[i].rb.type = RB_NATIVE;
+			chip->sels[i].rb.info.nativeid = tmp;
+		} else {
+			ret = of_get_named_gpio(np, "rb-gpios", i);
+			if (ret >= 0) {
+				tmp = ret;
+				chip->sels[i].rb.type = RB_GPIO;
+				chip->sels[i].rb.info.gpio = tmp;
+				ret = devm_gpio_request(dev, tmp, "nand-rb");
+				if (ret)
+					return ret;
+
+				ret = gpio_direction_input(tmp);
+				if (ret)
+					return ret;
+			} else {
+				chip->sels[i].rb.type = RB_NONE;
+			}
+		}
+	}
+
+	nand = &chip->nand;
+	/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
+	nand->chip_delay = 200;
+	nand->controller = &nfc->controller;
+	/*
+	 * Set the ECC mode to the default value in case nothing is specified
+	 * in the DT.
+	 */
+	nand->ecc.mode = NAND_ECC_HW;
+	nand_set_flash_node(nand, np);
+	nand->select_chip = sunxi_nfc_select_chip;
+	nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
+	nand->read_buf = sunxi_nfc_read_buf;
+	nand->write_buf = sunxi_nfc_write_buf;
+	nand->read_byte = sunxi_nfc_read_byte;
+	nand->setup_data_interface = sunxi_nfc_setup_data_interface;
+
+	mtd = nand_to_mtd(nand);
+	mtd->dev.parent = dev;
+
+	ret = nand_scan_ident(mtd, nsels, NULL);
+	if (ret)
+		return ret;
+
+	if (nand->bbt_options & NAND_BBT_USE_FLASH)
+		nand->bbt_options |= NAND_BBT_NO_OOB;
+
+	if (nand->options & NAND_NEED_SCRAMBLING)
+		nand->options |= NAND_NO_SUBPAGE_WRITE;
+
+	nand->options |= NAND_SUBPAGE_READ;
+
+	ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
+	if (ret) {
+		dev_err(dev, "ECC init failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = nand_scan_tail(mtd);
+	if (ret) {
+		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
+		return ret;
+	}
+
+	ret = mtd_device_register(mtd, NULL, 0);
+	if (ret) {
+		dev_err(dev, "failed to register mtd device: %d\n", ret);
+		nand_release(mtd);
+		return ret;
+	}
+
+	list_add_tail(&chip->node, &nfc->chips);
+
+	return 0;
+}
+
+static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
+{
+	struct device_node *np = dev->of_node;
+	struct device_node *nand_np;
+	int nchips = of_get_child_count(np);
+	int ret;
+
+	if (nchips > 8) {
+		dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
+		return -EINVAL;
+	}
+
+	for_each_child_of_node(np, nand_np) {
+		ret = sunxi_nand_chip_init(dev, nfc, nand_np);
+		if (ret) {
+			of_node_put(nand_np);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
+{
+	struct sunxi_nand_chip *chip;
+
+	while (!list_empty(&nfc->chips)) {
+		chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
+					node);
+		nand_release(nand_to_mtd(&chip->nand));
+		sunxi_nand_ecc_cleanup(&chip->nand.ecc);
+		list_del(&chip->node);
+	}
+}
+
+static int sunxi_nfc_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct resource *r;
+	struct sunxi_nfc *nfc;
+	int irq;
+	int ret;
+
+	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	nfc->dev = dev;
+	nand_hw_control_init(&nfc->controller);
+	INIT_LIST_HEAD(&nfc->chips);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->regs = devm_ioremap_resource(dev, r);
+	if (IS_ERR(nfc->regs))
+		return PTR_ERR(nfc->regs);
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "failed to retrieve irq\n");
+		return irq;
+	}
+
+	nfc->ahb_clk = devm_clk_get(dev, "ahb");
+	if (IS_ERR(nfc->ahb_clk)) {
+		dev_err(dev, "failed to retrieve ahb clk\n");
+		return PTR_ERR(nfc->ahb_clk);
+	}
+
+	ret = clk_prepare_enable(nfc->ahb_clk);
+	if (ret)
+		return ret;
+
+	nfc->mod_clk = devm_clk_get(dev, "mod");
+	if (IS_ERR(nfc->mod_clk)) {
+		dev_err(dev, "failed to retrieve mod clk\n");
+		ret = PTR_ERR(nfc->mod_clk);
+		goto out_ahb_clk_unprepare;
+	}
+
+	ret = clk_prepare_enable(nfc->mod_clk);
+	if (ret)
+		goto out_ahb_clk_unprepare;
+
+	nfc->reset = devm_reset_control_get_optional(dev, "ahb");
+	if (!IS_ERR(nfc->reset)) {
+		ret = reset_control_deassert(nfc->reset);
+		if (ret) {
+			dev_err(dev, "reset err %d\n", ret);
+			goto out_mod_clk_unprepare;
+		}
+	} else if (PTR_ERR(nfc->reset) != -ENOENT) {
+		ret = PTR_ERR(nfc->reset);
+		goto out_mod_clk_unprepare;
+	}
+
+	ret = sunxi_nfc_rst(nfc);
+	if (ret)
+		goto out_ahb_reset_reassert;
+
+	writel(0, nfc->regs + NFC_REG_INT);
+	ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
+			       0, "sunxi-nand", nfc);
+	if (ret)
+		goto out_ahb_reset_reassert;
+
+	nfc->dmac = dma_request_slave_channel(dev, "rxtx");
+	if (nfc->dmac) {
+		struct dma_slave_config dmac_cfg = { };
+
+		dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
+		dmac_cfg.dst_addr = dmac_cfg.src_addr;
+		dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
+		dmac_cfg.src_maxburst = 4;
+		dmac_cfg.dst_maxburst = 4;
+		dmaengine_slave_config(nfc->dmac, &dmac_cfg);
+	} else {
+		dev_warn(dev, "failed to request rxtx DMA channel\n");
+	}
+
+	platform_set_drvdata(pdev, nfc);
+
+	ret = sunxi_nand_chips_init(dev, nfc);
+	if (ret) {
+		dev_err(dev, "failed to init nand chips\n");
+		goto out_release_dmac;
+	}
+
+	return 0;
+
+out_release_dmac:
+	if (nfc->dmac)
+		dma_release_channel(nfc->dmac);
+out_ahb_reset_reassert:
+	if (!IS_ERR(nfc->reset))
+		reset_control_assert(nfc->reset);
+out_mod_clk_unprepare:
+	clk_disable_unprepare(nfc->mod_clk);
+out_ahb_clk_unprepare:
+	clk_disable_unprepare(nfc->ahb_clk);
+
+	return ret;
+}
+
+static int sunxi_nfc_remove(struct platform_device *pdev)
+{
+	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
+
+	sunxi_nand_chips_cleanup(nfc);
+
+	if (!IS_ERR(nfc->reset))
+		reset_control_assert(nfc->reset);
+
+	if (nfc->dmac)
+		dma_release_channel(nfc->dmac);
+	clk_disable_unprepare(nfc->mod_clk);
+	clk_disable_unprepare(nfc->ahb_clk);
+
+	return 0;
+}
+
+static const struct of_device_id sunxi_nfc_ids[] = {
+	{ .compatible = "allwinner,sun4i-a10-nand" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
+
+static struct platform_driver sunxi_nfc_driver = {
+	.driver = {
+		.name = "sunxi_nand",
+		.of_match_table = sunxi_nfc_ids,
+	},
+	.probe = sunxi_nfc_probe,
+	.remove = sunxi_nfc_remove,
+};
+module_platform_driver(sunxi_nfc_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Boris BREZILLON");
+MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
+MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/rawnand/tmio_nand.c b/drivers/mtd/nand/rawnand/tmio_nand.c
new file mode 100644
index 000000000000..e599ada12cd0
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/tmio_nand.c
@@ -0,0 +1,510 @@ 
+/*
+ * Toshiba TMIO NAND flash controller driver
+ *
+ * Slightly murky pre-git history of the driver:
+ *
+ * Copyright (c) Ian Molton 2004, 2005, 2008
+ *    Original work, independent of Sharp's code. Included hardware ECC support.
+ *    Hard ECC did not work for writes in the early revisions.
+ * Copyright (c) Dirk Opfer 2005.
+ *    Modifications developed from Sharp's code but
+ *    NOT containing any, ported onto Ian's base.
+ * Copyright (c) Chris Humbert 2005
+ * Copyright (c) Dmitry Baryshkov 2008
+ *    Minor fixes
+ *
+ * Parts copyright Sebastian Carlier
+ *
+ * This file is licensed under
+ * the terms of the GNU General Public License version 2. This program
+ * is licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ */
+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/tmio.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/slab.h>
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * NAND Flash Host Controller Configuration Register
+ */
+#define CCR_COMMAND	0x04	/* w Command				*/
+#define CCR_BASE	0x10	/* l NAND Flash Control Reg Base Addr	*/
+#define CCR_INTP	0x3d	/* b Interrupt Pin			*/
+#define CCR_INTE	0x48	/* b Interrupt Enable			*/
+#define CCR_EC		0x4a	/* b Event Control			*/
+#define CCR_ICC		0x4c	/* b Internal Clock Control		*/
+#define CCR_ECCC	0x5b	/* b ECC Control			*/
+#define CCR_NFTC	0x60	/* b NAND Flash Transaction Control	*/
+#define CCR_NFM		0x61	/* b NAND Flash Monitor			*/
+#define CCR_NFPSC	0x62	/* b NAND Flash Power Supply Control	*/
+#define CCR_NFDC	0x63	/* b NAND Flash Detect Control		*/
+
+/*
+ * NAND Flash Control Register
+ */
+#define FCR_DATA	0x00	/* bwl Data Register			*/
+#define FCR_MODE	0x04	/* b Mode Register			*/
+#define FCR_STATUS	0x05	/* b Status Register			*/
+#define FCR_ISR		0x06	/* b Interrupt Status Register		*/
+#define FCR_IMR		0x07	/* b Interrupt Mask Register		*/
+
+/* FCR_MODE Register Command List */
+#define FCR_MODE_DATA	0x94	/* Data Data_Mode */
+#define FCR_MODE_COMMAND 0x95	/* Data Command_Mode */
+#define FCR_MODE_ADDRESS 0x96	/* Data Address_Mode */
+
+#define FCR_MODE_HWECC_CALC	0xB4	/* HW-ECC Data */
+#define FCR_MODE_HWECC_RESULT	0xD4	/* HW-ECC Calc result Read_Mode */
+#define FCR_MODE_HWECC_RESET	0xF4	/* HW-ECC Reset */
+
+#define FCR_MODE_POWER_ON	0x0C	/* Power Supply ON  to SSFDC card */
+#define FCR_MODE_POWER_OFF	0x08	/* Power Supply OFF to SSFDC card */
+
+#define FCR_MODE_LED_OFF	0x00	/* LED OFF */
+#define FCR_MODE_LED_ON		0x04	/* LED ON */
+
+#define FCR_MODE_EJECT_ON	0x68	/* Ejection events active  */
+#define FCR_MODE_EJECT_OFF	0x08	/* Ejection events ignored */
+
+#define FCR_MODE_LOCK		0x6C	/* Lock_Mode. Eject Switch Invalid */
+#define FCR_MODE_UNLOCK		0x0C	/* UnLock_Mode. Eject Switch is valid */
+
+#define FCR_MODE_CONTROLLER_ID	0x40	/* Controller ID Read */
+#define FCR_MODE_STANDBY	0x00	/* SSFDC card Changes Standby State */
+
+#define FCR_MODE_WE		0x80
+#define FCR_MODE_ECC1		0x40
+#define FCR_MODE_ECC0		0x20
+#define FCR_MODE_CE		0x10
+#define FCR_MODE_PCNT1		0x08
+#define FCR_MODE_PCNT0		0x04
+#define FCR_MODE_ALE		0x02
+#define FCR_MODE_CLE		0x01
+
+#define FCR_STATUS_BUSY		0x80
+
+/*--------------------------------------------------------------------------*/
+
+struct tmio_nand {
+	struct nand_chip chip;
+
+	struct platform_device *dev;
+
+	void __iomem *ccr;
+	void __iomem *fcr;
+	unsigned long fcr_base;
+
+	unsigned int irq;
+
+	/* for tmio_nand_read_byte */
+	u8			read;
+	unsigned read_good:1;
+};
+
+static inline struct tmio_nand *mtd_to_tmio(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct tmio_nand, chip);
+}
+
+
+/*--------------------------------------------------------------------------*/
+
+static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
+				   unsigned int ctrl)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		u8 mode;
+
+		if (ctrl & NAND_NCE) {
+			mode = FCR_MODE_DATA;
+
+			if (ctrl & NAND_CLE)
+				mode |=  FCR_MODE_CLE;
+			else
+				mode &= ~FCR_MODE_CLE;
+
+			if (ctrl & NAND_ALE)
+				mode |=  FCR_MODE_ALE;
+			else
+				mode &= ~FCR_MODE_ALE;
+		} else {
+			mode = FCR_MODE_STANDBY;
+		}
+
+		tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
+		tmio->read_good = 0;
+	}
+
+	if (cmd != NAND_CMD_NONE)
+		tmio_iowrite8(cmd, chip->IO_ADDR_W);
+}
+
+static int tmio_nand_dev_ready(struct mtd_info *mtd)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+	return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
+}
+
+static irqreturn_t tmio_irq(int irq, void *__tmio)
+{
+	struct tmio_nand *tmio = __tmio;
+	struct nand_chip *nand_chip = &tmio->chip;
+
+	/* disable RDYREQ interrupt */
+	tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+
+	if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
+		dev_warn(&tmio->dev->dev, "spurious interrupt\n");
+
+	wake_up(&nand_chip->controller->wq);
+	return IRQ_HANDLED;
+}
+
+/*
+ * The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
+ * This interrupt is normally disabled, but for long operations like
+ * erase and write, we enable it to wake us up.  The irq handler
+ * disables the interrupt.
+ */
+static int
+tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+	long timeout;
+
+	/* enable RDYREQ interrupt */
+	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+	tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
+
+	timeout = wait_event_timeout(nand_chip->controller->wq,
+		tmio_nand_dev_ready(mtd),
+		msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
+
+	if (unlikely(!tmio_nand_dev_ready(mtd))) {
+		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+		dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
+			nand_chip->state == FL_ERASING ? "erase" : "program",
+			nand_chip->state == FL_ERASING ? 400 : 20);
+
+	} else if (unlikely(!timeout)) {
+		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
+		dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
+	}
+
+	nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
+	return nand_chip->read_byte(mtd);
+}
+
+/*
+ * The TMIO controller combines two 8-bit data bytes into one 16-bit
+ * word. This function separates them so nand_base.c works as expected,
+ * especially its NAND_CMD_READID routines.
+ *
+ * To prevent stale data from being read, tmio_nand_hwcontrol() clears
+ * tmio->read_good.
+ */
+static u_char tmio_nand_read_byte(struct mtd_info *mtd)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+	unsigned int data;
+
+	if (tmio->read_good--)
+		return tmio->read;
+
+	data = tmio_ioread16(tmio->fcr + FCR_DATA);
+	tmio->read = data >> 8;
+	return data;
+}
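+
+/*
+ * Example: if the 16-bit read returns 0xBBAA, the first call yields 0xAA and
+ * the next one yields the buffered 0xBB, so callers still see a byte stream.
+ */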
+
+/*
+ * The TMIO controller converts an 8-bit NAND interface to a 16-bit
+ * bus interface, so all data reads and writes must be 16-bit wide.
+ * Thus, we implement 16-bit versions of the read, write, and verify
+ * buffer functions.
+ */
+static void
+tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+	tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+	tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
+}
+
+static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+
+	tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
+	tmio_ioread8(tmio->fcr + FCR_DATA);	/* dummy read */
+	tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
+}
+
+static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
+							u_char *ecc_code)
+{
+	struct tmio_nand *tmio = mtd_to_tmio(mtd);
+	unsigned int ecc;
+
+	tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
+
+	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+	ecc_code[1] = ecc;	/* 000-255 LP7-0 */
+	ecc_code[0] = ecc >> 8;	/* 000-255 LP15-8 */
+	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+	ecc_code[2] = ecc;	/* 000-255 CP5-0,11b */
+	ecc_code[4] = ecc >> 8;	/* 256-511 LP7-0 */
+	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
+	ecc_code[3] = ecc;	/* 256-511 LP15-8 */
+	ecc_code[5] = ecc >> 8;	/* 256-511 CP5-0,11b */
+
+	tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
+	return 0;
+}
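+
+/*
+ * The six ECC bytes per 512-byte step read above are thus two independent
+ * 3-byte Hamming codes (one per 256-byte half), which tmio_nand_correct_data()
+ * below hands to __nand_correct_data() in the same order.
+ */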
+
+static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
+		unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+	int r0, r1;
+
+	/* assume ecc.size = 512 and ecc.bytes = 6 */
+	r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+	if (r0 < 0)
+		return r0;
+	r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
+	if (r1 < 0)
+		return r1;
+	return r0 + r1;
+}
+
+static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+	int ret;
+
+	if (cell->enable) {
+		ret = cell->enable(dev);
+		if (ret)
+			return ret;
+	}
+
+	/* (4Ch) CLKRUN Enable    1st spcrunc */
+	tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
+
+	/* (10h)BaseAddress    0x1000 spba.spba2 */
+	tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
+	tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);
+
+	/* (04h)Command Register I/O spcmd */
+	tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
+
+	/* (62h) Power Supply Control ssmpwc */
+	/* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
+	tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);
+
+	/* (63h) Detect Control ssmdtc */
+	tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);
+
+	/* Interrupt status register clear sintst */
+	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
+
+	/* After power supply, Media are reset smode */
+	tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
+	tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
+	tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);
+
+	/* Standby Mode smode */
+	tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);
+
+	mdelay(5);
+
+	return 0;
+}
+
+static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+
+	tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
+	if (cell->disable)
+		cell->disable(dev);
+}
+
+static int tmio_probe(struct platform_device *dev)
+{
+	struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
+	struct resource *fcr = platform_get_resource(dev,
+			IORESOURCE_MEM, 0);
+	struct resource *ccr = platform_get_resource(dev,
+			IORESOURCE_MEM, 1);
+	int irq = platform_get_irq(dev, 0);
+	struct tmio_nand *tmio;
+	struct mtd_info *mtd;
+	struct nand_chip *nand_chip;
+	int retval;
+
+	if (data == NULL)
+		dev_warn(&dev->dev, "NULL platform data!\n");
+
+	tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
+	if (!tmio)
+		return -ENOMEM;
+
+	tmio->dev = dev;
+
+	platform_set_drvdata(dev, tmio);
+	nand_chip = &tmio->chip;
+	mtd = nand_to_mtd(nand_chip);
+	mtd->name = "tmio-nand";
+	mtd->dev.parent = &dev->dev;
+
+	tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
+	if (!tmio->ccr)
+		return -EIO;
+
+	tmio->fcr_base = fcr->start & 0xfffff;
+	tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
+	if (!tmio->fcr)
+		return -EIO;
+
+	retval = tmio_hw_init(dev, tmio);
+	if (retval)
+		return retval;
+
+	/* Set address of NAND IO lines */
+	nand_chip->IO_ADDR_R = tmio->fcr;
+	nand_chip->IO_ADDR_W = tmio->fcr;
+
+	/* Set address of hardware control function */
+	nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
+	nand_chip->dev_ready = tmio_nand_dev_ready;
+	nand_chip->read_byte = tmio_nand_read_byte;
+	nand_chip->write_buf = tmio_nand_write_buf;
+	nand_chip->read_buf = tmio_nand_read_buf;
+
+	/* set eccmode using hardware ECC */
+	nand_chip->ecc.mode = NAND_ECC_HW;
+	nand_chip->ecc.size = 512;
+	nand_chip->ecc.bytes = 6;
+	nand_chip->ecc.strength = 2;
+	nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
+	nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
+	nand_chip->ecc.correct = tmio_nand_correct_data;
+
+	if (data)
+		nand_chip->badblock_pattern = data->badblock_pattern;
+
+	/* 15 us command delay time */
+	nand_chip->chip_delay = 15;
+
+	retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
+				  dev_name(&dev->dev), tmio);
+	if (retval) {
+		dev_err(&dev->dev, "request_irq error %d\n", retval);
+		goto err_irq;
+	}
+
+	tmio->irq = irq;
+	nand_chip->waitfunc = tmio_nand_wait;
+
+	/* Scan to find existence of the device */
+	if (nand_scan(mtd, 1)) {
+		retval = -ENODEV;
+		goto err_irq;
+	}
+	/* Register the partitions */
+	retval = mtd_device_parse_register(mtd, NULL, NULL,
+					   data ? data->partition : NULL,
+					   data ? data->num_partitions : 0);
+	if (!retval)
+		return retval;
+
+	nand_release(mtd);
+
+err_irq:
+	tmio_hw_stop(dev, tmio);
+	return retval;
+}
+
+static int tmio_remove(struct platform_device *dev)
+{
+	struct tmio_nand *tmio = platform_get_drvdata(dev);
+
+	nand_release(nand_to_mtd(&tmio->chip));
+	tmio_hw_stop(dev, tmio);
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int tmio_suspend(struct platform_device *dev, pm_message_t state)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+
+	if (cell->suspend)
+		cell->suspend(dev);
+
+	tmio_hw_stop(dev, platform_get_drvdata(dev));
+	return 0;
+}
+
+static int tmio_resume(struct platform_device *dev)
+{
+	const struct mfd_cell *cell = mfd_get_cell(dev);
+
+	/* FIXME - is this required or merely another attack of the broken
+	 * SHARP platform? Looks suspicious.
+	 */
+	tmio_hw_init(dev, platform_get_drvdata(dev));
+
+	if (cell->resume)
+		cell->resume(dev);
+
+	return 0;
+}
+#else
+#define tmio_suspend NULL
+#define tmio_resume NULL
+#endif
+
+static struct platform_driver tmio_driver = {
+	.driver.name	= "tmio-nand",
+	.driver.owner	= THIS_MODULE,
+	.probe		= tmio_probe,
+	.remove		= tmio_remove,
+	.suspend	= tmio_suspend,
+	.resume		= tmio_resume,
+};
+
+module_platform_driver(tmio_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
+MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
+MODULE_ALIAS("platform:tmio-nand");
diff --git a/drivers/mtd/nand/rawnand/txx9ndfmc.c b/drivers/mtd/nand/rawnand/txx9ndfmc.c
new file mode 100644
index 000000000000..b567d212fe7d
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/txx9ndfmc.c
@@ -0,0 +1,423 @@ 
+/*
+ * TXx9 NAND flash memory controller driver
+ * Based on RBTX49xx patch from CELF patch archive.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2004-2007
+ * All Rights Reserved.
+ */
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/partitions.h>
+#include <linux/io.h>
+#include <asm/txx9/ndfmc.h>
+
+/* TXX9 NDFMC Registers */
+#define TXX9_NDFDTR	0x00
+#define TXX9_NDFMCR	0x04
+#define TXX9_NDFSR	0x08
+#define TXX9_NDFISR	0x0c
+#define TXX9_NDFIMR	0x10
+#define TXX9_NDFSPR	0x14
+#define TXX9_NDFRSTR	0x18	/* not TX4939 */
+
+/* NDFMCR : NDFMC Mode Control */
+#define TXX9_NDFMCR_WE	0x80
+#define TXX9_NDFMCR_ECC_ALL	0x60
+#define TXX9_NDFMCR_ECC_RESET	0x60
+#define TXX9_NDFMCR_ECC_READ	0x40
+#define TXX9_NDFMCR_ECC_ON	0x20
+#define TXX9_NDFMCR_ECC_OFF	0x00
+#define TXX9_NDFMCR_CE	0x10
+#define TXX9_NDFMCR_BSPRT	0x04	/* TX4925/TX4926 only */
+#define TXX9_NDFMCR_ALE	0x02
+#define TXX9_NDFMCR_CLE	0x01
+/* TX4939 only */
+#define TXX9_NDFMCR_X16	0x0400
+#define TXX9_NDFMCR_DMAREQ_MASK	0x0300
+#define TXX9_NDFMCR_DMAREQ_NODMA	0x0000
+#define TXX9_NDFMCR_DMAREQ_128	0x0100
+#define TXX9_NDFMCR_DMAREQ_256	0x0200
+#define TXX9_NDFMCR_DMAREQ_512	0x0300
+#define TXX9_NDFMCR_CS_MASK	0x0c
+#define TXX9_NDFMCR_CS(ch)	((ch) << 2)
+
+/* NDFSR : NDFMC Status */
+#define TXX9_NDFSR_BUSY	0x80
+/* TX4939 only */
+#define TXX9_NDFSR_DMARUN	0x40
+
+/* NDFRSTR : NDFMC Reset */
+#define TXX9_NDFRSTR_RST	0x01
+
+struct txx9ndfmc_priv {
+	struct platform_device *dev;
+	struct nand_chip chip;
+	int cs;
+	const char *mtdname;
+};
+
+#define MAX_TXX9NDFMC_DEV	4
+struct txx9ndfmc_drvdata {
+	struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
+	void __iomem *base;
+	unsigned char hold;	/* in gbusclock */
+	unsigned char spw;	/* in gbusclock */
+	struct nand_hw_control hw_control;
+};
+
+static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
+	return txx9_priv->dev;
+}
+
+static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
+{
+	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+	return drvdata->base + (reg << plat->shift);
+}
+
+static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
+{
+	return __raw_readl(ndregaddr(dev, reg));
+}
+
+static void txx9ndfmc_write(struct platform_device *dev,
+			    u32 val, unsigned int reg)
+{
+	__raw_writel(val, ndregaddr(dev, reg));
+}
+
+static uint8_t txx9ndfmc_read_byte(struct mtd_info *mtd)
+{
+	struct platform_device *dev = mtd_to_platdev(mtd);
+
+	return txx9ndfmc_read(dev, TXX9_NDFDTR);
+}
+
+static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+				int len)
+{
+	struct platform_device *dev = mtd_to_platdev(mtd);
+	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
+	while (len--)
+		__raw_writel(*buf++, ndfdtr);
+	txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
+{
+	struct platform_device *dev = mtd_to_platdev(mtd);
+	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
+
+	while (len--)
+		*buf++ = __raw_readl(ndfdtr);
+}
+
+static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
+			       unsigned int ctrl)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
+	struct platform_device *dev = txx9_priv->dev;
+	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+
+	if (ctrl & NAND_CTRL_CHANGE) {
+		u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+		mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
+		mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
+		mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
+		/* TXX9_NDFMCR_CE bit is 0:high 1:low */
+		mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
+		if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
+			mcr &= ~TXX9_NDFMCR_CS_MASK;
+			mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
+		}
+		txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
+	}
+	if (cmd != NAND_CMD_NONE)
+		txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
+	if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
+		/* dummy write to update external latch */
+		if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
+			txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
+	}
+	mmiowb();
+}
+
+static int txx9ndfmc_dev_ready(struct mtd_info *mtd)
+{
+	struct platform_device *dev = mtd_to_platdev(mtd);
+
+	return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
+}
+
+static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
+				   uint8_t *ecc_code)
+{
+	struct platform_device *dev = mtd_to_platdev(mtd);
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int eccbytes;
+	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+	mcr &= ~TXX9_NDFMCR_ECC_ALL;
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
+	for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
+		ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+		ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+		ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
+		ecc_code += 3;
+	}
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+	return 0;
+}
+
+static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
+		unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int eccsize;
+	int corrected = 0;
+	int stat;
+
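+	/*
+	 * Each 256-byte sub-block is protected by a 3-byte code that the
+	 * generic __nand_correct_data() helper can fix, so a 512-byte ECC
+	 * step is handled in two passes of this loop.
+	 */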
+	for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
+		stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
+		if (stat < 0)
+			return stat;
+		corrected += stat;
+		buf += 256;
+		read_ecc += 3;
+		calc_ecc += 3;
+	}
+	return corrected;
+}
+
+static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
+{
+	struct platform_device *dev = mtd_to_platdev(mtd);
+	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
+
+	mcr &= ~TXX9_NDFMCR_ECC_ALL;
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
+	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
+}
+
+static void txx9ndfmc_initialize(struct platform_device *dev)
+{
+	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+	int tmout = 100;
+
+	if (plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)
+		; /* no NDFRSTR.  Write to NDFSPR resets the NDFMC. */
+	else {
+		/* reset NDFMC */
+		txx9ndfmc_write(dev,
+				txx9ndfmc_read(dev, TXX9_NDFRSTR) |
+				TXX9_NDFRSTR_RST,
+				TXX9_NDFRSTR);
+		while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
+			if (--tmout == 0) {
+				dev_err(&dev->dev, "reset failed.\n");
+				break;
+			}
+			udelay(1);
+		}
+	}
+	/* setup Hold Time, Strobe Pulse Width */
+	txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
+	txx9ndfmc_write(dev,
+			(plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
+			TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
+}
+
+#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
+	DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
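+/*
+ * e.g. with a (hypothetical) 200 MHz gbus clock, 90 ns becomes
+ * DIV_ROUND_UP(90 * 200000, 1000000) = 18 cycles.
+ */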
+
+static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	int ret;
+
+	ret = nand_scan_ident(mtd, 1, NULL);
+	if (!ret) {
+		if (mtd->writesize >= 512) {
+			/* hardware ECC: 6 ECC bytes per 512 bytes of data */
+			chip->ecc.size = 512;
+			chip->ecc.bytes = 6;
+		}
+		ret = nand_scan_tail(mtd);
+	}
+	return ret;
+}
+
+static int __init txx9ndfmc_probe(struct platform_device *dev)
+{
+	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
+	int hold, spw;
+	int i;
+	struct txx9ndfmc_drvdata *drvdata;
+	unsigned long gbusclk = plat->gbus_clock;
+	struct resource *res;
+
+	drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+	drvdata->base = devm_ioremap_resource(&dev->dev, res);
+	if (IS_ERR(drvdata->base))
+		return PTR_ERR(drvdata->base);
+
+	hold = plat->hold ?: 20; /* tDH */
+	spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
+
+	hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
+	spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
+	if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
+		hold -= 2;	/* actual hold time : (HOLD + 2) BUSCLK */
+	spw -= 1;	/* actual wait time : (SPW + 1) BUSCLK */
+	hold = clamp(hold, 1, 15);
+	drvdata->hold = hold;
+	spw = clamp(spw, 1, 15);
+	drvdata->spw = spw;
+	dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
+		 (gbusclk + 500000) / 1000000, hold, spw);
+
+	nand_hw_control_init(&drvdata->hw_control);
+
+	platform_set_drvdata(dev, drvdata);
+	txx9ndfmc_initialize(dev);
+
+	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+		struct txx9ndfmc_priv *txx9_priv;
+		struct nand_chip *chip;
+		struct mtd_info *mtd;
+
+		if (!(plat->ch_mask & (1 << i)))
+			continue;
+		txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
+				    GFP_KERNEL);
+		if (!txx9_priv)
+			continue;
+		chip = &txx9_priv->chip;
+		mtd = nand_to_mtd(chip);
+		mtd->dev.parent = &dev->dev;
+
+		chip->read_byte = txx9ndfmc_read_byte;
+		chip->read_buf = txx9ndfmc_read_buf;
+		chip->write_buf = txx9ndfmc_write_buf;
+		chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
+		chip->dev_ready = txx9ndfmc_dev_ready;
+		chip->ecc.calculate = txx9ndfmc_calculate_ecc;
+		chip->ecc.correct = txx9ndfmc_correct_data;
+		chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
+		chip->ecc.mode = NAND_ECC_HW;
+		/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
+		chip->ecc.size = 256;
+		chip->ecc.bytes = 3;
+		chip->ecc.strength = 1;
+		chip->chip_delay = 100;
+		chip->controller = &drvdata->hw_control;
+
+		nand_set_controller_data(chip, txx9_priv);
+		txx9_priv->dev = dev;
+
+		if (plat->ch_mask != 1) {
+			txx9_priv->cs = i;
+			txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
+						       dev_name(&dev->dev), i);
+		} else {
+			txx9_priv->cs = -1;
+			txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
+						     GFP_KERNEL);
+		}
+		if (!txx9_priv->mtdname) {
+			kfree(txx9_priv);
+			dev_err(&dev->dev, "Unable to allocate MTD name.\n");
+			continue;
+		}
+		if (plat->wide_mask & (1 << i))
+			chip->options |= NAND_BUSWIDTH_16;
+
+		if (txx9ndfmc_nand_scan(mtd)) {
+			kfree(txx9_priv->mtdname);
+			kfree(txx9_priv);
+			continue;
+		}
+		mtd->name = txx9_priv->mtdname;
+
+		mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
+		drvdata->mtds[i] = mtd;
+	}
+
+	return 0;
+}
+
+static int __exit txx9ndfmc_remove(struct platform_device *dev)
+{
+	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
+	int i;
+
+	if (!drvdata)
+		return 0;
+	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
+		struct mtd_info *mtd = drvdata->mtds[i];
+		struct nand_chip *chip;
+		struct txx9ndfmc_priv *txx9_priv;
+
+		if (!mtd)
+			continue;
+		chip = mtd_to_nand(mtd);
+		txx9_priv = nand_get_controller_data(chip);
+
+		nand_release(mtd);
+		kfree(txx9_priv->mtdname);
+		kfree(txx9_priv);
+	}
+	return 0;
+}
+
+#ifdef CONFIG_PM
+static int txx9ndfmc_resume(struct platform_device *dev)
+{
+	if (platform_get_drvdata(dev))
+		txx9ndfmc_initialize(dev);
+	return 0;
+}
+#else
+#define txx9ndfmc_resume NULL
+#endif
+
+static struct platform_driver txx9ndfmc_driver = {
+	.remove		= __exit_p(txx9ndfmc_remove),
+	.resume		= txx9ndfmc_resume,
+	.driver		= {
+		.name	= "txx9ndfmc",
+	},
+};
+
+module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
+MODULE_ALIAS("platform:txx9ndfmc");
diff --git a/drivers/mtd/nand/rawnand/vf610_nfc.c b/drivers/mtd/nand/rawnand/vf610_nfc.c
new file mode 100644
index 000000000000..c497b157d56a
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/vf610_nfc.c
@@ -0,0 +1,846 @@ 
+/*
+ * Copyright 2009-2015 Freescale Semiconductor, Inc. and others
+ *
+ * Description: MPC5125, VF610, MCF54418 and Kinetis K70 Nand driver.
+ * Jason ported to M54418TWR and MVFA5 (VF610).
+ * Authors: Stefan Agner <stefan.agner@toradex.com>
+ *          Bill Pringlemeir <bpringlemeir@nbsps.com>
+ *          Shaohui Xie <b21989@freescale.com>
+ *          Jason Jin <Jason.jin@freescale.com>
+ *
+ * Based on original driver mpc5121_nfc.c.
+ *
+ * This is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Limitations:
+ * - Untested on MPC5125 and M54418.
+ * - DMA and pipelining not used.
+ * - 2K pages or less.
+ * - HW ECC: Only 2K page with 64+ OOB.
+ * - HW ECC: Only 24 and 32-bit error correction implemented.
+ */
+
+#include <linux/module.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define	DRV_NAME		"vf610_nfc"
+
+/* Register Offsets */
+#define NFC_FLASH_CMD1			0x3F00
+#define NFC_FLASH_CMD2			0x3F04
+#define NFC_COL_ADDR			0x3F08
+#define NFC_ROW_ADDR			0x3F0c
+#define NFC_ROW_ADDR_INC		0x3F14
+#define NFC_FLASH_STATUS1		0x3F18
+#define NFC_FLASH_STATUS2		0x3F1c
+#define NFC_CACHE_SWAP			0x3F28
+#define NFC_SECTOR_SIZE			0x3F2c
+#define NFC_FLASH_CONFIG		0x3F30
+#define NFC_IRQ_STATUS			0x3F38
+
+/* Addresses for NFC MAIN RAM BUFFER areas */
+#define NFC_MAIN_AREA(n)		((n) *  0x1000)
+
+#define PAGE_2K				0x0800
+#define OOB_64				0x0040
+#define OOB_MAX				0x0100
+
+/*
+ * NFC_CMD2[CODE] values. See section:
+ *  - 31.4.7 Flash Command Code Description, Vybrid manual
+ *  - 23.8.6 Flash Command Sequencer, MPC5125 manual
+ *
+ * Briefly these are bitmasks of controller cycles.
+ */
+#define READ_PAGE_CMD_CODE		0x7EE0
+#define READ_ONFI_PARAM_CMD_CODE	0x4860
+#define PROGRAM_PAGE_CMD_CODE		0x7FC0
+#define ERASE_CMD_CODE			0x4EC0
+#define READ_ID_CMD_CODE		0x4804
+#define RESET_CMD_CODE			0x4040
+#define STATUS_READ_CMD_CODE		0x4068
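+/*
+ * e.g. READ_PAGE_CMD_CODE makes the sequencer issue the command/address/
+ * command/data-read cycles that vf610_nfc_command() sets up for
+ * NAND_CMD_READ0 below.
+ */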
+
+/* NFC ECC mode defines */
+#define ECC_BYPASS			0
+#define ECC_45_BYTE			6
+#define ECC_60_BYTE			7
+
+/*** Register Mask and bit definitions */
+
+/* NFC_FLASH_CMD1 Field */
+#define CMD_BYTE2_MASK				0xFF000000
+#define CMD_BYTE2_SHIFT				24
+
+/* NFC_FLASH_CMD2 Field */
+#define CMD_BYTE1_MASK				0xFF000000
+#define CMD_BYTE1_SHIFT				24
+#define CMD_CODE_MASK				0x00FFFF00
+#define CMD_CODE_SHIFT				8
+#define BUFNO_MASK				0x00000006
+#define BUFNO_SHIFT				1
+#define START_BIT				BIT(0)
+
+/* NFC_COL_ADDR Field */
+#define COL_ADDR_MASK				0x0000FFFF
+#define COL_ADDR_SHIFT				0
+
+/* NFC_ROW_ADDR Field */
+#define ROW_ADDR_MASK				0x00FFFFFF
+#define ROW_ADDR_SHIFT				0
+#define ROW_ADDR_CHIP_SEL_RB_MASK		0xF0000000
+#define ROW_ADDR_CHIP_SEL_RB_SHIFT		28
+#define ROW_ADDR_CHIP_SEL_MASK			0x0F000000
+#define ROW_ADDR_CHIP_SEL_SHIFT			24
+
+/* NFC_FLASH_STATUS2 Field */
+#define STATUS_BYTE1_MASK			0x000000FF
+
+/* NFC_FLASH_CONFIG Field */
+#define CONFIG_ECC_SRAM_ADDR_MASK		0x7FC00000
+#define CONFIG_ECC_SRAM_ADDR_SHIFT		22
+#define CONFIG_ECC_SRAM_REQ_BIT			BIT(21)
+#define CONFIG_DMA_REQ_BIT			BIT(20)
+#define CONFIG_ECC_MODE_MASK			0x000E0000
+#define CONFIG_ECC_MODE_SHIFT			17
+#define CONFIG_FAST_FLASH_BIT			BIT(16)
+#define CONFIG_16BIT				BIT(7)
+#define CONFIG_BOOT_MODE_BIT			BIT(6)
+#define CONFIG_ADDR_AUTO_INCR_BIT		BIT(5)
+#define CONFIG_BUFNO_AUTO_INCR_BIT		BIT(4)
+#define CONFIG_PAGE_CNT_MASK			0xF
+#define CONFIG_PAGE_CNT_SHIFT			0
+
+/* NFC_IRQ_STATUS Field */
+#define IDLE_IRQ_BIT				BIT(29)
+#define IDLE_EN_BIT				BIT(20)
+#define CMD_DONE_CLEAR_BIT			BIT(18)
+#define IDLE_CLEAR_BIT				BIT(17)
+
+/*
+ * ECC status - seems to consume 8 bytes (double word). The documented
+ * status byte is located in the lowest byte of the second word (which is
+ * the 4th or 7th byte depending on endianness).
+ * Calculate an offset to store the ECC status at the end of the buffer.
+ */
+#define ECC_SRAM_ADDR		(PAGE_2K + OOB_MAX - 8)
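+/*
+ * i.e. 0x800 + 0x100 - 8 = 0x8f8, the last 8 bytes of the 2K page + 256
+ * byte OOB area; programmed into NFC_FLASH_CONFIG in 8-byte units (>> 3).
+ */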
+
+#define ECC_STATUS		0x4
+#define ECC_STATUS_MASK		0x80
+#define ECC_STATUS_ERR_COUNT	0x3F
+
+enum vf610_nfc_alt_buf {
+	ALT_BUF_DATA = 0,
+	ALT_BUF_ID = 1,
+	ALT_BUF_STAT = 2,
+	ALT_BUF_ONFI = 3,
+};
+
+enum vf610_nfc_variant {
+	NFC_VFC610 = 1,
+};
+
+struct vf610_nfc {
+	struct nand_chip chip;
+	struct device *dev;
+	void __iomem *regs;
+	struct completion cmd_done;
+	uint buf_offset;
+	int write_sz;
+	/* Status and ID are in alternate locations. */
+	enum vf610_nfc_alt_buf alt_buf;
+	enum vf610_nfc_variant variant;
+	struct clk *clk;
+	bool use_hw_ecc;
+	u32 ecc_mode;
+};
+
+static inline struct vf610_nfc *mtd_to_nfc(struct mtd_info *mtd)
+{
+	return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip);
+}
+
+static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
+{
+	return readl(nfc->regs + reg);
+}
+
+static inline void vf610_nfc_write(struct vf610_nfc *nfc, uint reg, u32 val)
+{
+	writel(val, nfc->regs + reg);
+}
+
+static inline void vf610_nfc_set(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+	vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) | bits);
+}
+
+static inline void vf610_nfc_clear(struct vf610_nfc *nfc, uint reg, u32 bits)
+{
+	vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) & ~bits);
+}
+
+static inline void vf610_nfc_set_field(struct vf610_nfc *nfc, u32 reg,
+				       u32 mask, u32 shift, u32 val)
+{
+	vf610_nfc_write(nfc, reg,
+			(vf610_nfc_read(nfc, reg) & (~mask)) | val << shift);
+}
+
+static inline void vf610_nfc_memcpy(void *dst, const void __iomem *src,
+				    size_t n)
+{
+	/*
+	 * Use this accessor for the internal SRAM buffers. On the ARM
+	 * Freescale Vybrid SoC it's known that the driver can treat
+	 * the SRAM buffer as if it's memory. Other platforms might need
+	 * to treat the buffers differently.
+	 *
+	 * For the time being, use memcpy.
+	 */
+	memcpy(dst, src, n);
+}
+
+/* Clear flags for upcoming command */
+static inline void vf610_nfc_clear_status(struct vf610_nfc *nfc)
+{
+	u32 tmp = vf610_nfc_read(nfc, NFC_IRQ_STATUS);
+
+	tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
+	vf610_nfc_write(nfc, NFC_IRQ_STATUS, tmp);
+}
+
+static void vf610_nfc_done(struct vf610_nfc *nfc)
+{
+	unsigned long timeout = msecs_to_jiffies(100);
+
+	/*
+	 * A barrier is needed after this write: it must complete before
+	 * the first read of the next register.
+	 * vf610_nfc_set() provides such a barrier by using writel() to
+	 * write to the register.
+	 */
+	vf610_nfc_set(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+	vf610_nfc_set(nfc, NFC_FLASH_CMD2, START_BIT);
+
+	if (!wait_for_completion_timeout(&nfc->cmd_done, timeout))
+		dev_warn(nfc->dev, "Timeout while waiting for BUSY.\n");
+
+	vf610_nfc_clear_status(nfc);
+}
+
+static u8 vf610_nfc_get_id(struct vf610_nfc *nfc, int col)
+{
+	u32 flash_id;
+
+	if (col < 4) {
+		flash_id = vf610_nfc_read(nfc, NFC_FLASH_STATUS1);
+		flash_id >>= (3 - col) * 8;
+	} else {
+		flash_id = vf610_nfc_read(nfc, NFC_FLASH_STATUS2);
+		flash_id >>= 24;
+	}
+
+	return flash_id & 0xff;
+}
+
+static u8 vf610_nfc_get_status(struct vf610_nfc *nfc)
+{
+	return vf610_nfc_read(nfc, NFC_FLASH_STATUS2) & STATUS_BYTE1_MASK;
+}
+
+static void vf610_nfc_send_command(struct vf610_nfc *nfc, u32 cmd_byte1,
+				   u32 cmd_code)
+{
+	u32 tmp;
+
+	vf610_nfc_clear_status(nfc);
+
+	tmp = vf610_nfc_read(nfc, NFC_FLASH_CMD2);
+	tmp &= ~(CMD_BYTE1_MASK | CMD_CODE_MASK | BUFNO_MASK);
+	tmp |= cmd_byte1 << CMD_BYTE1_SHIFT;
+	tmp |= cmd_code << CMD_CODE_SHIFT;
+	vf610_nfc_write(nfc, NFC_FLASH_CMD2, tmp);
+}
+
+static void vf610_nfc_send_commands(struct vf610_nfc *nfc, u32 cmd_byte1,
+				    u32 cmd_byte2, u32 cmd_code)
+{
+	u32 tmp;
+
+	vf610_nfc_send_command(nfc, cmd_byte1, cmd_code);
+
+	tmp = vf610_nfc_read(nfc, NFC_FLASH_CMD1);
+	tmp &= ~CMD_BYTE2_MASK;
+	tmp |= cmd_byte2 << CMD_BYTE2_SHIFT;
+	vf610_nfc_write(nfc, NFC_FLASH_CMD1, tmp);
+}
+
+static irqreturn_t vf610_nfc_irq(int irq, void *data)
+{
+	struct mtd_info *mtd = data;
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+	vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
+	complete(&nfc->cmd_done);
+
+	return IRQ_HANDLED;
+}
+
+static void vf610_nfc_addr_cycle(struct vf610_nfc *nfc, int column, int page)
+{
+	if (column != -1) {
+		if (nfc->chip.options & NAND_BUSWIDTH_16)
+			column = column / 2;
+		vf610_nfc_set_field(nfc, NFC_COL_ADDR, COL_ADDR_MASK,
+				    COL_ADDR_SHIFT, column);
+	}
+	if (page != -1)
+		vf610_nfc_set_field(nfc, NFC_ROW_ADDR, ROW_ADDR_MASK,
+				    ROW_ADDR_SHIFT, page);
+}
+
+static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
+{
+	vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+			    CONFIG_ECC_MODE_MASK,
+			    CONFIG_ECC_MODE_SHIFT, ecc_mode);
+}
+
+static inline void vf610_nfc_transfer_size(struct vf610_nfc *nfc, int size)
+{
+	vf610_nfc_write(nfc, NFC_SECTOR_SIZE, size);
+}
+
+static void vf610_nfc_command(struct mtd_info *mtd, unsigned command,
+			      int column, int page)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+	int trfr_sz = nfc->chip.options & NAND_BUSWIDTH_16 ? 1 : 0;
+
+	nfc->buf_offset = max(column, 0);
+	nfc->alt_buf = ALT_BUF_DATA;
+
+	switch (command) {
+	case NAND_CMD_SEQIN:
+		/* Use valid column/page from preread... */
+		vf610_nfc_addr_cycle(nfc, column, page);
+		nfc->buf_offset = 0;
+
+		/*
+		 * SEQIN => data => PAGEPROG sequence is done by the controller
+		 * hence we do not need to issue the command here...
+		 */
+		return;
+	case NAND_CMD_PAGEPROG:
+		trfr_sz += nfc->write_sz;
+		vf610_nfc_transfer_size(nfc, trfr_sz);
+		vf610_nfc_send_commands(nfc, NAND_CMD_SEQIN,
+					command, PROGRAM_PAGE_CMD_CODE);
+		if (nfc->use_hw_ecc)
+			vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+		else
+			vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+		break;
+
+	case NAND_CMD_RESET:
+		vf610_nfc_transfer_size(nfc, 0);
+		vf610_nfc_send_command(nfc, command, RESET_CMD_CODE);
+		break;
+
+	case NAND_CMD_READOOB:
+		trfr_sz += mtd->oobsize;
+		column = mtd->writesize;
+		vf610_nfc_transfer_size(nfc, trfr_sz);
+		vf610_nfc_send_commands(nfc, NAND_CMD_READ0,
+					NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
+		vf610_nfc_addr_cycle(nfc, column, page);
+		vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+		break;
+
+	case NAND_CMD_READ0:
+		trfr_sz += mtd->writesize + mtd->oobsize;
+		vf610_nfc_transfer_size(nfc, trfr_sz);
+		vf610_nfc_send_commands(nfc, NAND_CMD_READ0,
+					NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
+		vf610_nfc_addr_cycle(nfc, column, page);
+		vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
+		break;
+
+	case NAND_CMD_PARAM:
+		nfc->alt_buf = ALT_BUF_ONFI;
+		trfr_sz = 3 * sizeof(struct nand_onfi_params);
+		vf610_nfc_transfer_size(nfc, trfr_sz);
+		vf610_nfc_send_command(nfc, command, READ_ONFI_PARAM_CMD_CODE);
+		vf610_nfc_addr_cycle(nfc, -1, column);
+		vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
+		break;
+
+	case NAND_CMD_ERASE1:
+		vf610_nfc_transfer_size(nfc, 0);
+		vf610_nfc_send_commands(nfc, command,
+					NAND_CMD_ERASE2, ERASE_CMD_CODE);
+		vf610_nfc_addr_cycle(nfc, column, page);
+		break;
+
+	case NAND_CMD_READID:
+		nfc->alt_buf = ALT_BUF_ID;
+		nfc->buf_offset = 0;
+		vf610_nfc_transfer_size(nfc, 0);
+		vf610_nfc_send_command(nfc, command, READ_ID_CMD_CODE);
+		vf610_nfc_addr_cycle(nfc, -1, column);
+		break;
+
+	case NAND_CMD_STATUS:
+		nfc->alt_buf = ALT_BUF_STAT;
+		vf610_nfc_transfer_size(nfc, 0);
+		vf610_nfc_send_command(nfc, command, STATUS_READ_CMD_CODE);
+		break;
+	default:
+		return;
+	}
+
+	vf610_nfc_done(nfc);
+
+	nfc->use_hw_ecc = false;
+	nfc->write_sz = 0;
+}
+
+static void vf610_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+	uint c = nfc->buf_offset;
+
+	/* Alternate buffers are only supported through read_byte */
+	WARN_ON(nfc->alt_buf);
+
+	vf610_nfc_memcpy(buf, nfc->regs + NFC_MAIN_AREA(0) + c, len);
+
+	nfc->buf_offset += len;
+}
+
+static void vf610_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
+				int len)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+	uint c = nfc->buf_offset;
+	uint l;
+
+	l = min_t(uint, len, mtd->writesize + mtd->oobsize - c);
+	vf610_nfc_memcpy(nfc->regs + NFC_MAIN_AREA(0) + c, buf, l);
+
+	nfc->write_sz += l;
+	nfc->buf_offset += l;
+}
+
+static uint8_t vf610_nfc_read_byte(struct mtd_info *mtd)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+	u8 tmp;
+	uint c = nfc->buf_offset;
+
+	switch (nfc->alt_buf) {
+	case ALT_BUF_ID:
+		tmp = vf610_nfc_get_id(nfc, c);
+		break;
+	case ALT_BUF_STAT:
+		tmp = vf610_nfc_get_status(nfc);
+		break;
+#ifdef __LITTLE_ENDIAN
+	case ALT_BUF_ONFI:
+		/* Reverse byte since the controller uses big endianness */
+		c = nfc->buf_offset ^ 0x3;
+		/* fall-through */
+#endif
+	default:
+		tmp = *((u8 *)(nfc->regs + NFC_MAIN_AREA(0) + c));
+		break;
+	}
+	nfc->buf_offset++;
+	return tmp;
+}
+
+static u16 vf610_nfc_read_word(struct mtd_info *mtd)
+{
+	u16 tmp;
+
+	vf610_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
+	return tmp;
+}
+
+/* If not provided, upper layers apply a fixed delay. */
+static int vf610_nfc_dev_ready(struct mtd_info *mtd)
+{
+	/* NFC handles R/B internally; always ready. */
+	return 1;
+}
+
+/*
+ * This function supports Vybrid only (MPC5125 would have full RB and four CS)
+ */
+static void vf610_nfc_select_chip(struct mtd_info *mtd, int chip)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+	u32 tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
+
+	/* Vybrid only (MPC5125 would have full RB and four CS) */
+	if (nfc->variant != NFC_VFC610)
+		return;
+
+	tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
+
+	if (chip >= 0) {
+		tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
+		tmp |= BIT(chip) << ROW_ADDR_CHIP_SEL_SHIFT;
+	}
+
+	vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
+}
+
+/* Count the number of 0's in buff up to max_bits */
+static inline int count_written_bits(uint8_t *buff, int size, int max_bits)
+{
+	uint32_t *buff32 = (uint32_t *)buff;
+	int k, written_bits = 0;
+
+	for (k = 0; k < (size / 4); k++) {
+		written_bits += hweight32(~buff32[k]);
+		if (unlikely(written_bits > max_bits))
+			break;
+	}
+
+	return written_bits;
+}
+
+static inline int vf610_nfc_correct_data(struct mtd_info *mtd, uint8_t *dat,
+					 uint8_t *oob, int page)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+	u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
+	u8 ecc_status;
+	u8 ecc_count;
+	int flips_threshold = nfc->chip.ecc.strength / 2;
+
+	ecc_status = vf610_nfc_read(nfc, ecc_status_off) & 0xff;
+	ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
+
+	if (!(ecc_status & ECC_STATUS_MASK))
+		return ecc_count;
+
+	/* Read OOB without ECC unit enabled */
+	vf610_nfc_command(mtd, NAND_CMD_READOOB, 0, page);
+	vf610_nfc_read_buf(mtd, oob, mtd->oobsize);
+
+	/*
+	 * On an erased page, the bit count (including OOB) should be zero
+	 * or at least stay below half of the ECC strength.
+	 */
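+	/*
+	 * With the 24- and 32-bit strengths accepted in the probe path,
+	 * flips_threshold works out to 12 or 16 bitflips, respectively.
+	 */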
+	return nand_check_erased_ecc_chunk(dat, nfc->chip.ecc.size, oob,
+					   mtd->oobsize, NULL, 0,
+					   flips_threshold);
+}
+
+static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
+				uint8_t *buf, int oob_required, int page)
+{
+	int eccsize = chip->ecc.size;
+	int stat;
+
+	vf610_nfc_read_buf(mtd, buf, eccsize);
+	if (oob_required)
+		vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	stat = vf610_nfc_correct_data(mtd, buf, chip->oob_poi, page);
+
+	if (stat < 0) {
+		mtd->ecc_stats.failed++;
+		return 0;
+	} else {
+		mtd->ecc_stats.corrected += stat;
+		return stat;
+	}
+}
+
+static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
+				const uint8_t *buf, int oob_required, int page)
+{
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+	vf610_nfc_write_buf(mtd, buf, mtd->writesize);
+	if (oob_required)
+		vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
+
+	/* Always write whole page including OOB due to HW ECC */
+	nfc->use_hw_ecc = true;
+	nfc->write_sz = mtd->writesize + mtd->oobsize;
+
+	return 0;
+}
+
+static const struct of_device_id vf610_nfc_dt_ids[] = {
+	{ .compatible = "fsl,vf610-nfc", .data = (void *)NFC_VFC610 },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, vf610_nfc_dt_ids);
+
+static void vf610_nfc_preinit_controller(struct vf610_nfc *nfc)
+{
+	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
+	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
+	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
+	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
+	vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
+
+	/* Disable virtual pages, only one elementary transfer unit */
+	vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
+			    CONFIG_PAGE_CNT_SHIFT, 1);
+}
+
+static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
+{
+	if (nfc->chip.options & NAND_BUSWIDTH_16)
+		vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+	else
+		vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
+
+	if (nfc->chip.ecc.mode == NAND_ECC_HW) {
+		/* Set ECC status offset in SRAM */
+		vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
+				    CONFIG_ECC_SRAM_ADDR_MASK,
+				    CONFIG_ECC_SRAM_ADDR_SHIFT,
+				    ECC_SRAM_ADDR >> 3);
+
+		/* Enable ECC status in SRAM */
+		vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
+	}
+}
+
+static int vf610_nfc_probe(struct platform_device *pdev)
+{
+	struct vf610_nfc *nfc;
+	struct resource *res;
+	struct mtd_info *mtd;
+	struct nand_chip *chip;
+	struct device_node *child;
+	const struct of_device_id *of_id;
+	int err;
+	int irq;
+
+	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
+	if (!nfc)
+		return -ENOMEM;
+
+	nfc->dev = &pdev->dev;
+	chip = &nfc->chip;
+	mtd = nand_to_mtd(chip);
+
+	mtd->owner = THIS_MODULE;
+	mtd->dev.parent = nfc->dev;
+	mtd->name = DRV_NAME;
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return -EINVAL;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nfc->regs = devm_ioremap_resource(nfc->dev, res);
+	if (IS_ERR(nfc->regs))
+		return PTR_ERR(nfc->regs);
+
+	nfc->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(nfc->clk))
+		return PTR_ERR(nfc->clk);
+
+	err = clk_prepare_enable(nfc->clk);
+	if (err) {
+		dev_err(nfc->dev, "Unable to enable clock!\n");
+		return err;
+	}
+
+	of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
+	nfc->variant = (enum vf610_nfc_variant)of_id->data;
+
+	for_each_available_child_of_node(nfc->dev->of_node, child) {
+		if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
+
+			if (nand_get_flash_node(chip)) {
+				dev_err(nfc->dev,
+					"Only one NAND chip supported!\n");
+				err = -EINVAL;
+				goto error;
+			}
+
+			nand_set_flash_node(chip, child);
+		}
+	}
+
+	if (!nand_get_flash_node(chip)) {
+		dev_err(nfc->dev, "NAND chip sub-node missing!\n");
+		err = -ENODEV;
+		goto err_clk;
+	}
+
+	chip->dev_ready = vf610_nfc_dev_ready;
+	chip->cmdfunc = vf610_nfc_command;
+	chip->read_byte = vf610_nfc_read_byte;
+	chip->read_word = vf610_nfc_read_word;
+	chip->read_buf = vf610_nfc_read_buf;
+	chip->write_buf = vf610_nfc_write_buf;
+	chip->select_chip = vf610_nfc_select_chip;
+
+	chip->options |= NAND_NO_SUBPAGE_WRITE;
+
+	init_completion(&nfc->cmd_done);
+
+	err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, mtd);
+	if (err) {
+		dev_err(nfc->dev, "Error requesting IRQ!\n");
+		goto error;
+	}
+
+	vf610_nfc_preinit_controller(nfc);
+
+	/* first scan to find the device and get the page size */
+	if (nand_scan_ident(mtd, 1, NULL)) {
+		err = -ENXIO;
+		goto error;
+	}
+
+	vf610_nfc_init_controller(nfc);
+
+	/* Bad block options. */
+	if (chip->bbt_options & NAND_BBT_USE_FLASH)
+		chip->bbt_options |= NAND_BBT_NO_OOB;
+
+	/* Single buffer only, max 256 OOB minus ECC status */
+	if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
+		dev_err(nfc->dev, "Unsupported flash page size\n");
+		err = -ENXIO;
+		goto error;
+	}
+
+	if (chip->ecc.mode == NAND_ECC_HW) {
+		if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
+			dev_err(nfc->dev, "Unsupported flash with hwecc\n");
+			err = -ENXIO;
+			goto error;
+		}
+
+		if (chip->ecc.size != mtd->writesize) {
+			dev_err(nfc->dev, "Step size needs to be page size\n");
+			err = -ENXIO;
+			goto error;
+		}
+
+		/* Only 64 byte ECC layouts known */
+		if (mtd->oobsize > 64)
+			mtd->oobsize = 64;
+
+		/*
+		 * mtd->ecclayout is not specified here because we're using the
+		 * default large page ECC layout defined in NAND core.
+		 */
+		if (chip->ecc.strength == 32) {
+			nfc->ecc_mode = ECC_60_BYTE;
+			chip->ecc.bytes = 60;
+		} else if (chip->ecc.strength == 24) {
+			nfc->ecc_mode = ECC_45_BYTE;
+			chip->ecc.bytes = 45;
+		} else {
+			dev_err(nfc->dev, "Unsupported ECC strength\n");
+			err = -ENXIO;
+			goto error;
+		}
+
+		chip->ecc.read_page = vf610_nfc_read_page;
+		chip->ecc.write_page = vf610_nfc_write_page;
+
+		chip->ecc.size = PAGE_2K;
+	}
+
+	/* second phase scan */
+	if (nand_scan_tail(mtd)) {
+		err = -ENXIO;
+		goto error;
+	}
+
+	platform_set_drvdata(pdev, mtd);
+
+	/* Register device in MTD */
+	return mtd_device_register(mtd, NULL, 0);
+
+error:
+	of_node_put(nand_get_flash_node(chip));
+err_clk:
+	clk_disable_unprepare(nfc->clk);
+	return err;
+}
+
+static int vf610_nfc_remove(struct platform_device *pdev)
+{
+	struct mtd_info *mtd = platform_get_drvdata(pdev);
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+	nand_release(mtd);
+	clk_disable_unprepare(nfc->clk);
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int vf610_nfc_suspend(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+	clk_disable_unprepare(nfc->clk);
+	return 0;
+}
+
+static int vf610_nfc_resume(struct device *dev)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
+
+	pinctrl_pm_select_default_state(dev);
+
+	clk_prepare_enable(nfc->clk);
+
+	vf610_nfc_preinit_controller(nfc);
+	vf610_nfc_init_controller(nfc);
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(vf610_nfc_pm_ops, vf610_nfc_suspend, vf610_nfc_resume);
+
+static struct platform_driver vf610_nfc_driver = {
+	.driver		= {
+		.name	= DRV_NAME,
+		.of_match_table = vf610_nfc_dt_ids,
+		.pm	= &vf610_nfc_pm_ops,
+	},
+	.probe		= vf610_nfc_probe,
+	.remove		= vf610_nfc_remove,
+};
+
+module_platform_driver(vf610_nfc_driver);
+
+MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
+MODULE_DESCRIPTION("Freescale VF610/MPC5125 NFC MTD NAND driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/rawnand/xway_nand.c b/drivers/mtd/nand/rawnand/xway_nand.c
new file mode 100644
index 000000000000..3e7353e76264
--- /dev/null
+++ b/drivers/mtd/nand/rawnand/xway_nand.c
@@ -0,0 +1,248 @@ 
+/*
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License version 2 as published
+ *  by the Free Software Foundation.
+ *
+ *  Copyright © 2012 John Crispin <blogic@openwrt.org>
+ *  Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+/* nand registers */
+#define EBU_ADDSEL1		0x24
+#define EBU_NAND_CON		0xB0
+#define EBU_NAND_WAIT		0xB4
+#define  NAND_WAIT_RD		BIT(0) /* NAND flash status output */
+#define  NAND_WAIT_WR_C		BIT(3) /* NAND Write/Read complete */
+#define EBU_NAND_ECC0		0xB8
+#define EBU_NAND_ECC_AC		0xBC
+
+/*
+ * nand commands
+ * The pins of the NAND chip are selected based on the address bits of the
+ * "register" read and write. There are no special registers, but an
+ * address range and the lower address bits are used to activate the
+ * correct line. For example when the bit (1 << 2) is set in the address
+ * the ALE pin will be activated.
+ */
+#define NAND_CMD_ALE		BIT(2) /* address latch enable */
+#define NAND_CMD_CLE		BIT(3) /* command latch enable */
+#define NAND_CMD_CS		BIT(4) /* chip select */
+#define NAND_CMD_SE		BIT(5) /* spare area access latch */
+#define NAND_CMD_WP		BIT(6) /* write protect */
+#define NAND_WRITE_CMD		(NAND_CMD_CS | NAND_CMD_CLE)
+#define NAND_WRITE_ADDR		(NAND_CMD_CS | NAND_CMD_ALE)
+#define NAND_WRITE_DATA		(NAND_CMD_CS)
+#define NAND_READ_DATA		(NAND_CMD_CS)
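+/*
+ * e.g. NAND_WRITE_ADDR = CS | ALE = 0x14, so writing a byte to
+ * nandaddr + 0x14 asserts chip select and ALE and latches one
+ * address cycle into the flash.
+ */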
+
+/* we need to tell the EBU which address the NAND is mapped to */
+#define ADDSEL1_MASK(x)		(x << 4)
+#define ADDSEL1_REGEN		1
+
+/* we need to tell the EBU that we have nand attached and set it up properly */
+#define BUSCON1_SETUP		(1 << 22)
+#define BUSCON1_BCGEN_RES	(0x3 << 12)
+#define BUSCON1_WAITWRC2	(2 << 8)
+#define BUSCON1_WAITRDC2	(2 << 6)
+#define BUSCON1_HOLDC1		(1 << 4)
+#define BUSCON1_RECOVC1		(1 << 2)
+#define BUSCON1_CMULT4		1
+
+#define NAND_CON_CE		(1 << 20)
+#define NAND_CON_OUT_CS1	(1 << 10)
+#define NAND_CON_IN_CS1		(1 << 8)
+#define NAND_CON_PRE_P		(1 << 7)
+#define NAND_CON_WP_P		(1 << 6)
+#define NAND_CON_SE_P		(1 << 5)
+#define NAND_CON_CS_P		(1 << 4)
+#define NAND_CON_CSMUX		(1 << 1)
+#define NAND_CON_NANDM		1
+
+struct xway_nand_data {
+	struct nand_chip	chip;
+	unsigned long		csflags;
+	void __iomem		*nandaddr;
+};
+
+static u8 xway_readb(struct mtd_info *mtd, int op)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct xway_nand_data *data = nand_get_controller_data(chip);
+
+	return readb(data->nandaddr + op);
+}
+
+static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct xway_nand_data *data = nand_get_controller_data(chip);
+
+	writeb(value, data->nandaddr + op);
+}
+
+static void xway_select_chip(struct mtd_info *mtd, int select)
+{
+	struct nand_chip *chip = mtd_to_nand(mtd);
+	struct xway_nand_data *data = nand_get_controller_data(chip);
+
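+	/* the EBU bus lock is held for as long as the chip stays selected */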
+	switch (select) {
+	case -1:
+		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
+		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
+		spin_unlock_irqrestore(&ebu_lock, data->csflags);
+		break;
+	case 0:
+		spin_lock_irqsave(&ebu_lock, data->csflags);
+		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
+		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
+		break;
+	default:
+		BUG();
+	}
+}
+
+static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
+{
+	if (cmd == NAND_CMD_NONE)
+		return;
+
+	if (ctrl & NAND_CLE)
+		xway_writeb(mtd, NAND_WRITE_CMD, cmd);
+	else if (ctrl & NAND_ALE)
+		xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
+
+	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
+		;
+}
+
+static int xway_dev_ready(struct mtd_info *mtd)
+{
+	return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
+}
+
+static unsigned char xway_read_byte(struct mtd_info *mtd)
+{
+	return xway_readb(mtd, NAND_READ_DATA);
+}
+
+static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		buf[i] = xway_readb(mtd, NAND_WRITE_DATA);
+}
+
+static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		xway_writeb(mtd, NAND_WRITE_DATA, buf[i]);
+}
+
+/*
+ * Probe for the NAND device.
+ */
+static int xway_nand_probe(struct platform_device *pdev)
+{
+	struct xway_nand_data *data;
+	struct mtd_info *mtd;
+	struct resource *res;
+	int err;
+	u32 cs;
+	u32 cs_flag = 0;
+
+	/* Allocate memory for the device structure (and zero it) */
+	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
+			    GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(data->nandaddr))
+		return PTR_ERR(data->nandaddr);
+
+	nand_set_flash_node(&data->chip, pdev->dev.of_node);
+	mtd = nand_to_mtd(&data->chip);
+	mtd->dev.parent = &pdev->dev;
+
+	data->chip.cmd_ctrl = xway_cmd_ctrl;
+	data->chip.dev_ready = xway_dev_ready;
+	data->chip.select_chip = xway_select_chip;
+	data->chip.write_buf = xway_write_buf;
+	data->chip.read_buf = xway_read_buf;
+	data->chip.read_byte = xway_read_byte;
+	data->chip.chip_delay = 30;
+
+	data->chip.ecc.mode = NAND_ECC_SOFT;
+	data->chip.ecc.algo = NAND_ECC_HAMMING;
+
+	platform_set_drvdata(pdev, data);
+	nand_set_controller_data(&data->chip, data);
+
+	/* load our CS from the DT. Either we find a valid 1 or default to 0 */
+	err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
+	if (!err && cs == 1)
+		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
+
+	/* setup the EBU to run in NAND mode on our base addr */
+	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
+		    | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
+
+	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
+		    | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
+		    | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
+
+	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
+		    | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
+		    | cs_flag, EBU_NAND_CON);
+
+	/* Scan to find existence of the device */
+	err = nand_scan(mtd, 1);
+	if (err)
+		return err;
+
+	err = mtd_device_register(mtd, NULL, 0);
+	if (err)
+		nand_release(mtd);
+
+	return err;
+}
+
+/*
+ * Remove a NAND device.
+ */
+static int xway_nand_remove(struct platform_device *pdev)
+{
+	struct xway_nand_data *data = platform_get_drvdata(pdev);
+
+	nand_release(nand_to_mtd(&data->chip));
+
+	return 0;
+}
+
+static const struct of_device_id xway_nand_match[] = {
+	{ .compatible = "lantiq,nand-xway" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, xway_nand_match);
+
+static struct platform_driver xway_nand_driver = {
+	.probe	= xway_nand_probe,
+	.remove	= xway_nand_remove,
+	.driver	= {
+		.name		= "lantiq,nand-xway",
+		.of_match_table = xway_nand_match,
+	},
+};
+
+module_platform_driver(xway_nand_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c
deleted file mode 100644
index 6ce9f867a123..000000000000
--- a/drivers/mtd/nand/s3c2410.c
+++ /dev/null
@@ -1,1165 +0,0 @@ 
-/* linux/drivers/mtd/nand/s3c2410.c
- *
- * Copyright © 2004-2008 Simtec Electronics
- *	http://armlinux.simtec.co.uk/
- *	Ben Dooks <ben@simtec.co.uk>
- *
- * Samsung S3C2410/S3C2440/S3C2412 NAND driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
-*/
-
-#define pr_fmt(fmt) "nand-s3c2410: " fmt
-
-#ifdef CONFIG_MTD_NAND_S3C2410_DEBUG
-#define DEBUG
-#endif
-
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/io.h>
-#include <linux/ioport.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/cpufreq.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-
-#include <linux/platform_data/mtd-nand-s3c2410.h>
-
-#define S3C2410_NFREG(x) (x)
-
-#define S3C2410_NFCONF		S3C2410_NFREG(0x00)
-#define S3C2410_NFCMD		S3C2410_NFREG(0x04)
-#define S3C2410_NFADDR		S3C2410_NFREG(0x08)
-#define S3C2410_NFDATA		S3C2410_NFREG(0x0C)
-#define S3C2410_NFSTAT		S3C2410_NFREG(0x10)
-#define S3C2410_NFECC		S3C2410_NFREG(0x14)
-#define S3C2440_NFCONT		S3C2410_NFREG(0x04)
-#define S3C2440_NFCMD		S3C2410_NFREG(0x08)
-#define S3C2440_NFADDR		S3C2410_NFREG(0x0C)
-#define S3C2440_NFDATA		S3C2410_NFREG(0x10)
-#define S3C2440_NFSTAT		S3C2410_NFREG(0x20)
-#define S3C2440_NFMECC0		S3C2410_NFREG(0x2C)
-#define S3C2412_NFSTAT		S3C2410_NFREG(0x28)
-#define S3C2412_NFMECC0		S3C2410_NFREG(0x34)
-#define S3C2410_NFCONF_EN		(1<<15)
-#define S3C2410_NFCONF_INITECC		(1<<12)
-#define S3C2410_NFCONF_nFCE		(1<<11)
-#define S3C2410_NFCONF_TACLS(x)		((x)<<8)
-#define S3C2410_NFCONF_TWRPH0(x)	((x)<<4)
-#define S3C2410_NFCONF_TWRPH1(x)	((x)<<0)
-#define S3C2410_NFSTAT_BUSY		(1<<0)
-#define S3C2440_NFCONF_TACLS(x)		((x)<<12)
-#define S3C2440_NFCONF_TWRPH0(x)	((x)<<8)
-#define S3C2440_NFCONF_TWRPH1(x)	((x)<<4)
-#define S3C2440_NFCONT_INITECC		(1<<4)
-#define S3C2440_NFCONT_nFCE		(1<<1)
-#define S3C2440_NFCONT_ENABLE		(1<<0)
-#define S3C2440_NFSTAT_READY		(1<<0)
-#define S3C2412_NFCONF_NANDBOOT		(1<<31)
-#define S3C2412_NFCONT_INIT_MAIN_ECC	(1<<5)
-#define S3C2412_NFCONT_nFCE0		(1<<1)
-#define S3C2412_NFSTAT_READY		(1<<0)
-
-/* new oob placement block for use with hardware ecc generation
- */
-static int s3c2410_ooblayout_ecc(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 0;
-	oobregion->length = 3;
-
-	return 0;
-}
-
-static int s3c2410_ooblayout_free(struct mtd_info *mtd, int section,
-				  struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 8;
-	oobregion->length = 8;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops s3c2410_ooblayout_ops = {
-	.ecc = s3c2410_ooblayout_ecc,
-	.free = s3c2410_ooblayout_free,
-};
-
-/* controller and mtd information */
-
-struct s3c2410_nand_info;
-
-/**
- * struct s3c2410_nand_mtd - driver MTD structure
- * @mtd: The MTD instance to pass to the MTD layer.
- * @chip: The NAND chip information.
- * @set: The platform information supplied for this set of NAND chips.
- * @info: Link back to the hardware information.
- * @scan_res: The result from calling nand_scan_ident().
-*/
-struct s3c2410_nand_mtd {
-	struct nand_chip		chip;
-	struct s3c2410_nand_set		*set;
-	struct s3c2410_nand_info	*info;
-	int				scan_res;
-};
-
-enum s3c_cpu_type {
-	TYPE_S3C2410,
-	TYPE_S3C2412,
-	TYPE_S3C2440,
-};
-
-enum s3c_nand_clk_state {
-	CLOCK_DISABLE	= 0,
-	CLOCK_ENABLE,
-	CLOCK_SUSPEND,
-};
-
-/* overview of the s3c2410 nand state */
-
-/**
- * struct s3c2410_nand_info - NAND controller state.
- * @mtds: An array of MTD instances on this controoler.
- * @platform: The platform data for this board.
- * @device: The platform device we bound to.
- * @clk: The clock resource for this controller.
- * @regs: The area mapped for the hardware registers.
- * @sel_reg: Pointer to the register controlling the NAND selection.
- * @sel_bit: The bit in @sel_reg to select the NAND chip.
- * @mtd_count: The number of MTDs created from this controller.
- * @save_sel: The contents of @sel_reg to be saved over suspend.
- * @clk_rate: The clock rate from @clk.
- * @clk_state: The current clock state.
- * @cpu_type: The exact type of this controller.
- */
-struct s3c2410_nand_info {
-	/* mtd info */
-	struct nand_hw_control		controller;
-	struct s3c2410_nand_mtd		*mtds;
-	struct s3c2410_platform_nand	*platform;
-
-	/* device info */
-	struct device			*device;
-	struct clk			*clk;
-	void __iomem			*regs;
-	void __iomem			*sel_reg;
-	int				sel_bit;
-	int				mtd_count;
-	unsigned long			save_sel;
-	unsigned long			clk_rate;
-	enum s3c_nand_clk_state		clk_state;
-
-	enum s3c_cpu_type		cpu_type;
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-	struct notifier_block	freq_transition;
-#endif
-};
-
-/* conversion functions */
-
-static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct s3c2410_nand_mtd,
-			    chip);
-}
-
-static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd)
-{
-	return s3c2410_nand_mtd_toours(mtd)->info;
-}
-
-static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev)
-{
-	return platform_get_drvdata(dev);
-}
-
-static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev)
-{
-	return dev_get_platdata(&dev->dev);
-}
-
-static inline int allow_clk_suspend(struct s3c2410_nand_info *info)
-{
-#ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP
-	return 1;
-#else
-	return 0;
-#endif
-}
-
-/**
- * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock.
- * @info: The controller instance.
- * @new_state: State to which clock should be set.
- */
-static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info,
-		enum s3c_nand_clk_state new_state)
-{
-	if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND)
-		return;
-
-	if (info->clk_state == CLOCK_ENABLE) {
-		if (new_state != CLOCK_ENABLE)
-			clk_disable_unprepare(info->clk);
-	} else {
-		if (new_state == CLOCK_ENABLE)
-			clk_prepare_enable(info->clk);
-	}
-
-	info->clk_state = new_state;
-}
-
-/* timing calculations */
-
-#define NS_IN_KHZ 1000000
-
-/**
- * s3c_nand_calc_rate - calculate timing data.
- * @wanted: The cycle time in nanoseconds.
- * @clk: The clock rate in kHz.
- * @max: The maximum divider value.
- *
- * Calculate the timing value from the given parameters.
- */
-static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max)
-{
-	int result;
-
-	result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ);
-
-	pr_debug("result %d from %ld, %d\n", result, clk, wanted);
-
-	if (result > max) {
-		pr_err("%d ns is too big for current clock rate %ld\n",
-			wanted, clk);
-		return -1;
-	}
-
-	if (result < 1)
-		result = 1;
-
-	return result;
-}
-
-#define to_ns(ticks, clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk))
-
-/* controller setup */
-
-/**
- * s3c2410_nand_setrate - setup controller timing information.
- * @info: The controller instance.
- *
- * Given the information supplied by the platform, calculate and set
- * the necessary timing registers in the hardware to generate the
- * necessary timing cycles to the hardware.
- */
-static int s3c2410_nand_setrate(struct s3c2410_nand_info *info)
-{
-	struct s3c2410_platform_nand *plat = info->platform;
-	int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4;
-	int tacls, twrph0, twrph1;
-	unsigned long clkrate = clk_get_rate(info->clk);
-	unsigned long uninitialized_var(set), cfg, uninitialized_var(mask);
-	unsigned long flags;
-
-	/* calculate the timing information for the controller */
-
-	info->clk_rate = clkrate;
-	clkrate /= 1000;	/* turn clock into kHz for ease of use */
-
-	if (plat != NULL) {
-		tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max);
-		twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8);
-		twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8);
-	} else {
-		/* default timings */
-		tacls = tacls_max;
-		twrph0 = 8;
-		twrph1 = 8;
-	}
-
-	if (tacls < 0 || twrph0 < 0 || twrph1 < 0) {
-		dev_err(info->device, "cannot get suitable timings\n");
-		return -EINVAL;
-	}
-
-	dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n",
-		tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate),
-						twrph1, to_ns(twrph1, clkrate));
-
-	switch (info->cpu_type) {
-	case TYPE_S3C2410:
-		mask = (S3C2410_NFCONF_TACLS(3) |
-			S3C2410_NFCONF_TWRPH0(7) |
-			S3C2410_NFCONF_TWRPH1(7));
-		set = S3C2410_NFCONF_EN;
-		set |= S3C2410_NFCONF_TACLS(tacls - 1);
-		set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1);
-		set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1);
-		break;
-
-	case TYPE_S3C2440:
-	case TYPE_S3C2412:
-		mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) |
-			S3C2440_NFCONF_TWRPH0(7) |
-			S3C2440_NFCONF_TWRPH1(7));
-
-		set = S3C2440_NFCONF_TACLS(tacls - 1);
-		set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1);
-		set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1);
-		break;
-
-	default:
-		BUG();
-	}
-
-	local_irq_save(flags);
-
-	cfg = readl(info->regs + S3C2410_NFCONF);
-	cfg &= ~mask;
-	cfg |= set;
-	writel(cfg, info->regs + S3C2410_NFCONF);
-
-	local_irq_restore(flags);
-
-	dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg);
-
-	return 0;
-}
-
-/**
- * s3c2410_nand_inithw - basic hardware initialisation
- * @info: The hardware state.
- *
- * Do the basic initialisation of the hardware, using s3c2410_nand_setrate()
- * to setup the hardware access speeds and set the controller to be enabled.
-*/
-static int s3c2410_nand_inithw(struct s3c2410_nand_info *info)
-{
-	int ret;
-
-	ret = s3c2410_nand_setrate(info);
-	if (ret < 0)
-		return ret;
-
-	switch (info->cpu_type) {
-	case TYPE_S3C2410:
-	default:
-		break;
-
-	case TYPE_S3C2440:
-	case TYPE_S3C2412:
-		/* enable the controller and de-assert nFCE */
-
-		writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT);
-	}
-
-	return 0;
-}
-
-/**
- * s3c2410_nand_select_chip - select the given nand chip
- * @mtd: The MTD instance for this chip.
- * @chip: The chip number.
- *
- * This is called by the MTD layer to either select a given chip for the
- * @mtd instance, or to indicate that the access has finished and the
- * chip can be de-selected.
- *
- * The routine ensures that the nFCE line is correctly setup, and any
- * platform specific selection code is called to route nFCE to the specific
- * chip.
- */
-static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct s3c2410_nand_info *info;
-	struct s3c2410_nand_mtd *nmtd;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	unsigned long cur;
-
-	nmtd = nand_get_controller_data(this);
-	info = nmtd->info;
-
-	if (chip != -1)
-		s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-
-	cur = readl(info->sel_reg);
-
-	if (chip == -1) {
-		cur |= info->sel_bit;
-	} else {
-		if (nmtd->set != NULL && chip > nmtd->set->nr_chips) {
-			dev_err(info->device, "invalid chip %d\n", chip);
-			return;
-		}
-
-		if (info->platform != NULL) {
-			if (info->platform->select_chip != NULL)
-				(info->platform->select_chip) (nmtd->set, chip);
-		}
-
-		cur &= ~info->sel_bit;
-	}
-
-	writel(cur, info->sel_reg);
-
-	if (chip == -1)
-		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
-}
-
-/* s3c2410_nand_hwcontrol
- *
- * Issue command and address cycles to the chip
-*/
-
-static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd,
-				   unsigned int ctrl)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		writeb(cmd, info->regs + S3C2410_NFCMD);
-	else
-		writeb(cmd, info->regs + S3C2410_NFADDR);
-}
-
-/* command and control functions */
-
-static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd,
-				   unsigned int ctrl)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		writeb(cmd, info->regs + S3C2440_NFCMD);
-	else
-		writeb(cmd, info->regs + S3C2440_NFADDR);
-}
-
-/* s3c2410_nand_devready()
- *
- * returns 0 if the nand is busy, 1 if it is ready
-*/
-
-static int s3c2410_nand_devready(struct mtd_info *mtd)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY;
-}
-
-static int s3c2440_nand_devready(struct mtd_info *mtd)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY;
-}
-
-static int s3c2412_nand_devready(struct mtd_info *mtd)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY;
-}
-
-/* ECC handling functions */
-
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
-static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat,
-				     u_char *read_ecc, u_char *calc_ecc)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	unsigned int diff0, diff1, diff2;
-	unsigned int bit, byte;
-
-	pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc);
-
-	diff0 = read_ecc[0] ^ calc_ecc[0];
-	diff1 = read_ecc[1] ^ calc_ecc[1];
-	diff2 = read_ecc[2] ^ calc_ecc[2];
-
-	pr_debug("%s: rd %*phN calc %*phN diff %02x%02x%02x\n",
-		 __func__, 3, read_ecc, 3, calc_ecc,
-		 diff0, diff1, diff2);
-
-	if (diff0 == 0 && diff1 == 0 && diff2 == 0)
-		return 0;		/* ECC is ok */
-
-	/* sometimes people do not think about using the ECC, so check
-	 * to see if we have a 0xff,0xff,0xff read ECC and then ignore
-	 * the error, on the assumption that this is an un-ECCed page.
-	 */
-	if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff
-	    && info->platform->ignore_unset_ecc)
-		return 0;
-
-	/* Can we correct this ECC (i.e., one row and one column change)?
-	 * Note: this is similar to the 256 byte ECC used on SmartMedia. */
-
-	if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 &&
-	    ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 &&
-	    ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) {
-		/* calculate the bit position of the error */
-
-		bit  = ((diff2 >> 3) & 1) |
-		       ((diff2 >> 4) & 2) |
-		       ((diff2 >> 5) & 4);
-
-		/* calculate the byte position of the error */
-
-		byte = ((diff2 << 7) & 0x100) |
-		       ((diff1 << 0) & 0x80)  |
-		       ((diff1 << 1) & 0x40)  |
-		       ((diff1 << 2) & 0x20)  |
-		       ((diff1 << 3) & 0x10)  |
-		       ((diff0 >> 4) & 0x08)  |
-		       ((diff0 >> 3) & 0x04)  |
-		       ((diff0 >> 2) & 0x02)  |
-		       ((diff0 >> 1) & 0x01);
-
-		dev_dbg(info->device, "correcting error bit %d, byte %d\n",
-			bit, byte);
-
-		dat[byte] ^= (1 << bit);
-		return 1;
-	}
-
-	/* if there is only one bit difference in the ECC, then
-	 * only a single row or column parity bit has changed, which
-	 * means the error is most probably in the ECC itself */
-
-	diff0 |= (diff1 << 8);
-	diff0 |= (diff2 << 16);
-
-	/* equal to "(diff0 & ~(1 << __ffs(diff0)))" */
-	if ((diff0 & (diff0 - 1)) == 0)
-		return 1;
-
-	return -1;
-}
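
The correctable-error test above relies on each (parity, inverted-parity) pair in the three ECC bytes flipping in exactly one position when a single data bit is wrong. A tiny standalone illustration of that pair test (assuming the same even/odd pair ordering the 0x55 mask implies):

#include <stdbool.h>
#include <stdio.h>

/* True if every even/odd bit pair of 'diff' (read ECC XOR calculated
 * ECC) has exactly one bit set -- the signature of a single
 * correctable data-bit error rather than a damaged ECC. */
static bool one_flip_per_pair(unsigned int diff)
{
	return ((diff ^ (diff >> 1)) & 0x55) == 0x55;
}

int main(void)
{
	printf("%d %d\n", one_flip_per_pair(0x55), one_flip_per_pair(0x03));
	return 0;
}

Only when this holds for diff0, diff1 and diff2 does the routine go on to assemble the failing bit and byte offsets from the odd-numbered bits of the diffs.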
-
-/* ECC functions
- *
- * These allow the s3c2410 and s3c2440 to use the controller's ECC
- * generator block to ECC the data as it passes through.
-*/
-
-static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	unsigned long ctrl;
-
-	ctrl = readl(info->regs + S3C2410_NFCONF);
-	ctrl |= S3C2410_NFCONF_INITECC;
-	writel(ctrl, info->regs + S3C2410_NFCONF);
-}
-
-static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	unsigned long ctrl;
-
-	ctrl = readl(info->regs + S3C2440_NFCONT);
-	writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC,
-	       info->regs + S3C2440_NFCONT);
-}
-
-static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	unsigned long ctrl;
-
-	ctrl = readl(info->regs + S3C2440_NFCONT);
-	writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT);
-}
-
-static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-				      u_char *ecc_code)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
-	ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0);
-	ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1);
-	ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2);
-
-	pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
-
-	return 0;
-}
-
-static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-				      u_char *ecc_code)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	unsigned long ecc = readl(info->regs + S3C2412_NFMECC0);
-
-	ecc_code[0] = ecc;
-	ecc_code[1] = ecc >> 8;
-	ecc_code[2] = ecc >> 16;
-
-	pr_debug("%s: returning ecc %*phN\n", __func__, 3, ecc_code);
-
-	return 0;
-}
-
-static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-				      u_char *ecc_code)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-	unsigned long ecc = readl(info->regs + S3C2440_NFMECC0);
-
-	ecc_code[0] = ecc;
-	ecc_code[1] = ecc >> 8;
-	ecc_code[2] = ecc >> 16;
-
-	pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff);
-
-	return 0;
-}
-#endif
-
-/* override the standard functions for a little more speed. We can
- * use block reads/writes to move the data buffers to/from the controller
-*/
-
-static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	readsb(this->IO_ADDR_R, buf, len);
-}
-
-static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
-	readsl(info->regs + S3C2440_NFDATA, buf, len >> 2);
-
-	/* cleanup if we've got less than a word to do */
-	if (len & 3) {
-		buf += len & ~3;
-
-		for (; len & 3; len--)
-			*buf++ = readb(info->regs + S3C2440_NFDATA);
-	}
-}
-
-static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
-				   int len)
-{
-	struct nand_chip *this = mtd_to_nand(mtd);
-	writesb(this->IO_ADDR_W, buf, len);
-}
-
-static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf,
-				   int len)
-{
-	struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd);
-
-	writesl(info->regs + S3C2440_NFDATA, buf, len >> 2);
-
-	/* cleanup any fractional write */
-	if (len & 3) {
-		buf += len & ~3;
-
-		for (; len & 3; len--, buf++)
-			writeb(*buf, info->regs + S3C2440_NFDATA);
-	}
-}
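
Both S3C2440 buffer helpers use the same shape: transfer whole 32-bit words through NFDATA and then the 0-3 leftover bytes one at a time. A self-contained sketch of that split against an ordinary memory buffer (illustrative only; the real helpers target the data register):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Copy len bytes as whole 32-bit words followed by a byte-wise tail,
 * mirroring the readsl/writesl + readb/writeb loops above. */
static void copy_words_then_tail(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t aligned = len & ~(size_t)3;

	memcpy(dst, src, aligned);		/* the readsl/writesl part */
	for (size_t i = aligned; i < len; i++)	/* the readb/writeb tail */
		dst[i] = src[i];
}

int main(void)
{
	uint8_t src[7] = { 1, 2, 3, 4, 5, 6, 7 }, dst[7] = { 0 };

	copy_words_then_tail(dst, src, sizeof(src));
	printf("%d %d\n", dst[3], dst[6]);	/* 4 7 */
	return 0;
}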
-
-/* cpufreq driver support */
-
-#ifdef CONFIG_ARM_S3C24XX_CPUFREQ
-
-static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb,
-					  unsigned long val, void *data)
-{
-	struct s3c2410_nand_info *info;
-	unsigned long newclk;
-
-	info = container_of(nb, struct s3c2410_nand_info, freq_transition);
-	newclk = clk_get_rate(info->clk);
-
-	if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) ||
-	    (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) {
-		s3c2410_nand_setrate(info);
-	}
-
-	return 0;
-}
-
-static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
-{
-	info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition;
-
-	return cpufreq_register_notifier(&info->freq_transition,
-					 CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-static inline void
-s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
-{
-	cpufreq_unregister_notifier(&info->freq_transition,
-				    CPUFREQ_TRANSITION_NOTIFIER);
-}
-
-#else
-static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info)
-{
-	return 0;
-}
-
-static inline void
-s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info)
-{
-}
-#endif
-
-/* device management functions */
-
-static int s3c24xx_nand_remove(struct platform_device *pdev)
-{
-	struct s3c2410_nand_info *info = to_nand_info(pdev);
-
-	if (info == NULL)
-		return 0;
-
-	s3c2410_nand_cpufreq_deregister(info);
-
-	/* Release all our mtds and their partitions, then go through
-	 * freeing the resources used
-	 */
-
-	if (info->mtds != NULL) {
-		struct s3c2410_nand_mtd *ptr = info->mtds;
-		int mtdno;
-
-		for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) {
-			pr_debug("releasing mtd %d (%p)\n", mtdno, ptr);
-			nand_release(nand_to_mtd(&ptr->chip));
-		}
-	}
-
-	/* free the common resources */
-
-	if (!IS_ERR(info->clk))
-		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
-
-	return 0;
-}
-
-static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info,
-				      struct s3c2410_nand_mtd *mtd,
-				      struct s3c2410_nand_set *set)
-{
-	if (set) {
-		struct mtd_info *mtdinfo = nand_to_mtd(&mtd->chip);
-
-		mtdinfo->name = set->name;
-
-		return mtd_device_parse_register(mtdinfo, NULL, NULL,
-					 set->partitions, set->nr_partitions);
-	}
-
-	return -ENODEV;
-}
-
-/**
- * s3c2410_nand_init_chip - initialise a single instance of a chip
- * @info: The base NAND controller the chip is on.
- * @nmtd: The new controller MTD instance to fill in.
- * @set: The information passed from the board specific platform data.
- *
- * Initialise the given @nmtd from the information in @info and @set. This
- * readies the structure for use with the MTD layer functions by ensuring
- * all pointers are setup and the necessary control routines selected.
- */
-static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info,
-				   struct s3c2410_nand_mtd *nmtd,
-				   struct s3c2410_nand_set *set)
-{
-	struct nand_chip *chip = &nmtd->chip;
-	void __iomem *regs = info->regs;
-
-	chip->write_buf    = s3c2410_nand_write_buf;
-	chip->read_buf     = s3c2410_nand_read_buf;
-	chip->select_chip  = s3c2410_nand_select_chip;
-	chip->chip_delay   = 50;
-	nand_set_controller_data(chip, nmtd);
-	chip->options	   = set->options;
-	chip->controller   = &info->controller;
-
-	switch (info->cpu_type) {
-	case TYPE_S3C2410:
-		chip->IO_ADDR_W = regs + S3C2410_NFDATA;
-		info->sel_reg   = regs + S3C2410_NFCONF;
-		info->sel_bit	= S3C2410_NFCONF_nFCE;
-		chip->cmd_ctrl  = s3c2410_nand_hwcontrol;
-		chip->dev_ready = s3c2410_nand_devready;
-		break;
-
-	case TYPE_S3C2440:
-		chip->IO_ADDR_W = regs + S3C2440_NFDATA;
-		info->sel_reg   = regs + S3C2440_NFCONT;
-		info->sel_bit	= S3C2440_NFCONT_nFCE;
-		chip->cmd_ctrl  = s3c2440_nand_hwcontrol;
-		chip->dev_ready = s3c2440_nand_devready;
-		chip->read_buf  = s3c2440_nand_read_buf;
-		chip->write_buf	= s3c2440_nand_write_buf;
-		break;
-
-	case TYPE_S3C2412:
-		chip->IO_ADDR_W = regs + S3C2440_NFDATA;
-		info->sel_reg   = regs + S3C2440_NFCONT;
-		info->sel_bit	= S3C2412_NFCONT_nFCE0;
-		chip->cmd_ctrl  = s3c2440_nand_hwcontrol;
-		chip->dev_ready = s3c2412_nand_devready;
-
-		if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT)
-			dev_info(info->device, "System booted from NAND\n");
-
-		break;
-	}
-
-	chip->IO_ADDR_R = chip->IO_ADDR_W;
-
-	nmtd->info	   = info;
-	nmtd->set	   = set;
-
-#ifdef CONFIG_MTD_NAND_S3C2410_HWECC
-	chip->ecc.calculate = s3c2410_nand_calculate_ecc;
-	chip->ecc.correct   = s3c2410_nand_correct_data;
-	chip->ecc.mode	    = NAND_ECC_HW;
-	chip->ecc.strength  = 1;
-
-	switch (info->cpu_type) {
-	case TYPE_S3C2410:
-		chip->ecc.hwctl	    = s3c2410_nand_enable_hwecc;
-		chip->ecc.calculate = s3c2410_nand_calculate_ecc;
-		break;
-
-	case TYPE_S3C2412:
-		chip->ecc.hwctl     = s3c2412_nand_enable_hwecc;
-		chip->ecc.calculate = s3c2412_nand_calculate_ecc;
-		break;
-
-	case TYPE_S3C2440:
-		chip->ecc.hwctl     = s3c2440_nand_enable_hwecc;
-		chip->ecc.calculate = s3c2440_nand_calculate_ecc;
-		break;
-	}
-#else
-	chip->ecc.mode	    = NAND_ECC_SOFT;
-	chip->ecc.algo	= NAND_ECC_HAMMING;
-#endif
-
-	if (set->disable_ecc)
-		chip->ecc.mode	= NAND_ECC_NONE;
-
-	switch (chip->ecc.mode) {
-	case NAND_ECC_NONE:
-		dev_info(info->device, "NAND ECC disabled\n");
-		break;
-	case NAND_ECC_SOFT:
-		dev_info(info->device, "NAND soft ECC\n");
-		break;
-	case NAND_ECC_HW:
-		dev_info(info->device, "NAND hardware ECC\n");
-		break;
-	default:
-		dev_info(info->device, "NAND ECC UNKNOWN\n");
-		break;
-	}
-
-	/* If you use u-boot BBT creation code, specifying this flag will
-	 * let the kernel fish out the BBT from the NAND, and also skip the
-	 * full NAND scan that can take 1/2s or so. Little things... */
-	if (set->flash_bbt) {
-		chip->bbt_options |= NAND_BBT_USE_FLASH;
-		chip->options |= NAND_SKIP_BBTSCAN;
-	}
-}
-
-/**
- * s3c2410_nand_update_chip - post probe update
- * @info: The controller instance.
- * @nmtd: The driver version of the MTD instance.
- *
- * This routine is called after the chip probe has successfully completed
- * and the relevant per-chip information updated. This call ensures that
- * we update the internal state accordingly.
- *
- * The internal state is currently limited to the ECC state information.
-*/
-static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info,
-				     struct s3c2410_nand_mtd *nmtd)
-{
-	struct nand_chip *chip = &nmtd->chip;
-
-	dev_dbg(info->device, "chip %p => page shift %d\n",
-		chip, chip->page_shift);
-
-	if (chip->ecc.mode != NAND_ECC_HW)
-		return;
-
-	/* change the behaviour depending on whether we are using
-	 * the large or small page nand device */
-
-	if (chip->page_shift > 10) {
-		chip->ecc.size	    = 256;
-		chip->ecc.bytes	    = 3;
-	} else {
-		chip->ecc.size	    = 512;
-		chip->ecc.bytes	    = 3;
-		mtd_set_ooblayout(nand_to_mtd(chip), &s3c2410_ooblayout_ops);
-	}
-}
-
-/* s3c24xx_nand_probe
- *
- * called by device layer when it finds a device matching
- * one our driver can handle. This code checks to see if
- * it can allocate all necessary resources then calls the
- * nand layer to look for devices
-*/
-static int s3c24xx_nand_probe(struct platform_device *pdev)
-{
-	struct s3c2410_platform_nand *plat = to_nand_plat(pdev);
-	enum s3c_cpu_type cpu_type;
-	struct s3c2410_nand_info *info;
-	struct s3c2410_nand_mtd *nmtd;
-	struct s3c2410_nand_set *sets;
-	struct resource *res;
-	int err = 0;
-	int size;
-	int nr_sets;
-	int setno;
-
-	cpu_type = platform_get_device_id(pdev)->driver_data;
-
-	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
-	if (info == NULL) {
-		err = -ENOMEM;
-		goto exit_error;
-	}
-
-	platform_set_drvdata(pdev, info);
-
-	nand_hw_control_init(&info->controller);
-
-	/* get the clock source and enable it */
-
-	info->clk = devm_clk_get(&pdev->dev, "nand");
-	if (IS_ERR(info->clk)) {
-		dev_err(&pdev->dev, "failed to get clock\n");
-		err = -ENOENT;
-		goto exit_error;
-	}
-
-	s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-
-	/* allocate and map the resource */
-
-	/* currently we assume we have the one resource */
-	res = pdev->resource;
-	size = resource_size(res);
-
-	info->device	= &pdev->dev;
-	info->platform	= plat;
-	info->cpu_type	= cpu_type;
-
-	info->regs = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(info->regs)) {
-		err = PTR_ERR(info->regs);
-		goto exit_error;
-	}
-
-	dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs);
-
-	/* initialise the hardware */
-
-	err = s3c2410_nand_inithw(info);
-	if (err != 0)
-		goto exit_error;
-
-	sets = (plat != NULL) ? plat->sets : NULL;
-	nr_sets = (plat != NULL) ? plat->nr_sets : 1;
-
-	info->mtd_count = nr_sets;
-
-	/* allocate our information */
-
-	size = nr_sets * sizeof(*info->mtds);
-	info->mtds = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
-	if (info->mtds == NULL) {
-		err = -ENOMEM;
-		goto exit_error;
-	}
-
-	/* initialise all possible chips */
-
-	nmtd = info->mtds;
-
-	for (setno = 0; setno < nr_sets; setno++, nmtd++) {
-		struct mtd_info *mtd = nand_to_mtd(&nmtd->chip);
-
-		pr_debug("initialising set %d (%p, info %p)\n",
-			 setno, nmtd, info);
-
-		mtd->dev.parent = &pdev->dev;
-		s3c2410_nand_init_chip(info, nmtd, sets);
-
-		nmtd->scan_res = nand_scan_ident(mtd,
-						 (sets) ? sets->nr_chips : 1,
-						 NULL);
-
-		if (nmtd->scan_res == 0) {
-			s3c2410_nand_update_chip(info, nmtd);
-			nand_scan_tail(mtd);
-			s3c2410_nand_add_partition(info, nmtd, sets);
-		}
-
-		if (sets != NULL)
-			sets++;
-	}
-
-	err = s3c2410_nand_cpufreq_register(info);
-	if (err < 0) {
-		dev_err(&pdev->dev, "failed to init cpufreq support\n");
-		goto exit_error;
-	}
-
-	if (allow_clk_suspend(info)) {
-		dev_info(&pdev->dev, "clock idle support enabled\n");
-		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
-	}
-
-	return 0;
-
- exit_error:
-	s3c24xx_nand_remove(pdev);
-
-	if (err == 0)
-		err = -EINVAL;
-	return err;
-}
-
-/* PM Support */
-#ifdef CONFIG_PM
-
-static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm)
-{
-	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
-
-	if (info) {
-		info->save_sel = readl(info->sel_reg);
-
-		/* For the moment, we must ensure nFCE is high during
-		 * the time we are suspended. This really should be
-		 * handled by suspending the MTDs we are using, but
-		 * that is currently not the case. */
-
-		writel(info->save_sel | info->sel_bit, info->sel_reg);
-
-		s3c2410_nand_clk_set_state(info, CLOCK_DISABLE);
-	}
-
-	return 0;
-}
-
-static int s3c24xx_nand_resume(struct platform_device *dev)
-{
-	struct s3c2410_nand_info *info = platform_get_drvdata(dev);
-	unsigned long sel;
-
-	if (info) {
-		s3c2410_nand_clk_set_state(info, CLOCK_ENABLE);
-		s3c2410_nand_inithw(info);
-
-		/* Restore the state of the nFCE line. */
-
-		sel = readl(info->sel_reg);
-		sel &= ~info->sel_bit;
-		sel |= info->save_sel & info->sel_bit;
-		writel(sel, info->sel_reg);
-
-		s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND);
-	}
-
-	return 0;
-}
-
-#else
-#define s3c24xx_nand_suspend NULL
-#define s3c24xx_nand_resume NULL
-#endif
-
-/* driver device registration */
-
-static const struct platform_device_id s3c24xx_driver_ids[] = {
-	{
-		.name		= "s3c2410-nand",
-		.driver_data	= TYPE_S3C2410,
-	}, {
-		.name		= "s3c2440-nand",
-		.driver_data	= TYPE_S3C2440,
-	}, {
-		.name		= "s3c2412-nand",
-		.driver_data	= TYPE_S3C2412,
-	}, {
-		.name		= "s3c6400-nand",
-		.driver_data	= TYPE_S3C2412, /* compatible with 2412 */
-	},
-	{ }
-};
-
-MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
-
-static struct platform_driver s3c24xx_nand_driver = {
-	.probe		= s3c24xx_nand_probe,
-	.remove		= s3c24xx_nand_remove,
-	.suspend	= s3c24xx_nand_suspend,
-	.resume		= s3c24xx_nand_resume,
-	.id_table	= s3c24xx_driver_ids,
-	.driver		= {
-		.name	= "s3c24xx-nand",
-	},
-};
-
-module_platform_driver(s3c24xx_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
deleted file mode 100644
index 492705fb23f2..000000000000
--- a/drivers/mtd/nand/sh_flctl.c
+++ /dev/null
@@ -1,1251 +0,0 @@ 
-/*
- * SuperH FLCTL nand controller
- *
- * Copyright (c) 2008 Renesas Solutions Corp.
- * Copyright (c) 2008 Atom Create Engineering Co., Ltd.
- *
- * Based on fsl_elbc_nand.c, Copyright (c) 2006-2007 Freescale Semiconductor
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- *
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/dma-mapping.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/sh_dma.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/sh_flctl.h>
-
-static int flctl_4secc_ooblayout_sp_ecc(struct mtd_info *mtd, int section,
-					struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 0;
-	oobregion->length = chip->ecc.bytes;
-
-	return 0;
-}
-
-static int flctl_4secc_ooblayout_sp_free(struct mtd_info *mtd, int section,
-					 struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->offset = 12;
-	oobregion->length = 4;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops flctl_4secc_oob_smallpage_ops = {
-	.ecc = flctl_4secc_ooblayout_sp_ecc,
-	.free = flctl_4secc_ooblayout_sp_free,
-};
-
-static int flctl_4secc_ooblayout_lp_ecc(struct mtd_info *mtd, int section,
-					struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = (section * 16) + 6;
-	oobregion->length = chip->ecc.bytes;
-
-	return 0;
-}
-
-static int flctl_4secc_ooblayout_lp_free(struct mtd_info *mtd, int section,
-					 struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (section >= chip->ecc.steps)
-		return -ERANGE;
-
-	oobregion->offset = section * 16;
-	oobregion->length = 6;
-
-	if (!section) {
-		oobregion->offset += 2;
-		oobregion->length -= 2;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops flctl_4secc_oob_largepage_ops = {
-	.ecc = flctl_4secc_ooblayout_lp_ecc,
-	.free = flctl_4secc_ooblayout_lp_free,
-};
-
-static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
-
-static struct nand_bbt_descr flctl_4secc_smallpage = {
-	.options = NAND_BBT_SCAN2NDPAGE,
-	.offs = 11,
-	.len = 1,
-	.pattern = scan_ff_pattern,
-};
-
-static struct nand_bbt_descr flctl_4secc_largepage = {
-	.options = NAND_BBT_SCAN2NDPAGE,
-	.offs = 0,
-	.len = 2,
-	.pattern = scan_ff_pattern,
-};
-
-static void empty_fifo(struct sh_flctl *flctl)
-{
-	writel(flctl->flintdmacr_base | AC1CLR | AC0CLR, FLINTDMACR(flctl));
-	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
-}
-
-static void start_translation(struct sh_flctl *flctl)
-{
-	writeb(TRSTRT, FLTRCR(flctl));
-}
-
-static void timeout_error(struct sh_flctl *flctl, const char *str)
-{
-	dev_err(&flctl->pdev->dev, "Timeout occurred in %s\n", str);
-}
-
-static void wait_completion(struct sh_flctl *flctl)
-{
-	uint32_t timeout = LOOP_TIMEOUT_MAX;
-
-	while (timeout--) {
-		if (readb(FLTRCR(flctl)) & TREND) {
-			writeb(0x0, FLTRCR(flctl));
-			return;
-		}
-		udelay(1);
-	}
-
-	timeout_error(flctl, __func__);
-	writeb(0x0, FLTRCR(flctl));
-}
-
-static void flctl_dma_complete(void *param)
-{
-	struct sh_flctl *flctl = param;
-
-	complete(&flctl->dma_complete);
-}
-
-static void flctl_release_dma(struct sh_flctl *flctl)
-{
-	if (flctl->chan_fifo0_rx) {
-		dma_release_channel(flctl->chan_fifo0_rx);
-		flctl->chan_fifo0_rx = NULL;
-	}
-	if (flctl->chan_fifo0_tx) {
-		dma_release_channel(flctl->chan_fifo0_tx);
-		flctl->chan_fifo0_tx = NULL;
-	}
-}
-
-static void flctl_setup_dma(struct sh_flctl *flctl)
-{
-	dma_cap_mask_t mask;
-	struct dma_slave_config cfg;
-	struct platform_device *pdev = flctl->pdev;
-	struct sh_flctl_platform_data *pdata = dev_get_platdata(&pdev->dev);
-	int ret;
-
-	if (!pdata)
-		return;
-
-	if (pdata->slave_id_fifo0_tx <= 0 || pdata->slave_id_fifo0_rx <= 0)
-		return;
-
-	/* We can only either use DMA for both Tx and Rx or not use it at all */
-	dma_cap_zero(mask);
-	dma_cap_set(DMA_SLAVE, mask);
-
-	flctl->chan_fifo0_tx = dma_request_channel(mask, shdma_chan_filter,
-				(void *)(uintptr_t)pdata->slave_id_fifo0_tx);
-	dev_dbg(&pdev->dev, "%s: TX: got channel %p\n", __func__,
-		flctl->chan_fifo0_tx);
-
-	if (!flctl->chan_fifo0_tx)
-		return;
-
-	memset(&cfg, 0, sizeof(cfg));
-	cfg.direction = DMA_MEM_TO_DEV;
-	cfg.dst_addr = flctl->fifo;
-	cfg.src_addr = 0;
-	ret = dmaengine_slave_config(flctl->chan_fifo0_tx, &cfg);
-	if (ret < 0)
-		goto err;
-
-	flctl->chan_fifo0_rx = dma_request_channel(mask, shdma_chan_filter,
-				(void *)(uintptr_t)pdata->slave_id_fifo0_rx);
-	dev_dbg(&pdev->dev, "%s: RX: got channel %p\n", __func__,
-		flctl->chan_fifo0_rx);
-
-	if (!flctl->chan_fifo0_rx)
-		goto err;
-
-	cfg.direction = DMA_DEV_TO_MEM;
-	cfg.dst_addr = 0;
-	cfg.src_addr = flctl->fifo;
-	ret = dmaengine_slave_config(flctl->chan_fifo0_rx, &cfg);
-	if (ret < 0)
-		goto err;
-
-	init_completion(&flctl->dma_complete);
-
-	return;
-
-err:
-	flctl_release_dma(flctl);
-}
-
-static void set_addr(struct mtd_info *mtd, int column, int page_addr)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t addr = 0;
-
-	if (column == -1) {
-		addr = page_addr;	/* ERASE1 */
-	} else if (page_addr != -1) {
-		/* SEQIN, READ0, etc.. */
-		if (flctl->chip.options & NAND_BUSWIDTH_16)
-			column >>= 1;
-		if (flctl->page_size) {
-			addr = column & 0x0FFF;
-			addr |= (page_addr & 0xff) << 16;
-			addr |= ((page_addr >> 8) & 0xff) << 24;
-			/* bigger than 128MB */
-			if (flctl->rw_ADRCNT == ADRCNT2_E) {
-				uint32_t addr2;
-				addr2 = (page_addr >> 16) & 0xff;
-				writel(addr2, FLADR2(flctl));
-			}
-		} else {
-			addr = column;
-			addr |= (page_addr & 0xff) << 8;
-			addr |= ((page_addr >> 8) & 0xff) << 16;
-			addr |= ((page_addr >> 16) & 0xff) << 24;
-		}
-	}
-	writel(addr, FLADR(flctl));
-}
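
For large-page devices set_addr() folds the column and the low page-address bytes into a single FLADR word, with the fifth address cycle going to FLADR2 when the chip is big enough to need it. The packing arithmetic on its own, as a hedged sketch:

#include <stdint.h>
#include <stdio.h>

/* Large-page FLADR packing used above: column in bits 11:0,
 * page_addr[7:0] in bits 23:16, page_addr[15:8] in bits 31:24;
 * page_addr[23:16] would go to the second address register. */
static uint32_t pack_large_page_addr(int column, int page_addr)
{
	uint32_t addr = (uint32_t)column & 0x0fff;

	addr |= ((uint32_t)page_addr & 0xff) << 16;
	addr |= (((uint32_t)page_addr >> 8) & 0xff) << 24;
	return addr;
}

int main(void)
{
	/* column 0x10 of page 0x1234 -> 0x34 and 0x12 in the upper bytes */
	printf("0x%08x\n", pack_large_page_addr(0x10, 0x1234));
	return 0;
}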
-
-static void wait_rfifo_ready(struct sh_flctl *flctl)
-{
-	uint32_t timeout = LOOP_TIMEOUT_MAX;
-
-	while (timeout--) {
-		uint32_t val;
-		/* check FIFO */
-		val = readl(FLDTCNTR(flctl)) >> 16;
-		if (val & 0xFF)
-			return;
-		udelay(1);
-	}
-	timeout_error(flctl, __func__);
-}
-
-static void wait_wfifo_ready(struct sh_flctl *flctl)
-{
-	uint32_t len, timeout = LOOP_TIMEOUT_MAX;
-
-	while (timeout--) {
-		/* check FIFO */
-		len = (readl(FLDTCNTR(flctl)) >> 16) & 0xFF;
-		if (len >= 4)
-			return;
-		udelay(1);
-	}
-	timeout_error(flctl, __func__);
-}
-
-static enum flctl_ecc_res_t wait_recfifo_ready
-		(struct sh_flctl *flctl, int sector_number)
-{
-	uint32_t timeout = LOOP_TIMEOUT_MAX;
-	void __iomem *ecc_reg[4];
-	int i;
-	int state = FL_SUCCESS;
-	uint32_t data, size;
-
-	/*
-	 * First this loop checks in FLDTCNTR if we are ready to read out the
-	 * oob data. This is the case if either all went fine without errors or
-	 * if the bottom part of the loop corrected the errors or marked them as
-	 * uncorrectable and the controller is given time to push the data into
-	 * the FIFO.
-	 */
-	while (timeout--) {
-		/* check if all is ok and we can read out the OOB */
-		size = readl(FLDTCNTR(flctl)) >> 24;
-		if ((size & 0xFF) == 4)
-			return state;
-
-		/* check if a correction code has been calculated */
-		if (!(readl(FL4ECCCR(flctl)) & _4ECCEND)) {
-			/*
-			 * either we wait for the fifo to be filled or a
-			 * correction pattern is being generated
-			 */
-			udelay(1);
-			continue;
-		}
-
-		/* check for an uncorrectable error */
-		if (readl(FL4ECCCR(flctl)) & _4ECCFA) {
-			/* check if we face a non-empty page */
-			for (i = 0; i < 512; i++) {
-				if (flctl->done_buff[i] != 0xff) {
-					state = FL_ERROR; /* can't correct */
-					break;
-				}
-			}
-
-			if (state == FL_SUCCESS)
-				dev_dbg(&flctl->pdev->dev,
-				"reading empty sector %d, ecc error ignored\n",
-				sector_number);
-
-			writel(0, FL4ECCCR(flctl));
-			continue;
-		}
-
-		/* start error correction */
-		ecc_reg[0] = FL4ECCRESULT0(flctl);
-		ecc_reg[1] = FL4ECCRESULT1(flctl);
-		ecc_reg[2] = FL4ECCRESULT2(flctl);
-		ecc_reg[3] = FL4ECCRESULT3(flctl);
-
-		for (i = 0; i < 3; i++) {
-			uint8_t org;
-			unsigned int index;
-
-			data = readl(ecc_reg[i]);
-
-			if (flctl->page_size)
-				index = (512 * sector_number) +
-					(data >> 16);
-			else
-				index = data >> 16;
-
-			org = flctl->done_buff[index];
-			flctl->done_buff[index] = org ^ (data & 0xFF);
-		}
-		state = FL_REPAIRABLE;
-		writel(0, FL4ECCCR(flctl));
-	}
-
-	timeout_error(flctl, __func__);
-	return FL_TIMEOUT;	/* timeout */
-}
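
Each FL4ECCRESULTn word consumed in the correction loop above encodes where and how to patch the sector buffer. A minimal sketch of applying one such result (small-page case; for large pages the driver additionally offsets the index by 512 * sector_number):

#include <stdint.h>
#include <stdio.h>

/* Bits 31:16 of an FL4ECCRESULTn word give the byte index inside the
 * sector, bits 7:0 the XOR mask that repairs it, as in the loop above. */
static void apply_flctl_ecc_result(uint8_t *buf, uint32_t result)
{
	buf[result >> 16] ^= (uint8_t)(result & 0xff);
}

int main(void)
{
	uint8_t sector[512] = { 0 };

	apply_flctl_ecc_result(sector, (100u << 16) | 0x04);	/* flip bit 2 of byte 100 */
	printf("%d\n", sector[100]);	/* 4 */
	return 0;
}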
-
-static void wait_wecfifo_ready(struct sh_flctl *flctl)
-{
-	uint32_t timeout = LOOP_TIMEOUT_MAX;
-	uint32_t len;
-
-	while (timeout--) {
-		/* check FLECFIFO */
-		len = (readl(FLDTCNTR(flctl)) >> 24) & 0xFF;
-		if (len >= 4)
-			return;
-		udelay(1);
-	}
-	timeout_error(flctl, __func__);
-}
-
-static int flctl_dma_fifo0_transfer(struct sh_flctl *flctl, unsigned long *buf,
-					int len, enum dma_data_direction dir)
-{
-	struct dma_async_tx_descriptor *desc = NULL;
-	struct dma_chan *chan;
-	enum dma_transfer_direction tr_dir;
-	dma_addr_t dma_addr;
-	dma_cookie_t cookie;
-	uint32_t reg;
-	int ret;
-
-	if (dir == DMA_FROM_DEVICE) {
-		chan = flctl->chan_fifo0_rx;
-		tr_dir = DMA_DEV_TO_MEM;
-	} else {
-		chan = flctl->chan_fifo0_tx;
-		tr_dir = DMA_MEM_TO_DEV;
-	}
-
-	dma_addr = dma_map_single(chan->device->dev, buf, len, dir);
-
-	if (dma_addr)
-		desc = dmaengine_prep_slave_single(chan, dma_addr, len,
-			tr_dir, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
-
-	if (desc) {
-		reg = readl(FLINTDMACR(flctl));
-		reg |= DREQ0EN;
-		writel(reg, FLINTDMACR(flctl));
-
-		desc->callback = flctl_dma_complete;
-		desc->callback_param = flctl;
-		cookie = dmaengine_submit(desc);
-		if (dma_submit_error(cookie)) {
-			ret = dma_submit_error(cookie);
-			dev_warn(&flctl->pdev->dev,
-				 "DMA submit failed, falling back to PIO\n");
-			goto out;
-		}
-
-		dma_async_issue_pending(chan);
-	} else {
-		/* DMA failed, fall back to PIO */
-		flctl_release_dma(flctl);
-		dev_warn(&flctl->pdev->dev,
-			 "DMA failed, falling back to PIO\n");
-		ret = -EIO;
-		goto out;
-	}
-
-	ret = wait_for_completion_timeout(&flctl->dma_complete,
-					  msecs_to_jiffies(3000));
-
-	if (ret <= 0) {
-		dmaengine_terminate_all(chan);
-		dev_err(&flctl->pdev->dev, "wait_for_completion_timeout\n");
-	}
-
-out:
-	reg = readl(FLINTDMACR(flctl));
-	reg &= ~DREQ0EN;
-	writel(reg, FLINTDMACR(flctl));
-
-	dma_unmap_single(chan->device->dev, dma_addr, len, dir);
-
-	/* ret > 0 is success */
-	return ret;
-}
-
-static void read_datareg(struct sh_flctl *flctl, int offset)
-{
-	unsigned long data;
-	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
-
-	wait_completion(flctl);
-
-	data = readl(FLDATAR(flctl));
-	*buf = le32_to_cpu(data);
-}
-
-static void read_fiforeg(struct sh_flctl *flctl, int rlen, int offset)
-{
-	int i, len_4align;
-	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
-
-	len_4align = (rlen + 3) / 4;
-
-	/* initiate DMA transfer */
-	if (flctl->chan_fifo0_rx && rlen >= 32 &&
-		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_DEV_TO_MEM) > 0)
-			goto convert;	/* DMA success */
-
-	/* do polling transfer */
-	for (i = 0; i < len_4align; i++) {
-		wait_rfifo_ready(flctl);
-		buf[i] = readl(FLDTFIFO(flctl));
-	}
-
-convert:
-	for (i = 0; i < len_4align; i++)
-		buf[i] = be32_to_cpu(buf[i]);
-}
-
-static enum flctl_ecc_res_t read_ecfiforeg
-		(struct sh_flctl *flctl, uint8_t *buff, int sector)
-{
-	int i;
-	enum flctl_ecc_res_t res;
-	unsigned long *ecc_buf = (unsigned long *)buff;
-
-	res = wait_recfifo_ready(flctl, sector);
-
-	if (res != FL_ERROR) {
-		for (i = 0; i < 4; i++) {
-			ecc_buf[i] = readl(FLECFIFO(flctl));
-			ecc_buf[i] = be32_to_cpu(ecc_buf[i]);
-		}
-	}
-
-	return res;
-}
-
-static void write_fiforeg(struct sh_flctl *flctl, int rlen,
-						unsigned int offset)
-{
-	int i, len_4align;
-	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
-
-	len_4align = (rlen + 3) / 4;
-	for (i = 0; i < len_4align; i++) {
-		wait_wfifo_ready(flctl);
-		writel(cpu_to_be32(buf[i]), FLDTFIFO(flctl));
-	}
-}
-
-static void write_ec_fiforeg(struct sh_flctl *flctl, int rlen,
-						unsigned int offset)
-{
-	int i, len_4align;
-	unsigned long *buf = (unsigned long *)&flctl->done_buff[offset];
-
-	len_4align = (rlen + 3) / 4;
-
-	for (i = 0; i < len_4align; i++)
-		buf[i] = cpu_to_be32(buf[i]);
-
-	/* initiate DMA transfer */
-	if (flctl->chan_fifo0_tx && rlen >= 32 &&
-		flctl_dma_fifo0_transfer(flctl, buf, rlen, DMA_MEM_TO_DEV) > 0)
-			return;	/* DMA success */
-
-	/* do polling transfer */
-	for (i = 0; i < len_4align; i++) {
-		wait_wecfifo_ready(flctl);
-		writel(buf[i], FLECFIFO(flctl));
-	}
-}
-
-static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_val)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t flcmncr_val = flctl->flcmncr_base & ~SEL_16BIT;
-	uint32_t flcmdcr_val, addr_len_bytes = 0;
-
-	/* Set SNAND bit if page size is 2048 bytes */
-	if (flctl->page_size)
-		flcmncr_val |= SNAND_E;
-	else
-		flcmncr_val &= ~SNAND_E;
-
-	/* default FLCMDCR val */
-	flcmdcr_val = DOCMD1_E | DOADR_E;
-
-	/* Set for FLCMDCR */
-	switch (cmd) {
-	case NAND_CMD_ERASE1:
-		addr_len_bytes = flctl->erase_ADRCNT;
-		flcmdcr_val |= DOCMD2_E;
-		break;
-	case NAND_CMD_READ0:
-	case NAND_CMD_READOOB:
-	case NAND_CMD_RNDOUT:
-		addr_len_bytes = flctl->rw_ADRCNT;
-		flcmdcr_val |= CDSRC_E;
-		if (flctl->chip.options & NAND_BUSWIDTH_16)
-			flcmncr_val |= SEL_16BIT;
-		break;
-	case NAND_CMD_SEQIN:
-		/* This case occurs when cmd is READ0, READ1 or READ00 */
-		flcmdcr_val &= ~DOADR_E;	/* ONLY execute 1st cmd */
-		break;
-	case NAND_CMD_PAGEPROG:
-		addr_len_bytes = flctl->rw_ADRCNT;
-		flcmdcr_val |= DOCMD2_E | CDSRC_E | SELRW;
-		if (flctl->chip.options & NAND_BUSWIDTH_16)
-			flcmncr_val |= SEL_16BIT;
-		break;
-	case NAND_CMD_READID:
-		flcmncr_val &= ~SNAND_E;
-		flcmdcr_val |= CDSRC_E;
-		addr_len_bytes = ADRCNT_1;
-		break;
-	case NAND_CMD_STATUS:
-	case NAND_CMD_RESET:
-		flcmncr_val &= ~SNAND_E;
-		flcmdcr_val &= ~(DOADR_E | DOSR_E);
-		break;
-	default:
-		break;
-	}
-
-	/* Set address bytes parameter */
-	flcmdcr_val |= addr_len_bytes;
-
-	/* Now actually write */
-	writel(flcmncr_val, FLCMNCR(flctl));
-	writel(flcmdcr_val, FLCMDCR(flctl));
-	writel(flcmcdr_val, FLCMCDR(flctl));
-}
-
-static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	chip->read_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-static int flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
-				  const uint8_t *buf, int oob_required,
-				  int page)
-{
-	chip->write_buf(mtd, buf, mtd->writesize);
-	chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
-	return 0;
-}
-
-static void execmd_read_page_sector(struct mtd_info *mtd, int page_addr)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	int sector, page_sectors;
-	enum flctl_ecc_res_t ecc_result;
-
-	page_sectors = flctl->page_size ? 4 : 1;
-
-	set_cmd_regs(mtd, NAND_CMD_READ0,
-		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
-
-	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE | _4ECCCORRECT,
-		 FLCMNCR(flctl));
-	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
-	writel(page_addr << 2, FLADR(flctl));
-
-	empty_fifo(flctl);
-	start_translation(flctl);
-
-	for (sector = 0; sector < page_sectors; sector++) {
-		read_fiforeg(flctl, 512, 512 * sector);
-
-		ecc_result = read_ecfiforeg(flctl,
-			&flctl->done_buff[mtd->writesize + 16 * sector],
-			sector);
-
-		switch (ecc_result) {
-		case FL_REPAIRABLE:
-			dev_info(&flctl->pdev->dev,
-				"applied ecc on page 0x%x", page_addr);
-			mtd->ecc_stats.corrected++;
-			break;
-		case FL_ERROR:
-			dev_warn(&flctl->pdev->dev,
-				"page 0x%x contains corrupted data\n",
-				page_addr);
-			mtd->ecc_stats.failed++;
-			break;
-		default:
-			;
-		}
-	}
-
-	wait_completion(flctl);
-
-	writel(readl(FLCMNCR(flctl)) & ~(ACM_SACCES_MODE | _4ECCCORRECT),
-			FLCMNCR(flctl));
-}
-
-static void execmd_read_oob(struct mtd_info *mtd, int page_addr)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	int page_sectors = flctl->page_size ? 4 : 1;
-	int i;
-
-	set_cmd_regs(mtd, NAND_CMD_READ0,
-		(NAND_CMD_READSTART << 8) | NAND_CMD_READ0);
-
-	empty_fifo(flctl);
-
-	for (i = 0; i < page_sectors; i++) {
-		set_addr(mtd, (512 + 16) * i + 512 , page_addr);
-		writel(16, FLDTCNTR(flctl));
-
-		start_translation(flctl);
-		read_fiforeg(flctl, 16, 16 * i);
-		wait_completion(flctl);
-	}
-}
-
-static void execmd_write_page_sector(struct mtd_info *mtd)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	int page_addr = flctl->seqin_page_addr;
-	int sector, page_sectors;
-
-	page_sectors = flctl->page_size ? 4 : 1;
-
-	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
-			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
-
-	empty_fifo(flctl);
-	writel(readl(FLCMNCR(flctl)) | ACM_SACCES_MODE, FLCMNCR(flctl));
-	writel(readl(FLCMDCR(flctl)) | page_sectors, FLCMDCR(flctl));
-	writel(page_addr << 2, FLADR(flctl));
-	start_translation(flctl);
-
-	for (sector = 0; sector < page_sectors; sector++) {
-		write_fiforeg(flctl, 512, 512 * sector);
-		write_ec_fiforeg(flctl, 16, mtd->writesize + 16 * sector);
-	}
-
-	wait_completion(flctl);
-	writel(readl(FLCMNCR(flctl)) & ~ACM_SACCES_MODE, FLCMNCR(flctl));
-}
-
-static void execmd_write_oob(struct mtd_info *mtd)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	int page_addr = flctl->seqin_page_addr;
-	int sector, page_sectors;
-
-	page_sectors = flctl->page_size ? 4 : 1;
-
-	set_cmd_regs(mtd, NAND_CMD_PAGEPROG,
-			(NAND_CMD_PAGEPROG << 8) | NAND_CMD_SEQIN);
-
-	for (sector = 0; sector < page_sectors; sector++) {
-		empty_fifo(flctl);
-		set_addr(mtd, sector * 528 + 512, page_addr);
-		writel(16, FLDTCNTR(flctl));	/* set read size */
-
-		start_translation(flctl);
-		write_fiforeg(flctl, 16, 16 * sector);
-		wait_completion(flctl);
-	}
-}
-
-static void flctl_cmdfunc(struct mtd_info *mtd, unsigned int command,
-			int column, int page_addr)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint32_t read_cmd = 0;
-
-	pm_runtime_get_sync(&flctl->pdev->dev);
-
-	flctl->read_bytes = 0;
-	if (command != NAND_CMD_PAGEPROG)
-		flctl->index = 0;
-
-	switch (command) {
-	case NAND_CMD_READ1:
-	case NAND_CMD_READ0:
-		if (flctl->hwecc) {
-			/* read page with hwecc */
-			execmd_read_page_sector(mtd, page_addr);
-			break;
-		}
-		if (flctl->page_size)
-			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
-				| command);
-		else
-			set_cmd_regs(mtd, command, command);
-
-		set_addr(mtd, 0, page_addr);
-
-		flctl->read_bytes = mtd->writesize + mtd->oobsize;
-		if (flctl->chip.options & NAND_BUSWIDTH_16)
-			column >>= 1;
-		flctl->index += column;
-		goto read_normal_exit;
-
-	case NAND_CMD_READOOB:
-		if (flctl->hwecc) {
-			/* read page with hwecc */
-			execmd_read_oob(mtd, page_addr);
-			break;
-		}
-
-		if (flctl->page_size) {
-			set_cmd_regs(mtd, command, (NAND_CMD_READSTART << 8)
-				| NAND_CMD_READ0);
-			set_addr(mtd, mtd->writesize, page_addr);
-		} else {
-			set_cmd_regs(mtd, command, command);
-			set_addr(mtd, 0, page_addr);
-		}
-		flctl->read_bytes = mtd->oobsize;
-		goto read_normal_exit;
-
-	case NAND_CMD_RNDOUT:
-		if (flctl->hwecc)
-			break;
-
-		if (flctl->page_size)
-			set_cmd_regs(mtd, command, (NAND_CMD_RNDOUTSTART << 8)
-				| command);
-		else
-			set_cmd_regs(mtd, command, command);
-
-		set_addr(mtd, column, 0);
-
-		flctl->read_bytes = mtd->writesize + mtd->oobsize - column;
-		goto read_normal_exit;
-
-	case NAND_CMD_READID:
-		set_cmd_regs(mtd, command, command);
-
-		/* READID is always performed using an 8-bit bus */
-		if (flctl->chip.options & NAND_BUSWIDTH_16)
-			column <<= 1;
-		set_addr(mtd, column, 0);
-
-		flctl->read_bytes = 8;
-		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
-		empty_fifo(flctl);
-		start_translation(flctl);
-		read_fiforeg(flctl, flctl->read_bytes, 0);
-		wait_completion(flctl);
-		break;
-
-	case NAND_CMD_ERASE1:
-		flctl->erase1_page_addr = page_addr;
-		break;
-
-	case NAND_CMD_ERASE2:
-		set_cmd_regs(mtd, NAND_CMD_ERASE1,
-			(command << 8) | NAND_CMD_ERASE1);
-		set_addr(mtd, -1, flctl->erase1_page_addr);
-		start_translation(flctl);
-		wait_completion(flctl);
-		break;
-
-	case NAND_CMD_SEQIN:
-		if (!flctl->page_size) {
-			/* output read command */
-			if (column >= mtd->writesize) {
-				column -= mtd->writesize;
-				read_cmd = NAND_CMD_READOOB;
-			} else if (column < 256) {
-				read_cmd = NAND_CMD_READ0;
-			} else {
-				column -= 256;
-				read_cmd = NAND_CMD_READ1;
-			}
-		}
-		flctl->seqin_column = column;
-		flctl->seqin_page_addr = page_addr;
-		flctl->seqin_read_cmd = read_cmd;
-		break;
-
-	case NAND_CMD_PAGEPROG:
-		empty_fifo(flctl);
-		if (!flctl->page_size) {
-			set_cmd_regs(mtd, NAND_CMD_SEQIN,
-					flctl->seqin_read_cmd);
-			set_addr(mtd, -1, -1);
-			writel(0, FLDTCNTR(flctl));	/* set 0 size */
-			start_translation(flctl);
-			wait_completion(flctl);
-		}
-		if (flctl->hwecc) {
-			/* write page with hwecc */
-			if (flctl->seqin_column == mtd->writesize)
-				execmd_write_oob(mtd);
-			else if (!flctl->seqin_column)
-				execmd_write_page_sector(mtd);
-			else
-				printk(KERN_ERR "Invalid address !?\n");
-			break;
-		}
-		set_cmd_regs(mtd, command, (command << 8) | NAND_CMD_SEQIN);
-		set_addr(mtd, flctl->seqin_column, flctl->seqin_page_addr);
-		writel(flctl->index, FLDTCNTR(flctl));	/* set write size */
-		start_translation(flctl);
-		write_fiforeg(flctl, flctl->index, 0);
-		wait_completion(flctl);
-		break;
-
-	case NAND_CMD_STATUS:
-		set_cmd_regs(mtd, command, command);
-		set_addr(mtd, -1, -1);
-
-		flctl->read_bytes = 1;
-		writel(flctl->read_bytes, FLDTCNTR(flctl)); /* set read size */
-		start_translation(flctl);
-		read_datareg(flctl, 0); /* read and end */
-		break;
-
-	case NAND_CMD_RESET:
-		set_cmd_regs(mtd, command, command);
-		set_addr(mtd, -1, -1);
-
-		writel(0, FLDTCNTR(flctl));	/* set 0 size */
-		start_translation(flctl);
-		wait_completion(flctl);
-		break;
-
-	default:
-		break;
-	}
-	goto runtime_exit;
-
-read_normal_exit:
-	writel(flctl->read_bytes, FLDTCNTR(flctl));	/* set read size */
-	empty_fifo(flctl);
-	start_translation(flctl);
-	read_fiforeg(flctl, flctl->read_bytes, 0);
-	wait_completion(flctl);
-runtime_exit:
-	pm_runtime_put_sync(&flctl->pdev->dev);
-	return;
-}
-
-static void flctl_select_chip(struct mtd_info *mtd, int chipnr)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	int ret;
-
-	switch (chipnr) {
-	case -1:
-		flctl->flcmncr_base &= ~CE0_ENABLE;
-
-		pm_runtime_get_sync(&flctl->pdev->dev);
-		writel(flctl->flcmncr_base, FLCMNCR(flctl));
-
-		if (flctl->qos_request) {
-			dev_pm_qos_remove_request(&flctl->pm_qos);
-			flctl->qos_request = 0;
-		}
-
-		pm_runtime_put_sync(&flctl->pdev->dev);
-		break;
-	case 0:
-		flctl->flcmncr_base |= CE0_ENABLE;
-
-		if (!flctl->qos_request) {
-			ret = dev_pm_qos_add_request(&flctl->pdev->dev,
-							&flctl->pm_qos,
-							DEV_PM_QOS_RESUME_LATENCY,
-							100);
-			if (ret < 0)
-				dev_err(&flctl->pdev->dev,
-					"PM QoS request failed: %d\n", ret);
-			flctl->qos_request = 1;
-		}
-
-		if (flctl->holden) {
-			pm_runtime_get_sync(&flctl->pdev->dev);
-			writel(HOLDEN, FLHOLDCR(flctl));
-			pm_runtime_put_sync(&flctl->pdev->dev);
-		}
-		break;
-	default:
-		BUG();
-	}
-}
-
-static void flctl_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-
-	memcpy(&flctl->done_buff[flctl->index], buf, len);
-	flctl->index += len;
-}
-
-static uint8_t flctl_read_byte(struct mtd_info *mtd)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint8_t data;
-
-	data = flctl->done_buff[flctl->index];
-	flctl->index++;
-	return data;
-}
-
-static uint16_t flctl_read_word(struct mtd_info *mtd)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	uint16_t *buf = (uint16_t *)&flctl->done_buff[flctl->index];
-
-	flctl->index += 2;
-	return *buf;
-}
-
-static void flctl_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-
-	memcpy(buf, &flctl->done_buff[flctl->index], len);
-	flctl->index += len;
-}
-
-static int flctl_chip_init_tail(struct mtd_info *mtd)
-{
-	struct sh_flctl *flctl = mtd_to_flctl(mtd);
-	struct nand_chip *chip = &flctl->chip;
-
-	if (mtd->writesize == 512) {
-		flctl->page_size = 0;
-		if (chip->chipsize > (32 << 20)) {
-			/* bigger than 32MB */
-			flctl->rw_ADRCNT = ADRCNT_4;
-			flctl->erase_ADRCNT = ADRCNT_3;
-		} else if (chip->chipsize > (2 << 16)) {
-			/* bigger than 128KB */
-			flctl->rw_ADRCNT = ADRCNT_3;
-			flctl->erase_ADRCNT = ADRCNT_2;
-		} else {
-			flctl->rw_ADRCNT = ADRCNT_2;
-			flctl->erase_ADRCNT = ADRCNT_1;
-		}
-	} else {
-		flctl->page_size = 1;
-		if (chip->chipsize > (128 << 20)) {
-			/* bigger than 128MB */
-			flctl->rw_ADRCNT = ADRCNT2_E;
-			flctl->erase_ADRCNT = ADRCNT_3;
-		} else if (chip->chipsize > (8 << 16)) {
-			/* bigger than 512KB */
-			flctl->rw_ADRCNT = ADRCNT_4;
-			flctl->erase_ADRCNT = ADRCNT_2;
-		} else {
-			flctl->rw_ADRCNT = ADRCNT_3;
-			flctl->erase_ADRCNT = ADRCNT_1;
-		}
-	}
-
-	if (flctl->hwecc) {
-		if (mtd->writesize == 512) {
-			mtd_set_ooblayout(mtd, &flctl_4secc_oob_smallpage_ops);
-			chip->badblock_pattern = &flctl_4secc_smallpage;
-		} else {
-			mtd_set_ooblayout(mtd, &flctl_4secc_oob_largepage_ops);
-			chip->badblock_pattern = &flctl_4secc_largepage;
-		}
-
-		chip->ecc.size = 512;
-		chip->ecc.bytes = 10;
-		chip->ecc.strength = 4;
-		chip->ecc.read_page = flctl_read_page_hwecc;
-		chip->ecc.write_page = flctl_write_page_hwecc;
-		chip->ecc.mode = NAND_ECC_HW;
-
-		/* 4 symbols ECC enabled */
-		flctl->flcmncr_base |= _4ECCEN;
-	} else {
-		chip->ecc.mode = NAND_ECC_SOFT;
-		chip->ecc.algo = NAND_ECC_HAMMING;
-	}
-
-	return 0;
-}
-
-static irqreturn_t flctl_handle_flste(int irq, void *dev_id)
-{
-	struct sh_flctl *flctl = dev_id;
-
-	dev_err(&flctl->pdev->dev, "flste irq: %x\n", readl(FLINTDMACR(flctl)));
-	writel(flctl->flintdmacr_base, FLINTDMACR(flctl));
-
-	return IRQ_HANDLED;
-}
-
-struct flctl_soc_config {
-	unsigned long flcmncr_val;
-	unsigned has_hwecc:1;
-	unsigned use_holden:1;
-};
-
-static struct flctl_soc_config flctl_sh7372_config = {
-	.flcmncr_val = CLK_16B_12L_4H | TYPESEL_SET | SHBUSSEL,
-	.has_hwecc = 1,
-	.use_holden = 1,
-};
-
-static const struct of_device_id of_flctl_match[] = {
-	{ .compatible = "renesas,shmobile-flctl-sh7372",
-				.data = &flctl_sh7372_config },
-	{},
-};
-MODULE_DEVICE_TABLE(of, of_flctl_match);
-
-static struct sh_flctl_platform_data *flctl_parse_dt(struct device *dev)
-{
-	const struct of_device_id *match;
-	struct flctl_soc_config *config;
-	struct sh_flctl_platform_data *pdata;
-
-	match = of_match_device(of_flctl_match, dev);
-	if (match)
-		config = (struct flctl_soc_config *)match->data;
-	else {
-		dev_err(dev, "%s: no OF configuration attached\n", __func__);
-		return NULL;
-	}
-
-	pdata = devm_kzalloc(dev, sizeof(struct sh_flctl_platform_data),
-								GFP_KERNEL);
-	if (!pdata)
-		return NULL;
-
-	/* set SoC specific options */
-	pdata->flcmncr_val = config->flcmncr_val;
-	pdata->has_hwecc = config->has_hwecc;
-	pdata->use_holden = config->use_holden;
-
-	return pdata;
-}
-
-static int flctl_probe(struct platform_device *pdev)
-{
-	struct resource *res;
-	struct sh_flctl *flctl;
-	struct mtd_info *flctl_mtd;
-	struct nand_chip *nand;
-	struct sh_flctl_platform_data *pdata;
-	int ret;
-	int irq;
-
-	flctl = devm_kzalloc(&pdev->dev, sizeof(struct sh_flctl), GFP_KERNEL);
-	if (!flctl)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	flctl->reg = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(flctl->reg))
-		return PTR_ERR(flctl->reg);
-	flctl->fifo = res->start + 0x24; /* FLDTFIFO */
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(&pdev->dev, "failed to get flste irq data\n");
-		return -ENXIO;
-	}
-
-	ret = devm_request_irq(&pdev->dev, irq, flctl_handle_flste, IRQF_SHARED,
-			       "flste", flctl);
-	if (ret) {
-		dev_err(&pdev->dev, "request interrupt failed.\n");
-		return ret;
-	}
-
-	if (pdev->dev.of_node)
-		pdata = flctl_parse_dt(&pdev->dev);
-	else
-		pdata = dev_get_platdata(&pdev->dev);
-
-	if (!pdata) {
-		dev_err(&pdev->dev, "no setup data defined\n");
-		return -EINVAL;
-	}
-
-	platform_set_drvdata(pdev, flctl);
-	nand = &flctl->chip;
-	flctl_mtd = nand_to_mtd(nand);
-	nand_set_flash_node(nand, pdev->dev.of_node);
-	flctl_mtd->dev.parent = &pdev->dev;
-	flctl->pdev = pdev;
-	flctl->hwecc = pdata->has_hwecc;
-	flctl->holden = pdata->use_holden;
-	flctl->flcmncr_base = pdata->flcmncr_val;
-	flctl->flintdmacr_base = flctl->hwecc ? (STERINTE | ECERB) : STERINTE;
-
-	/* Set address of hardware control function */
-	/* 20 us command delay time */
-	nand->chip_delay = 20;
-
-	nand->read_byte = flctl_read_byte;
-	nand->read_word = flctl_read_word;
-	nand->write_buf = flctl_write_buf;
-	nand->read_buf = flctl_read_buf;
-	nand->select_chip = flctl_select_chip;
-	nand->cmdfunc = flctl_cmdfunc;
-
-	if (pdata->flcmncr_val & SEL_16BIT)
-		nand->options |= NAND_BUSWIDTH_16;
-
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_resume(&pdev->dev);
-
-	flctl_setup_dma(flctl);
-
-	ret = nand_scan_ident(flctl_mtd, 1, NULL);
-	if (ret)
-		goto err_chip;
-
-	if (nand->options & NAND_BUSWIDTH_16) {
-		/*
-		 * NAND_BUSWIDTH_16 may have been set by nand_scan_ident().
-		 * Add the SEL_16BIT flag in pdata->flcmncr_val and re-assign
-		 * flctl->flcmncr_base to pdata->flcmncr_val.
-		 */
-		pdata->flcmncr_val |= SEL_16BIT;
-		flctl->flcmncr_base = pdata->flcmncr_val;
-	}
-
-	ret = flctl_chip_init_tail(flctl_mtd);
-	if (ret)
-		goto err_chip;
-
-	ret = nand_scan_tail(flctl_mtd);
-	if (ret)
-		goto err_chip;
-
-	ret = mtd_device_register(flctl_mtd, pdata->parts, pdata->nr_parts);
-
-	return 0;
-
-err_chip:
-	flctl_release_dma(flctl);
-	pm_runtime_disable(&pdev->dev);
-	return ret;
-}
-
-static int flctl_remove(struct platform_device *pdev)
-{
-	struct sh_flctl *flctl = platform_get_drvdata(pdev);
-
-	flctl_release_dma(flctl);
-	nand_release(nand_to_mtd(&flctl->chip));
-	pm_runtime_disable(&pdev->dev);
-
-	return 0;
-}
-
-static struct platform_driver flctl_driver = {
-	.remove		= flctl_remove,
-	.driver = {
-		.name	= "sh_flctl",
-		.of_match_table = of_match_ptr(of_flctl_match),
-	},
-};
-
-module_platform_driver_probe(flctl_driver, flctl_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Yoshihiro Shimoda");
-MODULE_DESCRIPTION("SuperH FLCTL driver");
-MODULE_ALIAS("platform:sh_flctl");
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
deleted file mode 100644
index 737efe83cd36..000000000000
--- a/drivers/mtd/nand/sharpsl.c
+++ /dev/null
@@ -1,235 +0,0 @@ 
-/*
- * drivers/mtd/nand/sharpsl.c
- *
- *  Copyright (C) 2004 Richard Purdie
- *  Copyright (C) 2008 Dmitry Baryshkov
- *
- *  Based on Sharp's NAND driver sharp_sl.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/genhd.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-#include <linux/mtd/sharpsl.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach-types.h>
-
-struct sharpsl_nand {
-	struct nand_chip	chip;
-
-	void __iomem		*io;
-};
-
-static inline struct sharpsl_nand *mtd_to_sharpsl(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct sharpsl_nand, chip);
-}
-
-/* register offset */
-#define ECCLPLB		0x00	/* line parity 7 - 0 bit */
-#define ECCLPUB		0x04	/* line parity 15 - 8 bit */
-#define ECCCP		0x08	/* column parity 5 - 0 bit */
-#define ECCCNTR		0x0C	/* ECC byte counter */
-#define ECCCLRR		0x10	/* clear ECC */
-#define FLASHIO		0x14	/* Flash I/O */
-#define FLASHCTL	0x18	/* Flash Control */
-
-/* Flash control bit */
-#define FLRYBY		(1 << 5)
-#define FLCE1		(1 << 4)
-#define FLWP		(1 << 3)
-#define FLALE		(1 << 2)
-#define FLCLE		(1 << 1)
-#define FLCE0		(1 << 0)
-
-/*
- *	hardware specific access to control-lines
- *	ctrl:
- *	NAND_NCE: bit 0 -> ! bit 0 & 4
- *	NAND_CLE: bit 1 -> bit 1
- *	NAND_ALE: bit 2 -> bit 2
- *
- */
-static void sharpsl_nand_hwcontrol(struct mtd_info *mtd, int cmd,
-				   unsigned int ctrl)
-{
-	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		unsigned char bits = ctrl & 0x07;
-
-		bits |= (ctrl & 0x01) << 4;
-
-		bits ^= 0x11;
-
-		writeb((readb(sharpsl->io + FLASHCTL) & ~0x17) | bits, sharpsl->io + FLASHCTL);
-	}
-
-	if (cmd != NAND_CMD_NONE)
-		writeb(cmd, chip->IO_ADDR_W);
-}
-
-static int sharpsl_nand_dev_ready(struct mtd_info *mtd)
-{
-	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
-	return !((readb(sharpsl->io + FLASHCTL) & FLRYBY) == 0);
-}
-
-static void sharpsl_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
-	writeb(0, sharpsl->io + ECCCLRR);
-}
-
-static int sharpsl_nand_calculate_ecc(struct mtd_info *mtd, const u_char * dat, u_char * ecc_code)
-{
-	struct sharpsl_nand *sharpsl = mtd_to_sharpsl(mtd);
-	ecc_code[0] = ~readb(sharpsl->io + ECCLPUB);
-	ecc_code[1] = ~readb(sharpsl->io + ECCLPLB);
-	ecc_code[2] = (~readb(sharpsl->io + ECCCP) << 2) | 0x03;
-	return readb(sharpsl->io + ECCCNTR) != 0;
-}
-
-/*
- * Main initialization routine
- */
-static int sharpsl_nand_probe(struct platform_device *pdev)
-{
-	struct nand_chip *this;
-	struct mtd_info *mtd;
-	struct resource *r;
-	int err = 0;
-	struct sharpsl_nand *sharpsl;
-	struct sharpsl_nand_platform_data *data = dev_get_platdata(&pdev->dev);
-
-	if (!data) {
-		dev_err(&pdev->dev, "no platform data!\n");
-		return -EINVAL;
-	}
-
-	/* Allocate memory for MTD device structure and private data */
-	sharpsl = kzalloc(sizeof(struct sharpsl_nand), GFP_KERNEL);
-	if (!sharpsl)
-		return -ENOMEM;
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!r) {
-		dev_err(&pdev->dev, "no io memory resource defined!\n");
-		err = -ENODEV;
-		goto err_get_res;
-	}
-
-	/* map physical address */
-	sharpsl->io = ioremap(r->start, resource_size(r));
-	if (!sharpsl->io) {
-		dev_err(&pdev->dev, "ioremap to access Sharp SL NAND chip failed\n");
-		err = -EIO;
-		goto err_ioremap;
-	}
-
-	/* Get pointer to private data */
-	this = (struct nand_chip *)(&sharpsl->chip);
-
-	/* Link the private data with the MTD structure */
-	mtd = nand_to_mtd(this);
-	mtd->dev.parent = &pdev->dev;
-	mtd_set_ooblayout(mtd, data->ecc_layout);
-
-	platform_set_drvdata(pdev, sharpsl);
-
-	/*
-	 * PXA initialize
-	 */
-	writeb(readb(sharpsl->io + FLASHCTL) | FLWP, sharpsl->io + FLASHCTL);
-
-	/* Set address of NAND IO lines */
-	this->IO_ADDR_R = sharpsl->io + FLASHIO;
-	this->IO_ADDR_W = sharpsl->io + FLASHIO;
-	/* Set address of hardware control function */
-	this->cmd_ctrl = sharpsl_nand_hwcontrol;
-	this->dev_ready = sharpsl_nand_dev_ready;
-	/* 15 us command delay time */
-	this->chip_delay = 15;
-	/* set eccmode using hardware ECC */
-	this->ecc.mode = NAND_ECC_HW;
-	this->ecc.size = 256;
-	this->ecc.bytes = 3;
-	this->ecc.strength = 1;
-	this->badblock_pattern = data->badblock_pattern;
-	this->ecc.hwctl = sharpsl_nand_enable_hwecc;
-	this->ecc.calculate = sharpsl_nand_calculate_ecc;
-	this->ecc.correct = nand_correct_data;
-
-	/* Scan to find existence of the device */
-	err = nand_scan(mtd, 1);
-	if (err)
-		goto err_scan;
-
-	/* Register the partitions */
-	mtd->name = "sharpsl-nand";
-
-	err = mtd_device_parse_register(mtd, NULL, NULL,
-					data->partitions, data->nr_partitions);
-	if (err)
-		goto err_add;
-
-	/* Return happy */
-	return 0;
-
-err_add:
-	nand_release(mtd);
-
-err_scan:
-	iounmap(sharpsl->io);
-err_ioremap:
-err_get_res:
-	kfree(sharpsl);
-	return err;
-}
-
-/*
- * Clean up routine
- */
-static int sharpsl_nand_remove(struct platform_device *pdev)
-{
-	struct sharpsl_nand *sharpsl = platform_get_drvdata(pdev);
-
-	/* Release resources, unregister device */
-	nand_release(nand_to_mtd(&sharpsl->chip));
-
-	iounmap(sharpsl->io);
-
-	/* Free the MTD device structure */
-	kfree(sharpsl);
-
-	return 0;
-}
-
-static struct platform_driver sharpsl_nand_driver = {
-	.driver = {
-		.name	= "sharpsl-nand",
-	},
-	.probe		= sharpsl_nand_probe,
-	.remove		= sharpsl_nand_remove,
-};
-
-module_platform_driver(sharpsl_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
-MODULE_DESCRIPTION("Device specific logic for NAND flash on Sharp SL-C7xx Series");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
deleted file mode 100644
index c378705c6e2b..000000000000
--- a/drivers/mtd/nand/sm_common.c
+++ /dev/null
@@ -1,202 +0,0 @@ 
-/*
- * Copyright © 2009 - Maxim Levitsky
- * Common routines & support for xD format
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/kernel.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/module.h>
-#include <linux/sizes.h>
-#include "sm_common.h"
-
-static int oob_sm_ooblayout_ecc(struct mtd_info *mtd, int section,
-				struct mtd_oob_region *oobregion)
-{
-	if (section > 1)
-		return -ERANGE;
-
-	oobregion->length = 3;
-	oobregion->offset = ((section + 1) * 8) - 3;
-
-	return 0;
-}
-
-static int oob_sm_ooblayout_free(struct mtd_info *mtd, int section,
-				 struct mtd_oob_region *oobregion)
-{
-	switch (section) {
-	case 0:
-		/* reserved */
-		oobregion->offset = 0;
-		oobregion->length = 4;
-		break;
-	case 1:
-		/* LBA1 */
-		oobregion->offset = 6;
-		oobregion->length = 2;
-		break;
-	case 2:
-		/* LBA2 */
-		oobregion->offset = 11;
-		oobregion->length = 2;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops oob_sm_ops = {
-	.ecc = oob_sm_ooblayout_ecc,
-	.free = oob_sm_ooblayout_free,
-};
-
-/* NOTE: This layout is not compatible with SmartMedia, */
-/* because the 256 byte devices have a page dependent oob layout */
-/* However it does preserve the bad block markers */
-/* If you use smftl, it will bypass this and work correctly */
-/* If you do not, then you break SmartMedia compliance anyway */
-
-static int oob_sm_small_ooblayout_ecc(struct mtd_info *mtd, int section,
-				      struct mtd_oob_region *oobregion)
-{
-	if (section)
-		return -ERANGE;
-
-	oobregion->length = 3;
-	oobregion->offset = 0;
-
-	return 0;
-}
-
-static int oob_sm_small_ooblayout_free(struct mtd_info *mtd, int section,
-				       struct mtd_oob_region *oobregion)
-{
-	switch (section) {
-	case 0:
-		/* reserved */
-		oobregion->offset = 3;
-		oobregion->length = 2;
-		break;
-	case 1:
-		/* LBA1 */
-		oobregion->offset = 6;
-		oobregion->length = 2;
-		break;
-	default:
-		return -ERANGE;
-	}
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops oob_sm_small_ops = {
-	.ecc = oob_sm_small_ooblayout_ecc,
-	.free = oob_sm_small_ooblayout_free,
-};
-
-static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
-	struct mtd_oob_ops ops;
-	struct sm_oob oob;
-	int ret;
-
-	memset(&oob, -1, SM_OOB_SIZE);
-	oob.block_status = 0x0F;
-
-	/* As long as this function is called on erase block boundaries
-		it will work correctly for 256 byte nand */
-	ops.mode = MTD_OPS_PLACE_OOB;
-	ops.ooboffs = 0;
-	ops.ooblen = mtd->oobsize;
-	ops.oobbuf = (void *)&oob;
-	ops.datbuf = NULL;
-
-
-	ret = mtd_write_oob(mtd, ofs, &ops);
-	if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
-		printk(KERN_NOTICE
-			"sm_common: can't mark sector at %i as bad\n",
-								(int)ofs);
-		return -EIO;
-	}
-
-	return 0;
-}
-
-static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
-	LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM",   0x5d, 2,   SZ_8K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 4MiB 3,3V",       0xe3, 4,   SZ_8K, 0),
-	LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V",     0xe5, 4,   SZ_8K, 0),
-	LEGACY_ID_NAND("SmartMedia 4MiB 5V",         0x6b, 4,   SZ_8K, 0),
-	LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM",   0xd5, 4,   SZ_8K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 8MiB 3,3V",       0xe6, 8,   SZ_8K, 0),
-	LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM",   0xd6, 8,   SZ_8K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 16MiB 3,3V",      0x73, 16,  SZ_16K, 0),
-	LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM",  0x57, 16,  SZ_16K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 32MiB 3,3V",      0x75, 32,  SZ_16K, 0),
-	LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM",  0x58, 32,  SZ_16K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 64MiB 3,3V",      0x76, 64,  SZ_16K, 0),
-	LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM",  0xd9, 64,  SZ_16K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 128MiB 3,3V",     0x79, 128, SZ_16K, 0),
-	LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM),
-	LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V",    0x71, 256, SZ_16K, 0),
-	LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM),
-	{NULL}
-};
-
-static struct nand_flash_dev nand_xd_flash_ids[] = {
-	LEGACY_ID_NAND("xD 16MiB 3,3V",  0x73, 16,   SZ_16K, 0),
-	LEGACY_ID_NAND("xD 32MiB 3,3V",  0x75, 32,   SZ_16K, 0),
-	LEGACY_ID_NAND("xD 64MiB 3,3V",  0x76, 64,   SZ_16K, 0),
-	LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128,  SZ_16K, 0),
-	LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256,  SZ_16K, NAND_BROKEN_XD),
-	LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512,  SZ_16K, NAND_BROKEN_XD),
-	LEGACY_ID_NAND("xD 1GiB 3,3V",   0xd3, 1024, SZ_16K, NAND_BROKEN_XD),
-	LEGACY_ID_NAND("xD 2GiB 3,3V",   0xd5, 2048, SZ_16K, NAND_BROKEN_XD),
-	{NULL}
-};
-
-int sm_register_device(struct mtd_info *mtd, int smartmedia)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret;
-
-	chip->options |= NAND_SKIP_BBTSCAN;
-
-	/* Scan for card properties */
-	ret = nand_scan_ident(mtd, 1, smartmedia ?
-		nand_smartmedia_flash_ids : nand_xd_flash_ids);
-
-	if (ret)
-		return ret;
-
-	/* Bad block marker position */
-	chip->badblockpos = 0x05;
-	chip->badblockbits = 7;
-	chip->block_markbad = sm_block_markbad;
-
-	/* ECC layout */
-	if (mtd->writesize == SM_SECTOR_SIZE)
-		mtd_set_ooblayout(mtd, &oob_sm_ops);
-	else if (mtd->writesize == SM_SMALL_PAGE)
-		mtd_set_ooblayout(mtd, &oob_sm_small_ops);
-	else
-		return -ENODEV;
-
-	ret = nand_scan_tail(mtd);
-
-	if (ret)
-		return ret;
-
-	return mtd_device_register(mtd, NULL, 0);
-}
-EXPORT_SYMBOL_GPL(sm_register_device);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
-MODULE_DESCRIPTION("Common SmartMedia/xD functions");
diff --git a/drivers/mtd/nand/sm_common.h b/drivers/mtd/nand/sm_common.h
deleted file mode 100644
index d3e028e58b0f..000000000000
--- a/drivers/mtd/nand/sm_common.h
+++ /dev/null
@@ -1,61 +0,0 @@ 
-/*
- * Copyright © 2009 - Maxim Levitsky
- * Common routines & support for SmartMedia/xD format
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/bitops.h>
-#include <linux/mtd/mtd.h>
-
-/* Full oob structure as written on the flash */
-struct sm_oob {
-	uint32_t reserved;
-	uint8_t data_status;
-	uint8_t block_status;
-	uint8_t lba_copy1[2];
-	uint8_t ecc2[3];
-	uint8_t lba_copy2[2];
-	uint8_t ecc1[3];
-} __packed;
-
-
-/* one sector is always 512 bytes, but it can consist of two nand pages */
-#define SM_SECTOR_SIZE		512
-
-/* oob area is also 16 bytes, but might be from two pages */
-#define SM_OOB_SIZE		16
-
-/* This is the maximum zone size, and all devices that have more than one
-   zone have this size */
-#define SM_MAX_ZONE_SIZE 	1024
-
-/* support for small page nand */
-#define SM_SMALL_PAGE 		256
-#define SM_SMALL_OOB_SIZE	8
-
-
-extern int sm_register_device(struct mtd_info *mtd, int smartmedia);
-
-
-static inline int sm_sector_valid(struct sm_oob *oob)
-{
-	return hweight16(oob->data_status) >= 5;
-}
-
-static inline int sm_block_valid(struct sm_oob *oob)
-{
-	return hweight16(oob->block_status) >= 7;
-}
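
A quick worked example of the thresholds used by the two helpers above (the byte values are illustrative; only the popcount thresholds come from the code): sm_sector_valid() accepts a data_status of 0xF8 (five bits set) but rejects 0x07 (three bits set), while sm_block_valid() needs at least seven set bits, so 0xFF passes and 0x7E (six bits set) does not.
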
-
-static inline int sm_block_erased(struct sm_oob *oob)
-{
-	static const uint32_t erased_pattern[4] = {
-		0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
-
-	/* First test for erased block */
-	if (!memcmp(oob, erased_pattern, sizeof(*oob)))
-		return 1;
-	return 0;
-}
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
deleted file mode 100644
index f5a3e7252b82..000000000000
--- a/drivers/mtd/nand/socrates_nand.c
+++ /dev/null
@@ -1,251 +0,0 @@ 
-/*
- * drivers/mtd/nand/socrates_nand.c
- *
- *  Copyright © 2008 Ilya Yanok, Emcraft Systems
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/io.h>
-
-#define FPGA_NAND_CMD_MASK		(0x7 << 28)
-#define FPGA_NAND_CMD_COMMAND		(0x0 << 28)
-#define FPGA_NAND_CMD_ADDR		(0x1 << 28)
-#define FPGA_NAND_CMD_READ		(0x2 << 28)
-#define FPGA_NAND_CMD_WRITE		(0x3 << 28)
-#define FPGA_NAND_BUSY			(0x1 << 15)
-#define FPGA_NAND_ENABLE		(0x1 << 31)
-#define FPGA_NAND_DATA_SHIFT		16
-
-struct socrates_nand_host {
-	struct nand_chip	nand_chip;
-	void __iomem		*io_base;
-	struct device		*dev;
-};
-
-/**
- * socrates_nand_write_buf -  write buffer to chip
- * @mtd:	MTD device structure
- * @buf:	data buffer
- * @len:	number of bytes to write
- */
-static void socrates_nand_write_buf(struct mtd_info *mtd,
-		const uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct socrates_nand_host *host = nand_get_controller_data(this);
-
-	for (i = 0; i < len; i++) {
-		out_be32(host->io_base, FPGA_NAND_ENABLE |
-				FPGA_NAND_CMD_WRITE |
-				(buf[i] << FPGA_NAND_DATA_SHIFT));
-	}
-}
-
-/**
- * socrates_nand_read_buf -  read chip data into buffer
- * @mtd:	MTD device structure
- * @buf:	buffer to store data
- * @len:	number of bytes to read
- */
-static void socrates_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	int i;
-	struct nand_chip *this = mtd_to_nand(mtd);
-	struct socrates_nand_host *host = nand_get_controller_data(this);
-	uint32_t val;
-
-	val = FPGA_NAND_ENABLE | FPGA_NAND_CMD_READ;
-
-	out_be32(host->io_base, val);
-	for (i = 0; i < len; i++) {
-		buf[i] = (in_be32(host->io_base) >>
-				FPGA_NAND_DATA_SHIFT) & 0xff;
-	}
-}
-
-/**
- * socrates_nand_read_byte -  read one byte from the chip
- * @mtd:	MTD device structure
- */
-static uint8_t socrates_nand_read_byte(struct mtd_info *mtd)
-{
-	uint8_t byte;
-	socrates_nand_read_buf(mtd, &byte, sizeof(byte));
-	return byte;
-}
-
-/**
- * socrates_nand_read_word -  read one word from the chip
- * @mtd:	MTD device structure
- */
-static uint16_t socrates_nand_read_word(struct mtd_info *mtd)
-{
-	uint16_t word;
-	socrates_nand_read_buf(mtd, (uint8_t *)&word, sizeof(word));
-	return word;
-}
-
-/*
- * Hardware specific access to control-lines
- */
-static void socrates_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
-		unsigned int ctrl)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
-	uint32_t val;
-
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		val = FPGA_NAND_CMD_COMMAND;
-	else
-		val = FPGA_NAND_CMD_ADDR;
-
-	if (ctrl & NAND_NCE)
-		val |= FPGA_NAND_ENABLE;
-
-	val |= (cmd & 0xff) << FPGA_NAND_DATA_SHIFT;
-
-	out_be32(host->io_base, val);
-}
-
-/*
- * Read the Device Ready pin.
- */
-static int socrates_nand_device_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *nand_chip = mtd_to_nand(mtd);
-	struct socrates_nand_host *host = nand_get_controller_data(nand_chip);
-
-	if (in_be32(host->io_base) & FPGA_NAND_BUSY)
-		return 0; /* busy */
-	return 1;
-}
-
-/*
- * Probe for the NAND device.
- */
-static int socrates_nand_probe(struct platform_device *ofdev)
-{
-	struct socrates_nand_host *host;
-	struct mtd_info *mtd;
-	struct nand_chip *nand_chip;
-	int res;
-
-	/* Allocate memory for the device structure (and zero it) */
-	host = devm_kzalloc(&ofdev->dev, sizeof(*host), GFP_KERNEL);
-	if (!host)
-		return -ENOMEM;
-
-	host->io_base = of_iomap(ofdev->dev.of_node, 0);
-	if (host->io_base == NULL) {
-		dev_err(&ofdev->dev, "ioremap failed\n");
-		return -EIO;
-	}
-
-	nand_chip = &host->nand_chip;
-	mtd = nand_to_mtd(nand_chip);
-	host->dev = &ofdev->dev;
-
-	/* link the private data structures */
-	nand_set_controller_data(nand_chip, host);
-	nand_set_flash_node(nand_chip, ofdev->dev.of_node);
-	mtd->name = "socrates_nand";
-	mtd->dev.parent = &ofdev->dev;
-
-	/* should never be accessed directly */
-	nand_chip->IO_ADDR_R = (void *)0xdeadbeef;
-	nand_chip->IO_ADDR_W = (void *)0xdeadbeef;
-
-	nand_chip->cmd_ctrl = socrates_nand_cmd_ctrl;
-	nand_chip->read_byte = socrates_nand_read_byte;
-	nand_chip->read_word = socrates_nand_read_word;
-	nand_chip->write_buf = socrates_nand_write_buf;
-	nand_chip->read_buf = socrates_nand_read_buf;
-	nand_chip->dev_ready = socrates_nand_device_ready;
-
-	nand_chip->ecc.mode = NAND_ECC_SOFT;	/* enable ECC */
-	nand_chip->ecc.algo = NAND_ECC_HAMMING;
-
-	/* TODO: I have no idea what the real delay is. */
-	nand_chip->chip_delay = 20;		/* 20us command delay time */
-
-	dev_set_drvdata(&ofdev->dev, host);
-
-	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		res = -ENXIO;
-		goto out;
-	}
-
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		res = -ENXIO;
-		goto out;
-	}
-
-	res = mtd_device_register(mtd, NULL, 0);
-	if (!res)
-		return res;
-
-	nand_release(mtd);
-
-out:
-	iounmap(host->io_base);
-	return res;
-}
-
-/*
- * Remove a NAND device.
- */
-static int socrates_nand_remove(struct platform_device *ofdev)
-{
-	struct socrates_nand_host *host = dev_get_drvdata(&ofdev->dev);
-	struct mtd_info *mtd = nand_to_mtd(&host->nand_chip);
-
-	nand_release(mtd);
-
-	iounmap(host->io_base);
-
-	return 0;
-}
-
-static const struct of_device_id socrates_nand_match[] =
-{
-	{
-		.compatible   = "abb,socrates-nand",
-	},
-	{},
-};
-
-MODULE_DEVICE_TABLE(of, socrates_nand_match);
-
-static struct platform_driver socrates_nand_driver = {
-	.driver = {
-		.name = "socrates_nand",
-		.of_match_table = socrates_nand_match,
-	},
-	.probe		= socrates_nand_probe,
-	.remove		= socrates_nand_remove,
-};
-
-module_platform_driver(socrates_nand_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Ilya Yanok");
-MODULE_DESCRIPTION("NAND driver for Socrates board");
diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c
deleted file mode 100644
index ccccc7ab9023..000000000000
--- a/drivers/mtd/nand/sunxi_nand.c
+++ /dev/null
@@ -1,2291 +0,0 @@ 
-/*
- * Copyright (C) 2013 Boris BREZILLON <b.brezillon.dev@gmail.com>
- *
- * Derived from:
- *	https://github.com/yuq/sunxi-nfc-mtd
- *	Copyright (C) 2013 Qiang Yu <yuq825@gmail.com>
- *
- *	https://github.com/hno/Allwinner-Info
- *	Copyright (C) 2013 Henrik Nordström <Henrik Nordström>
- *
- *	Copyright (C) 2013 Dmitriy B. <rzk333@gmail.com>
- *	Copyright (C) 2013 Sergey Lapin <slapin@ossfans.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/dmaengine.h>
-#include <linux/gpio.h>
-#include <linux/interrupt.h>
-#include <linux/iopoll.h>
-#include <linux/reset.h>
-
-#define NFC_REG_CTL		0x0000
-#define NFC_REG_ST		0x0004
-#define NFC_REG_INT		0x0008
-#define NFC_REG_TIMING_CTL	0x000C
-#define NFC_REG_TIMING_CFG	0x0010
-#define NFC_REG_ADDR_LOW	0x0014
-#define NFC_REG_ADDR_HIGH	0x0018
-#define NFC_REG_SECTOR_NUM	0x001C
-#define NFC_REG_CNT		0x0020
-#define NFC_REG_CMD		0x0024
-#define NFC_REG_RCMD_SET	0x0028
-#define NFC_REG_WCMD_SET	0x002C
-#define NFC_REG_IO_DATA		0x0030
-#define NFC_REG_ECC_CTL		0x0034
-#define NFC_REG_ECC_ST		0x0038
-#define NFC_REG_DEBUG		0x003C
-#define NFC_REG_ECC_ERR_CNT(x)	((0x0040 + (x)) & ~0x3)
-#define NFC_REG_USER_DATA(x)	(0x0050 + ((x) * 4))
-#define NFC_REG_SPARE_AREA	0x00A0
-#define NFC_REG_PAT_ID		0x00A4
-#define NFC_RAM0_BASE		0x0400
-#define NFC_RAM1_BASE		0x0800
-
-/* define bit use in NFC_CTL */
-#define NFC_EN			BIT(0)
-#define NFC_RESET		BIT(1)
-#define NFC_BUS_WIDTH_MSK	BIT(2)
-#define NFC_BUS_WIDTH_8		(0 << 2)
-#define NFC_BUS_WIDTH_16	(1 << 2)
-#define NFC_RB_SEL_MSK		BIT(3)
-#define NFC_RB_SEL(x)		((x) << 3)
-#define NFC_CE_SEL_MSK		GENMASK(26, 24)
-#define NFC_CE_SEL(x)		((x) << 24)
-#define NFC_CE_CTL		BIT(6)
-#define NFC_PAGE_SHIFT_MSK	GENMASK(11, 8)
-#define NFC_PAGE_SHIFT(x)	(((x) < 10 ? 0 : (x) - 10) << 8)
-#define NFC_SAM			BIT(12)
-#define NFC_RAM_METHOD		BIT(14)
-#define NFC_DEBUG_CTL		BIT(31)
-
-/* define bit use in NFC_ST */
-#define NFC_RB_B2R		BIT(0)
-#define NFC_CMD_INT_FLAG	BIT(1)
-#define NFC_DMA_INT_FLAG	BIT(2)
-#define NFC_CMD_FIFO_STATUS	BIT(3)
-#define NFC_STA			BIT(4)
-#define NFC_NATCH_INT_FLAG	BIT(5)
-#define NFC_RB_STATE(x)		BIT(x + 8)
-
-/* define bit use in NFC_INT */
-#define NFC_B2R_INT_ENABLE	BIT(0)
-#define NFC_CMD_INT_ENABLE	BIT(1)
-#define NFC_DMA_INT_ENABLE	BIT(2)
-#define NFC_INT_MASK		(NFC_B2R_INT_ENABLE | \
-				 NFC_CMD_INT_ENABLE | \
-				 NFC_DMA_INT_ENABLE)
-
-/* define bit use in NFC_TIMING_CTL */
-#define NFC_TIMING_CTL_EDO	BIT(8)
-
-/* define NFC_TIMING_CFG register layout */
-#define NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD)		\
-	(((tWB) & 0x3) | (((tADL) & 0x3) << 2) |		\
-	(((tWHR) & 0x3) << 4) | (((tRHW) & 0x3) << 6) |		\
-	(((tCAD) & 0x7) << 8))
-
-/* define bit use in NFC_CMD */
-#define NFC_CMD_LOW_BYTE_MSK	GENMASK(7, 0)
-#define NFC_CMD_HIGH_BYTE_MSK	GENMASK(15, 8)
-#define NFC_CMD(x)		(x)
-#define NFC_ADR_NUM_MSK		GENMASK(18, 16)
-#define NFC_ADR_NUM(x)		(((x) - 1) << 16)
-#define NFC_SEND_ADR		BIT(19)
-#define NFC_ACCESS_DIR		BIT(20)
-#define NFC_DATA_TRANS		BIT(21)
-#define NFC_SEND_CMD1		BIT(22)
-#define NFC_WAIT_FLAG		BIT(23)
-#define NFC_SEND_CMD2		BIT(24)
-#define NFC_SEQ			BIT(25)
-#define NFC_DATA_SWAP_METHOD	BIT(26)
-#define NFC_ROW_AUTO_INC	BIT(27)
-#define NFC_SEND_CMD3		BIT(28)
-#define NFC_SEND_CMD4		BIT(29)
-#define NFC_CMD_TYPE_MSK	GENMASK(31, 30)
-#define NFC_NORMAL_OP		(0 << 30)
-#define NFC_ECC_OP		(1 << 30)
-#define NFC_PAGE_OP		(2 << 30)
-
-/* define bit use in NFC_RCMD_SET */
-#define NFC_READ_CMD_MSK	GENMASK(7, 0)
-#define NFC_RND_READ_CMD0_MSK	GENMASK(15, 8)
-#define NFC_RND_READ_CMD1_MSK	GENMASK(23, 16)
-
-/* define bit use in NFC_WCMD_SET */
-#define NFC_PROGRAM_CMD_MSK	GENMASK(7, 0)
-#define NFC_RND_WRITE_CMD_MSK	GENMASK(15, 8)
-#define NFC_READ_CMD0_MSK	GENMASK(23, 16)
-#define NFC_READ_CMD1_MSK	GENMASK(31, 24)
-
-/* define bit use in NFC_ECC_CTL */
-#define NFC_ECC_EN		BIT(0)
-#define NFC_ECC_PIPELINE	BIT(3)
-#define NFC_ECC_EXCEPTION	BIT(4)
-#define NFC_ECC_BLOCK_SIZE_MSK	BIT(5)
-#define NFC_RANDOM_EN		BIT(9)
-#define NFC_RANDOM_DIRECTION	BIT(10)
-#define NFC_ECC_MODE_MSK	GENMASK(15, 12)
-#define NFC_ECC_MODE(x)		((x) << 12)
-#define NFC_RANDOM_SEED_MSK	GENMASK(30, 16)
-#define NFC_RANDOM_SEED(x)	((x) << 16)
-
-/* define bit use in NFC_ECC_ST */
-#define NFC_ECC_ERR(x)		BIT(x)
-#define NFC_ECC_ERR_MSK		GENMASK(15, 0)
-#define NFC_ECC_PAT_FOUND(x)	BIT(x + 16)
-#define NFC_ECC_ERR_CNT(b, x)	(((x) >> (((b) % 4) * 8)) & 0xff)
-
-#define NFC_DEFAULT_TIMEOUT_MS	1000
-
-#define NFC_SRAM_SIZE		1024
-
-#define NFC_MAX_CS		7
-
-/*
- * Ready/Busy detection type: describes the Ready/Busy detection modes
- *
- * @RB_NONE:	no external detection available, rely on STATUS command
- *		and software timeouts
- * @RB_NATIVE:	use sunxi NAND controller Ready/Busy support. The Ready/Busy
- *		pin of the NAND flash chip must be connected to one of the
- *		native NAND R/B pins (those which can be muxed to the NAND
- *		Controller)
- * @RB_GPIO:	use a simple GPIO to handle Ready/Busy status. The Ready/Busy
- *		pin of the NAND flash chip must be connected to a GPIO capable
- *		pin.
- */
-enum sunxi_nand_rb_type {
-	RB_NONE,
-	RB_NATIVE,
-	RB_GPIO,
-};
-
-/*
- * Ready/Busy structure: stores information related to Ready/Busy detection
- *
- * @type:	the Ready/Busy detection mode
- * @info:	information related to the R/B detection mode. Either a gpio
- *		id or a native R/B id (those supported by the NAND controller).
- */
-struct sunxi_nand_rb {
-	enum sunxi_nand_rb_type type;
-	union {
-		int gpio;
-		int nativeid;
-	} info;
-};
-
-/*
- * Chip Select structure: stores information related to NAND Chip Select
- *
- * @cs:		the NAND CS id used to communicate with a NAND Chip
- * @rb:		the Ready/Busy description
- */
-struct sunxi_nand_chip_sel {
-	u8 cs;
-	struct sunxi_nand_rb rb;
-};
-
-/*
- * sunxi HW ECC infos: stores information related to HW ECC support
- *
- * @mode:	the sunxi ECC mode field deduced from ECC requirements
- */
-struct sunxi_nand_hw_ecc {
-	int mode;
-};
-
-/*
- * NAND chip structure: stores NAND chip device related information
- *
- * @node:		used to store NAND chips into a list
- * @nand:		base NAND chip structure
- * @mtd:		base MTD structure
- * @clk_rate:		clk_rate required for this NAND chip
- * @timing_cfg:		TIMING_CFG register value for this NAND chip
- * @selected:		current active CS
- * @nsels:		number of CS lines required by the NAND chip
- * @sels:		array of CS lines descriptions
- */
-struct sunxi_nand_chip {
-	struct list_head node;
-	struct nand_chip nand;
-	unsigned long clk_rate;
-	u32 timing_cfg;
-	u32 timing_ctl;
-	int selected;
-	int addr_cycles;
-	u32 addr[2];
-	int cmd_cycles;
-	u8 cmd[2];
-	int nsels;
-	struct sunxi_nand_chip_sel sels[0];
-};
-
-static inline struct sunxi_nand_chip *to_sunxi_nand(struct nand_chip *nand)
-{
-	return container_of(nand, struct sunxi_nand_chip, nand);
-}
-
-/*
- * NAND Controller structure: stores sunxi NAND controller information
- *
- * @controller:		base controller structure
- * @dev:		parent device (used to print error messages)
- * @regs:		NAND controller registers
- * @ahb_clk:		NAND Controller AHB clock
- * @mod_clk:		NAND Controller mod clock
- * @assigned_cs:	bitmask describing already assigned CS lines
- * @clk_rate:		NAND controller current clock rate
- * @chips:		a list containing all the NAND chips attached to
- *			this NAND controller
- * @complete:		a completion object used to wait for NAND
- *			controller events
- */
-struct sunxi_nfc {
-	struct nand_hw_control controller;
-	struct device *dev;
-	void __iomem *regs;
-	struct clk *ahb_clk;
-	struct clk *mod_clk;
-	struct reset_control *reset;
-	unsigned long assigned_cs;
-	unsigned long clk_rate;
-	struct list_head chips;
-	struct completion complete;
-	struct dma_chan *dmac;
-};
-
-static inline struct sunxi_nfc *to_sunxi_nfc(struct nand_hw_control *ctrl)
-{
-	return container_of(ctrl, struct sunxi_nfc, controller);
-}
-
-static irqreturn_t sunxi_nfc_interrupt(int irq, void *dev_id)
-{
-	struct sunxi_nfc *nfc = dev_id;
-	u32 st = readl(nfc->regs + NFC_REG_ST);
-	u32 ien = readl(nfc->regs + NFC_REG_INT);
-
-	if (!(ien & st))
-		return IRQ_NONE;
-
-	if ((ien & st) == ien)
-		complete(&nfc->complete);
-
-	writel(st & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
-	writel(~st & ien & NFC_INT_MASK, nfc->regs + NFC_REG_INT);
-
-	return IRQ_HANDLED;
-}
-
-static int sunxi_nfc_wait_events(struct sunxi_nfc *nfc, u32 events,
-				 bool use_polling, unsigned int timeout_ms)
-{
-	int ret;
-
-	if (events & ~NFC_INT_MASK)
-		return -EINVAL;
-
-	if (!timeout_ms)
-		timeout_ms = NFC_DEFAULT_TIMEOUT_MS;
-
-	if (!use_polling) {
-		init_completion(&nfc->complete);
-
-		writel(events, nfc->regs + NFC_REG_INT);
-
-		ret = wait_for_completion_timeout(&nfc->complete,
-						msecs_to_jiffies(timeout_ms));
-
-		writel(0, nfc->regs + NFC_REG_INT);
-	} else {
-		u32 status;
-
-		ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
-					 (status & events) == events, 1,
-					 timeout_ms * 1000);
-	}
-
-	writel(events & NFC_INT_MASK, nfc->regs + NFC_REG_ST);
-
-	if (ret)
-		dev_err(nfc->dev, "wait interrupt timedout\n");
-
-	return ret;
-}
-
-static int sunxi_nfc_wait_cmd_fifo_empty(struct sunxi_nfc *nfc)
-{
-	u32 status;
-	int ret;
-
-	ret = readl_poll_timeout(nfc->regs + NFC_REG_ST, status,
-				 !(status & NFC_CMD_FIFO_STATUS), 1,
-				 NFC_DEFAULT_TIMEOUT_MS * 1000);
-	if (ret)
-		dev_err(nfc->dev, "wait for empty cmd FIFO timedout\n");
-
-	return ret;
-}
-
-static int sunxi_nfc_rst(struct sunxi_nfc *nfc)
-{
-	u32 ctl;
-	int ret;
-
-	writel(0, nfc->regs + NFC_REG_ECC_CTL);
-	writel(NFC_RESET, nfc->regs + NFC_REG_CTL);
-
-	ret = readl_poll_timeout(nfc->regs + NFC_REG_CTL, ctl,
-				 !(ctl & NFC_RESET), 1,
-				 NFC_DEFAULT_TIMEOUT_MS * 1000);
-	if (ret)
-		dev_err(nfc->dev, "wait for NAND controller reset timedout\n");
-
-	return ret;
-}
-
-static int sunxi_nfc_dma_op_prepare(struct mtd_info *mtd, const void *buf,
-				    int chunksize, int nchunks,
-				    enum dma_data_direction ddir,
-				    struct scatterlist *sg)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct dma_async_tx_descriptor *dmad;
-	enum dma_transfer_direction tdir;
-	dma_cookie_t dmat;
-	int ret;
-
-	if (ddir == DMA_FROM_DEVICE)
-		tdir = DMA_DEV_TO_MEM;
-	else
-		tdir = DMA_MEM_TO_DEV;
-
-	sg_init_one(sg, buf, nchunks * chunksize);
-	ret = dma_map_sg(nfc->dev, sg, 1, ddir);
-	if (!ret)
-		return -ENOMEM;
-
-	dmad = dmaengine_prep_slave_sg(nfc->dmac, sg, 1, tdir, DMA_CTRL_ACK);
-	if (!dmad) {
-		ret = -EINVAL;
-		goto err_unmap_buf;
-	}
-
-	writel(readl(nfc->regs + NFC_REG_CTL) | NFC_RAM_METHOD,
-	       nfc->regs + NFC_REG_CTL);
-	writel(nchunks, nfc->regs + NFC_REG_SECTOR_NUM);
-	writel(chunksize, nfc->regs + NFC_REG_CNT);
-	dmat = dmaengine_submit(dmad);
-
-	ret = dma_submit_error(dmat);
-	if (ret)
-		goto err_clr_dma_flag;
-
-	return 0;
-
-err_clr_dma_flag:
-	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
-	       nfc->regs + NFC_REG_CTL);
-
-err_unmap_buf:
-	dma_unmap_sg(nfc->dev, sg, 1, ddir);
-	return ret;
-}
-
-static void sunxi_nfc_dma_op_cleanup(struct mtd_info *mtd,
-				     enum dma_data_direction ddir,
-				     struct scatterlist *sg)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
-	dma_unmap_sg(nfc->dev, sg, 1, ddir);
-	writel(readl(nfc->regs + NFC_REG_CTL) & ~NFC_RAM_METHOD,
-	       nfc->regs + NFC_REG_CTL);
-}
-
-static int sunxi_nfc_dev_ready(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	struct sunxi_nand_rb *rb;
-	int ret;
-
-	if (sunxi_nand->selected < 0)
-		return 0;
-
-	rb = &sunxi_nand->sels[sunxi_nand->selected].rb;
-
-	switch (rb->type) {
-	case RB_NATIVE:
-		ret = !!(readl(nfc->regs + NFC_REG_ST) &
-			 NFC_RB_STATE(rb->info.nativeid));
-		break;
-	case RB_GPIO:
-		ret = gpio_get_value(rb->info.gpio);
-		break;
-	case RB_NONE:
-	default:
-		ret = 0;
-		dev_err(nfc->dev, "cannot check R/B NAND status!\n");
-		break;
-	}
-
-	return ret;
-}
-
-static void sunxi_nfc_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	struct sunxi_nand_chip_sel *sel;
-	u32 ctl;
-
-	if (chip > 0 && chip >= sunxi_nand->nsels)
-		return;
-
-	if (chip == sunxi_nand->selected)
-		return;
-
-	ctl = readl(nfc->regs + NFC_REG_CTL) &
-	      ~(NFC_PAGE_SHIFT_MSK | NFC_CE_SEL_MSK | NFC_RB_SEL_MSK | NFC_EN);
-
-	if (chip >= 0) {
-		sel = &sunxi_nand->sels[chip];
-
-		ctl |= NFC_CE_SEL(sel->cs) | NFC_EN |
-		       NFC_PAGE_SHIFT(nand->page_shift);
-		if (sel->rb.type == RB_NONE) {
-			nand->dev_ready = NULL;
-		} else {
-			nand->dev_ready = sunxi_nfc_dev_ready;
-			if (sel->rb.type == RB_NATIVE)
-				ctl |= NFC_RB_SEL(sel->rb.info.nativeid);
-		}
-
-		writel(mtd->writesize, nfc->regs + NFC_REG_SPARE_AREA);
-
-		if (nfc->clk_rate != sunxi_nand->clk_rate) {
-			clk_set_rate(nfc->mod_clk, sunxi_nand->clk_rate);
-			nfc->clk_rate = sunxi_nand->clk_rate;
-		}
-	}
-
-	writel(sunxi_nand->timing_ctl, nfc->regs + NFC_REG_TIMING_CTL);
-	writel(sunxi_nand->timing_cfg, nfc->regs + NFC_REG_TIMING_CFG);
-	writel(ctl, nfc->regs + NFC_REG_CTL);
-
-	sunxi_nand->selected = chip;
-}
-
-static void sunxi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	int ret;
-	int cnt;
-	int offs = 0;
-	u32 tmp;
-
-	while (len > offs) {
-		cnt = min(len - offs, NFC_SRAM_SIZE);
-
-		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-		if (ret)
-			break;
-
-		writel(cnt, nfc->regs + NFC_REG_CNT);
-		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD;
-		writel(tmp, nfc->regs + NFC_REG_CMD);
-
-		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-		if (ret)
-			break;
-
-		if (buf)
-			memcpy_fromio(buf + offs, nfc->regs + NFC_RAM0_BASE,
-				      cnt);
-		offs += cnt;
-	}
-}
-
-static void sunxi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
-				int len)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	int ret;
-	int cnt;
-	int offs = 0;
-	u32 tmp;
-
-	while (len > offs) {
-		cnt = min(len - offs, NFC_SRAM_SIZE);
-
-		ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-		if (ret)
-			break;
-
-		writel(cnt, nfc->regs + NFC_REG_CNT);
-		memcpy_toio(nfc->regs + NFC_RAM0_BASE, buf + offs, cnt);
-		tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
-		      NFC_ACCESS_DIR;
-		writel(tmp, nfc->regs + NFC_REG_CMD);
-
-		ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-		if (ret)
-			break;
-
-		offs += cnt;
-	}
-}
-
-static uint8_t sunxi_nfc_read_byte(struct mtd_info *mtd)
-{
-	uint8_t ret;
-
-	sunxi_nfc_read_buf(mtd, &ret, 1);
-
-	return ret;
-}
-
-static void sunxi_nfc_cmd_ctrl(struct mtd_info *mtd, int dat,
-			       unsigned int ctrl)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	int ret;
-
-	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-	if (ret)
-		return;
-
-	if (dat == NAND_CMD_NONE && (ctrl & NAND_NCE) &&
-	    !(ctrl & (NAND_CLE | NAND_ALE))) {
-		u32 cmd = 0;
-
-		if (!sunxi_nand->addr_cycles && !sunxi_nand->cmd_cycles)
-			return;
-
-		if (sunxi_nand->cmd_cycles--)
-			cmd |= NFC_SEND_CMD1 | sunxi_nand->cmd[0];
-
-		if (sunxi_nand->cmd_cycles--) {
-			cmd |= NFC_SEND_CMD2;
-			writel(sunxi_nand->cmd[1],
-			       nfc->regs + NFC_REG_RCMD_SET);
-		}
-
-		sunxi_nand->cmd_cycles = 0;
-
-		if (sunxi_nand->addr_cycles) {
-			cmd |= NFC_SEND_ADR |
-			       NFC_ADR_NUM(sunxi_nand->addr_cycles);
-			writel(sunxi_nand->addr[0],
-			       nfc->regs + NFC_REG_ADDR_LOW);
-		}
-
-		if (sunxi_nand->addr_cycles > 4)
-			writel(sunxi_nand->addr[1],
-			       nfc->regs + NFC_REG_ADDR_HIGH);
-
-		writel(cmd, nfc->regs + NFC_REG_CMD);
-		sunxi_nand->addr[0] = 0;
-		sunxi_nand->addr[1] = 0;
-		sunxi_nand->addr_cycles = 0;
-		sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-	}
-
-	if (ctrl & NAND_CLE) {
-		sunxi_nand->cmd[sunxi_nand->cmd_cycles++] = dat;
-	} else if (ctrl & NAND_ALE) {
-		sunxi_nand->addr[sunxi_nand->addr_cycles / 4] |=
-				dat << ((sunxi_nand->addr_cycles % 4) * 8);
-		sunxi_nand->addr_cycles++;
-	}
-}
-
-/* These seed values have been extracted from Allwinner's BSP */
-static const u16 sunxi_nfc_randomizer_page_seeds[] = {
-	0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
-	0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
-	0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
-	0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
-	0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
-	0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
-	0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
-	0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
-	0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
-	0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
-	0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
-	0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
-	0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
-	0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
-	0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
-	0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
-};
-
-/*
- * sunxi_nfc_randomizer_ecc512_seeds and sunxi_nfc_randomizer_ecc1024_seeds
- * have been generated using
- * sunxi_nfc_randomizer_step(seed, (step_size * 8) + 15), which is what
- * the randomizer engine does internally before de/scrambling OOB data.
- *
- * Those tables are statically defined to avoid calculating randomizer state
- * at runtime.
- */
-static const u16 sunxi_nfc_randomizer_ecc512_seeds[] = {
-	0x3346, 0x367f, 0x1f18, 0x769a, 0x4f64, 0x068c, 0x2ef1, 0x6b64,
-	0x28a9, 0x15d7, 0x30f8, 0x3659, 0x53db, 0x7c5f, 0x71d4, 0x4409,
-	0x26eb, 0x03cc, 0x655d, 0x47d4, 0x4daa, 0x0877, 0x712d, 0x3617,
-	0x3264, 0x49aa, 0x7f9e, 0x588e, 0x4fbc, 0x7176, 0x7f91, 0x6c6d,
-	0x4b95, 0x5fb7, 0x3844, 0x4037, 0x0184, 0x081b, 0x0ee8, 0x5b91,
-	0x293d, 0x1f71, 0x0e6f, 0x402b, 0x5122, 0x1e52, 0x22be, 0x3d2d,
-	0x75bc, 0x7c60, 0x6291, 0x1a2f, 0x61d4, 0x74aa, 0x4140, 0x29ab,
-	0x472d, 0x2852, 0x017e, 0x15e8, 0x5ec2, 0x17cf, 0x7d0f, 0x06b8,
-	0x117a, 0x6b94, 0x789b, 0x3126, 0x6ac5, 0x5be7, 0x150f, 0x51f8,
-	0x7889, 0x0aa5, 0x663d, 0x77e8, 0x0b87, 0x3dcb, 0x360d, 0x218b,
-	0x512f, 0x7dc9, 0x6a4d, 0x630a, 0x3547, 0x1dd2, 0x5aea, 0x69a5,
-	0x7bfa, 0x5e4f, 0x1519, 0x6430, 0x3a0e, 0x5eb3, 0x5425, 0x0c7a,
-	0x5540, 0x3670, 0x63c1, 0x31e9, 0x5a39, 0x2de7, 0x5979, 0x2891,
-	0x1562, 0x014b, 0x5b05, 0x2756, 0x5a34, 0x13aa, 0x6cb5, 0x2c36,
-	0x5e72, 0x1306, 0x0861, 0x15ef, 0x1ee8, 0x5a37, 0x7ac4, 0x45dd,
-	0x44c4, 0x7266, 0x2f41, 0x3ccc, 0x045e, 0x7d40, 0x7c66, 0x0fa0,
-};
-
-static const u16 sunxi_nfc_randomizer_ecc1024_seeds[] = {
-	0x2cf5, 0x35f1, 0x63a4, 0x5274, 0x2bd2, 0x778b, 0x7285, 0x32b6,
-	0x6a5c, 0x70d6, 0x757d, 0x6769, 0x5375, 0x1e81, 0x0cf3, 0x3982,
-	0x6787, 0x042a, 0x6c49, 0x1925, 0x56a8, 0x40a9, 0x063e, 0x7bd9,
-	0x4dbf, 0x55ec, 0x672e, 0x7334, 0x5185, 0x4d00, 0x232a, 0x7e07,
-	0x445d, 0x6b92, 0x528f, 0x4255, 0x53ba, 0x7d82, 0x2a2e, 0x3a4e,
-	0x75eb, 0x450c, 0x6844, 0x1b5d, 0x581a, 0x4cc6, 0x0379, 0x37b2,
-	0x419f, 0x0e92, 0x6b27, 0x5624, 0x01e3, 0x07c1, 0x44a5, 0x130c,
-	0x13e8, 0x5910, 0x0876, 0x60c5, 0x54e3, 0x5b7f, 0x2269, 0x509f,
-	0x7665, 0x36fd, 0x3e9a, 0x0579, 0x6295, 0x14ef, 0x0a81, 0x1bcc,
-	0x4b16, 0x64db, 0x0514, 0x4f07, 0x0591, 0x3576, 0x6853, 0x0d9e,
-	0x259f, 0x38b7, 0x64fb, 0x3094, 0x4693, 0x6ddd, 0x29bb, 0x0bc8,
-	0x3f47, 0x490e, 0x0c0e, 0x7933, 0x3c9e, 0x5840, 0x398d, 0x3e68,
-	0x4af1, 0x71f5, 0x57cf, 0x1121, 0x64eb, 0x3579, 0x15ac, 0x584d,
-	0x5f2a, 0x47e2, 0x6528, 0x6eac, 0x196e, 0x6b96, 0x0450, 0x0179,
-	0x609c, 0x06e1, 0x4626, 0x42c7, 0x273e, 0x486f, 0x0705, 0x1601,
-	0x145b, 0x407e, 0x062b, 0x57a5, 0x53f9, 0x5659, 0x4410, 0x3ccd,
-};
-
-static u16 sunxi_nfc_randomizer_step(u16 state, int count)
-{
-	state &= 0x7fff;
-
-	/*
-	 * This loop is just a simple implementation of a Fibonacci LFSR using
-	 * the x16 + x15 + 1 polynomial.
-	 */
-	while (count--)
-		state = ((state >> 1) |
-			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
-
-	return state;
-}
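
For reference, the ECC seed tables above can in principle be regenerated from the page seeds using this LFSR with the step counts named in the comment, 512 * 8 + 15 and 1024 * 8 + 15. A minimal stand-alone user-space sketch (not part of this patch; lfsr_step() simply mirrors sunxi_nfc_randomizer_step()):

#include <stdio.h>

/* Same Fibonacci LFSR (x16 + x15 + 1) as sunxi_nfc_randomizer_step(). */
static unsigned short lfsr_step(unsigned short state, int count)
{
	state &= 0x7fff;
	while (count--)
		state = ((state >> 1) |
			 (((state ^ (state >> 1)) & 1) << 14)) & 0x7fff;
	return state;
}

int main(void)
{
	/* First three entries of sunxi_nfc_randomizer_page_seeds[] only. */
	static const unsigned short page_seeds[] = { 0x2b75, 0x0bd0, 0x5ca3 };
	int i;

	for (i = 0; i < 3; i++)
		printf("page 0x%04x -> ecc512 0x%04x, ecc1024 0x%04x\n",
		       page_seeds[i],
		       lfsr_step(page_seeds[i], 512 * 8 + 15),
		       lfsr_step(page_seeds[i], 1024 * 8 + 15));
	return 0;
}
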
-
-static u16 sunxi_nfc_randomizer_state(struct mtd_info *mtd, int page, bool ecc)
-{
-	const u16 *seeds = sunxi_nfc_randomizer_page_seeds;
-	int mod = mtd_div_by_ws(mtd->erasesize, mtd);
-
-	if (mod > ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds))
-		mod = ARRAY_SIZE(sunxi_nfc_randomizer_page_seeds);
-
-	if (ecc) {
-		if (mtd->ecc_step_size == 512)
-			seeds = sunxi_nfc_randomizer_ecc512_seeds;
-		else
-			seeds = sunxi_nfc_randomizer_ecc1024_seeds;
-	}
-
-	return seeds[page % mod];
-}
-
-static void sunxi_nfc_randomizer_config(struct mtd_info *mtd,
-					int page, bool ecc)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	u32 ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
-	u16 state;
-
-	if (!(nand->options & NAND_NEED_SCRAMBLING))
-		return;
-
-	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
-	state = sunxi_nfc_randomizer_state(mtd, page, ecc);
-	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_SEED_MSK;
-	writel(ecc_ctl | NFC_RANDOM_SEED(state), nfc->regs + NFC_REG_ECC_CTL);
-}
-
-static void sunxi_nfc_randomizer_enable(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
-	if (!(nand->options & NAND_NEED_SCRAMBLING))
-		return;
-
-	writel(readl(nfc->regs + NFC_REG_ECC_CTL) | NFC_RANDOM_EN,
-	       nfc->regs + NFC_REG_ECC_CTL);
-}
-
-static void sunxi_nfc_randomizer_disable(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
-	if (!(nand->options & NAND_NEED_SCRAMBLING))
-		return;
-
-	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_RANDOM_EN,
-	       nfc->regs + NFC_REG_ECC_CTL);
-}
-
-static void sunxi_nfc_randomize_bbm(struct mtd_info *mtd, int page, u8 *bbm)
-{
-	u16 state = sunxi_nfc_randomizer_state(mtd, page, true);
-
-	bbm[0] ^= state;
-	bbm[1] ^= sunxi_nfc_randomizer_step(state, 8);
-}
-
-static void sunxi_nfc_randomizer_write_buf(struct mtd_info *mtd,
-					   const uint8_t *buf, int len,
-					   bool ecc, int page)
-{
-	sunxi_nfc_randomizer_config(mtd, page, ecc);
-	sunxi_nfc_randomizer_enable(mtd);
-	sunxi_nfc_write_buf(mtd, buf, len);
-	sunxi_nfc_randomizer_disable(mtd);
-}
-
-static void sunxi_nfc_randomizer_read_buf(struct mtd_info *mtd, uint8_t *buf,
-					  int len, bool ecc, int page)
-{
-	sunxi_nfc_randomizer_config(mtd, page, ecc);
-	sunxi_nfc_randomizer_enable(mtd);
-	sunxi_nfc_read_buf(mtd, buf, len);
-	sunxi_nfc_randomizer_disable(mtd);
-}
-
-static void sunxi_nfc_hw_ecc_enable(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct sunxi_nand_hw_ecc *data = nand->ecc.priv;
-	u32 ecc_ctl;
-
-	ecc_ctl = readl(nfc->regs + NFC_REG_ECC_CTL);
-	ecc_ctl &= ~(NFC_ECC_MODE_MSK | NFC_ECC_PIPELINE |
-		     NFC_ECC_BLOCK_SIZE_MSK);
-	ecc_ctl |= NFC_ECC_EN | NFC_ECC_MODE(data->mode) | NFC_ECC_EXCEPTION |
-		   NFC_ECC_PIPELINE;
-
-	writel(ecc_ctl, nfc->regs + NFC_REG_ECC_CTL);
-}
-
-static void sunxi_nfc_hw_ecc_disable(struct mtd_info *mtd)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
-	writel(readl(nfc->regs + NFC_REG_ECC_CTL) & ~NFC_ECC_EN,
-	       nfc->regs + NFC_REG_ECC_CTL);
-}
-
-static inline void sunxi_nfc_user_data_to_buf(u32 user_data, u8 *buf)
-{
-	buf[0] = user_data;
-	buf[1] = user_data >> 8;
-	buf[2] = user_data >> 16;
-	buf[3] = user_data >> 24;
-}
-
-static inline u32 sunxi_nfc_buf_to_user_data(const u8 *buf)
-{
-	return buf[0] | (buf[1] << 8) | (buf[2] << 16) | (buf[3] << 24);
-}
-
-static void sunxi_nfc_hw_ecc_get_prot_oob_bytes(struct mtd_info *mtd, u8 *oob,
-						int step, bool bbm, int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-
-	sunxi_nfc_user_data_to_buf(readl(nfc->regs + NFC_REG_USER_DATA(step)),
-				   oob);
-
-	/* De-randomize the Bad Block Marker. */
-	if (bbm && (nand->options & NAND_NEED_SCRAMBLING))
-		sunxi_nfc_randomize_bbm(mtd, page, oob);
-}
-
-static void sunxi_nfc_hw_ecc_set_prot_oob_bytes(struct mtd_info *mtd,
-						const u8 *oob, int step,
-						bool bbm, int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	u8 user_data[4];
-
-	/* Randomize the Bad Block Marker. */
-	if (bbm && (nand->options & NAND_NEED_SCRAMBLING)) {
-		memcpy(user_data, oob, sizeof(user_data));
-		sunxi_nfc_randomize_bbm(mtd, page, user_data);
-		oob = user_data;
-	}
-
-	writel(sunxi_nfc_buf_to_user_data(oob),
-	       nfc->regs + NFC_REG_USER_DATA(step));
-}
-
-static void sunxi_nfc_hw_ecc_update_stats(struct mtd_info *mtd,
-					  unsigned int *max_bitflips, int ret)
-{
-	if (ret < 0) {
-		mtd->ecc_stats.failed++;
-	} else {
-		mtd->ecc_stats.corrected += ret;
-		*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
-	}
-}
-
-static int sunxi_nfc_hw_ecc_correct(struct mtd_info *mtd, u8 *data, u8 *oob,
-				    int step, u32 status, bool *erased)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	u32 tmp;
-
-	*erased = false;
-
-	if (status & NFC_ECC_ERR(step))
-		return -EBADMSG;
-
-	if (status & NFC_ECC_PAT_FOUND(step)) {
-		u8 pattern;
-
-		if (unlikely(!(readl(nfc->regs + NFC_REG_PAT_ID) & 0x1))) {
-			pattern = 0x0;
-		} else {
-			pattern = 0xff;
-			*erased = true;
-		}
-
-		if (data)
-			memset(data, pattern, ecc->size);
-
-		if (oob)
-			memset(oob, pattern, ecc->bytes + 4);
-
-		return 0;
-	}
-
-	tmp = readl(nfc->regs + NFC_REG_ECC_ERR_CNT(step));
-
-	return NFC_ECC_ERR_CNT(step, tmp);
-}
-
-static int sunxi_nfc_hw_ecc_read_chunk(struct mtd_info *mtd,
-				       u8 *data, int data_off,
-				       u8 *oob, int oob_off,
-				       int *cur_off,
-				       unsigned int *max_bitflips,
-				       bool bbm, bool oob_required, int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	int raw_mode = 0;
-	bool erased;
-	int ret;
-
-	if (*cur_off != data_off)
-		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
-
-	sunxi_nfc_randomizer_read_buf(mtd, NULL, ecc->size, false, page);
-
-	if (data_off + ecc->size != oob_off)
-		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-
-	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-	if (ret)
-		return ret;
-
-	sunxi_nfc_randomizer_enable(mtd);
-	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ECC_OP,
-	       nfc->regs + NFC_REG_CMD);
-
-	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-	sunxi_nfc_randomizer_disable(mtd);
-	if (ret)
-		return ret;
-
-	*cur_off = oob_off + ecc->bytes + 4;
-
-	ret = sunxi_nfc_hw_ecc_correct(mtd, data, oob_required ? oob : NULL, 0,
-				       readl(nfc->regs + NFC_REG_ECC_ST),
-				       &erased);
-	if (erased)
-		return 1;
-
-	if (ret < 0) {
-		/*
-		 * Re-read the data with the randomizer disabled to identify
-		 * bitflips in erased pages.
-		 */
-		if (nand->options & NAND_NEED_SCRAMBLING) {
-			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, data_off, -1);
-			nand->read_buf(mtd, data, ecc->size);
-		} else {
-			memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE,
-				      ecc->size);
-		}
-
-		nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-		nand->read_buf(mtd, oob, ecc->bytes + 4);
-
-		ret = nand_check_erased_ecc_chunk(data,	ecc->size,
-						  oob, ecc->bytes + 4,
-						  NULL, 0, ecc->strength);
-		if (ret >= 0)
-			raw_mode = 1;
-	} else {
-		memcpy_fromio(data, nfc->regs + NFC_RAM0_BASE, ecc->size);
-
-		if (oob_required) {
-			nand->cmdfunc(mtd, NAND_CMD_RNDOUT, oob_off, -1);
-			sunxi_nfc_randomizer_read_buf(mtd, oob, ecc->bytes + 4,
-						      true, page);
-
-			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, 0,
-							    bbm, page);
-		}
-	}
-
-	sunxi_nfc_hw_ecc_update_stats(mtd, max_bitflips, ret);
-
-	return raw_mode;
-}
-
-static void sunxi_nfc_hw_ecc_read_extra_oob(struct mtd_info *mtd,
-					    u8 *oob, int *cur_off,
-					    bool randomize, int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	int offset = ((ecc->bytes + 4) * ecc->steps);
-	int len = mtd->oobsize - offset;
-
-	if (len <= 0)
-		return;
-
-	if (!cur_off || *cur_off != offset)
-		nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
-			      offset + mtd->writesize, -1);
-
-	if (!randomize)
-		sunxi_nfc_read_buf(mtd, oob + offset, len);
-	else
-		sunxi_nfc_randomizer_read_buf(mtd, oob + offset, len,
-					      false, page);
-
-	if (cur_off)
-		*cur_off = mtd->oobsize + mtd->writesize;
-}
-
-static int sunxi_nfc_hw_ecc_read_chunks_dma(struct mtd_info *mtd, uint8_t *buf,
-					    int oob_required, int page,
-					    int nchunks)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	bool randomized = nand->options & NAND_NEED_SCRAMBLING;
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	unsigned int max_bitflips = 0;
-	int ret, i, raw_mode = 0;
-	struct scatterlist sg;
-	u32 status;
-
-	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-	if (ret)
-		return ret;
-
-	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, nchunks,
-				       DMA_FROM_DEVICE, &sg);
-	if (ret)
-		return ret;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-	sunxi_nfc_randomizer_config(mtd, page, false);
-	sunxi_nfc_randomizer_enable(mtd);
-
-	writel((NAND_CMD_RNDOUTSTART << 16) | (NAND_CMD_RNDOUT << 8) |
-	       NAND_CMD_READSTART, nfc->regs + NFC_REG_RCMD_SET);
-
-	dma_async_issue_pending(nfc->dmac);
-
-	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD | NFC_DATA_TRANS,
-	       nfc->regs + NFC_REG_CMD);
-
-	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-	if (ret)
-		dmaengine_terminate_all(nfc->dmac);
-
-	sunxi_nfc_randomizer_disable(mtd);
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	sunxi_nfc_dma_op_cleanup(mtd, DMA_FROM_DEVICE, &sg);
-
-	if (ret)
-		return ret;
-
-	status = readl(nfc->regs + NFC_REG_ECC_ST);
-
-	for (i = 0; i < nchunks; i++) {
-		int data_off = i * ecc->size;
-		int oob_off = i * (ecc->bytes + 4);
-		u8 *data = buf + data_off;
-		u8 *oob = nand->oob_poi + oob_off;
-		bool erased;
-
-		ret = sunxi_nfc_hw_ecc_correct(mtd, randomized ? data : NULL,
-					       oob_required ? oob : NULL,
-					       i, status, &erased);
-
-		/* ECC errors are handled in the second loop. */
-		if (ret < 0)
-			continue;
-
-		if (oob_required && !erased) {
-			/* TODO: use DMA to retrieve OOB */
-			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
-				      mtd->writesize + oob_off, -1);
-			nand->read_buf(mtd, oob, ecc->bytes + 4);
-
-			sunxi_nfc_hw_ecc_get_prot_oob_bytes(mtd, oob, i,
-							    !i, page);
-		}
-
-		if (erased)
-			raw_mode = 1;
-
-		sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
-	}
-
-	if (status & NFC_ECC_ERR_MSK) {
-		for (i = 0; i < nchunks; i++) {
-			int data_off = i * ecc->size;
-			int oob_off = i * (ecc->bytes + 4);
-			u8 *data = buf + data_off;
-			u8 *oob = nand->oob_poi + oob_off;
-
-			if (!(status & NFC_ECC_ERR(i)))
-				continue;
-
-			/*
-			 * Re-read the data with the randomizer disabled to
-			 * identify bitflips in erased pages.
-			 */
-			if (randomized) {
-				/* TODO: use DMA to read page in raw mode */
-				nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
-					      data_off, -1);
-				nand->read_buf(mtd, data, ecc->size);
-			}
-
-			/* TODO: use DMA to retrieve OOB */
-			nand->cmdfunc(mtd, NAND_CMD_RNDOUT,
-				      mtd->writesize + oob_off, -1);
-			nand->read_buf(mtd, oob, ecc->bytes + 4);
-
-			ret = nand_check_erased_ecc_chunk(data,	ecc->size,
-							  oob, ecc->bytes + 4,
-							  NULL, 0,
-							  ecc->strength);
-			if (ret >= 0)
-				raw_mode = 1;
-
-			sunxi_nfc_hw_ecc_update_stats(mtd, &max_bitflips, ret);
-		}
-	}
-
-	if (oob_required)
-		sunxi_nfc_hw_ecc_read_extra_oob(mtd, nand->oob_poi,
-						NULL, !raw_mode,
-						page);
-
-	return max_bitflips;
-}
-
-static int sunxi_nfc_hw_ecc_write_chunk(struct mtd_info *mtd,
-					const u8 *data, int data_off,
-					const u8 *oob, int oob_off,
-					int *cur_off, bool bbm,
-					int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	int ret;
-
-	if (data_off != *cur_off)
-		nand->cmdfunc(mtd, NAND_CMD_RNDIN, data_off, -1);
-
-	sunxi_nfc_randomizer_write_buf(mtd, data, ecc->size, false, page);
-
-	if (data_off + ecc->size != oob_off)
-		nand->cmdfunc(mtd, NAND_CMD_RNDIN, oob_off, -1);
-
-	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-	if (ret)
-		return ret;
-
-	sunxi_nfc_randomizer_enable(mtd);
-	sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, 0, bbm, page);
-
-	writel(NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD |
-	       NFC_ACCESS_DIR | NFC_ECC_OP,
-	       nfc->regs + NFC_REG_CMD);
-
-	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-	sunxi_nfc_randomizer_disable(mtd);
-	if (ret)
-		return ret;
-
-	*cur_off = oob_off + ecc->bytes + 4;
-
-	return 0;
-}
-
-static void sunxi_nfc_hw_ecc_write_extra_oob(struct mtd_info *mtd,
-					     u8 *oob, int *cur_off,
-					     int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	int offset = ((ecc->bytes + 4) * ecc->steps);
-	int len = mtd->oobsize - offset;
-
-	if (len <= 0)
-		return;
-
-	if (!cur_off || *cur_off != offset)
-		nand->cmdfunc(mtd, NAND_CMD_RNDIN,
-			      offset + mtd->writesize, -1);
-
-	sunxi_nfc_randomizer_write_buf(mtd, oob + offset, len, false, page);
-
-	if (cur_off)
-		*cur_off = mtd->oobsize + mtd->writesize;
-}
-
-static int sunxi_nfc_hw_ecc_read_page(struct mtd_info *mtd,
-				      struct nand_chip *chip, uint8_t *buf,
-				      int oob_required, int page)
-{
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	unsigned int max_bitflips = 0;
-	int ret, i, cur_off = 0;
-	bool raw_mode = false;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_off = i * ecc->size;
-		int oob_off = i * (ecc->bytes + 4);
-		u8 *data = buf + data_off;
-		u8 *oob = chip->oob_poi + oob_off;
-
-		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
-						  oob_off + mtd->writesize,
-						  &cur_off, &max_bitflips,
-						  !i, oob_required, page);
-		if (ret < 0)
-			return ret;
-		else if (ret)
-			raw_mode = true;
-	}
-
-	if (oob_required)
-		sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
-						!raw_mode, page);
-
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	return max_bitflips;
-}
-
-static int sunxi_nfc_hw_ecc_read_page_dma(struct mtd_info *mtd,
-					  struct nand_chip *chip, u8 *buf,
-					  int oob_required, int page)
-{
-	int ret;
-
-	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, oob_required, page,
-					       chip->ecc.steps);
-	if (ret >= 0)
-		return ret;
-
-	/* Fallback to PIO mode */
-	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
-	return sunxi_nfc_hw_ecc_read_page(mtd, chip, buf, oob_required, page);
-}
-
-static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd,
-					 struct nand_chip *chip,
-					 u32 data_offs, u32 readlen,
-					 u8 *bufpoi, int page)
-{
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int ret, i, cur_off = 0;
-	unsigned int max_bitflips = 0;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-	for (i = data_offs / ecc->size;
-	     i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) {
-		int data_off = i * ecc->size;
-		int oob_off = i * (ecc->bytes + 4);
-		u8 *data = bufpoi + data_off;
-		u8 *oob = chip->oob_poi + oob_off;
-
-		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off,
-						  oob,
-						  oob_off + mtd->writesize,
-						  &cur_off, &max_bitflips, !i,
-						  false, page);
-		if (ret < 0)
-			return ret;
-	}
-
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	return max_bitflips;
-}
-
-static int sunxi_nfc_hw_ecc_read_subpage_dma(struct mtd_info *mtd,
-					     struct nand_chip *chip,
-					     u32 data_offs, u32 readlen,
-					     u8 *buf, int page)
-{
-	int nchunks = DIV_ROUND_UP(data_offs + readlen, chip->ecc.size);
-	int ret;
-
-	ret = sunxi_nfc_hw_ecc_read_chunks_dma(mtd, buf, false, page, nchunks);
-	if (ret >= 0)
-		return ret;
-
-	/* Fallback to PIO mode */
-	chip->cmdfunc(mtd, NAND_CMD_RNDOUT, 0, -1);
-
-	return sunxi_nfc_hw_ecc_read_subpage(mtd, chip, data_offs, readlen,
-					     buf, page);
-}
-
-static int sunxi_nfc_hw_ecc_write_page(struct mtd_info *mtd,
-				       struct nand_chip *chip,
-				       const uint8_t *buf, int oob_required,
-				       int page)
-{
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int ret, i, cur_off = 0;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_off = i * ecc->size;
-		int oob_off = i * (ecc->bytes + 4);
-		const u8 *data = buf + data_off;
-		const u8 *oob = chip->oob_poi + oob_off;
-
-		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
-						   oob_off + mtd->writesize,
-						   &cur_off, !i, page);
-		if (ret)
-			return ret;
-	}
-
-	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
-		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
-						 &cur_off, page);
-
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	return 0;
-}
-
-static int sunxi_nfc_hw_ecc_write_subpage(struct mtd_info *mtd,
-					  struct nand_chip *chip,
-					  u32 data_offs, u32 data_len,
-					  const u8 *buf, int oob_required,
-					  int page)
-{
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int ret, i, cur_off = 0;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-
-	for (i = data_offs / ecc->size;
-	     i < DIV_ROUND_UP(data_offs + data_len, ecc->size); i++) {
-		int data_off = i * ecc->size;
-		int oob_off = i * (ecc->bytes + 4);
-		const u8 *data = buf + data_off;
-		const u8 *oob = chip->oob_poi + oob_off;
-
-		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off, oob,
-						   oob_off + mtd->writesize,
-						   &cur_off, !i, page);
-		if (ret)
-			return ret;
-	}
-
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	return 0;
-}
-
-static int sunxi_nfc_hw_ecc_write_page_dma(struct mtd_info *mtd,
-					   struct nand_chip *chip,
-					   const u8 *buf,
-					   int oob_required,
-					   int page)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(nand->controller);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-	struct scatterlist sg;
-	int ret, i;
-
-	ret = sunxi_nfc_wait_cmd_fifo_empty(nfc);
-	if (ret)
-		return ret;
-
-	ret = sunxi_nfc_dma_op_prepare(mtd, buf, ecc->size, ecc->steps,
-				       DMA_TO_DEVICE, &sg);
-	if (ret)
-		goto pio_fallback;
-
-	for (i = 0; i < ecc->steps; i++) {
-		const u8 *oob = nand->oob_poi + (i * (ecc->bytes + 4));
-
-		sunxi_nfc_hw_ecc_set_prot_oob_bytes(mtd, oob, i, !i, page);
-	}
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-	sunxi_nfc_randomizer_config(mtd, page, false);
-	sunxi_nfc_randomizer_enable(mtd);
-
-	writel((NAND_CMD_RNDIN << 8) | NAND_CMD_PAGEPROG,
-	       nfc->regs + NFC_REG_RCMD_SET);
-
-	dma_async_issue_pending(nfc->dmac);
-
-	writel(NFC_PAGE_OP | NFC_DATA_SWAP_METHOD |
-	       NFC_DATA_TRANS | NFC_ACCESS_DIR,
-	       nfc->regs + NFC_REG_CMD);
-
-	ret = sunxi_nfc_wait_events(nfc, NFC_CMD_INT_FLAG, true, 0);
-	if (ret)
-		dmaengine_terminate_all(nfc->dmac);
-
-	sunxi_nfc_randomizer_disable(mtd);
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	sunxi_nfc_dma_op_cleanup(mtd, DMA_TO_DEVICE, &sg);
-
-	if (ret)
-		return ret;
-
-	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
-		/* TODO: use DMA to transfer extra OOB bytes ? */
-		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
-						 NULL, page);
-
-	return 0;
-
-pio_fallback:
-	return sunxi_nfc_hw_ecc_write_page(mtd, chip, buf, oob_required, page);
-}
-
-static int sunxi_nfc_hw_syndrome_ecc_read_page(struct mtd_info *mtd,
-					       struct nand_chip *chip,
-					       uint8_t *buf, int oob_required,
-					       int page)
-{
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	unsigned int max_bitflips = 0;
-	int ret, i, cur_off = 0;
-	bool raw_mode = false;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_off = i * (ecc->size + ecc->bytes + 4);
-		int oob_off = data_off + ecc->size;
-		u8 *data = buf + (i * ecc->size);
-		u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
-
-		ret = sunxi_nfc_hw_ecc_read_chunk(mtd, data, data_off, oob,
-						  oob_off, &cur_off,
-						  &max_bitflips, !i,
-						  oob_required,
-						  page);
-		if (ret < 0)
-			return ret;
-		else if (ret)
-			raw_mode = true;
-	}
-
-	if (oob_required)
-		sunxi_nfc_hw_ecc_read_extra_oob(mtd, chip->oob_poi, &cur_off,
-						!raw_mode, page);
-
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	return max_bitflips;
-}
-
-static int sunxi_nfc_hw_syndrome_ecc_write_page(struct mtd_info *mtd,
-						struct nand_chip *chip,
-						const uint8_t *buf,
-						int oob_required, int page)
-{
-	struct nand_ecc_ctrl *ecc = &chip->ecc;
-	int ret, i, cur_off = 0;
-
-	sunxi_nfc_hw_ecc_enable(mtd);
-
-	for (i = 0; i < ecc->steps; i++) {
-		int data_off = i * (ecc->size + ecc->bytes + 4);
-		int oob_off = data_off + ecc->size;
-		const u8 *data = buf + (i * ecc->size);
-		const u8 *oob = chip->oob_poi + (i * (ecc->bytes + 4));
-
-		ret = sunxi_nfc_hw_ecc_write_chunk(mtd, data, data_off,
-						   oob, oob_off, &cur_off,
-						   false, page);
-		if (ret)
-			return ret;
-	}
-
-	if (oob_required || (chip->options & NAND_NEED_SCRAMBLING))
-		sunxi_nfc_hw_ecc_write_extra_oob(mtd, chip->oob_poi,
-						 &cur_off, page);
-
-	sunxi_nfc_hw_ecc_disable(mtd);
-
-	return 0;
-}
-
-static int sunxi_nfc_hw_common_ecc_read_oob(struct mtd_info *mtd,
-					    struct nand_chip *chip,
-					    int page)
-{
-	chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page);
-
-	chip->pagebuf = -1;
-
-	return chip->ecc.read_page(mtd, chip, chip->buffers->databuf, 1, page);
-}
-
-static int sunxi_nfc_hw_common_ecc_write_oob(struct mtd_info *mtd,
-					     struct nand_chip *chip,
-					     int page)
-{
-	int ret, status;
-
-	chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page);
-
-	chip->pagebuf = -1;
-
-	memset(chip->buffers->databuf, 0xff, mtd->writesize);
-	ret = chip->ecc.write_page(mtd, chip, chip->buffers->databuf, 1, page);
-	if (ret)
-		return ret;
-
-	/* Send command to program the OOB data */
-	chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
-
-	status = chip->waitfunc(mtd, chip);
-
-	return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
-
-static const s32 tWB_lut[] = {6, 12, 16, 20};
-static const s32 tRHW_lut[] = {4, 8, 12, 20};
-
-static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration,
-		u32 clk_period)
-{
-	u32 clk_cycles = DIV_ROUND_UP(duration, clk_period);
-	int i;
-
-	for (i = 0; i < lut_size; i++) {
-		if (clk_cycles <= lut[i])
-			return i;
-	}
-
-	/* Doesn't fit */
-	return -EINVAL;
-}
-
-#define sunxi_nand_lookup_timing(l, p, c) \
-			_sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c)
-
-static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd,
-					const struct nand_data_interface *conf,
-					bool check_only)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *chip = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(chip->nand.controller);
-	const struct nand_sdr_timings *timings;
-	u32 min_clk_period = 0;
-	s32 tWB, tADL, tWHR, tRHW, tCAD;
-	long real_clk_rate;
-
-	timings = nand_get_sdr_timings(conf);
-	if (IS_ERR(timings))
-		return -ENOTSUPP;
-
-	/* T1 <=> tCLS */
-	if (timings->tCLS_min > min_clk_period)
-		min_clk_period = timings->tCLS_min;
-
-	/* T2 <=> tCLH */
-	if (timings->tCLH_min > min_clk_period)
-		min_clk_period = timings->tCLH_min;
-
-	/* T3 <=> tCS */
-	if (timings->tCS_min > min_clk_period)
-		min_clk_period = timings->tCS_min;
-
-	/* T4 <=> tCH */
-	if (timings->tCH_min > min_clk_period)
-		min_clk_period = timings->tCH_min;
-
-	/* T5 <=> tWP */
-	if (timings->tWP_min > min_clk_period)
-		min_clk_period = timings->tWP_min;
-
-	/* T6 <=> tWH */
-	if (timings->tWH_min > min_clk_period)
-		min_clk_period = timings->tWH_min;
-
-	/* T7 <=> tALS */
-	if (timings->tALS_min > min_clk_period)
-		min_clk_period = timings->tALS_min;
-
-	/* T8 <=> tDS */
-	if (timings->tDS_min > min_clk_period)
-		min_clk_period = timings->tDS_min;
-
-	/* T9 <=> tDH */
-	if (timings->tDH_min > min_clk_period)
-		min_clk_period = timings->tDH_min;
-
-	/* T10 <=> tRR */
-	if (timings->tRR_min > (min_clk_period * 3))
-		min_clk_period = DIV_ROUND_UP(timings->tRR_min, 3);
-
-	/* T11 <=> tALH */
-	if (timings->tALH_min > min_clk_period)
-		min_clk_period = timings->tALH_min;
-
-	/* T12 <=> tRP */
-	if (timings->tRP_min > min_clk_period)
-		min_clk_period = timings->tRP_min;
-
-	/* T13 <=> tREH */
-	if (timings->tREH_min > min_clk_period)
-		min_clk_period = timings->tREH_min;
-
-	/* T14 <=> tRC */
-	if (timings->tRC_min > (min_clk_period * 2))
-		min_clk_period = DIV_ROUND_UP(timings->tRC_min, 2);
-
-	/* T15 <=> tWC */
-	if (timings->tWC_min > (min_clk_period * 2))
-		min_clk_period = DIV_ROUND_UP(timings->tWC_min, 2);
-
-	/* T16 - T19 + tCAD */
-	if (timings->tWB_max > (min_clk_period * 20))
-		min_clk_period = DIV_ROUND_UP(timings->tWB_max, 20);
-
-	if (timings->tADL_min > (min_clk_period * 32))
-		min_clk_period = DIV_ROUND_UP(timings->tADL_min, 32);
-
-	if (timings->tWHR_min > (min_clk_period * 32))
-		min_clk_period = DIV_ROUND_UP(timings->tWHR_min, 32);
-
-	if (timings->tRHW_min > (min_clk_period * 20))
-		min_clk_period = DIV_ROUND_UP(timings->tRHW_min, 20);
-
-	tWB  = sunxi_nand_lookup_timing(tWB_lut, timings->tWB_max,
-					min_clk_period);
-	if (tWB < 0) {
-		dev_err(nfc->dev, "unsupported tWB\n");
-		return tWB;
-	}
-
-	tADL = DIV_ROUND_UP(timings->tADL_min, min_clk_period) >> 3;
-	if (tADL > 3) {
-		dev_err(nfc->dev, "unsupported tADL\n");
-		return -EINVAL;
-	}
-
-	tWHR = DIV_ROUND_UP(timings->tWHR_min, min_clk_period) >> 3;
-	if (tWHR > 3) {
-		dev_err(nfc->dev, "unsupported tWHR\n");
-		return -EINVAL;
-	}
-
-	tRHW = sunxi_nand_lookup_timing(tRHW_lut, timings->tRHW_min,
-					min_clk_period);
-	if (tRHW < 0) {
-		dev_err(nfc->dev, "unsupported tRHW\n");
-		return tRHW;
-	}
-
-	if (check_only)
-		return 0;
-
-	/*
-	 * TODO: according to ONFI specs this value only applies for DDR NAND,
-	 * but Allwinner seems to set this to 0x7. Mimic them for now.
-	 */
-	tCAD = 0x7;
-
-	/* TODO: A83 has some more bits for CDQSS, CS, CLHZ, CCS, WC */
-	chip->timing_cfg = NFC_TIMING_CFG(tWB, tADL, tWHR, tRHW, tCAD);
-
-	/* Convert min_clk_period from picoseconds to nanoseconds */
-	min_clk_period = DIV_ROUND_UP(min_clk_period, 1000);
-
-	/*
-	 * Unlike what is stated in Allwinner datasheet, the clk_rate should
-	 * be set to (1 / min_clk_period), and not (2 / min_clk_period).
-	 * This new formula was verified with a scope and validated by
-	 * Allwinner engineers.
-	 */
-	chip->clk_rate = NSEC_PER_SEC / min_clk_period;
-	real_clk_rate = clk_round_rate(nfc->mod_clk, chip->clk_rate);
-
-	/*
-	 * ONFI specification 3.1, paragraph 4.15.2 dictates that EDO data
-	 * output cycle timings shall be used if the host drives tRC less than
-	 * 30 ns.
-	 */
-	min_clk_period = NSEC_PER_SEC / real_clk_rate;
-	chip->timing_ctl = ((min_clk_period * 2) < 30) ?
-			   NFC_TIMING_CTL_EDO : 0;
-
-	return 0;
-}
-
-static int sunxi_nand_ooblayout_ecc(struct mtd_info *mtd, int section,
-				    struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-
-	if (section >= ecc->steps)
-		return -ERANGE;
-
-	oobregion->offset = section * (ecc->bytes + 4) + 4;
-	oobregion->length = ecc->bytes;
-
-	return 0;
-}
-
-static int sunxi_nand_ooblayout_free(struct mtd_info *mtd, int section,
-				     struct mtd_oob_region *oobregion)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct nand_ecc_ctrl *ecc = &nand->ecc;
-
-	if (section > ecc->steps)
-		return -ERANGE;
-
-	/*
-	 * The first 2 bytes are used for BB markers, hence we
-	 * only have 2 bytes available in the first user data
-	 * section.
-	 */
-	if (!section && ecc->mode == NAND_ECC_HW) {
-		oobregion->offset = 2;
-		oobregion->length = 2;
-
-		return 0;
-	}
-
-	oobregion->offset = section * (ecc->bytes + 4);
-
-	if (section < ecc->steps)
-		oobregion->length = 4;
-	else
-		oobregion->offset = mtd->oobsize - oobregion->offset;
-
-	return 0;
-}
-
-static const struct mtd_ooblayout_ops sunxi_nand_ooblayout_ops = {
-	.ecc = sunxi_nand_ooblayout_ecc,
-	.free = sunxi_nand_ooblayout_free,
-};
-
-static int sunxi_nand_hw_common_ecc_ctrl_init(struct mtd_info *mtd,
-					      struct nand_ecc_ctrl *ecc,
-					      struct device_node *np)
-{
-	static const u8 strengths[] = { 16, 24, 28, 32, 40, 48, 56, 60, 64 };
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	struct sunxi_nand_hw_ecc *data;
-	int nsectors;
-	int ret;
-	int i;
-
-	if (ecc->options & NAND_ECC_MAXIMIZE) {
-		int bytes;
-
-		ecc->size = 1024;
-		nsectors = mtd->writesize / ecc->size;
-
-		/* Reserve 2 bytes for the BBM */
-		bytes = (mtd->oobsize - 2) / nsectors;
-
-		/* 4 non-ECC bytes are added before each ECC bytes section */
-		bytes -= 4;
-
-		/* and bytes has to be even. */
-		if (bytes % 2)
-			bytes--;
-
-		ecc->strength = bytes * 8 / fls(8 * ecc->size);
-
-		for (i = 0; i < ARRAY_SIZE(strengths); i++) {
-			if (strengths[i] > ecc->strength)
-				break;
-		}
-
-		if (!i)
-			ecc->strength = 0;
-		else
-			ecc->strength = strengths[i - 1];
-	}
-
-	if (ecc->size != 512 && ecc->size != 1024)
-		return -EINVAL;
-
-	data = kzalloc(sizeof(*data), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	/* Prefer 1k ECC chunk over 512 ones */
-	if (ecc->size == 512 && mtd->writesize > 512) {
-		ecc->size = 1024;
-		ecc->strength *= 2;
-	}
-
-	/* Add ECC info retrieval from DT */
-	for (i = 0; i < ARRAY_SIZE(strengths); i++) {
-		if (ecc->strength <= strengths[i])
-			break;
-	}
-
-	if (i >= ARRAY_SIZE(strengths)) {
-		dev_err(nfc->dev, "unsupported strength\n");
-		ret = -ENOTSUPP;
-		goto err;
-	}
-
-	data->mode = i;
-
-	/* HW ECC always request ECC bytes for 1024 bytes blocks */
-	ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * 1024), 8);
-
-	/* HW ECC always work with even numbers of ECC bytes */
-	ecc->bytes = ALIGN(ecc->bytes, 2);
-
-	nsectors = mtd->writesize / ecc->size;
-
-	if (mtd->oobsize < ((ecc->bytes + 4) * nsectors)) {
-		ret = -EINVAL;
-		goto err;
-	}
-
-	ecc->read_oob = sunxi_nfc_hw_common_ecc_read_oob;
-	ecc->write_oob = sunxi_nfc_hw_common_ecc_write_oob;
-	mtd_set_ooblayout(mtd, &sunxi_nand_ooblayout_ops);
-	ecc->priv = data;
-
-	return 0;
-
-err:
-	kfree(data);
-
-	return ret;
-}
-
-static void sunxi_nand_hw_common_ecc_ctrl_cleanup(struct nand_ecc_ctrl *ecc)
-{
-	kfree(ecc->priv);
-}
-
-static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd,
-				       struct nand_ecc_ctrl *ecc,
-				       struct device_node *np)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand);
-	struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller);
-	int ret;
-
-	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
-	if (ret)
-		return ret;
-
-	if (nfc->dmac) {
-		ecc->read_page = sunxi_nfc_hw_ecc_read_page_dma;
-		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage_dma;
-		ecc->write_page = sunxi_nfc_hw_ecc_write_page_dma;
-		nand->options |= NAND_USE_BOUNCE_BUFFER;
-	} else {
-		ecc->read_page = sunxi_nfc_hw_ecc_read_page;
-		ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
-		ecc->write_page = sunxi_nfc_hw_ecc_write_page;
-	}
-
-	/* TODO: support DMA for raw accesses and subpage write */
-	ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage;
-	ecc->read_oob_raw = nand_read_oob_std;
-	ecc->write_oob_raw = nand_write_oob_std;
-	ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage;
-
-	return 0;
-}
-
-static int sunxi_nand_hw_syndrome_ecc_ctrl_init(struct mtd_info *mtd,
-						struct nand_ecc_ctrl *ecc,
-						struct device_node *np)
-{
-	int ret;
-
-	ret = sunxi_nand_hw_common_ecc_ctrl_init(mtd, ecc, np);
-	if (ret)
-		return ret;
-
-	ecc->prepad = 4;
-	ecc->read_page = sunxi_nfc_hw_syndrome_ecc_read_page;
-	ecc->write_page = sunxi_nfc_hw_syndrome_ecc_write_page;
-	ecc->read_oob_raw = nand_read_oob_syndrome;
-	ecc->write_oob_raw = nand_write_oob_syndrome;
-
-	return 0;
-}
-
-static void sunxi_nand_ecc_cleanup(struct nand_ecc_ctrl *ecc)
-{
-	switch (ecc->mode) {
-	case NAND_ECC_HW:
-	case NAND_ECC_HW_SYNDROME:
-		sunxi_nand_hw_common_ecc_ctrl_cleanup(ecc);
-		break;
-	case NAND_ECC_NONE:
-	default:
-		break;
-	}
-}
-
-static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc,
-			       struct device_node *np)
-{
-	struct nand_chip *nand = mtd_to_nand(mtd);
-	int ret;
-
-	if (!ecc->size) {
-		ecc->size = nand->ecc_step_ds;
-		ecc->strength = nand->ecc_strength_ds;
-	}
-
-	if (!ecc->size || !ecc->strength)
-		return -EINVAL;
-
-	switch (ecc->mode) {
-	case NAND_ECC_HW:
-		ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np);
-		if (ret)
-			return ret;
-		break;
-	case NAND_ECC_HW_SYNDROME:
-		ret = sunxi_nand_hw_syndrome_ecc_ctrl_init(mtd, ecc, np);
-		if (ret)
-			return ret;
-		break;
-	case NAND_ECC_NONE:
-	case NAND_ECC_SOFT:
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int sunxi_nand_chip_init(struct device *dev, struct sunxi_nfc *nfc,
-				struct device_node *np)
-{
-	struct sunxi_nand_chip *chip;
-	struct mtd_info *mtd;
-	struct nand_chip *nand;
-	int nsels;
-	int ret;
-	int i;
-	u32 tmp;
-
-	if (!of_get_property(np, "reg", &nsels))
-		return -EINVAL;
-
-	nsels /= sizeof(u32);
-	if (!nsels) {
-		dev_err(dev, "invalid reg property size\n");
-		return -EINVAL;
-	}
-
-	chip = devm_kzalloc(dev,
-			    sizeof(*chip) +
-			    (nsels * sizeof(struct sunxi_nand_chip_sel)),
-			    GFP_KERNEL);
-	if (!chip) {
-		dev_err(dev, "could not allocate chip\n");
-		return -ENOMEM;
-	}
-
-	chip->nsels = nsels;
-	chip->selected = -1;
-
-	for (i = 0; i < nsels; i++) {
-		ret = of_property_read_u32_index(np, "reg", i, &tmp);
-		if (ret) {
-			dev_err(dev, "could not retrieve reg property: %d\n",
-				ret);
-			return ret;
-		}
-
-		if (tmp > NFC_MAX_CS) {
-			dev_err(dev,
-				"invalid reg value: %u (max CS = 7)\n",
-				tmp);
-			return -EINVAL;
-		}
-
-		if (test_and_set_bit(tmp, &nfc->assigned_cs)) {
-			dev_err(dev, "CS %d already assigned\n", tmp);
-			return -EINVAL;
-		}
-
-		chip->sels[i].cs = tmp;
-
-		if (!of_property_read_u32_index(np, "allwinner,rb", i, &tmp) &&
-		    tmp < 2) {
-			chip->sels[i].rb.type = RB_NATIVE;
-			chip->sels[i].rb.info.nativeid = tmp;
-		} else {
-			ret = of_get_named_gpio(np, "rb-gpios", i);
-			if (ret >= 0) {
-				tmp = ret;
-				chip->sels[i].rb.type = RB_GPIO;
-				chip->sels[i].rb.info.gpio = tmp;
-				ret = devm_gpio_request(dev, tmp, "nand-rb");
-				if (ret)
-					return ret;
-
-				ret = gpio_direction_input(tmp);
-				if (ret)
-					return ret;
-			} else {
-				chip->sels[i].rb.type = RB_NONE;
-			}
-		}
-	}
-
-	nand = &chip->nand;
-	/* Default tR value specified in the ONFI spec (chapter 4.15.1) */
-	nand->chip_delay = 200;
-	nand->controller = &nfc->controller;
-	/*
-	 * Set the ECC mode to the default value in case nothing is specified
-	 * in the DT.
-	 */
-	nand->ecc.mode = NAND_ECC_HW;
-	nand_set_flash_node(nand, np);
-	nand->select_chip = sunxi_nfc_select_chip;
-	nand->cmd_ctrl = sunxi_nfc_cmd_ctrl;
-	nand->read_buf = sunxi_nfc_read_buf;
-	nand->write_buf = sunxi_nfc_write_buf;
-	nand->read_byte = sunxi_nfc_read_byte;
-	nand->setup_data_interface = sunxi_nfc_setup_data_interface;
-
-	mtd = nand_to_mtd(nand);
-	mtd->dev.parent = dev;
-
-	ret = nand_scan_ident(mtd, nsels, NULL);
-	if (ret)
-		return ret;
-
-	if (nand->bbt_options & NAND_BBT_USE_FLASH)
-		nand->bbt_options |= NAND_BBT_NO_OOB;
-
-	if (nand->options & NAND_NEED_SCRAMBLING)
-		nand->options |= NAND_NO_SUBPAGE_WRITE;
-
-	nand->options |= NAND_SUBPAGE_READ;
-
-	ret = sunxi_nand_ecc_init(mtd, &nand->ecc, np);
-	if (ret) {
-		dev_err(dev, "ECC init failed: %d\n", ret);
-		return ret;
-	}
-
-	ret = nand_scan_tail(mtd);
-	if (ret) {
-		dev_err(dev, "nand_scan_tail failed: %d\n", ret);
-		return ret;
-	}
-
-	ret = mtd_device_register(mtd, NULL, 0);
-	if (ret) {
-		dev_err(dev, "failed to register mtd device: %d\n", ret);
-		nand_release(mtd);
-		return ret;
-	}
-
-	list_add_tail(&chip->node, &nfc->chips);
-
-	return 0;
-}
-
-static int sunxi_nand_chips_init(struct device *dev, struct sunxi_nfc *nfc)
-{
-	struct device_node *np = dev->of_node;
-	struct device_node *nand_np;
-	int nchips = of_get_child_count(np);
-	int ret;
-
-	if (nchips > 8) {
-		dev_err(dev, "too many NAND chips: %d (max = 8)\n", nchips);
-		return -EINVAL;
-	}
-
-	for_each_child_of_node(np, nand_np) {
-		ret = sunxi_nand_chip_init(dev, nfc, nand_np);
-		if (ret) {
-			of_node_put(nand_np);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
-static void sunxi_nand_chips_cleanup(struct sunxi_nfc *nfc)
-{
-	struct sunxi_nand_chip *chip;
-
-	while (!list_empty(&nfc->chips)) {
-		chip = list_first_entry(&nfc->chips, struct sunxi_nand_chip,
-					node);
-		nand_release(nand_to_mtd(&chip->nand));
-		sunxi_nand_ecc_cleanup(&chip->nand.ecc);
-		list_del(&chip->node);
-	}
-}
-
-static int sunxi_nfc_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct resource *r;
-	struct sunxi_nfc *nfc;
-	int irq;
-	int ret;
-
-	nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL);
-	if (!nfc)
-		return -ENOMEM;
-
-	nfc->dev = dev;
-	nand_hw_control_init(&nfc->controller);
-	INIT_LIST_HEAD(&nfc->chips);
-
-	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nfc->regs = devm_ioremap_resource(dev, r);
-	if (IS_ERR(nfc->regs))
-		return PTR_ERR(nfc->regs);
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq < 0) {
-		dev_err(dev, "failed to retrieve irq\n");
-		return irq;
-	}
-
-	nfc->ahb_clk = devm_clk_get(dev, "ahb");
-	if (IS_ERR(nfc->ahb_clk)) {
-		dev_err(dev, "failed to retrieve ahb clk\n");
-		return PTR_ERR(nfc->ahb_clk);
-	}
-
-	ret = clk_prepare_enable(nfc->ahb_clk);
-	if (ret)
-		return ret;
-
-	nfc->mod_clk = devm_clk_get(dev, "mod");
-	if (IS_ERR(nfc->mod_clk)) {
-		dev_err(dev, "failed to retrieve mod clk\n");
-		ret = PTR_ERR(nfc->mod_clk);
-		goto out_ahb_clk_unprepare;
-	}
-
-	ret = clk_prepare_enable(nfc->mod_clk);
-	if (ret)
-		goto out_ahb_clk_unprepare;
-
-	nfc->reset = devm_reset_control_get_optional(dev, "ahb");
-	if (!IS_ERR(nfc->reset)) {
-		ret = reset_control_deassert(nfc->reset);
-		if (ret) {
-			dev_err(dev, "reset err %d\n", ret);
-			goto out_mod_clk_unprepare;
-		}
-	} else if (PTR_ERR(nfc->reset) != -ENOENT) {
-		ret = PTR_ERR(nfc->reset);
-		goto out_mod_clk_unprepare;
-	}
-
-	ret = sunxi_nfc_rst(nfc);
-	if (ret)
-		goto out_ahb_reset_reassert;
-
-	writel(0, nfc->regs + NFC_REG_INT);
-	ret = devm_request_irq(dev, irq, sunxi_nfc_interrupt,
-			       0, "sunxi-nand", nfc);
-	if (ret)
-		goto out_ahb_reset_reassert;
-
-	nfc->dmac = dma_request_slave_channel(dev, "rxtx");
-	if (nfc->dmac) {
-		struct dma_slave_config dmac_cfg = { };
-
-		dmac_cfg.src_addr = r->start + NFC_REG_IO_DATA;
-		dmac_cfg.dst_addr = dmac_cfg.src_addr;
-		dmac_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
-		dmac_cfg.dst_addr_width = dmac_cfg.src_addr_width;
-		dmac_cfg.src_maxburst = 4;
-		dmac_cfg.dst_maxburst = 4;
-		dmaengine_slave_config(nfc->dmac, &dmac_cfg);
-	} else {
-		dev_warn(dev, "failed to request rxtx DMA channel\n");
-	}
-
-	platform_set_drvdata(pdev, nfc);
-
-	ret = sunxi_nand_chips_init(dev, nfc);
-	if (ret) {
-		dev_err(dev, "failed to init nand chips\n");
-		goto out_release_dmac;
-	}
-
-	return 0;
-
-out_release_dmac:
-	if (nfc->dmac)
-		dma_release_channel(nfc->dmac);
-out_ahb_reset_reassert:
-	if (!IS_ERR(nfc->reset))
-		reset_control_assert(nfc->reset);
-out_mod_clk_unprepare:
-	clk_disable_unprepare(nfc->mod_clk);
-out_ahb_clk_unprepare:
-	clk_disable_unprepare(nfc->ahb_clk);
-
-	return ret;
-}
-
-static int sunxi_nfc_remove(struct platform_device *pdev)
-{
-	struct sunxi_nfc *nfc = platform_get_drvdata(pdev);
-
-	sunxi_nand_chips_cleanup(nfc);
-
-	if (!IS_ERR(nfc->reset))
-		reset_control_assert(nfc->reset);
-
-	if (nfc->dmac)
-		dma_release_channel(nfc->dmac);
-	clk_disable_unprepare(nfc->mod_clk);
-	clk_disable_unprepare(nfc->ahb_clk);
-
-	return 0;
-}
-
-static const struct of_device_id sunxi_nfc_ids[] = {
-	{ .compatible = "allwinner,sun4i-a10-nand" },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, sunxi_nfc_ids);
-
-static struct platform_driver sunxi_nfc_driver = {
-	.driver = {
-		.name = "sunxi_nand",
-		.of_match_table = sunxi_nfc_ids,
-	},
-	.probe = sunxi_nfc_probe,
-	.remove = sunxi_nfc_remove,
-};
-module_platform_driver(sunxi_nfc_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Boris BREZILLON");
-MODULE_DESCRIPTION("Allwinner NAND Flash Controller driver");
-MODULE_ALIAS("platform:sunxi_nand");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
deleted file mode 100644
index e599ada12cd0..000000000000
--- a/drivers/mtd/nand/tmio_nand.c
+++ /dev/null
@@ -1,510 +0,0 @@ 
-/*
- * Toshiba TMIO NAND flash controller driver
- *
- * Slightly murky pre-git history of the driver:
- *
- * Copyright (c) Ian Molton 2004, 2005, 2008
- *    Original work, independent of sharps code. Included hardware ECC support.
- *    Hard ECC did not work for writes in the early revisions.
- * Copyright (c) Dirk Opfer 2005.
- *    Modifications developed from sharps code but
- *    NOT containing any, ported onto Ians base.
- * Copyright (c) Chris Humbert 2005
- * Copyright (c) Dmitry Baryshkov 2008
- *    Minor fixes
- *
- * Parts copyright Sebastian Carlier
- *
- * This file is licensed under
- * the terms of the GNU General Public License version 2. This program
- * is licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- */
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/mfd/core.h>
-#include <linux/mfd/tmio.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-#include <linux/slab.h>
-
-/*--------------------------------------------------------------------------*/
-
-/*
- * NAND Flash Host Controller Configuration Register
- */
-#define CCR_COMMAND	0x04	/* w Command				*/
-#define CCR_BASE	0x10	/* l NAND Flash Control Reg Base Addr	*/
-#define CCR_INTP	0x3d	/* b Interrupt Pin			*/
-#define CCR_INTE	0x48	/* b Interrupt Enable			*/
-#define CCR_EC		0x4a	/* b Event Control			*/
-#define CCR_ICC		0x4c	/* b Internal Clock Control		*/
-#define CCR_ECCC	0x5b	/* b ECC Control			*/
-#define CCR_NFTC	0x60	/* b NAND Flash Transaction Control	*/
-#define CCR_NFM		0x61	/* b NAND Flash Monitor			*/
-#define CCR_NFPSC	0x62	/* b NAND Flash Power Supply Control	*/
-#define CCR_NFDC	0x63	/* b NAND Flash Detect Control		*/
-
-/*
- * NAND Flash Control Register
- */
-#define FCR_DATA	0x00	/* bwl Data Register			*/
-#define FCR_MODE	0x04	/* b Mode Register			*/
-#define FCR_STATUS	0x05	/* b Status Register			*/
-#define FCR_ISR		0x06	/* b Interrupt Status Register		*/
-#define FCR_IMR		0x07	/* b Interrupt Mask Register		*/
-
-/* FCR_MODE Register Command List */
-#define FCR_MODE_DATA	0x94	/* Data Data_Mode */
-#define FCR_MODE_COMMAND 0x95	/* Data Command_Mode */
-#define FCR_MODE_ADDRESS 0x96	/* Data Address_Mode */
-
-#define FCR_MODE_HWECC_CALC	0xB4	/* HW-ECC Data */
-#define FCR_MODE_HWECC_RESULT	0xD4	/* HW-ECC Calc result Read_Mode */
-#define FCR_MODE_HWECC_RESET	0xF4	/* HW-ECC Reset */
-
-#define FCR_MODE_POWER_ON	0x0C	/* Power Supply ON  to SSFDC card */
-#define FCR_MODE_POWER_OFF	0x08	/* Power Supply OFF to SSFDC card */
-
-#define FCR_MODE_LED_OFF	0x00	/* LED OFF */
-#define FCR_MODE_LED_ON		0x04	/* LED ON */
-
-#define FCR_MODE_EJECT_ON	0x68	/* Ejection events active  */
-#define FCR_MODE_EJECT_OFF	0x08	/* Ejection events ignored */
-
-#define FCR_MODE_LOCK		0x6C	/* Lock_Mode. Eject Switch Invalid */
-#define FCR_MODE_UNLOCK		0x0C	/* UnLock_Mode. Eject Switch is valid */
-
-#define FCR_MODE_CONTROLLER_ID	0x40	/* Controller ID Read */
-#define FCR_MODE_STANDBY	0x00	/* SSFDC card Changes Standby State */
-
-#define FCR_MODE_WE		0x80
-#define FCR_MODE_ECC1		0x40
-#define FCR_MODE_ECC0		0x20
-#define FCR_MODE_CE		0x10
-#define FCR_MODE_PCNT1		0x08
-#define FCR_MODE_PCNT0		0x04
-#define FCR_MODE_ALE		0x02
-#define FCR_MODE_CLE		0x01
-
-#define FCR_STATUS_BUSY		0x80
-
-/*--------------------------------------------------------------------------*/
-
-struct tmio_nand {
-	struct nand_chip chip;
-
-	struct platform_device *dev;
-
-	void __iomem *ccr;
-	void __iomem *fcr;
-	unsigned long fcr_base;
-
-	unsigned int irq;
-
-	/* for tmio_nand_read_byte */
-	u8			read;
-	unsigned read_good:1;
-};
-
-static inline struct tmio_nand *mtd_to_tmio(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct tmio_nand, chip);
-}
-
-
-/*--------------------------------------------------------------------------*/
-
-static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
-				   unsigned int ctrl)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		u8 mode;
-
-		if (ctrl & NAND_NCE) {
-			mode = FCR_MODE_DATA;
-
-			if (ctrl & NAND_CLE)
-				mode |=  FCR_MODE_CLE;
-			else
-				mode &= ~FCR_MODE_CLE;
-
-			if (ctrl & NAND_ALE)
-				mode |=  FCR_MODE_ALE;
-			else
-				mode &= ~FCR_MODE_ALE;
-		} else {
-			mode = FCR_MODE_STANDBY;
-		}
-
-		tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
-		tmio->read_good = 0;
-	}
-
-	if (cmd != NAND_CMD_NONE)
-		tmio_iowrite8(cmd, chip->IO_ADDR_W);
-}
-
-static int tmio_nand_dev_ready(struct mtd_info *mtd)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-
-	return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
-}
-
-static irqreturn_t tmio_irq(int irq, void *__tmio)
-{
-	struct tmio_nand *tmio = __tmio;
-	struct nand_chip *nand_chip = &tmio->chip;
-
-	/* disable RDYREQ interrupt */
-	tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
-
-	if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
-		dev_warn(&tmio->dev->dev, "spurious interrupt\n");
-
-	wake_up(&nand_chip->controller->wq);
-	return IRQ_HANDLED;
-}
-
-/*
-  *The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
-  *This interrupt is normally disabled, but for long operations like
-  *erase and write, we enable it to wake us up.  The irq handler
-  *disables the interrupt.
- */
-static int
-tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-	long timeout;
-
-	/* enable RDYREQ interrupt */
-	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
-	tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);
-
-	timeout = wait_event_timeout(nand_chip->controller->wq,
-		tmio_nand_dev_ready(mtd),
-		msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));
-
-	if (unlikely(!tmio_nand_dev_ready(mtd))) {
-		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
-		dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
-			nand_chip->state == FL_ERASING ? "erase" : "program",
-			nand_chip->state == FL_ERASING ? 400 : 20);
-
-	} else if (unlikely(!timeout)) {
-		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
-		dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
-	}
-
-	nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
-	return nand_chip->read_byte(mtd);
-}
-
-/*
-  *The TMIO controller combines two 8-bit data bytes into one 16-bit
-  *word. This function separates them so nand_base.c works as expected,
-  *especially its NAND_CMD_READID routines.
- *
-  *To prevent stale data from being read, tmio_nand_hwcontrol() clears
-  *tmio->read_good.
- */
-static u_char tmio_nand_read_byte(struct mtd_info *mtd)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-	unsigned int data;
-
-	if (tmio->read_good--)
-		return tmio->read;
-
-	data = tmio_ioread16(tmio->fcr + FCR_DATA);
-	tmio->read = data >> 8;
-	return data;
-}
-
-/*
-  *The TMIO controller converts an 8-bit NAND interface to a 16-bit
-  *bus interface, so all data reads and writes must be 16-bit wide.
-  *Thus, we implement 16-bit versions of the read, write, and verify
-  *buffer functions.
- */
-static void
-tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-
-	tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
-}
-
-static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-
-	tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
-}
-
-static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-
-	tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
-	tmio_ioread8(tmio->fcr + FCR_DATA);	/* dummy read */
-	tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
-}
-
-static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
-							u_char *ecc_code)
-{
-	struct tmio_nand *tmio = mtd_to_tmio(mtd);
-	unsigned int ecc;
-
-	tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);
-
-	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
-	ecc_code[1] = ecc;	/* 000-255 LP7-0 */
-	ecc_code[0] = ecc >> 8;	/* 000-255 LP15-8 */
-	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
-	ecc_code[2] = ecc;	/* 000-255 CP5-0,11b */
-	ecc_code[4] = ecc >> 8;	/* 256-511 LP7-0 */
-	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
-	ecc_code[3] = ecc;	/* 256-511 LP15-8 */
-	ecc_code[5] = ecc >> 8;	/* 256-511 CP5-0,11b */
-
-	tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
-	return 0;
-}
-
-static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
-		unsigned char *read_ecc, unsigned char *calc_ecc)
-{
-	int r0, r1;
-
-	/* assume ecc.size = 512 and ecc.bytes = 6 */
-	r0 = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
-	if (r0 < 0)
-		return r0;
-	r1 = __nand_correct_data(buf + 256, read_ecc + 3, calc_ecc + 3, 256);
-	if (r1 < 0)
-		return r1;
-	return r0 + r1;
-}
-
-static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
-{
-	const struct mfd_cell *cell = mfd_get_cell(dev);
-	int ret;
-
-	if (cell->enable) {
-		ret = cell->enable(dev);
-		if (ret)
-			return ret;
-	}
-
-	/* (4Ch) CLKRUN Enable    1st spcrunc */
-	tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);
-
-	/* (10h)BaseAddress    0x1000 spba.spba2 */
-	tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
-	tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);
-
-	/* (04h)Command Register I/O spcmd */
-	tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);
-
-	/* (62h) Power Supply Control ssmpwc */
-	/* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
-	tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);
-
-	/* (63h) Detect Control ssmdtc */
-	tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);
-
-	/* Interrupt status register clear sintst */
-	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
-
-	/* After power supply, Media are reset smode */
-	tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
-	tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
-	tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);
-
-	/* Standby Mode smode */
-	tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);
-
-	mdelay(5);
-
-	return 0;
-}
-
-static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
-{
-	const struct mfd_cell *cell = mfd_get_cell(dev);
-
-	tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
-	if (cell->disable)
-		cell->disable(dev);
-}
-
-static int tmio_probe(struct platform_device *dev)
-{
-	struct tmio_nand_data *data = dev_get_platdata(&dev->dev);
-	struct resource *fcr = platform_get_resource(dev,
-			IORESOURCE_MEM, 0);
-	struct resource *ccr = platform_get_resource(dev,
-			IORESOURCE_MEM, 1);
-	int irq = platform_get_irq(dev, 0);
-	struct tmio_nand *tmio;
-	struct mtd_info *mtd;
-	struct nand_chip *nand_chip;
-	int retval;
-
-	if (data == NULL)
-		dev_warn(&dev->dev, "NULL platform data!\n");
-
-	tmio = devm_kzalloc(&dev->dev, sizeof(*tmio), GFP_KERNEL);
-	if (!tmio)
-		return -ENOMEM;
-
-	tmio->dev = dev;
-
-	platform_set_drvdata(dev, tmio);
-	nand_chip = &tmio->chip;
-	mtd = nand_to_mtd(nand_chip);
-	mtd->name = "tmio-nand";
-	mtd->dev.parent = &dev->dev;
-
-	tmio->ccr = devm_ioremap(&dev->dev, ccr->start, resource_size(ccr));
-	if (!tmio->ccr)
-		return -EIO;
-
-	tmio->fcr_base = fcr->start & 0xfffff;
-	tmio->fcr = devm_ioremap(&dev->dev, fcr->start, resource_size(fcr));
-	if (!tmio->fcr)
-		return -EIO;
-
-	retval = tmio_hw_init(dev, tmio);
-	if (retval)
-		return retval;
-
-	/* Set address of NAND IO lines */
-	nand_chip->IO_ADDR_R = tmio->fcr;
-	nand_chip->IO_ADDR_W = tmio->fcr;
-
-	/* Set address of hardware control function */
-	nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
-	nand_chip->dev_ready = tmio_nand_dev_ready;
-	nand_chip->read_byte = tmio_nand_read_byte;
-	nand_chip->write_buf = tmio_nand_write_buf;
-	nand_chip->read_buf = tmio_nand_read_buf;
-
-	/* set eccmode using hardware ECC */
-	nand_chip->ecc.mode = NAND_ECC_HW;
-	nand_chip->ecc.size = 512;
-	nand_chip->ecc.bytes = 6;
-	nand_chip->ecc.strength = 2;
-	nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
-	nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
-	nand_chip->ecc.correct = tmio_nand_correct_data;
-
-	if (data)
-		nand_chip->badblock_pattern = data->badblock_pattern;
-
-	/* 15 us command delay time */
-	nand_chip->chip_delay = 15;
-
-	retval = devm_request_irq(&dev->dev, irq, &tmio_irq, 0,
-				  dev_name(&dev->dev), tmio);
-	if (retval) {
-		dev_err(&dev->dev, "request_irq error %d\n", retval);
-		goto err_irq;
-	}
-
-	tmio->irq = irq;
-	nand_chip->waitfunc = tmio_nand_wait;
-
-	/* Scan to find existence of the device */
-	if (nand_scan(mtd, 1)) {
-		retval = -ENODEV;
-		goto err_irq;
-	}
-	/* Register the partitions */
-	retval = mtd_device_parse_register(mtd, NULL, NULL,
-					   data ? data->partition : NULL,
-					   data ? data->num_partitions : 0);
-	if (!retval)
-		return retval;
-
-	nand_release(mtd);
-
-err_irq:
-	tmio_hw_stop(dev, tmio);
-	return retval;
-}
-
-static int tmio_remove(struct platform_device *dev)
-{
-	struct tmio_nand *tmio = platform_get_drvdata(dev);
-
-	nand_release(nand_to_mtd(&tmio->chip));
-	tmio_hw_stop(dev, tmio);
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int tmio_suspend(struct platform_device *dev, pm_message_t state)
-{
-	const struct mfd_cell *cell = mfd_get_cell(dev);
-
-	if (cell->suspend)
-		cell->suspend(dev);
-
-	tmio_hw_stop(dev, platform_get_drvdata(dev));
-	return 0;
-}
-
-static int tmio_resume(struct platform_device *dev)
-{
-	const struct mfd_cell *cell = mfd_get_cell(dev);
-
-	/* FIXME - is this required or merely another attack of the broken
-	 * SHARP platform? Looks suspicious.
-	 */
-	tmio_hw_init(dev, platform_get_drvdata(dev));
-
-	if (cell->resume)
-		cell->resume(dev);
-
-	return 0;
-}
-#else
-#define tmio_suspend NULL
-#define tmio_resume NULL
-#endif
-
-static struct platform_driver tmio_driver = {
-	.driver.name	= "tmio-nand",
-	.driver.owner	= THIS_MODULE,
-	.probe		= tmio_probe,
-	.remove		= tmio_remove,
-	.suspend	= tmio_suspend,
-	.resume		= tmio_resume,
-};
-
-module_platform_driver(tmio_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
-MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
-MODULE_ALIAS("platform:tmio-nand");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
deleted file mode 100644
index b567d212fe7d..000000000000
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ /dev/null
@@ -1,423 +0,0 @@ 
-/*
- * TXx9 NAND flash memory controller driver
- * Based on RBTX49xx patch from CELF patch archive.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * (C) Copyright TOSHIBA CORPORATION 2004-2007
- * All Rights Reserved.
- */
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/nand_ecc.h>
-#include <linux/mtd/partitions.h>
-#include <linux/io.h>
-#include <asm/txx9/ndfmc.h>
-
-/* TXX9 NDFMC Registers */
-#define TXX9_NDFDTR	0x00
-#define TXX9_NDFMCR	0x04
-#define TXX9_NDFSR	0x08
-#define TXX9_NDFISR	0x0c
-#define TXX9_NDFIMR	0x10
-#define TXX9_NDFSPR	0x14
-#define TXX9_NDFRSTR	0x18	/* not TX4939 */
-
-/* NDFMCR : NDFMC Mode Control */
-#define TXX9_NDFMCR_WE	0x80
-#define TXX9_NDFMCR_ECC_ALL	0x60
-#define TXX9_NDFMCR_ECC_RESET	0x60
-#define TXX9_NDFMCR_ECC_READ	0x40
-#define TXX9_NDFMCR_ECC_ON	0x20
-#define TXX9_NDFMCR_ECC_OFF	0x00
-#define TXX9_NDFMCR_CE	0x10
-#define TXX9_NDFMCR_BSPRT	0x04	/* TX4925/TX4926 only */
-#define TXX9_NDFMCR_ALE	0x02
-#define TXX9_NDFMCR_CLE	0x01
-/* TX4939 only */
-#define TXX9_NDFMCR_X16	0x0400
-#define TXX9_NDFMCR_DMAREQ_MASK	0x0300
-#define TXX9_NDFMCR_DMAREQ_NODMA	0x0000
-#define TXX9_NDFMCR_DMAREQ_128	0x0100
-#define TXX9_NDFMCR_DMAREQ_256	0x0200
-#define TXX9_NDFMCR_DMAREQ_512	0x0300
-#define TXX9_NDFMCR_CS_MASK	0x0c
-#define TXX9_NDFMCR_CS(ch)	((ch) << 2)
-
-/* NDFMCR : NDFMC Status */
-#define TXX9_NDFSR_BUSY	0x80
-/* TX4939 only */
-#define TXX9_NDFSR_DMARUN	0x40
-
-/* NDFMCR : NDFMC Reset */
-#define TXX9_NDFRSTR_RST	0x01
-
-struct txx9ndfmc_priv {
-	struct platform_device *dev;
-	struct nand_chip chip;
-	int cs;
-	const char *mtdname;
-};
-
-#define MAX_TXX9NDFMC_DEV	4
-struct txx9ndfmc_drvdata {
-	struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
-	void __iomem *base;
-	unsigned char hold;	/* in gbusclock */
-	unsigned char spw;	/* in gbusclock */
-	struct nand_hw_control hw_control;
-};
-
-static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
-	return txx9_priv->dev;
-}
-
-static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
-{
-	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
-	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
-
-	return drvdata->base + (reg << plat->shift);
-}
-
-static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
-{
-	return __raw_readl(ndregaddr(dev, reg));
-}
-
-static void txx9ndfmc_write(struct platform_device *dev,
-			    u32 val, unsigned int reg)
-{
-	__raw_writel(val, ndregaddr(dev, reg));
-}
-
-static uint8_t txx9ndfmc_read_byte(struct mtd_info *mtd)
-{
-	struct platform_device *dev = mtd_to_platdev(mtd);
-
-	return txx9ndfmc_read(dev, TXX9_NDFDTR);
-}
-
-static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
-				int len)
-{
-	struct platform_device *dev = mtd_to_platdev(mtd);
-	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
-	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
-
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
-	while (len--)
-		__raw_writel(*buf++, ndfdtr);
-	txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
-}
-
-static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
-{
-	struct platform_device *dev = mtd_to_platdev(mtd);
-	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
-
-	while (len--)
-		*buf++ = __raw_readl(ndfdtr);
-}
-
-static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
-			       unsigned int ctrl)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct txx9ndfmc_priv *txx9_priv = nand_get_controller_data(chip);
-	struct platform_device *dev = txx9_priv->dev;
-	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
-
-	if (ctrl & NAND_CTRL_CHANGE) {
-		u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
-
-		mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
-		mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
-		mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
-		/* TXX9_NDFMCR_CE bit is 0:high 1:low */
-		mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
-		if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
-			mcr &= ~TXX9_NDFMCR_CS_MASK;
-			mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
-		}
-		txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
-	}
-	if (cmd != NAND_CMD_NONE)
-		txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
-	if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
-		/* dummy write to update external latch */
-		if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
-			txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
-	}
-	mmiowb();
-}
-
-static int txx9ndfmc_dev_ready(struct mtd_info *mtd)
-{
-	struct platform_device *dev = mtd_to_platdev(mtd);
-
-	return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
-}
-
-static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
-				   uint8_t *ecc_code)
-{
-	struct platform_device *dev = mtd_to_platdev(mtd);
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int eccbytes;
-	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
-
-	mcr &= ~TXX9_NDFMCR_ECC_ALL;
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
-	for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
-		ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
-		ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
-		ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
-		ecc_code += 3;
-	}
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
-	return 0;
-}
-
-static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
-		unsigned char *read_ecc, unsigned char *calc_ecc)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int eccsize;
-	int corrected = 0;
-	int stat;
-
-	for (eccsize = chip->ecc.size; eccsize > 0; eccsize -= 256) {
-		stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);
-		if (stat < 0)
-			return stat;
-		corrected += stat;
-		buf += 256;
-		read_ecc += 3;
-		calc_ecc += 3;
-	}
-	return corrected;
-}
-
-static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
-{
-	struct platform_device *dev = mtd_to_platdev(mtd);
-	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);
-
-	mcr &= ~TXX9_NDFMCR_ECC_ALL;
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
-	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
-}
-
-static void txx9ndfmc_initialize(struct platform_device *dev)
-{
-	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
-	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
-	int tmout = 100;
-
-	if (plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)
-		; /* no NDFRSTR.  Write to NDFSPR resets the NDFMC. */
-	else {
-		/* reset NDFMC */
-		txx9ndfmc_write(dev,
-				txx9ndfmc_read(dev, TXX9_NDFRSTR) |
-				TXX9_NDFRSTR_RST,
-				TXX9_NDFRSTR);
-		while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
-			if (--tmout == 0) {
-				dev_err(&dev->dev, "reset failed.\n");
-				break;
-			}
-			udelay(1);
-		}
-	}
-	/* setup Hold Time, Strobe Pulse Width */
-	txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
-	txx9ndfmc_write(dev,
-			(plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
-			TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
-}
-
-#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
-	DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
-
-static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	int ret;
-
-	ret = nand_scan_ident(mtd, 1, NULL);
-	if (!ret) {
-		if (mtd->writesize >= 512) {
-			/* Hardware ECC 6 byte ECC per 512 Byte data */
-			chip->ecc.size = 512;
-			chip->ecc.bytes = 6;
-		}
-		ret = nand_scan_tail(mtd);
-	}
-	return ret;
-}
-
-static int __init txx9ndfmc_probe(struct platform_device *dev)
-{
-	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
-	int hold, spw;
-	int i;
-	struct txx9ndfmc_drvdata *drvdata;
-	unsigned long gbusclk = plat->gbus_clock;
-	struct resource *res;
-
-	drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
-	if (!drvdata)
-		return -ENOMEM;
-	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
-	drvdata->base = devm_ioremap_resource(&dev->dev, res);
-	if (IS_ERR(drvdata->base))
-		return PTR_ERR(drvdata->base);
-
-	hold = plat->hold ?: 20; /* tDH */
-	spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
-
-	hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
-	spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
-	if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
-		hold -= 2;	/* actual hold time : (HOLD + 2) BUSCLK */
-	spw -= 1;	/* actual wait time : (SPW + 1) BUSCLK */
-	hold = clamp(hold, 1, 15);
-	drvdata->hold = hold;
-	spw = clamp(spw, 1, 15);
-	drvdata->spw = spw;
-	dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
-		 (gbusclk + 500000) / 1000000, hold, spw);
-
-	nand_hw_control_init(&drvdata->hw_control);
-
-	platform_set_drvdata(dev, drvdata);
-	txx9ndfmc_initialize(dev);
-
-	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
-		struct txx9ndfmc_priv *txx9_priv;
-		struct nand_chip *chip;
-		struct mtd_info *mtd;
-
-		if (!(plat->ch_mask & (1 << i)))
-			continue;
-		txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
-				    GFP_KERNEL);
-		if (!txx9_priv)
-			continue;
-		chip = &txx9_priv->chip;
-		mtd = nand_to_mtd(chip);
-		mtd->dev.parent = &dev->dev;
-
-		chip->read_byte = txx9ndfmc_read_byte;
-		chip->read_buf = txx9ndfmc_read_buf;
-		chip->write_buf = txx9ndfmc_write_buf;
-		chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
-		chip->dev_ready = txx9ndfmc_dev_ready;
-		chip->ecc.calculate = txx9ndfmc_calculate_ecc;
-		chip->ecc.correct = txx9ndfmc_correct_data;
-		chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
-		chip->ecc.mode = NAND_ECC_HW;
-		/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
-		chip->ecc.size = 256;
-		chip->ecc.bytes = 3;
-		chip->ecc.strength = 1;
-		chip->chip_delay = 100;
-		chip->controller = &drvdata->hw_control;
-
-		nand_set_controller_data(chip, txx9_priv);
-		txx9_priv->dev = dev;
-
-		if (plat->ch_mask != 1) {
-			txx9_priv->cs = i;
-			txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
-						       dev_name(&dev->dev), i);
-		} else {
-			txx9_priv->cs = -1;
-			txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
-						     GFP_KERNEL);
-		}
-		if (!txx9_priv->mtdname) {
-			kfree(txx9_priv);
-			dev_err(&dev->dev, "Unable to allocate MTD name.\n");
-			continue;
-		}
-		if (plat->wide_mask & (1 << i))
-			chip->options |= NAND_BUSWIDTH_16;
-
-		if (txx9ndfmc_nand_scan(mtd)) {
-			kfree(txx9_priv->mtdname);
-			kfree(txx9_priv);
-			continue;
-		}
-		mtd->name = txx9_priv->mtdname;
-
-		mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
-		drvdata->mtds[i] = mtd;
-	}
-
-	return 0;
-}
-
-static int __exit txx9ndfmc_remove(struct platform_device *dev)
-{
-	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
-	int i;
-
-	if (!drvdata)
-		return 0;
-	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
-		struct mtd_info *mtd = drvdata->mtds[i];
-		struct nand_chip *chip;
-		struct txx9ndfmc_priv *txx9_priv;
-
-		if (!mtd)
-			continue;
-		chip = mtd_to_nand(mtd);
-		txx9_priv = nand_get_controller_data(chip);
-
-		nand_release(mtd);
-		kfree(txx9_priv->mtdname);
-		kfree(txx9_priv);
-	}
-	return 0;
-}
-
-#ifdef CONFIG_PM
-static int txx9ndfmc_resume(struct platform_device *dev)
-{
-	if (platform_get_drvdata(dev))
-		txx9ndfmc_initialize(dev);
-	return 0;
-}
-#else
-#define txx9ndfmc_resume NULL
-#endif
-
-static struct platform_driver txx9ndfmc_driver = {
-	.remove		= __exit_p(txx9ndfmc_remove),
-	.resume		= txx9ndfmc_resume,
-	.driver		= {
-		.name	= "txx9ndfmc",
-	},
-};
-
-module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
-MODULE_ALIAS("platform:txx9ndfmc");
diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c
deleted file mode 100644
index c497b157d56a..000000000000
--- a/drivers/mtd/nand/vf610_nfc.c
+++ /dev/null
@@ -1,846 +0,0 @@ 
-/*
- * Copyright 2009-2015 Freescale Semiconductor, Inc. and others
- *
- * Description: MPC5125, VF610, MCF54418 and Kinetis K70 Nand driver.
- * Jason ported to M54418TWR and MVFA5 (VF610).
- * Authors: Stefan Agner <stefan.agner@toradex.com>
- *          Bill Pringlemeir <bpringlemeir@nbsps.com>
- *          Shaohui Xie <b21989@freescale.com>
- *          Jason Jin <Jason.jin@freescale.com>
- *
- * Based on original driver mpc5121_nfc.c.
- *
- * This is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Limitations:
- * - Untested on MPC5125 and M54418.
- * - DMA and pipelining not used.
- * - 2K pages or less.
- * - HW ECC: Only 2K page with 64+ OOB.
- * - HW ECC: Only 24 and 32-bit error correction implemented.
- */
-
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/rawnand.h>
-#include <linux/mtd/partitions.h>
-#include <linux/of_device.h>
-#include <linux/pinctrl/consumer.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#define	DRV_NAME		"vf610_nfc"
-
-/* Register Offsets */
-#define NFC_FLASH_CMD1			0x3F00
-#define NFC_FLASH_CMD2			0x3F04
-#define NFC_COL_ADDR			0x3F08
-#define NFC_ROW_ADDR			0x3F0c
-#define NFC_ROW_ADDR_INC		0x3F14
-#define NFC_FLASH_STATUS1		0x3F18
-#define NFC_FLASH_STATUS2		0x3F1c
-#define NFC_CACHE_SWAP			0x3F28
-#define NFC_SECTOR_SIZE			0x3F2c
-#define NFC_FLASH_CONFIG		0x3F30
-#define NFC_IRQ_STATUS			0x3F38
-
-/* Addresses for NFC MAIN RAM BUFFER areas */
-#define NFC_MAIN_AREA(n)		((n) *  0x1000)
-
-#define PAGE_2K				0x0800
-#define OOB_64				0x0040
-#define OOB_MAX				0x0100
-
-/*
- * NFC_CMD2[CODE] values. See section:
- *  - 31.4.7 Flash Command Code Description, Vybrid manual
- *  - 23.8.6 Flash Command Sequencer, MPC5125 manual
- *
- * Briefly these are bitmasks of controller cycles.
- */
-#define READ_PAGE_CMD_CODE		0x7EE0
-#define READ_ONFI_PARAM_CMD_CODE	0x4860
-#define PROGRAM_PAGE_CMD_CODE		0x7FC0
-#define ERASE_CMD_CODE			0x4EC0
-#define READ_ID_CMD_CODE		0x4804
-#define RESET_CMD_CODE			0x4040
-#define STATUS_READ_CMD_CODE		0x4068
-
-/* NFC ECC mode define */
-#define ECC_BYPASS			0
-#define ECC_45_BYTE			6
-#define ECC_60_BYTE			7
-
-/*** Register Mask and bit definitions */
-
-/* NFC_FLASH_CMD1 Field */
-#define CMD_BYTE2_MASK				0xFF000000
-#define CMD_BYTE2_SHIFT				24
-
-/* NFC_FLASH_CM2 Field */
-#define CMD_BYTE1_MASK				0xFF000000
-#define CMD_BYTE1_SHIFT				24
-#define CMD_CODE_MASK				0x00FFFF00
-#define CMD_CODE_SHIFT				8
-#define BUFNO_MASK				0x00000006
-#define BUFNO_SHIFT				1
-#define START_BIT				BIT(0)
-
-/* NFC_COL_ADDR Field */
-#define COL_ADDR_MASK				0x0000FFFF
-#define COL_ADDR_SHIFT				0
-
-/* NFC_ROW_ADDR Field */
-#define ROW_ADDR_MASK				0x00FFFFFF
-#define ROW_ADDR_SHIFT				0
-#define ROW_ADDR_CHIP_SEL_RB_MASK		0xF0000000
-#define ROW_ADDR_CHIP_SEL_RB_SHIFT		28
-#define ROW_ADDR_CHIP_SEL_MASK			0x0F000000
-#define ROW_ADDR_CHIP_SEL_SHIFT			24
-
-/* NFC_FLASH_STATUS2 Field */
-#define STATUS_BYTE1_MASK			0x000000FF
-
-/* NFC_FLASH_CONFIG Field */
-#define CONFIG_ECC_SRAM_ADDR_MASK		0x7FC00000
-#define CONFIG_ECC_SRAM_ADDR_SHIFT		22
-#define CONFIG_ECC_SRAM_REQ_BIT			BIT(21)
-#define CONFIG_DMA_REQ_BIT			BIT(20)
-#define CONFIG_ECC_MODE_MASK			0x000E0000
-#define CONFIG_ECC_MODE_SHIFT			17
-#define CONFIG_FAST_FLASH_BIT			BIT(16)
-#define CONFIG_16BIT				BIT(7)
-#define CONFIG_BOOT_MODE_BIT			BIT(6)
-#define CONFIG_ADDR_AUTO_INCR_BIT		BIT(5)
-#define CONFIG_BUFNO_AUTO_INCR_BIT		BIT(4)
-#define CONFIG_PAGE_CNT_MASK			0xF
-#define CONFIG_PAGE_CNT_SHIFT			0
-
-/* NFC_IRQ_STATUS Field */
-#define IDLE_IRQ_BIT				BIT(29)
-#define IDLE_EN_BIT				BIT(20)
-#define CMD_DONE_CLEAR_BIT			BIT(18)
-#define IDLE_CLEAR_BIT				BIT(17)
-
-/*
- * ECC status - seems to consume 8 bytes (double word). The documented
- * status byte is located in the lowest byte of the second word (which is
- * the 4th or 7th byte depending on endianness).
- * Calculate an offset to store the ECC status at the end of the buffer.
- */
-#define ECC_SRAM_ADDR		(PAGE_2K + OOB_MAX - 8)
-
-#define ECC_STATUS		0x4
-#define ECC_STATUS_MASK		0x80
-#define ECC_STATUS_ERR_COUNT	0x3F
-
-enum vf610_nfc_alt_buf {
-	ALT_BUF_DATA = 0,
-	ALT_BUF_ID = 1,
-	ALT_BUF_STAT = 2,
-	ALT_BUF_ONFI = 3,
-};
-
-enum vf610_nfc_variant {
-	NFC_VFC610 = 1,
-};
-
-struct vf610_nfc {
-	struct nand_chip chip;
-	struct device *dev;
-	void __iomem *regs;
-	struct completion cmd_done;
-	uint buf_offset;
-	int write_sz;
-	/* Status and ID are in alternate locations. */
-	enum vf610_nfc_alt_buf alt_buf;
-	enum vf610_nfc_variant variant;
-	struct clk *clk;
-	bool use_hw_ecc;
-	u32 ecc_mode;
-};
-
-static inline struct vf610_nfc *mtd_to_nfc(struct mtd_info *mtd)
-{
-	return container_of(mtd_to_nand(mtd), struct vf610_nfc, chip);
-}
-
-static inline u32 vf610_nfc_read(struct vf610_nfc *nfc, uint reg)
-{
-	return readl(nfc->regs + reg);
-}
-
-static inline void vf610_nfc_write(struct vf610_nfc *nfc, uint reg, u32 val)
-{
-	writel(val, nfc->regs + reg);
-}
-
-static inline void vf610_nfc_set(struct vf610_nfc *nfc, uint reg, u32 bits)
-{
-	vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) | bits);
-}
-
-static inline void vf610_nfc_clear(struct vf610_nfc *nfc, uint reg, u32 bits)
-{
-	vf610_nfc_write(nfc, reg, vf610_nfc_read(nfc, reg) & ~bits);
-}
-
-static inline void vf610_nfc_set_field(struct vf610_nfc *nfc, u32 reg,
-				       u32 mask, u32 shift, u32 val)
-{
-	vf610_nfc_write(nfc, reg,
-			(vf610_nfc_read(nfc, reg) & (~mask)) | val << shift);
-}
-
-static inline void vf610_nfc_memcpy(void *dst, const void __iomem *src,
-				    size_t n)
-{
-	/*
-	 * Use this accessor for the internal SRAM buffers. On the ARM
-	 * Freescale Vybrid SoC it's known that the driver can treat
-	 * the SRAM buffer as if it's memory. Other platform might need
-	 * to treat the buffers differently.
-	 *
-	 * For the time being, use memcpy
-	 */
-	memcpy(dst, src, n);
-}
-
-/* Clear flags for upcoming command */
-static inline void vf610_nfc_clear_status(struct vf610_nfc *nfc)
-{
-	u32 tmp = vf610_nfc_read(nfc, NFC_IRQ_STATUS);
-
-	tmp |= CMD_DONE_CLEAR_BIT | IDLE_CLEAR_BIT;
-	vf610_nfc_write(nfc, NFC_IRQ_STATUS, tmp);
-}
-
-static void vf610_nfc_done(struct vf610_nfc *nfc)
-{
-	unsigned long timeout = msecs_to_jiffies(100);
-
-	/*
-	 * Barrier is needed after this write. This write need
-	 * to be done before reading the next register the first
-	 * time.
-	 * vf610_nfc_set implicates such a barrier by using writel
-	 * to write to the register.
-	 */
-	vf610_nfc_set(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
-	vf610_nfc_set(nfc, NFC_FLASH_CMD2, START_BIT);
-
-	if (!wait_for_completion_timeout(&nfc->cmd_done, timeout))
-		dev_warn(nfc->dev, "Timeout while waiting for BUSY.\n");
-
-	vf610_nfc_clear_status(nfc);
-}
-
-static u8 vf610_nfc_get_id(struct vf610_nfc *nfc, int col)
-{
-	u32 flash_id;
-
-	if (col < 4) {
-		flash_id = vf610_nfc_read(nfc, NFC_FLASH_STATUS1);
-		flash_id >>= (3 - col) * 8;
-	} else {
-		flash_id = vf610_nfc_read(nfc, NFC_FLASH_STATUS2);
-		flash_id >>= 24;
-	}
-
-	return flash_id & 0xff;
-}
-
-static u8 vf610_nfc_get_status(struct vf610_nfc *nfc)
-{
-	return vf610_nfc_read(nfc, NFC_FLASH_STATUS2) & STATUS_BYTE1_MASK;
-}
-
-static void vf610_nfc_send_command(struct vf610_nfc *nfc, u32 cmd_byte1,
-				   u32 cmd_code)
-{
-	u32 tmp;
-
-	vf610_nfc_clear_status(nfc);
-
-	tmp = vf610_nfc_read(nfc, NFC_FLASH_CMD2);
-	tmp &= ~(CMD_BYTE1_MASK | CMD_CODE_MASK | BUFNO_MASK);
-	tmp |= cmd_byte1 << CMD_BYTE1_SHIFT;
-	tmp |= cmd_code << CMD_CODE_SHIFT;
-	vf610_nfc_write(nfc, NFC_FLASH_CMD2, tmp);
-}
-
-static void vf610_nfc_send_commands(struct vf610_nfc *nfc, u32 cmd_byte1,
-				    u32 cmd_byte2, u32 cmd_code)
-{
-	u32 tmp;
-
-	vf610_nfc_send_command(nfc, cmd_byte1, cmd_code);
-
-	tmp = vf610_nfc_read(nfc, NFC_FLASH_CMD1);
-	tmp &= ~CMD_BYTE2_MASK;
-	tmp |= cmd_byte2 << CMD_BYTE2_SHIFT;
-	vf610_nfc_write(nfc, NFC_FLASH_CMD1, tmp);
-}
-
-static irqreturn_t vf610_nfc_irq(int irq, void *data)
-{
-	struct mtd_info *mtd = data;
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
-	vf610_nfc_clear(nfc, NFC_IRQ_STATUS, IDLE_EN_BIT);
-	complete(&nfc->cmd_done);
-
-	return IRQ_HANDLED;
-}
-
-static void vf610_nfc_addr_cycle(struct vf610_nfc *nfc, int column, int page)
-{
-	if (column != -1) {
-		if (nfc->chip.options & NAND_BUSWIDTH_16)
-			column = column / 2;
-		vf610_nfc_set_field(nfc, NFC_COL_ADDR, COL_ADDR_MASK,
-				    COL_ADDR_SHIFT, column);
-	}
-	if (page != -1)
-		vf610_nfc_set_field(nfc, NFC_ROW_ADDR, ROW_ADDR_MASK,
-				    ROW_ADDR_SHIFT, page);
-}
-
-static inline void vf610_nfc_ecc_mode(struct vf610_nfc *nfc, int ecc_mode)
-{
-	vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
-			    CONFIG_ECC_MODE_MASK,
-			    CONFIG_ECC_MODE_SHIFT, ecc_mode);
-}
-
-static inline void vf610_nfc_transfer_size(struct vf610_nfc *nfc, int size)
-{
-	vf610_nfc_write(nfc, NFC_SECTOR_SIZE, size);
-}
-
-static void vf610_nfc_command(struct mtd_info *mtd, unsigned command,
-			      int column, int page)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-	int trfr_sz = nfc->chip.options & NAND_BUSWIDTH_16 ? 1 : 0;
-
-	nfc->buf_offset = max(column, 0);
-	nfc->alt_buf = ALT_BUF_DATA;
-
-	switch (command) {
-	case NAND_CMD_SEQIN:
-		/* Use valid column/page from preread... */
-		vf610_nfc_addr_cycle(nfc, column, page);
-		nfc->buf_offset = 0;
-
-		/*
-		 * SEQIN => data => PAGEPROG sequence is done by the controller
-		 * hence we do not need to issue the command here...
-		 */
-		return;
-	case NAND_CMD_PAGEPROG:
-		trfr_sz += nfc->write_sz;
-		vf610_nfc_transfer_size(nfc, trfr_sz);
-		vf610_nfc_send_commands(nfc, NAND_CMD_SEQIN,
-					command, PROGRAM_PAGE_CMD_CODE);
-		if (nfc->use_hw_ecc)
-			vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
-		else
-			vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
-		break;
-
-	case NAND_CMD_RESET:
-		vf610_nfc_transfer_size(nfc, 0);
-		vf610_nfc_send_command(nfc, command, RESET_CMD_CODE);
-		break;
-
-	case NAND_CMD_READOOB:
-		trfr_sz += mtd->oobsize;
-		column = mtd->writesize;
-		vf610_nfc_transfer_size(nfc, trfr_sz);
-		vf610_nfc_send_commands(nfc, NAND_CMD_READ0,
-					NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
-		vf610_nfc_addr_cycle(nfc, column, page);
-		vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
-		break;
-
-	case NAND_CMD_READ0:
-		trfr_sz += mtd->writesize + mtd->oobsize;
-		vf610_nfc_transfer_size(nfc, trfr_sz);
-		vf610_nfc_send_commands(nfc, NAND_CMD_READ0,
-					NAND_CMD_READSTART, READ_PAGE_CMD_CODE);
-		vf610_nfc_addr_cycle(nfc, column, page);
-		vf610_nfc_ecc_mode(nfc, nfc->ecc_mode);
-		break;
-
-	case NAND_CMD_PARAM:
-		nfc->alt_buf = ALT_BUF_ONFI;
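-		/* ONFI mandates at least 3 copies of the parameter page */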
-		trfr_sz = 3 * sizeof(struct nand_onfi_params);
-		vf610_nfc_transfer_size(nfc, trfr_sz);
-		vf610_nfc_send_command(nfc, command, READ_ONFI_PARAM_CMD_CODE);
-		vf610_nfc_addr_cycle(nfc, -1, column);
-		vf610_nfc_ecc_mode(nfc, ECC_BYPASS);
-		break;
-
-	case NAND_CMD_ERASE1:
-		vf610_nfc_transfer_size(nfc, 0);
-		vf610_nfc_send_commands(nfc, command,
-					NAND_CMD_ERASE2, ERASE_CMD_CODE);
-		vf610_nfc_addr_cycle(nfc, column, page);
-		break;
-
-	case NAND_CMD_READID:
-		nfc->alt_buf = ALT_BUF_ID;
-		nfc->buf_offset = 0;
-		vf610_nfc_transfer_size(nfc, 0);
-		vf610_nfc_send_command(nfc, command, READ_ID_CMD_CODE);
-		vf610_nfc_addr_cycle(nfc, -1, column);
-		break;
-
-	case NAND_CMD_STATUS:
-		nfc->alt_buf = ALT_BUF_STAT;
-		vf610_nfc_transfer_size(nfc, 0);
-		vf610_nfc_send_command(nfc, command, STATUS_READ_CMD_CODE);
-		break;
-	default:
-		return;
-	}
-
-	vf610_nfc_done(nfc);
-
-	nfc->use_hw_ecc = false;
-	nfc->write_sz = 0;
-}
-
-static void vf610_nfc_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-	uint c = nfc->buf_offset;
-
-	/* Alternate buffers are only supported through read_byte */
-	WARN_ON(nfc->alt_buf);
-
-	vf610_nfc_memcpy(buf, nfc->regs + NFC_MAIN_AREA(0) + c, len);
-
-	nfc->buf_offset += len;
-}
-
-static void vf610_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
-				int len)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-	uint c = nfc->buf_offset;
-	uint l;
-
-	l = min_t(uint, len, mtd->writesize + mtd->oobsize - c);
-	vf610_nfc_memcpy(nfc->regs + NFC_MAIN_AREA(0) + c, buf, l);
-
-	nfc->write_sz += l;
-	nfc->buf_offset += l;
-}
-
-static uint8_t vf610_nfc_read_byte(struct mtd_info *mtd)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-	u8 tmp;
-	uint c = nfc->buf_offset;
-
-	switch (nfc->alt_buf) {
-	case ALT_BUF_ID:
-		tmp = vf610_nfc_get_id(nfc, c);
-		break;
-	case ALT_BUF_STAT:
-		tmp = vf610_nfc_get_status(nfc);
-		break;
-#ifdef __LITTLE_ENDIAN
-	case ALT_BUF_ONFI:
-		/* Reverse the byte offset since the controller uses big endianness */
-		c = nfc->buf_offset ^ 0x3;
-		/* fall-through */
-#endif
-	default:
-		tmp = *((u8 *)(nfc->regs + NFC_MAIN_AREA(0) + c));
-		break;
-	}
-	nfc->buf_offset++;
-	return tmp;
-}
-
-static u16 vf610_nfc_read_word(struct mtd_info *mtd)
-{
-	u16 tmp;
-
-	vf610_nfc_read_buf(mtd, (u_char *)&tmp, sizeof(tmp));
-	return tmp;
-}
-
-/* If not provided, upper layers apply a fixed delay. */
-static int vf610_nfc_dev_ready(struct mtd_info *mtd)
-{
-	/* NFC handles R/B internally; always ready. */
-	return 1;
-}
-
-/*
- * This function supports Vybrid only (MPC5125 would have full RB and four CS)
- */
-static void vf610_nfc_select_chip(struct mtd_info *mtd, int chip)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-	u32 tmp = vf610_nfc_read(nfc, NFC_ROW_ADDR);
-
-	/* Vybrid only (MPC5125 would have full RB and four CS) */
-	if (nfc->variant != NFC_VFC610)
-		return;
-
-	tmp &= ~(ROW_ADDR_CHIP_SEL_RB_MASK | ROW_ADDR_CHIP_SEL_MASK);
-
-	if (chip >= 0) {
-		tmp |= 1 << ROW_ADDR_CHIP_SEL_RB_SHIFT;
-		tmp |= BIT(chip) << ROW_ADDR_CHIP_SEL_SHIFT;
-	}
-
-	vf610_nfc_write(nfc, NFC_ROW_ADDR, tmp);
-}
-
-/* Count the number of 0's in buff up to max_bits */
-static inline int count_written_bits(uint8_t *buff, int size, int max_bits)
-{
-	uint32_t *buff32 = (uint32_t *)buff;
-	int k, written_bits = 0;
-
-	for (k = 0; k < (size / 4); k++) {
-		written_bits += hweight32(~buff32[k]);
-		if (unlikely(written_bits > max_bits))
-			break;
-	}
-
-	return written_bits;
-}
-
-static inline int vf610_nfc_correct_data(struct mtd_info *mtd, uint8_t *dat,
-					 uint8_t *oob, int page)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-	u32 ecc_status_off = NFC_MAIN_AREA(0) + ECC_SRAM_ADDR + ECC_STATUS;
-	u8 ecc_status;
-	u8 ecc_count;
-	int flips_threshold = nfc->chip.ecc.strength / 2;
-
-	ecc_status = vf610_nfc_read(nfc, ecc_status_off) & 0xff;
-	ecc_count = ecc_status & ECC_STATUS_ERR_COUNT;
-
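-	/* ECC engine flagged no failure: return the corrected bitflip count */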
-	if (!(ecc_status & ECC_STATUS_MASK))
-		return ecc_count;
-
-	/* Read OOB without ECC unit enabled */
-	vf610_nfc_command(mtd, NAND_CMD_READOOB, 0, page);
-	vf610_nfc_read_buf(mtd, oob, mtd->oobsize);
-
-	/*
-	 * On an erased page, the bitflip count (including OOB) should be
-	 * zero or at least less than half of the ECC strength.
-	 */
-	return nand_check_erased_ecc_chunk(dat, nfc->chip.ecc.size, oob,
-					   mtd->oobsize, NULL, 0,
-					   flips_threshold);
-}
-
-static int vf610_nfc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
-				uint8_t *buf, int oob_required, int page)
-{
-	int eccsize = chip->ecc.size;
-	int stat;
-
-	vf610_nfc_read_buf(mtd, buf, eccsize);
-	if (oob_required)
-		vf610_nfc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	stat = vf610_nfc_correct_data(mtd, buf, chip->oob_poi, page);
-
-	if (stat < 0) {
-		mtd->ecc_stats.failed++;
-		return 0;
-	} else {
-		mtd->ecc_stats.corrected += stat;
-		return stat;
-	}
-}
-
-static int vf610_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
-				const uint8_t *buf, int oob_required, int page)
-{
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
-	vf610_nfc_write_buf(mtd, buf, mtd->writesize);
-	if (oob_required)
-		vf610_nfc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
-
-	/* Always write whole page including OOB due to HW ECC */
-	nfc->use_hw_ecc = true;
-	nfc->write_sz = mtd->writesize + mtd->oobsize;
-
-	return 0;
-}
-
-static const struct of_device_id vf610_nfc_dt_ids[] = {
-	{ .compatible = "fsl,vf610-nfc", .data = (void *)NFC_VFC610 },
-	{ /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, vf610_nfc_dt_ids);
-
-static void vf610_nfc_preinit_controller(struct vf610_nfc *nfc)
-{
-	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
-	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_ADDR_AUTO_INCR_BIT);
-	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BUFNO_AUTO_INCR_BIT);
-	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_BOOT_MODE_BIT);
-	vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_DMA_REQ_BIT);
-	vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_FAST_FLASH_BIT);
-
-	/* Disable virtual pages, only one elementary transfer unit */
-	vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG, CONFIG_PAGE_CNT_MASK,
-			    CONFIG_PAGE_CNT_SHIFT, 1);
-}
-
-static void vf610_nfc_init_controller(struct vf610_nfc *nfc)
-{
-	if (nfc->chip.options & NAND_BUSWIDTH_16)
-		vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
-	else
-		vf610_nfc_clear(nfc, NFC_FLASH_CONFIG, CONFIG_16BIT);
-
-	if (nfc->chip.ecc.mode == NAND_ECC_HW) {
-		/* Set ECC status offset in SRAM */
-		vf610_nfc_set_field(nfc, NFC_FLASH_CONFIG,
-				    CONFIG_ECC_SRAM_ADDR_MASK,
-				    CONFIG_ECC_SRAM_ADDR_SHIFT,
-				    ECC_SRAM_ADDR >> 3);
-
-		/* Enable ECC status in SRAM */
-		vf610_nfc_set(nfc, NFC_FLASH_CONFIG, CONFIG_ECC_SRAM_REQ_BIT);
-	}
-}
-
-static int vf610_nfc_probe(struct platform_device *pdev)
-{
-	struct vf610_nfc *nfc;
-	struct resource *res;
-	struct mtd_info *mtd;
-	struct nand_chip *chip;
-	struct device_node *child;
-	const struct of_device_id *of_id;
-	int err;
-	int irq;
-
-	nfc = devm_kzalloc(&pdev->dev, sizeof(*nfc), GFP_KERNEL);
-	if (!nfc)
-		return -ENOMEM;
-
-	nfc->dev = &pdev->dev;
-	chip = &nfc->chip;
-	mtd = nand_to_mtd(chip);
-
-	mtd->owner = THIS_MODULE;
-	mtd->dev.parent = nfc->dev;
-	mtd->name = DRV_NAME;
-
-	irq = platform_get_irq(pdev, 0);
-	if (irq <= 0)
-		return -EINVAL;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	nfc->regs = devm_ioremap_resource(nfc->dev, res);
-	if (IS_ERR(nfc->regs))
-		return PTR_ERR(nfc->regs);
-
-	nfc->clk = devm_clk_get(&pdev->dev, NULL);
-	if (IS_ERR(nfc->clk))
-		return PTR_ERR(nfc->clk);
-
-	err = clk_prepare_enable(nfc->clk);
-	if (err) {
-		dev_err(nfc->dev, "Unable to enable clock!\n");
-		return err;
-	}
-
-	of_id = of_match_device(vf610_nfc_dt_ids, &pdev->dev);
-	nfc->variant = (enum vf610_nfc_variant)of_id->data;
-
-	for_each_available_child_of_node(nfc->dev->of_node, child) {
-		if (of_device_is_compatible(child, "fsl,vf610-nfc-nandcs")) {
-
-			if (nand_get_flash_node(chip)) {
-				dev_err(nfc->dev,
-					"Only one NAND chip supported!\n");
-				err = -EINVAL;
-				goto error;
-			}
-
-			nand_set_flash_node(chip, child);
-		}
-	}
-
-	if (!nand_get_flash_node(chip)) {
-		dev_err(nfc->dev, "NAND chip sub-node missing!\n");
-		err = -ENODEV;
-		goto err_clk;
-	}
-
-	chip->dev_ready = vf610_nfc_dev_ready;
-	chip->cmdfunc = vf610_nfc_command;
-	chip->read_byte = vf610_nfc_read_byte;
-	chip->read_word = vf610_nfc_read_word;
-	chip->read_buf = vf610_nfc_read_buf;
-	chip->write_buf = vf610_nfc_write_buf;
-	chip->select_chip = vf610_nfc_select_chip;
-
-	chip->options |= NAND_NO_SUBPAGE_WRITE;
-
-	init_completion(&nfc->cmd_done);
-
-	err = devm_request_irq(nfc->dev, irq, vf610_nfc_irq, 0, DRV_NAME, mtd);
-	if (err) {
-		dev_err(nfc->dev, "Error requesting IRQ!\n");
-		goto error;
-	}
-
-	vf610_nfc_preinit_controller(nfc);
-
-	/* first scan to find the device and get the page size */
-	if (nand_scan_ident(mtd, 1, NULL)) {
-		err = -ENXIO;
-		goto error;
-	}
-
-	vf610_nfc_init_controller(nfc);
-
-	/* Bad block options. */
-	if (chip->bbt_options & NAND_BBT_USE_FLASH)
-		chip->bbt_options |= NAND_BBT_NO_OOB;
-
-	/* Single buffer only, max 256 OOB minus ECC status */
-	if (mtd->writesize + mtd->oobsize > PAGE_2K + OOB_MAX - 8) {
-		dev_err(nfc->dev, "Unsupported flash page size\n");
-		err = -ENXIO;
-		goto error;
-	}
-
-	if (chip->ecc.mode == NAND_ECC_HW) {
-		if (mtd->writesize != PAGE_2K && mtd->oobsize < 64) {
-			dev_err(nfc->dev, "Unsupported flash with hwecc\n");
-			err = -ENXIO;
-			goto error;
-		}
-
-		if (chip->ecc.size != mtd->writesize) {
-			dev_err(nfc->dev, "Step size needs to be page size\n");
-			err = -ENXIO;
-			goto error;
-		}
-
-		/* Only 64 byte ECC layouts known */
-		if (mtd->oobsize > 64)
-			mtd->oobsize = 64;
-
-		/*
-		 * mtd->ecclayout is not specified here because we're using the
-		 * default large page ECC layout defined in NAND core.
-		 */
-		if (chip->ecc.strength == 32) {
-			nfc->ecc_mode = ECC_60_BYTE;
-			chip->ecc.bytes = 60;
-		} else if (chip->ecc.strength == 24) {
-			nfc->ecc_mode = ECC_45_BYTE;
-			chip->ecc.bytes = 45;
-		} else {
-			dev_err(nfc->dev, "Unsupported ECC strength\n");
-			err = -ENXIO;
-			goto error;
-		}
-
-		chip->ecc.read_page = vf610_nfc_read_page;
-		chip->ecc.write_page = vf610_nfc_write_page;
-
-		chip->ecc.size = PAGE_2K;
-	}
-
-	/* second phase scan */
-	if (nand_scan_tail(mtd)) {
-		err = -ENXIO;
-		goto error;
-	}
-
-	platform_set_drvdata(pdev, mtd);
-
-	/* Register device in MTD */
-	return mtd_device_register(mtd, NULL, 0);
-
-error:
-	of_node_put(nand_get_flash_node(chip));
-err_clk:
-	clk_disable_unprepare(nfc->clk);
-	return err;
-}
-
-static int vf610_nfc_remove(struct platform_device *pdev)
-{
-	struct mtd_info *mtd = platform_get_drvdata(pdev);
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
-	nand_release(mtd);
-	clk_disable_unprepare(nfc->clk);
-	return 0;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int vf610_nfc_suspend(struct device *dev)
-{
-	struct mtd_info *mtd = dev_get_drvdata(dev);
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
-	clk_disable_unprepare(nfc->clk);
-	return 0;
-}
-
-static int vf610_nfc_resume(struct device *dev)
-{
-	struct mtd_info *mtd = dev_get_drvdata(dev);
-	struct vf610_nfc *nfc = mtd_to_nfc(mtd);
-
-	pinctrl_pm_select_default_state(dev);
-
-	clk_prepare_enable(nfc->clk);
-
-	vf610_nfc_preinit_controller(nfc);
-	vf610_nfc_init_controller(nfc);
-	return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(vf610_nfc_pm_ops, vf610_nfc_suspend, vf610_nfc_resume);
-
-static struct platform_driver vf610_nfc_driver = {
-	.driver		= {
-		.name	= DRV_NAME,
-		.of_match_table = vf610_nfc_dt_ids,
-		.pm	= &vf610_nfc_pm_ops,
-	},
-	.probe		= vf610_nfc_probe,
-	.remove		= vf610_nfc_remove,
-};
-
-module_platform_driver(vf610_nfc_driver);
-
-MODULE_AUTHOR("Stefan Agner <stefan.agner@toradex.com>");
-MODULE_DESCRIPTION("Freescale VF610/MPC5125 NFC MTD NAND driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
deleted file mode 100644
index 3e7353e76264..000000000000
--- a/drivers/mtd/nand/xway_nand.c
+++ /dev/null
@@ -1,248 +0,0 @@ 
-/*
- *  This program is free software; you can redistribute it and/or modify it
- *  under the terms of the GNU General Public License version 2 as published
- *  by the Free Software Foundation.
- *
- *  Copyright © 2012 John Crispin <blogic@openwrt.org>
- *  Copyright © 2016 Hauke Mehrtens <hauke@hauke-m.de>
- */
-
-#include <linux/mtd/rawnand.h>
-#include <linux/of_gpio.h>
-#include <linux/of_platform.h>
-
-#include <lantiq_soc.h>
-
-/* nand registers */
-#define EBU_ADDSEL1		0x24
-#define EBU_NAND_CON		0xB0
-#define EBU_NAND_WAIT		0xB4
-#define  NAND_WAIT_RD		BIT(0) /* NAND flash status output */
-#define  NAND_WAIT_WR_C		BIT(3) /* NAND Write/Read complete */
-#define EBU_NAND_ECC0		0xB8
-#define EBU_NAND_ECC_AC		0xBC
-
-/*
- * nand commands
- * The pins of the NAND chip are selected based on the address bits of the
- * "register" read and write. There are no special registers, but an
- * address range and the lower address bits are used to activate the
- * correct line. For example, when bit (1 << 2) is set in the address,
- * the ALE pin will be activated.
- */
-#define NAND_CMD_ALE		BIT(2) /* address latch enable */
-#define NAND_CMD_CLE		BIT(3) /* command latch enable */
-#define NAND_CMD_CS		BIT(4) /* chip select */
-#define NAND_CMD_SE		BIT(5) /* spare area access latch */
-#define NAND_CMD_WP		BIT(6) /* write protect */
-#define NAND_WRITE_CMD		(NAND_CMD_CS | NAND_CMD_CLE)
-#define NAND_WRITE_ADDR		(NAND_CMD_CS | NAND_CMD_ALE)
-#define NAND_WRITE_DATA		(NAND_CMD_CS)
-#define NAND_READ_DATA		(NAND_CMD_CS)
-
-/* we need to tell the EBU which address we mapped the NAND to */
-#define ADDSEL1_MASK(x)		(x << 4)
-#define ADDSEL1_REGEN		1
-
-/* we need to tell the EBU that we have nand attached and set it up properly */
-#define BUSCON1_SETUP		(1 << 22)
-#define BUSCON1_BCGEN_RES	(0x3 << 12)
-#define BUSCON1_WAITWRC2	(2 << 8)
-#define BUSCON1_WAITRDC2	(2 << 6)
-#define BUSCON1_HOLDC1		(1 << 4)
-#define BUSCON1_RECOVC1		(1 << 2)
-#define BUSCON1_CMULT4		1
-
-#define NAND_CON_CE		(1 << 20)
-#define NAND_CON_OUT_CS1	(1 << 10)
-#define NAND_CON_IN_CS1		(1 << 8)
-#define NAND_CON_PRE_P		(1 << 7)
-#define NAND_CON_WP_P		(1 << 6)
-#define NAND_CON_SE_P		(1 << 5)
-#define NAND_CON_CS_P		(1 << 4)
-#define NAND_CON_CSMUX		(1 << 1)
-#define NAND_CON_NANDM		1
-
-struct xway_nand_data {
-	struct nand_chip	chip;
-	unsigned long		csflags;
-	void __iomem		*nandaddr;
-};
-
-static u8 xway_readb(struct mtd_info *mtd, int op)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct xway_nand_data *data = nand_get_controller_data(chip);
-
-	return readb(data->nandaddr + op);
-}
-
-static void xway_writeb(struct mtd_info *mtd, int op, u8 value)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct xway_nand_data *data = nand_get_controller_data(chip);
-
-	writeb(value, data->nandaddr + op);
-}
-
-static void xway_select_chip(struct mtd_info *mtd, int select)
-{
-	struct nand_chip *chip = mtd_to_nand(mtd);
-	struct xway_nand_data *data = nand_get_controller_data(chip);
-
-	switch (select) {
-	case -1:
-		ltq_ebu_w32_mask(NAND_CON_CE, 0, EBU_NAND_CON);
-		ltq_ebu_w32_mask(NAND_CON_NANDM, 0, EBU_NAND_CON);
-		spin_unlock_irqrestore(&ebu_lock, data->csflags);
-		break;
-	case 0:
-		spin_lock_irqsave(&ebu_lock, data->csflags);
-		ltq_ebu_w32_mask(0, NAND_CON_NANDM, EBU_NAND_CON);
-		ltq_ebu_w32_mask(0, NAND_CON_CE, EBU_NAND_CON);
-		break;
-	default:
-		BUG();
-	}
-}
-
-static void xway_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
-{
-	if (cmd == NAND_CMD_NONE)
-		return;
-
-	if (ctrl & NAND_CLE)
-		xway_writeb(mtd, NAND_WRITE_CMD, cmd);
-	else if (ctrl & NAND_ALE)
-		xway_writeb(mtd, NAND_WRITE_ADDR, cmd);
-
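-	/* Busy-wait until the EBU signals write/read complete */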
-	while ((ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_WR_C) == 0)
-		;
-}
-
-static int xway_dev_ready(struct mtd_info *mtd)
-{
-	return ltq_ebu_r32(EBU_NAND_WAIT) & NAND_WAIT_RD;
-}
-
-static unsigned char xway_read_byte(struct mtd_info *mtd)
-{
-	return xway_readb(mtd, NAND_READ_DATA);
-}
-
-static void xway_read_buf(struct mtd_info *mtd, u_char *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		buf[i] = xway_readb(mtd, NAND_WRITE_DATA);
-}
-
-static void xway_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
-{
-	int i;
-
-	for (i = 0; i < len; i++)
-		xway_writeb(mtd, NAND_WRITE_DATA, buf[i]);
-}
-
-/*
- * Probe for the NAND device.
- */
-static int xway_nand_probe(struct platform_device *pdev)
-{
-	struct xway_nand_data *data;
-	struct mtd_info *mtd;
-	struct resource *res;
-	int err;
-	u32 cs;
-	u32 cs_flag = 0;
-
-	/* Allocate memory for the device structure (and zero it) */
-	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
-			    GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
-
-	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
-	if (IS_ERR(data->nandaddr))
-		return PTR_ERR(data->nandaddr);
-
-	nand_set_flash_node(&data->chip, pdev->dev.of_node);
-	mtd = nand_to_mtd(&data->chip);
-	mtd->dev.parent = &pdev->dev;
-
-	data->chip.cmd_ctrl = xway_cmd_ctrl;
-	data->chip.dev_ready = xway_dev_ready;
-	data->chip.select_chip = xway_select_chip;
-	data->chip.write_buf = xway_write_buf;
-	data->chip.read_buf = xway_read_buf;
-	data->chip.read_byte = xway_read_byte;
-	data->chip.chip_delay = 30;
-
-	data->chip.ecc.mode = NAND_ECC_SOFT;
-	data->chip.ecc.algo = NAND_ECC_HAMMING;
-
-	platform_set_drvdata(pdev, data);
-	nand_set_controller_data(&data->chip, data);
-
-	/* Load our CS from the DT: use CS1 if specified, otherwise default to CS0 */
-	err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
-	if (!err && cs == 1)
-		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;
-
-	/* set up the EBU to run in NAND mode at our base address */
-	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
-		    | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);
-
-	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
-		    | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
-		    | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);
-
-	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
-		    | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
-		    | cs_flag, EBU_NAND_CON);
-
-	/* Scan to detect the NAND device */
-	err = nand_scan(mtd, 1);
-	if (err)
-		return err;
-
-	err = mtd_device_register(mtd, NULL, 0);
-	if (err)
-		nand_release(mtd);
-
-	return err;
-}
-
-/*
- * Remove a NAND device.
- */
-static int xway_nand_remove(struct platform_device *pdev)
-{
-	struct xway_nand_data *data = platform_get_drvdata(pdev);
-
-	nand_release(nand_to_mtd(&data->chip));
-
-	return 0;
-}
-
-static const struct of_device_id xway_nand_match[] = {
-	{ .compatible = "lantiq,nand-xway" },
-	{},
-};
-MODULE_DEVICE_TABLE(of, xway_nand_match);
-
-static struct platform_driver xway_nand_driver = {
-	.probe	= xway_nand_probe,
-	.remove	= xway_nand_remove,
-	.driver	= {
-		.name		= "lantiq,nand-xway",
-		.of_match_table = xway_nand_match,
-	},
-};
-
-module_platform_driver(xway_nand_driver);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 3692dd547879..4ae09fe116a7 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -17,7 +17,7 @@ 
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/mtd/nand_ecc.h>
-#include "nand/sm_common.h"
+#include "nand/rawnand/sm_common.h"
 #include "sm_ftl.h"