
[13/22] nds32: use generic dma_noncoherent_ops

Message ID 20180420080313.18796-14-hch@lst.de
State Not Applicable
Delegated to: David Miller
Series [01/22] dma-debug: move initialization to common code

Commit Message

Christoph Hellwig April 20, 2018, 8:03 a.m. UTC
Switch to the generic noncoherent direct mapping implementation.

This makes sure kmap_atomic_pfn is consistently used for access to
virtual addresses instead of either using the slower plain kmap
or blindly expecting page_address() to work.

This makes sure the cache_sync routines are called in the unmap_sg
case, to match the unmap_single and sync_{single,sg}_to_cpu cases.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/nds32/Kconfig                   |   3 +
 arch/nds32/include/asm/Kbuild        |   1 +
 arch/nds32/include/asm/dma-mapping.h |  14 ---
 arch/nds32/kernel/dma.c              | 182 ++++++---------------------
 4 files changed, 39 insertions(+), 161 deletions(-)
 delete mode 100644 arch/nds32/include/asm/dma-mapping.h
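
For context, dma_noncoherent_ops delegates all cache maintenance to the two
arch hooks selected above.  A minimal sketch of its map_page path, assuming
the lib/dma-noncoherent.c layout from earlier in this series (the exact file
contents may differ slightly):

#include <linux/dma-direct.h>
#include <linux/dma-noncoherent.h>

static dma_addr_t dma_noncoherent_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t addr;

	/* the direct mapping converts page + offset into a bus address */
	addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	/* cache writeback before the device reads is left to the arch hook */
	if (!dma_mapping_error(dev, addr) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(dev, page_to_phys(page) + offset,
				size, dir);
	return addr;
}

The unmap and sync_*_for_cpu paths call arch_sync_dma_for_cpu() the same way,
which is how the unmap_sg case now gets the cache maintenance mentioned in the
commit message.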

Comments

Greentime Hu April 23, 2018, 6:49 a.m. UTC | #1
2018-04-20 16:03 GMT+08:00 Christoph Hellwig <hch@lst.de>:
> Switch to the generic noncoherent direct mapping implementation.
>
> This makes sure kmap_atomic_pfn is consistently used for access to
> virtual addresses instead of either using the slower plain kmap
> or blindly expecting page_address() to work.
>
> This makes sure the cache_sync routines are called in the unmap_sg
> case, to match the unmap_single and sync_{single,sg}_to_cpu cases.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/nds32/Kconfig                   |   3 +
>  arch/nds32/include/asm/Kbuild        |   1 +
>  arch/nds32/include/asm/dma-mapping.h |  14 ---
>  arch/nds32/kernel/dma.c              | 182 ++++++---------------------
>  4 files changed, 39 insertions(+), 161 deletions(-)
>  delete mode 100644 arch/nds32/include/asm/dma-mapping.h
>
> diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
> index 249f38d3388f..67d0ac0a989c 100644
> --- a/arch/nds32/Kconfig
> +++ b/arch/nds32/Kconfig
> @@ -5,10 +5,13 @@
>
>  config NDS32
>          def_bool y
> +       select ARCH_HAS_SYNC_DMA_FOR_CPU
> +       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
>         select ARCH_WANT_FRAME_POINTERS if FTRACE
>         select CLKSRC_MMIO
>         select CLONE_BACKWARDS
>         select COMMON_CLK
> +       select DMA_NONCOHERENT_OPS
>         select GENERIC_ATOMIC64
>         select GENERIC_CPU_DEVICES
>         select GENERIC_CLOCKEVENTS
> diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
> index 06bdf8167f5a..b3e951f805f8 100644
> --- a/arch/nds32/include/asm/Kbuild
> +++ b/arch/nds32/include/asm/Kbuild
> @@ -13,6 +13,7 @@ generic-y += cputime.h
>  generic-y += device.h
>  generic-y += div64.h
>  generic-y += dma.h
> +generic-y += dma-mapping.h
>  generic-y += emergency-restart.h
>  generic-y += errno.h
>  generic-y += exec.h
> diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h
> deleted file mode 100644
> index 2dd47d245c25..000000000000
> --- a/arch/nds32/include/asm/dma-mapping.h
> +++ /dev/null
> @@ -1,14 +0,0 @@
> -// SPDX-License-Identifier: GPL-2.0
> -// Copyright (C) 2005-2017 Andes Technology Corporation
> -
> -#ifndef ASMNDS32_DMA_MAPPING_H
> -#define ASMNDS32_DMA_MAPPING_H
> -
> -extern struct dma_map_ops nds32_dma_ops;
> -
> -static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
> -{
> -       return &nds32_dma_ops;
> -}
> -
> -#endif
> diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
> index d291800fc621..688f1a03dee6 100644
> --- a/arch/nds32/kernel/dma.c
> +++ b/arch/nds32/kernel/dma.c
> @@ -3,17 +3,14 @@
>
>  #include <linux/types.h>
>  #include <linux/mm.h>
> -#include <linux/export.h>
>  #include <linux/string.h>
> -#include <linux/scatterlist.h>
> -#include <linux/dma-mapping.h>
> +#include <linux/dma-noncoherent.h>
>  #include <linux/io.h>
>  #include <linux/cache.h>
>  #include <linux/highmem.h>
>  #include <linux/slab.h>
>  #include <asm/cacheflush.h>
>  #include <asm/tlbflush.h>
> -#include <asm/dma-mapping.h>
>  #include <asm/proc-fns.h>
>
>  /*
> @@ -22,11 +19,6 @@
>  static pte_t *consistent_pte;
>  static DEFINE_RAW_SPINLOCK(consistent_lock);
>
> -enum master_type {
> -       FOR_CPU = 0,
> -       FOR_DEVICE = 1,
> -};
> -
>  /*
>   * VM region handling support.
>   *
> @@ -124,10 +116,8 @@ static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
>         return c;
>  }
>
> -/* FIXME: attrs is not used. */
> -static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
> -                                     dma_addr_t * handle, gfp_t gfp,
> -                                     unsigned long attrs)
> +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
> +               gfp_t gfp, unsigned long attrs)
>  {
>         struct page *page;
>         struct arch_vm_region *c;
> @@ -232,8 +222,8 @@ static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
>         return NULL;
>  }
>
> -static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr,
> -                          dma_addr_t handle, unsigned long attrs)
> +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
> +               dma_addr_t handle, unsigned long attrs)
>  {
>         struct arch_vm_region *c;
>         unsigned long flags, addr;
> @@ -333,145 +323,43 @@ static int __init consistent_init(void)
>  }
>
>  core_initcall(consistent_init);
> -static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
> -static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
> -                                    unsigned long offset, size_t size,
> -                                    enum dma_data_direction dir,
> -                                    unsigned long attrs)
> -{
> -       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -               consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
> -       return page_to_phys(page) + offset;
> -}
> -
> -static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
> -                                size_t size, enum dma_data_direction dir,
> -                                unsigned long attrs)
> -{
> -       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -               consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
> -}
>
> -/*
> - * Make an area consistent for devices.
> - */
> -static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
> +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
> +               size_t size, enum dma_data_direction dir)
>  {
> -       unsigned long start = (unsigned long)vaddr;
> -       unsigned long end = start + size;
> -
> -       if (master_type == FOR_CPU) {
> -               switch (direction) {
> -               case DMA_TO_DEVICE:
> -                       break;
> -               case DMA_FROM_DEVICE:
> -               case DMA_BIDIRECTIONAL:
> -                       cpu_dma_inval_range(start, end);
> -                       break;
> -               default:
> -                       BUG();
> -               }
> -       } else {
> -               /* FOR_DEVICE */
> -               switch (direction) {
> -               case DMA_FROM_DEVICE:
> -                       break;
> -               case DMA_TO_DEVICE:
> -               case DMA_BIDIRECTIONAL:
> -                       cpu_dma_wb_range(start, end);
> -                       break;
> -               default:
> -                       BUG();
> -               }
> +       void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
> +       unsigned long start = (unsigned long)addr;
> +
> +       switch (direction) {
> +       case DMA_FROM_DEVICE:
> +               break;
> +       case DMA_TO_DEVICE:
> +       case DMA_BIDIRECTIONAL:
> +               cpu_dma_wb_range(start, start + size);
> +               break;
> +       default:
> +               BUG();
>         }
> -}
>
> -static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
> -                           int nents, enum dma_data_direction dir,
> -                           unsigned long attrs)
> -{
> -       int i;
> -
> -       for (i = 0; i < nents; i++, sg++) {
> -               void *virt;
> -               unsigned long pfn;
> -               struct page *page = sg_page(sg);
> -
> -               sg->dma_address = sg_phys(sg);
> -               pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
> -               page = pfn_to_page(pfn);
> -               if (PageHighMem(page)) {
> -                       virt = kmap_atomic(page);
> -                       consistent_sync(virt, sg->length, dir, FOR_CPU);
> -                       kunmap_atomic(virt);
> -               } else {
> -                       if (sg->offset > PAGE_SIZE)
> -                               panic("sg->offset:%08x > PAGE_SIZE\n",
> -                                     sg->offset);
> -                       virt = page_address(page) + sg->offset;
> -                       consistent_sync(virt, sg->length, dir, FOR_CPU);
> -               }
> -       }
> -       return nents;
> -}
> -
> -static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
> -                              int nhwentries, enum dma_data_direction dir,
> -                              unsigned long attrs)
> -{
> +       kunmap_atomic(addr);
>  }
>
> -static void
> -nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
> -                             size_t size, enum dma_data_direction dir)
> +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
> +               size_t size, enum dma_data_direction dir)
>  {
> -       consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
> -}
> -
> -static void
> -nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
> -                                size_t size, enum dma_data_direction dir)
> -{
> -       consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
> -}
> -
> -static void
> -nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
> -                         enum dma_data_direction dir)
> -{
> -       int i;
> -
> -       for (i = 0; i < nents; i++, sg++) {
> -               char *virt =
> -                   page_address((struct page *)sg->page_link) + sg->offset;
> -               consistent_sync(virt, sg->length, dir, FOR_CPU);
> +       void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
> +       unsigned long start = (unsigned long)addr;
> +
> +       switch (direction) {
> +       case DMA_TO_DEVICE:
> +               break;
> +       case DMA_FROM_DEVICE:
> +       case DMA_BIDIRECTIONAL:
> +               cpu_dma_inval_range(start, end);
> +               break;
> +       default:
> +               BUG();
>         }

  CC      arch/nds32/kernel/dma.o
arch/nds32/kernel/dma.c: In function 'arch_sync_dma_for_device':
arch/nds32/kernel/dma.c:333:10: error: 'direction' undeclared (first
use in this function)
  switch (direction) {
          ^~~~~~~~~
arch/nds32/kernel/dma.c:333:10: note: each undeclared identifier is
reported only once for each function it appears in
arch/nds32/kernel/dma.c: In function 'arch_sync_dma_for_cpu':
arch/nds32/kernel/dma.c:353:10: error: 'direction' undeclared (first
use in this function)
  switch (direction) {
          ^~~~~~~~~
arch/nds32/kernel/dma.c:358:30: error: 'end' undeclared (first use in
this function)
   cpu_dma_inval_range(start, end);
                              ^~~
make[1]: *** [arch/nds32/kernel/dma.o] Error 1
make: *** [arch/nds32/kernel] Error 2

After fixing this build error, the ftmac100.c driver is broken. I'm not sure
what happened.
Christoph Hellwig April 24, 2018, 7:16 p.m. UTC | #2
Hi Greentime,

thanks for testing the patch!

It looks like nds32 doesn't have a buildbot yet, so this code didn't
even get syntax checking, sorry.

Below are the incremental fixes based on this thread.

Can you check if my tree works if you just revert the
"nds32: use generic dma_noncoherent_ops" commit?

diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index 688f1a03dee6..48018275e7f4 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -330,7 +330,7 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
 	void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
 	unsigned long start = (unsigned long)addr;
 
-	switch (direction) {
+	switch (dir) {
 	case DMA_FROM_DEVICE:
 		break;
 	case DMA_TO_DEVICE:
@@ -350,12 +350,12 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 	void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
 	unsigned long start = (unsigned long)addr;
 
-	switch (direction) {
+	switch (dir) {
 	case DMA_TO_DEVICE:
 		break;
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		cpu_dma_inval_range(start, end);
+		cpu_dma_inval_range(start, start + size);
 		break;
 	default:
 		BUG();
Greentime Hu April 25, 2018, 1:43 a.m. UTC | #3
2018-04-25 3:16 GMT+08:00 Christoph Hellwig <hch@lst.de>:
> Hi Greentime,
>
> thanks for testing the patch!
>
> It looks like nds32 doesn't have a buildbot yet, so this code didn't
> even get syntax checking, sorry.
>
> Below are the incremental fixes based on this thread.
>
> Can you check if my tree works if you just revert the
> "nds32: use generic dma_noncoherent_ops" commit?
>
> diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
> index 688f1a03dee6..48018275e7f4 100644
> --- a/arch/nds32/kernel/dma.c
> +++ b/arch/nds32/kernel/dma.c
> @@ -330,7 +330,7 @@ void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
>         void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
>         unsigned long start = (unsigned long)addr;
>
> -       switch (direction) {
> +       switch (dir) {
>         case DMA_FROM_DEVICE:
>                 break;
>         case DMA_TO_DEVICE:
> @@ -350,12 +350,12 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
>         void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
>         unsigned long start = (unsigned long)addr;
>
> -       switch (direction) {
> +       switch (dir) {
>         case DMA_TO_DEVICE:
>                 break;
>         case DMA_FROM_DEVICE:
>         case DMA_BIDIRECTIONAL:
> -               cpu_dma_inval_range(start, end);
> +               cpu_dma_inval_range(start, start + size);
>                 break;
>         default:
>                 BUG();

Hi Christoph,

The ftmac100 works if I revert this commit.

commit de46b9ba5298aafc47284735a4f21baa8e4ed4b7
Author: Greentime Hu <greentime@andestech.com>
Date:   Wed Apr 25 09:33:51 2018 +0800

    Revert "nds32: use generic dma_noncoherent_ops"

    This reverts commit 0489ce952072e7542456e0d962437062916ce0df.
Christoph Hellwig April 25, 2018, 6:40 a.m. UTC | #4
On Wed, Apr 25, 2018 at 09:43:43AM +0800, Greentime Hu wrote:
> Hi Christoph,
> 
> The ftmac100 works if I revert this commit.

Thanks.  ftmac100 only uses dma_map_page, which in the old nds32 code
is just doing a plain page_address and never kmaps.  Can you apply
the patch below on the tree with the original "nds32: use generic
dma_noncoherent_ops" reverted?  This always just uses page_address,
although that, just like the original code, is broken if you actually
have highmem that needs to be mapped:

---
From 1dc5d1cae4cd7b9ce03d0e2943364ed4cca938d7 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Mon, 16 Apr 2018 19:20:30 +0200
Subject: nds32: use generic dma_noncoherent_ops

Switch to the generic noncoherent direct mapping implementation.

This makes sure the cache_sync routines are called in the unmap_sg
case, to match the unmap_single and sync_{single,sg}_to_cpu cases.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 arch/nds32/Kconfig                   |   3 +
 arch/nds32/include/asm/Kbuild        |   1 +
 arch/nds32/include/asm/dma-mapping.h |  14 ---
 arch/nds32/kernel/dma.c              | 182 +++++----------------------
 4 files changed, 37 insertions(+), 163 deletions(-)
 delete mode 100644 arch/nds32/include/asm/dma-mapping.h

diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 249f38d3388f..67d0ac0a989c 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -5,10 +5,13 @@
 
 config NDS32
         def_bool y
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_WANT_FRAME_POINTERS if FTRACE
 	select CLKSRC_MMIO
 	select CLONE_BACKWARDS
 	select COMMON_CLK
+	select DMA_NONCOHERENT_OPS
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 06bdf8167f5a..b3e951f805f8 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -13,6 +13,7 @@ generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += dma.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h
deleted file mode 100644
index 2dd47d245c25..000000000000
--- a/arch/nds32/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,14 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#ifndef ASMNDS32_DMA_MAPPING_H
-#define ASMNDS32_DMA_MAPPING_H
-
-extern struct dma_map_ops nds32_dma_ops;
-
-static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &nds32_dma_ops;
-}
-
-#endif
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index d291800fc621..78311a1e6fd1 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -3,17 +3,14 @@
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/export.h>
 #include <linux/string.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-#include <asm/dma-mapping.h>
 #include <asm/proc-fns.h>
 
 /*
@@ -22,11 +19,6 @@
 static pte_t *consistent_pte;
 static DEFINE_RAW_SPINLOCK(consistent_lock);
 
-enum master_type {
-	FOR_CPU = 0,
-	FOR_DEVICE = 1,
-};
-
 /*
  * VM region handling support.
  *
@@ -124,10 +116,8 @@ static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
 	return c;
 }
 
-/* FIXME: attrs is not used. */
-static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t * handle, gfp_t gfp,
-				      unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	struct page *page;
 	struct arch_vm_region *c;
@@ -232,8 +222,8 @@ static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
 	return NULL;
 }
 
-static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			   dma_addr_t handle, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, unsigned long attrs)
 {
 	struct arch_vm_region *c;
 	unsigned long flags, addr;
@@ -333,145 +323,39 @@ static int __init consistent_init(void)
 }
 
 core_initcall(consistent_init);
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
-static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
-	return page_to_phys(page) + offset;
-}
 
-static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir,
-				 unsigned long attrs)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
-}
-
-/*
- * Make an area consistent for devices.
- */
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
-{
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
-
-	if (master_type == FOR_CPU) {
-		switch (direction) {
-		case DMA_TO_DEVICE:
-			break;
-		case DMA_FROM_DEVICE:
-		case DMA_BIDIRECTIONAL:
-			cpu_dma_inval_range(start, end);
-			break;
-		default:
-			BUG();
-		}
-	} else {
-		/* FOR_DEVICE */
-		switch (direction) {
-		case DMA_FROM_DEVICE:
-			break;
-		case DMA_TO_DEVICE:
-		case DMA_BIDIRECTIONAL:
-			cpu_dma_wb_range(start, end);
-			break;
-		default:
-			BUG();
-		}
+	void *addr = phys_to_virt(paddr);
+	unsigned long start = (unsigned long)addr;
+
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		break;
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cpu_dma_wb_range(start, start + size);
+		break;
+	default:
+		BUG();
 	}
 }
 
-static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-			    int nents, enum dma_data_direction dir,
-			    unsigned long attrs)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		void *virt;
-		unsigned long pfn;
-		struct page *page = sg_page(sg);
-
-		sg->dma_address = sg_phys(sg);
-		pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
-		page = pfn_to_page(pfn);
-		if (PageHighMem(page)) {
-			virt = kmap_atomic(page);
-			consistent_sync(virt, sg->length, dir, FOR_CPU);
-			kunmap_atomic(virt);
-		} else {
-			if (sg->offset > PAGE_SIZE)
-				panic("sg->offset:%08x > PAGE_SIZE\n",
-				      sg->offset);
-			virt = page_address(page) + sg->offset;
-			consistent_sync(virt, sg->length, dir, FOR_CPU);
-		}
+	void *addr = phys_to_virt(paddr);
+	unsigned long start = (unsigned long)addr;
+
+	switch (dir) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cpu_dma_inval_range(start, start + size);
+		break;
+	default:
+		BUG();
 	}
-	return nents;
 }
-
-static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			       int nhwentries, enum dma_data_direction dir,
-			       unsigned long attrs)
-{
-}
-
-static void
-nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-			      size_t size, enum dma_data_direction dir)
-{
-	consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
-}
-
-static void
-nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir)
-{
-	consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
-}
-
-static void
-nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-			  enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt =
-		    page_address((struct page *)sg->page_link) + sg->offset;
-		consistent_sync(virt, sg->length, dir, FOR_CPU);
-	}
-}
-
-static void
-nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt =
-		    page_address((struct page *)sg->page_link) + sg->offset;
-		consistent_sync(virt, sg->length, dir, FOR_DEVICE);
-	}
-}
-
-struct dma_map_ops nds32_dma_ops = {
-	.alloc = nds32_dma_alloc_coherent,
-	.free = nds32_dma_free,
-	.map_page = nds32_dma_map_page,
-	.unmap_page = nds32_dma_unmap_page,
-	.map_sg = nds32_dma_map_sg,
-	.unmap_sg = nds32_dma_unmap_sg,
-	.sync_single_for_device = nds32_dma_sync_single_for_device,
-	.sync_single_for_cpu = nds32_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = nds32_dma_sync_sg_for_device,
-};
-
-EXPORT_SYMBOL(nds32_dma_ops);
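
To make the page_address()-vs-kmap distinction above concrete: page_address()
only returns a usable virtual address for lowmem pages, while a highmem page
needs a temporary mapping first.  A hedged illustration, not code from either
patch:

#include <linux/highmem.h>
#include <linux/mm.h>

/*
 * Illustration only: obtain a CPU-usable address for cache maintenance.
 * A highmem page has no permanent kernel mapping, so page_address()
 * cannot be used and a temporary kmap_atomic() (or kmap_atomic_pfn())
 * mapping is needed; lowmem pages are always direct-mapped.
 */
static void *vaddr_for_sync(struct page *page)
{
	if (PageHighMem(page))
		return kmap_atomic(page);	/* must be kunmap_atomic()ed */
	return page_address(page);		/* direct-mapped lowmem */
}

This is why the phys_to_virt()-based variant above, like the original nds32
code, is only correct as long as no highmem buffer reaches the DMA API.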
Greentime Hu April 25, 2018, 12:25 p.m. UTC | #5
2018-04-25 14:40 GMT+08:00 Christoph Hellwig <hch@lst.de>:
> On Wed, Apr 25, 2018 at 09:43:43AM +0800, Greentime Hu wrote:
>> Hi Christoph,
>>
>> The ftmac100 works if I revert this commit.
>
> Thanks.  ftmac100 only uses dma_map_page, which in the old nds32 code
> is just doing a plain page_address and never kmaps.  Can you apply
> the patch below on the tree with the original "nds32: use generic
> dma_noncoherent_ops" reverted?  This always just uses page_address,
> although that, just like the original code, is broken if you actually
> have highmem that needs to be mapped:
>

Hi, Christoph,

It still failed.

> ---
> From 1dc5d1cae4cd7b9ce03d0e2943364ed4cca938d7 Mon Sep 17 00:00:00 2001
> From: Christoph Hellwig <hch@lst.de>
> Date: Mon, 16 Apr 2018 19:20:30 +0200
> Subject: nds32: use generic dma_noncoherent_ops
>
> Switch to the generic noncoherent direct mapping implementation.
>
> This makes sure the cache_sync routines are called in the unmap_sg
> case, to match the unmap_single and sync_{single,sg}_to_cpu cases.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>  arch/nds32/Kconfig                   |   3 +
>  arch/nds32/include/asm/Kbuild        |   1 +
>  arch/nds32/include/asm/dma-mapping.h |  14 ---
>  arch/nds32/kernel/dma.c              | 182 +++++----------------------
>  4 files changed, 37 insertions(+), 163 deletions(-)
>  delete mode 100644 arch/nds32/include/asm/dma-mapping.h
>
> diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
> index 249f38d3388f..67d0ac0a989c 100644
> --- a/arch/nds32/Kconfig
> +++ b/arch/nds32/Kconfig
> @@ -5,10 +5,13 @@
>
>  config NDS32
>          def_bool y
> +       select ARCH_HAS_SYNC_DMA_FOR_CPU
> +       select ARCH_HAS_SYNC_DMA_FOR_DEVICE
>         select ARCH_WANT_FRAME_POINTERS if FTRACE
>         select CLKSRC_MMIO
>         select CLONE_BACKWARDS
>         select COMMON_CLK
> +       select DMA_NONCOHERENT_OPS
>         select GENERIC_ATOMIC64
>         select GENERIC_CPU_DEVICES
>         select GENERIC_CLOCKEVENTS
> diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
> index 06bdf8167f5a..b3e951f805f8 100644
> --- a/arch/nds32/include/asm/Kbuild
> +++ b/arch/nds32/include/asm/Kbuild
> @@ -13,6 +13,7 @@ generic-y += cputime.h
>  generic-y += device.h
>  generic-y += div64.h
>  generic-y += dma.h
> +generic-y += dma-mapping.h
>  generic-y += emergency-restart.h
>  generic-y += errno.h
>  generic-y += exec.h
> diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h
> deleted file mode 100644
> index 2dd47d245c25..000000000000
> --- a/arch/nds32/include/asm/dma-mapping.h
> +++ /dev/null
> @@ -1,14 +0,0 @@
> -// SPDX-License-Identifier: GPL-2.0
> -// Copyright (C) 2005-2017 Andes Technology Corporation
> -
> -#ifndef ASMNDS32_DMA_MAPPING_H
> -#define ASMNDS32_DMA_MAPPING_H
> -
> -extern struct dma_map_ops nds32_dma_ops;
> -
> -static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
> -{
> -       return &nds32_dma_ops;
> -}
> -
> -#endif
> diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
> index d291800fc621..78311a1e6fd1 100644
> --- a/arch/nds32/kernel/dma.c
> +++ b/arch/nds32/kernel/dma.c
> @@ -3,17 +3,14 @@
>
>  #include <linux/types.h>
>  #include <linux/mm.h>
> -#include <linux/export.h>
>  #include <linux/string.h>
> -#include <linux/scatterlist.h>
> -#include <linux/dma-mapping.h>
> +#include <linux/dma-noncoherent.h>
>  #include <linux/io.h>
>  #include <linux/cache.h>
>  #include <linux/highmem.h>
>  #include <linux/slab.h>
>  #include <asm/cacheflush.h>
>  #include <asm/tlbflush.h>
> -#include <asm/dma-mapping.h>
>  #include <asm/proc-fns.h>
>
>  /*
> @@ -22,11 +19,6 @@
>  static pte_t *consistent_pte;
>  static DEFINE_RAW_SPINLOCK(consistent_lock);
>
> -enum master_type {
> -       FOR_CPU = 0,
> -       FOR_DEVICE = 1,
> -};
> -
>  /*
>   * VM region handling support.
>   *
> @@ -124,10 +116,8 @@ static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
>         return c;
>  }
>
> -/* FIXME: attrs is not used. */
> -static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
> -                                     dma_addr_t * handle, gfp_t gfp,
> -                                     unsigned long attrs)
> +void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
> +               gfp_t gfp, unsigned long attrs)
>  {
>         struct page *page;
>         struct arch_vm_region *c;
> @@ -232,8 +222,8 @@ static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
>         return NULL;
>  }
>
> -static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr,
> -                          dma_addr_t handle, unsigned long attrs)
> +void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
> +               dma_addr_t handle, unsigned long attrs)
>  {
>         struct arch_vm_region *c;
>         unsigned long flags, addr;
> @@ -333,145 +323,39 @@ static int __init consistent_init(void)
>  }
>
>  core_initcall(consistent_init);
> -static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
> -static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
> -                                    unsigned long offset, size_t size,
> -                                    enum dma_data_direction dir,
> -                                    unsigned long attrs)
> -{
> -       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -               consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
> -       return page_to_phys(page) + offset;
> -}
>
> -static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
> -                                size_t size, enum dma_data_direction dir,
> -                                unsigned long attrs)
> +void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
> +               size_t size, enum dma_data_direction dir)
>  {
> -       if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
> -               consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
> -}
> -
> -/*
> - * Make an area consistent for devices.
> - */
> -static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
> -{
> -       unsigned long start = (unsigned long)vaddr;
> -       unsigned long end = start + size;
> -
> -       if (master_type == FOR_CPU) {
> -               switch (direction) {
> -               case DMA_TO_DEVICE:
> -                       break;
> -               case DMA_FROM_DEVICE:
> -               case DMA_BIDIRECTIONAL:
> -                       cpu_dma_inval_range(start, end);
> -                       break;
> -               default:
> -                       BUG();
> -               }
> -       } else {
> -               /* FOR_DEVICE */
> -               switch (direction) {
> -               case DMA_FROM_DEVICE:
> -                       break;
> -               case DMA_TO_DEVICE:
> -               case DMA_BIDIRECTIONAL:
> -                       cpu_dma_wb_range(start, end);
> -                       break;
> -               default:
> -                       BUG();
> -               }
> +       void *addr = phys_to_virt(paddr);
> +       unsigned long start = (unsigned long)addr;
> +
> +       switch (dir) {
> +       case DMA_FROM_DEVICE:
> +               break;
> +       case DMA_TO_DEVICE:
> +       case DMA_BIDIRECTIONAL:
> +               cpu_dma_wb_range(start, start + size);
> +               break;
> +       default:
> +               BUG();
>         }
>  }
>
> -static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
> -                           int nents, enum dma_data_direction dir,
> -                           unsigned long attrs)
> +void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
> +               size_t size, enum dma_data_direction dir)
>  {
> -       int i;
> -
> -       for (i = 0; i < nents; i++, sg++) {
> -               void *virt;
> -               unsigned long pfn;
> -               struct page *page = sg_page(sg);
> -
> -               sg->dma_address = sg_phys(sg);
> -               pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
> -               page = pfn_to_page(pfn);
> -               if (PageHighMem(page)) {
> -                       virt = kmap_atomic(page);
> -                       consistent_sync(virt, sg->length, dir, FOR_CPU);
> -                       kunmap_atomic(virt);
> -               } else {
> -                       if (sg->offset > PAGE_SIZE)
> -                               panic("sg->offset:%08x > PAGE_SIZE\n",
> -                                     sg->offset);
> -                       virt = page_address(page) + sg->offset;
> -                       consistent_sync(virt, sg->length, dir, FOR_CPU);
> -               }
> +       void *addr = phys_to_virt(paddr);
> +       unsigned long start = (unsigned long)addr;
> +
> +       switch (dir) {
> +       case DMA_TO_DEVICE:
> +               break;
> +       case DMA_FROM_DEVICE:
> +       case DMA_BIDIRECTIONAL:
> +               cpu_dma_inval_range(start, start + size);
> +               break;
> +       default:
> +               BUG();
>         }
> -       return nents;
>  }
> -
> -static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
> -                              int nhwentries, enum dma_data_direction dir,
> -                              unsigned long attrs)
> -{
> -}
> -
> -static void
> -nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
> -                             size_t size, enum dma_data_direction dir)
> -{
> -       consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
> -}
> -
> -static void
> -nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
> -                                size_t size, enum dma_data_direction dir)
> -{
> -       consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
> -}
> -
> -static void
> -nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
> -                         enum dma_data_direction dir)
> -{
> -       int i;
> -
> -       for (i = 0; i < nents; i++, sg++) {
> -               char *virt =
> -                   page_address((struct page *)sg->page_link) + sg->offset;
> -               consistent_sync(virt, sg->length, dir, FOR_CPU);
> -       }
> -}
> -
> -static void
> -nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
> -                            int nents, enum dma_data_direction dir)
> -{
> -       int i;
> -
> -       for (i = 0; i < nents; i++, sg++) {
> -               char *virt =
> -                   page_address((struct page *)sg->page_link) + sg->offset;
> -               consistent_sync(virt, sg->length, dir, FOR_DEVICE);
> -       }
> -}
> -
> -struct dma_map_ops nds32_dma_ops = {
> -       .alloc = nds32_dma_alloc_coherent,
> -       .free = nds32_dma_free,
> -       .map_page = nds32_dma_map_page,
> -       .unmap_page = nds32_dma_unmap_page,
> -       .map_sg = nds32_dma_map_sg,
> -       .unmap_sg = nds32_dma_unmap_sg,
> -       .sync_single_for_device = nds32_dma_sync_single_for_device,
> -       .sync_single_for_cpu = nds32_dma_sync_single_for_cpu,
> -       .sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu,
> -       .sync_sg_for_device = nds32_dma_sync_sg_for_device,
> -};
> -
> -EXPORT_SYMBOL(nds32_dma_ops);
> --
> 2.17.0
>
Christoph Hellwig April 26, 2018, 6:42 a.m. UTC | #6
Can you try this patch on top of either the new or original one?

---
diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
index f4b8532c20ac..a2c192b3508d 100644
--- a/lib/dma-noncoherent.c
+++ b/lib/dma-noncoherent.c
@@ -48,7 +48,7 @@ static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
 	return nents;
 }
 
-#ifdef CONFIG_DMA_NONCOHERENT_SYNC_FOR_CPU
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
 static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
 		dma_addr_t addr, size_t size, enum dma_data_direction dir)
 {
@@ -88,7 +88,7 @@ const struct dma_map_ops dma_noncoherent_ops = {
 	.sync_sg_for_device	= dma_noncoherent_sync_sg_for_device,
 	.map_page		= dma_noncoherent_map_page,
 	.map_sg			= dma_noncoherent_map_sg,
-#ifdef CONFIG_DMA_NONCOHERENT_SYNC_FOR_CPU
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
 	.sync_single_for_cpu	= dma_noncoherent_sync_single_for_cpu,
 	.sync_sg_for_cpu	= dma_noncoherent_sync_sg_for_cpu,
 	.unmap_page		= dma_noncoherent_unmap_page,
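
A hedged reading of why this one-symbol change matters: there is no Kconfig
symbol named DMA_NONCOHERENT_SYNC_FOR_CPU, so the guarded block was always
compiled out and the ops table was registered without its for-CPU callbacks,
roughly like this (illustration based on the hunk above, not the actual file):

const struct dma_map_ops dma_noncoherent_ops = {
	/* alloc/free/sync_*_for_device members elided */
	.map_page		= dma_noncoherent_map_page,
	.map_sg			= dma_noncoherent_map_sg,
	/*
	 * Before the fix, .sync_single_for_cpu, .sync_sg_for_cpu and
	 * .unmap_page were preprocessed away and stayed NULL, so
	 * dma_unmap_page() and dma_sync_*_for_cpu() never reached
	 * arch_sync_dma_for_cpu() and the CPU cache was never invalidated
	 * after the device wrote to memory.
	 */
};

Per the follow-ups below, this alone did not turn out to be the whole story
for ftmac100.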
Greentime Hu April 26, 2018, 8:06 a.m. UTC | #7
2018-04-26 14:42 GMT+08:00 Christoph Hellwig <hch@lst.de>:
> Can you try this patch on top of either the new or original one?
>
> ---
> diff --git a/lib/dma-noncoherent.c b/lib/dma-noncoherent.c
> index f4b8532c20ac..a2c192b3508d 100644
> --- a/lib/dma-noncoherent.c
> +++ b/lib/dma-noncoherent.c
> @@ -48,7 +48,7 @@ static int dma_noncoherent_map_sg(struct device *dev, struct scatterlist *sgl,
>         return nents;
>  }
>
> -#ifdef CONFIG_DMA_NONCOHERENT_SYNC_FOR_CPU
> +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
>  static void dma_noncoherent_sync_single_for_cpu(struct device *dev,
>                 dma_addr_t addr, size_t size, enum dma_data_direction dir)
>  {
> @@ -88,7 +88,7 @@ const struct dma_map_ops dma_noncoherent_ops = {
>         .sync_sg_for_device     = dma_noncoherent_sync_sg_for_device,
>         .map_page               = dma_noncoherent_map_page,
>         .map_sg                 = dma_noncoherent_map_sg,
> -#ifdef CONFIG_DMA_NONCOHERENT_SYNC_FOR_CPU
> +#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
>         .sync_single_for_cpu    = dma_noncoherent_sync_single_for_cpu,
>         .sync_sg_for_cpu        = dma_noncoherent_sync_sg_for_cpu,
>         .unmap_page             = dma_noncoherent_unmap_page,

It works!!!
Christoph Hellwig April 26, 2018, 8:24 a.m. UTC | #8
On Thu, Apr 26, 2018 at 04:06:34PM +0800, Greentime Hu wrote:
> It works!!!

Thanks!

Can you retest the updated tree here with all the fixes and give me
your Tested-by: for the generic and nds32 patches?

git://git.infradead.org/users/hch/misc.git generic-dma-noncoherent
Greentime Hu April 26, 2018, 9:39 a.m. UTC | #9
2018-04-26 16:24 GMT+08:00 Christoph Hellwig <hch@lst.de>:
> On Thu, Apr 26, 2018 at 04:06:34PM +0800, Greentime Hu wrote:
>> It works!!!
>
> Thanks!
>
> Can you retest the updated tree here with all the fixes and give me
> your Tested-by: for the generic and nds32 patches?
>
> git://git.infradead.org/users/hch/misc.git generic-dma-noncoherent

Sorry Christoph. My previous mail saying it works was wrong, because I
used the wrong vmlinux.
It still failed. >,<

Patch

diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 249f38d3388f..67d0ac0a989c 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -5,10 +5,13 @@ 
 
 config NDS32
         def_bool y
+	select ARCH_HAS_SYNC_DMA_FOR_CPU
+	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_WANT_FRAME_POINTERS if FTRACE
 	select CLKSRC_MMIO
 	select CLONE_BACKWARDS
 	select COMMON_CLK
+	select DMA_NONCOHERENT_OPS
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
diff --git a/arch/nds32/include/asm/Kbuild b/arch/nds32/include/asm/Kbuild
index 06bdf8167f5a..b3e951f805f8 100644
--- a/arch/nds32/include/asm/Kbuild
+++ b/arch/nds32/include/asm/Kbuild
@@ -13,6 +13,7 @@  generic-y += cputime.h
 generic-y += device.h
 generic-y += div64.h
 generic-y += dma.h
+generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += errno.h
 generic-y += exec.h
diff --git a/arch/nds32/include/asm/dma-mapping.h b/arch/nds32/include/asm/dma-mapping.h
deleted file mode 100644
index 2dd47d245c25..000000000000
--- a/arch/nds32/include/asm/dma-mapping.h
+++ /dev/null
@@ -1,14 +0,0 @@ 
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2005-2017 Andes Technology Corporation
-
-#ifndef ASMNDS32_DMA_MAPPING_H
-#define ASMNDS32_DMA_MAPPING_H
-
-extern struct dma_map_ops nds32_dma_ops;
-
-static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
-{
-	return &nds32_dma_ops;
-}
-
-#endif
diff --git a/arch/nds32/kernel/dma.c b/arch/nds32/kernel/dma.c
index d291800fc621..688f1a03dee6 100644
--- a/arch/nds32/kernel/dma.c
+++ b/arch/nds32/kernel/dma.c
@@ -3,17 +3,14 @@ 
 
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <linux/export.h>
 #include <linux/string.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/io.h>
 #include <linux/cache.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
-#include <asm/dma-mapping.h>
 #include <asm/proc-fns.h>
 
 /*
@@ -22,11 +19,6 @@ 
 static pte_t *consistent_pte;
 static DEFINE_RAW_SPINLOCK(consistent_lock);
 
-enum master_type {
-	FOR_CPU = 0,
-	FOR_DEVICE = 1,
-};
-
 /*
  * VM region handling support.
  *
@@ -124,10 +116,8 @@  static struct arch_vm_region *vm_region_find(struct arch_vm_region *head,
 	return c;
 }
 
-/* FIXME: attrs is not used. */
-static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t * handle, gfp_t gfp,
-				      unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	struct page *page;
 	struct arch_vm_region *c;
@@ -232,8 +222,8 @@  static void *nds32_dma_alloc_coherent(struct device *dev, size_t size,
 	return NULL;
 }
 
-static void nds32_dma_free(struct device *dev, size_t size, void *cpu_addr,
-			   dma_addr_t handle, unsigned long attrs)
+void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
+		dma_addr_t handle, unsigned long attrs)
 {
 	struct arch_vm_region *c;
 	unsigned long flags, addr;
@@ -333,145 +323,43 @@  static int __init consistent_init(void)
 }
 
 core_initcall(consistent_init);
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type);
-static dma_addr_t nds32_dma_map_page(struct device *dev, struct page *page,
-				     unsigned long offset, size_t size,
-				     enum dma_data_direction dir,
-				     unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		consistent_sync((void *)(page_address(page) + offset), size, dir, FOR_DEVICE);
-	return page_to_phys(page) + offset;
-}
-
-static void nds32_dma_unmap_page(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir,
-				 unsigned long attrs)
-{
-	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		consistent_sync(phys_to_virt(handle), size, dir, FOR_CPU);
-}
 
-/*
- * Make an area consistent for devices.
- */
-static void consistent_sync(void *vaddr, size_t size, int direction, int master_type)
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	unsigned long start = (unsigned long)vaddr;
-	unsigned long end = start + size;
-
-	if (master_type == FOR_CPU) {
-		switch (direction) {
-		case DMA_TO_DEVICE:
-			break;
-		case DMA_FROM_DEVICE:
-		case DMA_BIDIRECTIONAL:
-			cpu_dma_inval_range(start, end);
-			break;
-		default:
-			BUG();
-		}
-	} else {
-		/* FOR_DEVICE */
-		switch (direction) {
-		case DMA_FROM_DEVICE:
-			break;
-		case DMA_TO_DEVICE:
-		case DMA_BIDIRECTIONAL:
-			cpu_dma_wb_range(start, end);
-			break;
-		default:
-			BUG();
-		}
+	void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
+	unsigned long start = (unsigned long)addr;
+
+	switch (direction) {
+	case DMA_FROM_DEVICE:
+		break;
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cpu_dma_wb_range(start, start + size);
+		break;
+	default:
+		BUG();
 	}
-}
 
-static int nds32_dma_map_sg(struct device *dev, struct scatterlist *sg,
-			    int nents, enum dma_data_direction dir,
-			    unsigned long attrs)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		void *virt;
-		unsigned long pfn;
-		struct page *page = sg_page(sg);
-
-		sg->dma_address = sg_phys(sg);
-		pfn = page_to_pfn(page) + sg->offset / PAGE_SIZE;
-		page = pfn_to_page(pfn);
-		if (PageHighMem(page)) {
-			virt = kmap_atomic(page);
-			consistent_sync(virt, sg->length, dir, FOR_CPU);
-			kunmap_atomic(virt);
-		} else {
-			if (sg->offset > PAGE_SIZE)
-				panic("sg->offset:%08x > PAGE_SIZE\n",
-				      sg->offset);
-			virt = page_address(page) + sg->offset;
-			consistent_sync(virt, sg->length, dir, FOR_CPU);
-		}
-	}
-	return nents;
-}
-
-static void nds32_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-			       int nhwentries, enum dma_data_direction dir,
-			       unsigned long attrs)
-{
+	kunmap_atomic(addr);
 }
 
-static void
-nds32_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
-			      size_t size, enum dma_data_direction dir)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_CPU);
-}
-
-static void
-nds32_dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
-				 size_t size, enum dma_data_direction dir)
-{
-	consistent_sync((void *)phys_to_virt(handle), size, dir, FOR_DEVICE);
-}
-
-static void
-nds32_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-			  enum dma_data_direction dir)
-{
-	int i;
-
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt =
-		    page_address((struct page *)sg->page_link) + sg->offset;
-		consistent_sync(virt, sg->length, dir, FOR_CPU);
+	void *addr = kmap_atomic_pfn(PHYS_PFN(paddr));
+	unsigned long start = (unsigned long)addr;
+
+	switch (direction) {
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_FROM_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		cpu_dma_inval_range(start, end);
+		break;
+	default:
+		BUG();
 	}
-}
-
-static void
-nds32_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
-			     int nents, enum dma_data_direction dir)
-{
-	int i;
 
-	for (i = 0; i < nents; i++, sg++) {
-		char *virt =
-		    page_address((struct page *)sg->page_link) + sg->offset;
-		consistent_sync(virt, sg->length, dir, FOR_DEVICE);
-	}
+	kunmap_atomic(addr);
 }
-
-struct dma_map_ops nds32_dma_ops = {
-	.alloc = nds32_dma_alloc_coherent,
-	.free = nds32_dma_free,
-	.map_page = nds32_dma_map_page,
-	.unmap_page = nds32_dma_unmap_page,
-	.map_sg = nds32_dma_map_sg,
-	.unmap_sg = nds32_dma_unmap_sg,
-	.sync_single_for_device = nds32_dma_sync_single_for_device,
-	.sync_single_for_cpu = nds32_dma_sync_single_for_cpu,
-	.sync_sg_for_cpu = nds32_dma_sync_sg_for_cpu,
-	.sync_sg_for_device = nds32_dma_sync_sg_for_device,
-};
-
-EXPORT_SYMBOL(nds32_dma_ops);