Patchwork [RFC,03/11] ppc: Create ops to choose between direct window and iommu based on device mask

login
register
mail settings
Submitter Nishanth Aravamudan
Date Oct. 8, 2010, 5:33 p.m.
Message ID <1286559192-10898-4-git-send-email-nacc@us.ibm.com>
Download mbox | patch
Permalink /patch/67244/
State Superseded
Headers show

Comments

Nishanth Aravamudan - Oct. 8, 2010, 5:33 p.m.
Also allow the coherent ops to be iommu if only the coherent mask is too
small, mostly for drivers that do not set the coherent mask but also
don't use the coherent api.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
---
 arch/powerpc/include/asm/dma-mapping.h |    2 +
 arch/powerpc/kernel/Makefile           |    2 +-
 arch/powerpc/kernel/dma-choose64.c     |  167 ++++++++++++++++++++++++++++++++
 3 files changed, 170 insertions(+), 1 deletions(-)
 create mode 100644 arch/powerpc/kernel/dma-choose64.c
Benjamin Herrenschmidt - Oct. 8, 2010, 11:43 p.m.
On Fri, 2010-10-08 at 10:33 -0700, Nishanth Aravamudan wrote:
> Also allow the coherent ops to be iommu if only the coherent mask is too
> small, mostly for driver that do not set set the coherent mask but also
> don't use the coherent api.

You are doing the transition at map_sg time which is a hot path, I don't
like that. Also you add all those "choose" variants of the dma ops...
not very nice at all.

You may want to look at the patches I posted to the list a while back
for doing direct DMA on Bimini:

> Signed-off-by: Milton Miller <miltonm@bga.com>
> Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
> ---
>  arch/powerpc/include/asm/dma-mapping.h |    2 +
>  arch/powerpc/kernel/Makefile           |    2 +-
>  arch/powerpc/kernel/dma-choose64.c     |  167 ++++++++++++++++++++++++++++++++
>  3 files changed, 170 insertions(+), 1 deletions(-)
>  create mode 100644 arch/powerpc/kernel/dma-choose64.c
> 
> diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
> index 644103a..9ffb16a 100644
> --- a/arch/powerpc/include/asm/dma-mapping.h
> +++ b/arch/powerpc/include/asm/dma-mapping.h
> @@ -68,6 +68,8 @@ static inline unsigned long device_to_mask(struct device *dev)
>   */
>  #ifdef CONFIG_PPC64
>  extern struct dma_map_ops dma_iommu_ops;
> +extern struct dma_map_ops dma_choose64_ops;
> +extern struct dma_map_ops dma_iommu_coherent_ops;
>  #endif
>  extern struct dma_map_ops dma_direct_ops;
>  
> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
> index 1dda701..21b8ea1 100644
> --- a/arch/powerpc/kernel/Makefile
> +++ b/arch/powerpc/kernel/Makefile
> @@ -82,7 +82,7 @@ obj-y				+= time.o prom.o traps.o setup-common.o \
>  				   udbg.o misc.o io.o dma.o \
>  				   misc_$(CONFIG_WORD_SIZE).o
>  obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
> -obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
> +obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o dma-choose64.o
>  obj-$(CONFIG_KGDB)		+= kgdb.o
>  obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE)	+= prom_init.o
>  obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
> diff --git a/arch/powerpc/kernel/dma-choose64.c b/arch/powerpc/kernel/dma-choose64.c
> new file mode 100644
> index 0000000..17c716f
> --- /dev/null
> +++ b/arch/powerpc/kernel/dma-choose64.c
> @@ -0,0 +1,167 @@
> +/*
> + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
> + *
> + * Provide default implementations of the DMA mapping callbacks for
> + * directly mapped busses.
> + */
> +
> +#include <linux/device.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/bug.h>
> +
> +/*
> + * DMA operations that choose between a 64-bit direct mapping and and iommu
> + *
> + * This set of dma ops chooses between directing to a static 1:1 mapping
> + * that may require a 64 bit address and a iommu based on the declared
> + * streaming and coherent masks for the device.  The choice is made on
> + * the first dma map call.
> + */
> +
> +/* first BUG ops for calls out of sequence */
> +
> +void *dma_bug_alloc_coherent(struct device *dev, size_t size,
> +				dma_addr_t *dma_handle, gfp_t flag)
> +{
> +	BUG();
> +
> +	return NULL;
> +}
> +
> +void dma_bug_free_coherent(struct device *dev, size_t size,
> +			      void *vaddr, dma_addr_t dma_handle)
> +{
> +	BUG();
> +}
> +
> +static int dma_bug_dma_supported(struct device *dev, u64 mask)
> +{
> +	BUG();
> +
> +	return 0;
> +}
> +
> +static int dma_bug_map_sg(struct device *dev, struct scatterlist *sgl,
> +			     int nents, enum dma_data_direction direction,
> +			     struct dma_attrs *attrs)
> +{
> +	BUG();
> +
> +	return 0;
> +}
> +
> +
> +static void dma_bug_unmap_sg(struct device *dev, struct scatterlist *sg,
> +				int nents, enum dma_data_direction direction,
> +				struct dma_attrs *attrs)
> +{
> +	BUG();
> +}
> +
> +static dma_addr_t dma_bug_map_page(struct device *dev,
> +					     struct page *page,
> +					     unsigned long offset,
> +					     size_t size,
> +					     enum dma_data_direction dir,
> +					     struct dma_attrs *attrs)
> +{
> +	BUG();
> +
> +	return DMA_ERROR_CODE;
> +}
> +
> +
> +static void dma_bug_unmap_page(struct device *dev,
> +					 dma_addr_t dma_address,
> +					 size_t size,
> +					 enum dma_data_direction direction,
> +					 struct dma_attrs *attrs)
> +{
> +	BUG();
> +}
> +
> +
> +static struct dma_map_ops *choose(struct device *dev)
> +{
> +	if (dma_direct_ops.dma_supported(dev, device_to_mask(dev))) {
> +		if (dma_direct_ops.dma_supported(dev, dev->coherent_dma_mask))
> +			return &dma_direct_ops;
> +		return &dma_iommu_coherent_ops;
> +	}
> +	return &dma_iommu_ops;
> +}
> +
> +void *dma_choose64_alloc_coherent(struct device *dev, size_t size,
> +				dma_addr_t *dma_handle, gfp_t flag)
> +{
> +	struct dma_map_ops *new = choose(dev);
> +
> +	set_dma_ops(dev, new);
> +	return new->alloc_coherent(dev, size, dma_handle, flag);
> +}
> +
> +static int dma_choose64_map_sg(struct device *dev, struct scatterlist *sgl,
> +			     int nents, enum dma_data_direction direction,
> +			     struct dma_attrs *attrs)
> +{
> +	struct dma_map_ops *new = choose(dev);
> +
> +	set_dma_ops(dev, new);
> +	return new->map_sg(dev, sgl, nents, direction, attrs);
> +}
> +
> +
> +static int dma_choose64_dma_supported(struct device *dev, u64 mask)
> +{
> +	return dma_direct_ops.dma_supported(dev, mask) ||
> +		dma_iommu_ops.dma_supported(dev, mask);
> +}
> +
> +static dma_addr_t dma_choose64_map_page(struct device *dev,
> +					     struct page *page,
> +					     unsigned long offset,
> +					     size_t size,
> +					     enum dma_data_direction dir,
> +					     struct dma_attrs *attrs)
> +{
> +	struct dma_map_ops *new = choose(dev);
> +
> +	set_dma_ops(dev, new);
> +	return new->map_page(dev, page, offset, size, dir, attrs);
> +}
> +
> +struct dma_map_ops dma_choose64_ops = {
> +	.alloc_coherent	= dma_choose64_alloc_coherent,
> +	.free_coherent	= dma_bug_free_coherent,
> +	.map_sg		= dma_choose64_map_sg,
> +	.unmap_sg	= dma_bug_unmap_sg,
> +	.dma_supported	= dma_choose64_dma_supported,
> +	.map_page	= dma_choose64_map_page,
> +	.unmap_page	= dma_bug_unmap_page,
> +};
> +EXPORT_SYMBOL(dma_choose64_ops);
> +
> +/* set these up to BUG() until we initialze them in the arch initcall below */
> +struct dma_map_ops dma_iommu_coherent_ops = {
> +	.alloc_coherent	= dma_bug_alloc_coherent,
> +	.free_coherent	= dma_bug_free_coherent,
> +	.map_sg		= dma_bug_map_sg,
> +	.unmap_sg	= dma_bug_unmap_sg,
> +	.dma_supported	= dma_bug_dma_supported,
> +	.map_page	= dma_bug_map_page,
> +	.unmap_page	= dma_bug_unmap_page,
> +};
> +EXPORT_SYMBOL(dma_iommu_coherent_ops);
> +
> +static int setup_choose64_ops(void)
> +{
> +	dma_iommu_coherent_ops = dma_direct_ops;
> +	dma_iommu_coherent_ops.alloc_coherent = dma_iommu_ops.alloc_coherent;
> +	dma_iommu_coherent_ops.free_coherent = dma_iommu_ops.free_coherent;
> +
> +	/* should we be stricter? */
> +	dma_iommu_coherent_ops.dma_supported = dma_choose64_dma_supported;
> +
> +	return 0;
> +}
> +arch_initcall(setup_choose64_ops);
Benjamin Herrenschmidt - Oct. 8, 2010, 11:44 p.m.
On Fri, 2010-10-08 at 10:33 -0700, Nishanth Aravamudan wrote:
> Also allow the coherent ops to be iommu if only the coherent mask is too
> small, mostly for driver that do not set set the coherent mask but also
> don't use the coherent api.

You are doing the transition at map_sg time which is a hot path, I don't
like that. Also you add all those "choose" variants of the dma ops...
not very nice at all.

You may want to look at the patches I posted to the list a while back
for doing direct DMA on Bimini:

[PATCH 1/2] powerpc/dma: Add optional platform override of dma_set_mask()
[PATCH 2/2] powerpc/dart_iommu: Support for 64-bit iommu bypass window on PCIe

Cheers,
Ben.

> Signed-off-by: Milton Miller <miltonm@bga.com>
> Signed-off-by: Nishanth Aravamudan <nacc@us.ibm.com>
> ---
>  arch/powerpc/include/asm/dma-mapping.h |    2 +
>  arch/powerpc/kernel/Makefile           |    2 +-
>  arch/powerpc/kernel/dma-choose64.c     |  167 ++++++++++++++++++++++++++++++++
>  3 files changed, 170 insertions(+), 1 deletions(-)
>  create mode 100644 arch/powerpc/kernel/dma-choose64.c
> 
> diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
> index 644103a..9ffb16a 100644
> --- a/arch/powerpc/include/asm/dma-mapping.h
> +++ b/arch/powerpc/include/asm/dma-mapping.h
> @@ -68,6 +68,8 @@ static inline unsigned long device_to_mask(struct device *dev)
>   */
>  #ifdef CONFIG_PPC64
>  extern struct dma_map_ops dma_iommu_ops;
> +extern struct dma_map_ops dma_choose64_ops;
> +extern struct dma_map_ops dma_iommu_coherent_ops;
>  #endif
>  extern struct dma_map_ops dma_direct_ops;
>  
> diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
> index 1dda701..21b8ea1 100644
> --- a/arch/powerpc/kernel/Makefile
> +++ b/arch/powerpc/kernel/Makefile
> @@ -82,7 +82,7 @@ obj-y				+= time.o prom.o traps.o setup-common.o \
>  				   udbg.o misc.o io.o dma.o \
>  				   misc_$(CONFIG_WORD_SIZE).o
>  obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
> -obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
> +obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o dma-choose64.o
>  obj-$(CONFIG_KGDB)		+= kgdb.o
>  obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE)	+= prom_init.o
>  obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
> diff --git a/arch/powerpc/kernel/dma-choose64.c b/arch/powerpc/kernel/dma-choose64.c
> new file mode 100644
> index 0000000..17c716f
> --- /dev/null
> +++ b/arch/powerpc/kernel/dma-choose64.c
> @@ -0,0 +1,167 @@
> +/*
> + * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
> + *
> + * Provide default implementations of the DMA mapping callbacks for
> + * directly mapped busses.
> + */
> +
> +#include <linux/device.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/bug.h>
> +
> +/*
> + * DMA operations that choose between a 64-bit direct mapping and and iommu
> + *
> + * This set of dma ops chooses between directing to a static 1:1 mapping
> + * that may require a 64 bit address and a iommu based on the declared
> + * streaming and coherent masks for the device.  The choice is made on
> + * the first dma map call.
> + */
> +
> +/* first BUG ops for calls out of sequence */
> +
> +void *dma_bug_alloc_coherent(struct device *dev, size_t size,
> +				dma_addr_t *dma_handle, gfp_t flag)
> +{
> +	BUG();
> +
> +	return NULL;
> +}
> +
> +void dma_bug_free_coherent(struct device *dev, size_t size,
> +			      void *vaddr, dma_addr_t dma_handle)
> +{
> +	BUG();
> +}
> +
> +static int dma_bug_dma_supported(struct device *dev, u64 mask)
> +{
> +	BUG();
> +
> +	return 0;
> +}
> +
> +static int dma_bug_map_sg(struct device *dev, struct scatterlist *sgl,
> +			     int nents, enum dma_data_direction direction,
> +			     struct dma_attrs *attrs)
> +{
> +	BUG();
> +
> +	return 0;
> +}
> +
> +
> +static void dma_bug_unmap_sg(struct device *dev, struct scatterlist *sg,
> +				int nents, enum dma_data_direction direction,
> +				struct dma_attrs *attrs)
> +{
> +	BUG();
> +}
> +
> +static dma_addr_t dma_bug_map_page(struct device *dev,
> +					     struct page *page,
> +					     unsigned long offset,
> +					     size_t size,
> +					     enum dma_data_direction dir,
> +					     struct dma_attrs *attrs)
> +{
> +	BUG();
> +
> +	return DMA_ERROR_CODE;
> +}
> +
> +
> +static void dma_bug_unmap_page(struct device *dev,
> +					 dma_addr_t dma_address,
> +					 size_t size,
> +					 enum dma_data_direction direction,
> +					 struct dma_attrs *attrs)
> +{
> +	BUG();
> +}
> +
> +
> +static struct dma_map_ops *choose(struct device *dev)
> +{
> +	if (dma_direct_ops.dma_supported(dev, device_to_mask(dev))) {
> +		if (dma_direct_ops.dma_supported(dev, dev->coherent_dma_mask))
> +			return &dma_direct_ops;
> +		return &dma_iommu_coherent_ops;
> +	}
> +	return &dma_iommu_ops;
> +}
> +
> +void *dma_choose64_alloc_coherent(struct device *dev, size_t size,
> +				dma_addr_t *dma_handle, gfp_t flag)
> +{
> +	struct dma_map_ops *new = choose(dev);
> +
> +	set_dma_ops(dev, new);
> +	return new->alloc_coherent(dev, size, dma_handle, flag);
> +}
> +
> +static int dma_choose64_map_sg(struct device *dev, struct scatterlist *sgl,
> +			     int nents, enum dma_data_direction direction,
> +			     struct dma_attrs *attrs)
> +{
> +	struct dma_map_ops *new = choose(dev);
> +
> +	set_dma_ops(dev, new);
> +	return new->map_sg(dev, sgl, nents, direction, attrs);
> +}
> +
> +
> +static int dma_choose64_dma_supported(struct device *dev, u64 mask)
> +{
> +	return dma_direct_ops.dma_supported(dev, mask) ||
> +		dma_iommu_ops.dma_supported(dev, mask);
> +}
> +
> +static dma_addr_t dma_choose64_map_page(struct device *dev,
> +					     struct page *page,
> +					     unsigned long offset,
> +					     size_t size,
> +					     enum dma_data_direction dir,
> +					     struct dma_attrs *attrs)
> +{
> +	struct dma_map_ops *new = choose(dev);
> +
> +	set_dma_ops(dev, new);
> +	return new->map_page(dev, page, offset, size, dir, attrs);
> +}
> +
> +struct dma_map_ops dma_choose64_ops = {
> +	.alloc_coherent	= dma_choose64_alloc_coherent,
> +	.free_coherent	= dma_bug_free_coherent,
> +	.map_sg		= dma_choose64_map_sg,
> +	.unmap_sg	= dma_bug_unmap_sg,
> +	.dma_supported	= dma_choose64_dma_supported,
> +	.map_page	= dma_choose64_map_page,
> +	.unmap_page	= dma_bug_unmap_page,
> +};
> +EXPORT_SYMBOL(dma_choose64_ops);
> +
> +/* set these up to BUG() until we initialze them in the arch initcall below */
> +struct dma_map_ops dma_iommu_coherent_ops = {
> +	.alloc_coherent	= dma_bug_alloc_coherent,
> +	.free_coherent	= dma_bug_free_coherent,
> +	.map_sg		= dma_bug_map_sg,
> +	.unmap_sg	= dma_bug_unmap_sg,
> +	.dma_supported	= dma_bug_dma_supported,
> +	.map_page	= dma_bug_map_page,
> +	.unmap_page	= dma_bug_unmap_page,
> +};
> +EXPORT_SYMBOL(dma_iommu_coherent_ops);
> +
> +static int setup_choose64_ops(void)
> +{
> +	dma_iommu_coherent_ops = dma_direct_ops;
> +	dma_iommu_coherent_ops.alloc_coherent = dma_iommu_ops.alloc_coherent;
> +	dma_iommu_coherent_ops.free_coherent = dma_iommu_ops.free_coherent;
> +
> +	/* should we be stricter? */
> +	dma_iommu_coherent_ops.dma_supported = dma_choose64_dma_supported;
> +
> +	return 0;
> +}
> +arch_initcall(setup_choose64_ops);
FUJITA Tomonori - Oct. 10, 2010, 3:09 p.m.
On Sat, 09 Oct 2010 10:44:53 +1100
Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:

> On Fri, 2010-10-08 at 10:33 -0700, Nishanth Aravamudan wrote:
> > Also allow the coherent ops to be iommu if only the coherent mask is too
> > small, mostly for driver that do not set set the coherent mask but also
> > don't use the coherent api.
> 
> You are doing the transition at map_sg time which is a hot path, I don't
> like that. Also you add all those "choose" variants of the dma ops...
> not very nice at all.

Agreed, looks hacky.


> You may want to look at the patches I posted to the list a while back
> for doing direct DMA on Bimini:
> 
> [PATCH 1/2] powerpc/dma: Add optional platform override of dma_set_mask()

Would it be cleaner if each ppc dma_map_ops had its own set_dma_mask
and dma_set_mask simply called dma_map_ops->set_dma_mask?


> [PATCH 2/2] powerpc/dart_iommu: Support for 64-bit iommu bypass window on PCIe
Benjamin Herrenschmidt - Oct. 10, 2010, 11:41 p.m.
On Mon, 2010-10-11 at 00:09 +0900, FUJITA Tomonori wrote:
> On Sat, 09 Oct 2010 10:44:53 +1100
> Benjamin Herrenschmidt <benh@kernel.crashing.org> wrote:
> 
> > On Fri, 2010-10-08 at 10:33 -0700, Nishanth Aravamudan wrote:
> > > Also allow the coherent ops to be iommu if only the coherent mask is too
> > > small, mostly for driver that do not set set the coherent mask but also
> > > don't use the coherent api.
> > 
> > You are doing the transition at map_sg time which is a hot path, I don't
> > like that. Also you add all those "choose" variants of the dma ops...
> > not very nice at all.
> 
> Agreed, looks hacky.
> 
> 
> > You may want to look at the patches I posted to the list a while back
> > for doing direct DMA on Bimini:
> > 
> > [PATCH 1/2] powerpc/dma: Add optional platform override of dma_set_mask()
> 
> Would it be cleaner if each ppc dma_map_ops has the own set_dma_mask
> and dma_set_mask simply calls dma_map_ops->set_dma_mask?

I'm not sure I parse what you wrote above :-)

I did try with various methods back then, and what ended up sucking the
least was basically to hook up dma_set_mask() at the arch level.

In fact, it makes sense to the extent that the arch is the one that
knows that there are multiple regions configured potentially with
different capabilities.

You can still do the switch within the dma_ops->set_dma_mask if you want
I suppose, especially if you end up hitting different attribute regions
within a single bus or such but from my experience, it gets really hacky
with multiple ops structures etc...
> 
> > [PATCH 2/2] powerpc/dart_iommu: Support for 64-bit iommu bypass window on PCIe
> 

Cheers,
Ben.

Patch

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 644103a..9ffb16a 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -68,6 +68,8 @@  static inline unsigned long device_to_mask(struct device *dev)
  */
 #ifdef CONFIG_PPC64
 extern struct dma_map_ops dma_iommu_ops;
+extern struct dma_map_ops dma_choose64_ops;
+extern struct dma_map_ops dma_iommu_coherent_ops;
 #endif
 extern struct dma_map_ops dma_direct_ops;
 
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 1dda701..21b8ea1 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -82,7 +82,7 @@  obj-y				+= time.o prom.o traps.o setup-common.o \
 				   udbg.o misc.o io.o dma.o \
 				   misc_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o
-obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o
+obj-$(CONFIG_PPC64)		+= dma-iommu.o iommu.o dma-choose64.o
 obj-$(CONFIG_KGDB)		+= kgdb.o
 obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE)	+= prom_init.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
diff --git a/arch/powerpc/kernel/dma-choose64.c b/arch/powerpc/kernel/dma-choose64.c
new file mode 100644
index 0000000..17c716f
--- /dev/null
+++ b/arch/powerpc/kernel/dma-choose64.c
@@ -0,0 +1,167 @@ 
+/*
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/bug.h>
+
+/*
+ * DMA operations that choose between a 64-bit direct mapping and and iommu
+ *
+ * This set of dma ops chooses between directing to a static 1:1 mapping
+ * that may require a 64 bit address and a iommu based on the declared
+ * streaming and coherent masks for the device.  The choice is made on
+ * the first dma map call.
+ */
+
+/* first BUG ops for calls out of sequence */
+
+void *dma_bug_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag)
+{
+	BUG();
+
+	return NULL;
+}
+
+void dma_bug_free_coherent(struct device *dev, size_t size,
+			      void *vaddr, dma_addr_t dma_handle)
+{
+	BUG();
+}
+
+static int dma_bug_dma_supported(struct device *dev, u64 mask)
+{
+	BUG();
+
+	return 0;
+}
+
+static int dma_bug_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, enum dma_data_direction direction,
+			     struct dma_attrs *attrs)
+{
+	BUG();
+
+	return 0;
+}
+
+
+static void dma_bug_unmap_sg(struct device *dev, struct scatterlist *sg,
+				int nents, enum dma_data_direction direction,
+				struct dma_attrs *attrs)
+{
+	BUG();
+}
+
+static dma_addr_t dma_bug_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset,
+					     size_t size,
+					     enum dma_data_direction dir,
+					     struct dma_attrs *attrs)
+{
+	BUG();
+
+	return DMA_ERROR_CODE;
+}
+
+
+static void dma_bug_unmap_page(struct device *dev,
+					 dma_addr_t dma_address,
+					 size_t size,
+					 enum dma_data_direction direction,
+					 struct dma_attrs *attrs)
+{
+	BUG();
+}
+
+
+static struct dma_map_ops *choose(struct device *dev)
+{
+	if (dma_direct_ops.dma_supported(dev, device_to_mask(dev))) {
+		if (dma_direct_ops.dma_supported(dev, dev->coherent_dma_mask))
+			return &dma_direct_ops;
+		return &dma_iommu_coherent_ops;
+	}
+	return &dma_iommu_ops;
+}
+
+void *dma_choose64_alloc_coherent(struct device *dev, size_t size,
+				dma_addr_t *dma_handle, gfp_t flag)
+{
+	struct dma_map_ops *new = choose(dev);
+
+	set_dma_ops(dev, new);
+	return new->alloc_coherent(dev, size, dma_handle, flag);
+}
+
+static int dma_choose64_map_sg(struct device *dev, struct scatterlist *sgl,
+			     int nents, enum dma_data_direction direction,
+			     struct dma_attrs *attrs)
+{
+	struct dma_map_ops *new = choose(dev);
+
+	set_dma_ops(dev, new);
+	return new->map_sg(dev, sgl, nents, direction, attrs);
+}
+
+
+static int dma_choose64_dma_supported(struct device *dev, u64 mask)
+{
+	return dma_direct_ops.dma_supported(dev, mask) ||
+		dma_iommu_ops.dma_supported(dev, mask);
+}
+
+static dma_addr_t dma_choose64_map_page(struct device *dev,
+					     struct page *page,
+					     unsigned long offset,
+					     size_t size,
+					     enum dma_data_direction dir,
+					     struct dma_attrs *attrs)
+{
+	struct dma_map_ops *new = choose(dev);
+
+	set_dma_ops(dev, new);
+	return new->map_page(dev, page, offset, size, dir, attrs);
+}
+
+struct dma_map_ops dma_choose64_ops = {
+	.alloc_coherent	= dma_choose64_alloc_coherent,
+	.free_coherent	= dma_bug_free_coherent,
+	.map_sg		= dma_choose64_map_sg,
+	.unmap_sg	= dma_bug_unmap_sg,
+	.dma_supported	= dma_choose64_dma_supported,
+	.map_page	= dma_choose64_map_page,
+	.unmap_page	= dma_bug_unmap_page,
+};
+EXPORT_SYMBOL(dma_choose64_ops);
+
+/* set these up to BUG() until we initialze them in the arch initcall below */
+struct dma_map_ops dma_iommu_coherent_ops = {
+	.alloc_coherent	= dma_bug_alloc_coherent,
+	.free_coherent	= dma_bug_free_coherent,
+	.map_sg		= dma_bug_map_sg,
+	.unmap_sg	= dma_bug_unmap_sg,
+	.dma_supported	= dma_bug_dma_supported,
+	.map_page	= dma_bug_map_page,
+	.unmap_page	= dma_bug_unmap_page,
+};
+EXPORT_SYMBOL(dma_iommu_coherent_ops);
+
+static int setup_choose64_ops(void)
+{
+	dma_iommu_coherent_ops = dma_direct_ops;
+	dma_iommu_coherent_ops.alloc_coherent = dma_iommu_ops.alloc_coherent;
+	dma_iommu_coherent_ops.free_coherent = dma_iommu_ops.free_coherent;
+
+	/* should we be stricter? */
+	dma_iommu_coherent_ops.dma_supported = dma_choose64_dma_supported;
+
+	return 0;
+}
+arch_initcall(setup_choose64_ops);