diff mbox

powerpc: Add sync_*_for_* to dma_ops

Message ID 1226953165-19305-1-git-send-email-becky.bruce@freescale.com (mailing list archive)
State Changes Requested, archived
Headers show

Commit Message

Becky Bruce Nov. 17, 2008, 8:19 p.m. UTC
We need to swap these out once we start using swiotlb, so add
them to dma_ops.  When these are called, if the dma_op pointer
for the specific function is NULL, we just do nothing - most
of the 64-bit platforms don't actually need to do anything with
the sync so we don't require a sync function to be implemented.

Signed-off-by: Becky Bruce <becky.bruce@freescale.com>
---
 arch/powerpc/include/asm/dma-mapping.h |   97 ++++++++++++++++++++++----------
 arch/powerpc/kernel/dma.c              |   35 +++++++++++
 2 files changed, 102 insertions(+), 30 deletions(-)

Comments

Benjamin Herrenschmidt Nov. 18, 2008, 12:22 p.m. UTC | #1
>  				dma_addr_t dma_address, size_t size,
>  				enum dma_data_direction direction,
>  				struct dma_attrs *attrs);
> +	void            (*sync_single_for_cpu)(struct device *hwdev,
> +				dma_addr_t dma_handle, size_t size,
> +				enum dma_data_direction direction);
> +	void            (*sync_single_for_device)(struct device *hwdev,
> +				dma_addr_t dma_handle, size_t size,
> +				enum dma_data_direction direction);
> +	void            (*sync_single_range_for_cpu)(struct device *hwdev,
> +				dma_addr_t dma_handle, unsigned long offset,
> +				size_t size,
> +				enum dma_data_direction direction);
> +	void            (*sync_single_range_for_device)(struct device *hwdev,
> +				dma_addr_t dma_handle, unsigned long offset,
> +				size_t size,
> +				enum dma_data_direction direction);

Can't we implement the first 2 using the next 2 and passing a 0 offset?
I mean, we're going to go through a function pointer, it's not like
handling the offset was going to cost us something :-)

> +	void            (*sync_sg_for_cpu)(struct device *hwdev,
> +				struct scatterlist *sg, int nelems,
> +				enum dma_data_direction direction);
> +	void            (*sync_sg_for_device)(struct device *hwdev,
> +				struct scatterlist *sg, int nelems,
> +				enum dma_data_direction direction);
>  };
>  
>  /*
> @@ -286,42 +306,75 @@ static inline void dma_sync_single_for_cpu(struct device *dev,
>  		dma_addr_t dma_handle, size_t size,
>  		enum dma_data_direction direction)
>  {
> -	BUG_ON(direction == DMA_NONE);
> -	__dma_sync(bus_to_virt(dma_handle), size, direction);
> +	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
> +
> +	BUG_ON(!dma_ops);
> +	if (dma_ops->sync_single_for_cpu != NULL)
> +		dma_ops->sync_single_for_cpu(dev, dma_handle, size,
> +					     direction);
>  }

 .../...

The only objection that comes to mind here is that generally, you know
that your platform is going to be coherent or not at compile time, and
you don't want the above at all when it is (or won't need swiotlb).

So maybe we could have a Kconfig trickery so that
CONFIG_PPC_DMA_NEED_SYNC_OPS is set when either non coherent DMA is
select'ed or swiotlb support is select'ed by one of the enabled
platform. So if I enable only powermac, I get neither and don't get the
bloody inlines :-)

Cheers,
Ben.
Becky Bruce Nov. 18, 2008, 4:03 p.m. UTC | #2
On Nov 18, 2008, at 6:22 AM, Benjamin Herrenschmidt wrote:

>
>> 				dma_addr_t dma_address, size_t size,
>> 				enum dma_data_direction direction,
>> 				struct dma_attrs *attrs);
>> +	void            (*sync_single_for_cpu)(struct device *hwdev,
>> +				dma_addr_t dma_handle, size_t size,
>> +				enum dma_data_direction direction);
>> +	void            (*sync_single_for_device)(struct device *hwdev,
>> +				dma_addr_t dma_handle, size_t size,
>> +				enum dma_data_direction direction);
>> +	void            (*sync_single_range_for_cpu)(struct device *hwdev,
>> +				dma_addr_t dma_handle, unsigned long offset,
>> +				size_t size,
>> +				enum dma_data_direction direction);
>> +	void            (*sync_single_range_for_device)(struct device  
>> *hwdev,
>> +				dma_addr_t dma_handle, unsigned long offset,
>> +				size_t size,
>> +				enum dma_data_direction direction);
>
> Can't we implement the first 2 using the next 2 and passing a 0  
> offset ?
> I mean, we're going to go through a function pointer, it's not like
> handling the offset was going to cost us something :-)

Looking at the swiotlb code, I think that's a reasonable thing to do.   
Will respin.

>
>
>> +	void            (*sync_sg_for_cpu)(struct device *hwdev,
>> +				struct scatterlist *sg, int nelems,
>> +				enum dma_data_direction direction);
>> +	void            (*sync_sg_for_device)(struct device *hwdev,
>> +				struct scatterlist *sg, int nelems,
>> +				enum dma_data_direction direction);
>> };
>>
>> /*
>> @@ -286,42 +306,75 @@ static inline void  
>> dma_sync_single_for_cpu(struct device *dev,
>> 		dma_addr_t dma_handle, size_t size,
>> 		enum dma_data_direction direction)
>> {
>> -	BUG_ON(direction == DMA_NONE);
>> -	__dma_sync(bus_to_virt(dma_handle), size, direction);
>> +	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
>> +
>> +	BUG_ON(!dma_ops);
>> +	if (dma_ops->sync_single_for_cpu != NULL)
>> +		dma_ops->sync_single_for_cpu(dev, dma_handle, size,
>> +					     direction);
>> }
>
> .../...
>
> The only objection that comes to mind here is that generally, you know
> that your platform is going to be coherent or not at compile time, and
> you don't want the above at all when it is (or won't need swiotlb).
>
> So maybe we could have a Kconfig trickery so that
> CONFIG_PPC_DMA_NEED_SYNC_OPS is set when either non coherent DMA is
> select'ed or swiotlb support is select'ed by one of the enabled
> platform. So if I enable only powermac, I get neither and don't get  
> the
> bloody inlines :-)

I thought about doing something like this, but didn't know if it was  
worth the ifdeffing/CONFIG foo that would ensue; I guess your response  
answers that question :)

Thanks!
B
Segher Boessenkool Nov. 18, 2008, 4:57 p.m. UTC | #3
> @@ -286,42 +306,75 @@ static inline void dma_sync_single_for_cpu 
> (struct device *dev,
>  		dma_addr_t dma_handle, size_t size,
>  		enum dma_data_direction direction)
>  {
> -	BUG_ON(direction == DMA_NONE);

Did you intend to remove this here?  It would be nice to test for it
even on platforms where the op is a nop; if there is an equivalent
test in every implementation of the ops, remove it there instead
(that's more source + binary code to remove, always a good thing ;-) )


Segher
Becky Bruce Nov. 18, 2008, 7:29 p.m. UTC | #4
On Nov 18, 2008, at 10:57 AM, Segher Boessenkool wrote:

>> @@ -286,42 +306,75 @@ static inline void  
>> dma_sync_single_for_cpu(struct device *dev,
>> 		dma_addr_t dma_handle, size_t size,
>> 		enum dma_data_direction direction)
>> {
>> -	BUG_ON(direction == DMA_NONE);
>
> Did you intend to remove this here?  It would be nice to test for it
> even on platforms where the op is a nop; if there is an equivalent
> test in every implementation of the ops, remove it there instead
> (that's more source + binary code to remove, always a good thing ;-) )


You're right that we have lost this test in platforms which do not  
implement the sync ops, and if the test is useful, I can certainly  
move it.  However, it will actually result in more code on the current  
ppc kernel, as there are 3x more dma_sync_* instances than there are  
actual dma_direct_* instances (and nobody else implements  
dma_sync_* :)  Not that we're talking about a lot of code here, but it  
definitely won't make the kernel smaller since we just don't need a  
sync unless we're noncoherent which, thank <insert deity here>, is rare.

With the swiotlb case, the check needs to be in the swiotlb code  
because there are other architectures that use the swiotlb code that  
do not use a structure to get to their dma_ops like we do.  IA64, for  
example, sets up its dma_ops using config options and #defines the  
names of the various dma_ functions.  They don't go through a dma_ops  
dereference, so the (possibly redundant for us) check in the swiotlb  
code would need to remain.

So, the reason for moving it would be that we think it's still a  
useful check to have.  I'm happy to move it in that case.

Thanks,
-Becky
diff mbox

Patch

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3c4a2c2..7edab27 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -76,6 +76,26 @@  struct dma_mapping_ops {
 				dma_addr_t dma_address, size_t size,
 				enum dma_data_direction direction,
 				struct dma_attrs *attrs);
+	void            (*sync_single_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				enum dma_data_direction direction);
+	void            (*sync_single_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, size_t size,
+				enum dma_data_direction direction);
+	void            (*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void            (*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void            (*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+	void            (*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
 };
 
 /*
@@ -286,42 +306,75 @@  static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	if (dma_ops->sync_single_for_cpu != NULL)
+		dma_ops->sync_single_for_cpu(dev, dma_handle, size,
+					     direction);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+
+	if (dma_ops->sync_single_for_device != NULL)
+		dma_ops->sync_single_for_device(dev, dma_handle,
+						size, direction);
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	BUG_ON(!dma_ops);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	if (dma_ops->sync_sg_for_cpu != NULL)
+		dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
+	BUG_ON(!dma_ops);
 
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	if (dma_ops->sync_sg_for_device != NULL)
+		dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+
+	if (dma_ops->sync_single_range_for_cpu != NULL)
+		dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+						   offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+
+	if (dma_ops->sync_single_range_for_device != NULL)
+		dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+						      size, direction);
 }
 
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
@@ -356,22 +409,6 @@  static inline int dma_get_cache_alignment(void)
 #endif
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 1562daf..93f15b4 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -119,6 +119,35 @@  static inline void dma_direct_unmap_page(struct device *dev,
 {
 }
 
+static inline void dma_direct_sync_single(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+	BUG_ON(direction == DMA_NONE);
+	__dma_sync(bus_to_virt(dma_handle), size, direction);
+}
+
+static inline void dma_direct_sync_sg(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+	struct scatterlist *sg;
+	int i;
+
+	BUG_ON(direction == DMA_NONE);
+
+	for_each_sg(sgl, sg, nents, i)
+		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+}
+
+static inline void dma_direct_sync_single_range(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	/* just sync everything for now */
+	dma_direct_sync_single(dev, dma_handle, offset + size, direction);
+}
+
 struct dma_mapping_ops dma_direct_ops = {
 	.alloc_coherent	= dma_direct_alloc_coherent,
 	.free_coherent	= dma_direct_free_coherent,
@@ -127,5 +156,11 @@  struct dma_mapping_ops dma_direct_ops = {
 	.dma_supported	= dma_direct_dma_supported,
 	.map_page	= dma_direct_map_page,
 	.unmap_page	= dma_direct_unmap_page,
+	.sync_single_for_cpu 		= dma_direct_sync_single,
+	.sync_single_for_device 	= dma_direct_sync_single,
+	.sync_single_range_for_cpu 	= dma_direct_sync_single_range,
+	.sync_single_range_for_device 	= dma_direct_sync_single_range,
+	.sync_sg_for_cpu 		= dma_direct_sync_sg,
+	.sync_sg_for_device 		= dma_direct_sync_sg,
 };
 EXPORT_SYMBOL(dma_direct_ops);