swiotlb: Introduce architecture-specific APIs to replace __weak functions

Submitter: Ian Campbell
Date: May 29, 2009, 9:06 a.m.
Message ID: <1243587983.5997.38.camel@zakaz.uk.xensource.com>
Permalink: /patch/27837/
State: Not Applicable, archived

Comments

Ian Campbell - May 29, 2009, 9:06 a.m.
Subject should have been "swiotlb: Introduce architecture-specific APIs
to replace __weak functions". Seems I never drive git send-email right
the first time...

On Fri, 2009-05-29 at 04:43 -0400, Ian Campbell wrote:
> This series does not contain any Xen or PowerPC specific changes, those
> will follow in separate postings.

Becky, here is an updated version of your 2/3 "powerpc: Add support for
swiotlb on 32-bit" [0], compile tested with the new interfaces. The
interdiff is attached.

Ian.

[0] http://ozlabs.org/pipermail/linuxppc-dev/2009-May/072140.html
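
As an aside for anyone following along, the calling convention the new
interface implies looks roughly like this. The sketch below is not part
of the patch: dma_map_range() and its arguments match the definitions in
the diff, but the caller itself is hypothetical and assumes the patched
dma-mapping.h is in scope.

/*
 * Hypothetical caller: with the new interface the architecture
 * translates and range-checks in one step, rather than answering
 * addr_needs_map() and converting the address separately.
 */
static int example_map_buffer(struct device *dev, phys_addr_t phys,
			      size_t size)
{
	dma_addr_t dma_addr;

	/* false means the device cannot reach it: caller must bounce */
	if (!dma_map_range(dev, dma_get_mask(dev), phys, size, &dma_addr))
		return -EIO;

	/* on success dma_addr holds the bus address for the device */
	return 0;
}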

Patch

diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 3d9e887..b2fcd2b 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -87,8 +87,9 @@ struct dma_mapping_ops {
 				dma_addr_t dma_address, size_t size,
 				enum dma_data_direction direction,
 				struct dma_attrs *attrs);
-	int		(*addr_needs_map)(struct device *dev, dma_addr_t addr,
-				size_t size);
+	bool		(*map_range)(struct device *dev, u64 mask,
+				     phys_addr_t addr, size_t size,
+				     dma_addr_t *dma_addr_p);
 #ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 	void            (*sync_single_range_for_cpu)(struct device *hwdev,
 				dma_addr_t dma_handle, unsigned long offset,
@@ -301,6 +302,16 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
+static inline bool dma_map_range(struct device *hwdev, u64 mask,
+				 phys_addr_t addr, size_t size,
+				 dma_addr_t *dma_addr_p)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
+
+	BUG_ON(!dma_ops);
+	return dma_ops->map_range(hwdev, mask, addr, size, dma_addr_p);
+}
+
 #ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
@@ -420,6 +431,16 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 #define dma_is_consistent(d, h)	(1)
 #endif
 
+static inline dma_addr_t phys_to_dma(struct device *hwdev, phys_addr_t paddr)
+{
+	return paddr + get_dma_direct_offset(hwdev);
+}
+
+static inline phys_addr_t dma_to_phys(struct device *hwdev, dma_addr_t daddr)
+{
+	return daddr - get_dma_direct_offset(hwdev);
+}
+
 static inline int dma_get_cache_alignment(void)
 {
 #ifdef CONFIG_PPC64
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 6c00667..ef2e812 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -24,50 +24,15 @@ 
 int swiotlb __read_mostly;
 unsigned int ppc_swiotlb_enable;
 
-void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
-{
-	unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
-	void *pageaddr = page_address(pfn_to_page(pfn));
-
-	if (pageaddr != NULL)
-		return pageaddr + (addr % PAGE_SIZE);
-	return NULL;
-}
-
-dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
-{
-	return paddr + get_dma_direct_offset(hwdev);
-}
-
-phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
-
-{
-	return baddr - get_dma_direct_offset(hwdev);
-}
-
-/*
- * Determine if an address needs bounce buffering via swiotlb.
- * Going forward I expect the swiotlb code to generalize on using
- * a dma_ops->addr_needs_map, and this function will move from here to the
- * generic swiotlb code.
- */
-int
-swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
-				   size_t size)
-{
-	struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
-
-	BUG_ON(!dma_ops);
-	return dma_ops->addr_needs_map(hwdev, addr, size);
-}
-
 /*
  * Determine if an address is reachable by a pci device, or if we must bounce.
  */
-static int
-swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+static bool
+pci_map_range(struct device *hwdev, u64 mask,
+	      phys_addr_t addr, size_t size,
+	      dma_addr_t *dma_addr_p)
 {
-	u64 mask = dma_get_mask(hwdev);
+	dma_addr_t dma_addr = phys_to_dma(hwdev, addr);
 	dma_addr_t max;
 	struct pci_controller *hose;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
@@ -76,16 +41,25 @@ swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
 	max = hose->dma_window_base_cur + hose->dma_window_size;
 
 	/* check that we're within mapped pci window space */
-	if ((addr + size > max) | (addr < hose->dma_window_base_cur))
-		return 1;
+	if ((dma_addr + size > max) || (dma_addr < hose->dma_window_base_cur))
+		return false;
 
-	return !is_buffer_dma_capable(mask, addr, size);
+	*dma_addr_p = dma_addr;
+	return true;
 }
 
-static int
-swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+static bool
+map_range(struct device *dev, u64 mask,
+	  phys_addr_t addr, size_t size,
+	  dma_addr_t *dma_addr_p)
 {
-	return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+	dma_addr_t dma_addr = phys_to_dma(dev, addr);
+
+	if (dma_addr + size - 1 > mask)
+		return false;
+
+	*dma_addr_p = dma_addr;
+	return true;
 }
 
 
@@ -104,7 +78,7 @@ struct dma_mapping_ops swiotlb_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.addr_needs_map = swiotlb_addr_needs_map,
+	.map_range = map_range,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
@@ -119,7 +93,7 @@ struct dma_mapping_ops swiotlb_pci_dma_ops = {
 	.dma_supported = swiotlb_dma_supported,
 	.map_page = swiotlb_map_page,
 	.unmap_page = swiotlb_unmap_page,
-	.addr_needs_map = swiotlb_pci_addr_needs_map,
+	.map_range = pci_map_range,
 	.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
 	.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
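
The comment removed from dma-swiotlb.c anticipated the generic swiotlb
code consolidating on this hook. Purely for illustration, the consumer
side could then look something like the sketch below; dma_map_range()
is as defined in this patch, while example_bounce_map() is a stand-in
for the swiotlb bounce-buffer path, not a real function.

/*
 * Illustrative generic-side logic: try the direct mapping first and
 * fall back to a bounce buffer only when the architecture reports
 * that the range is not reachable by the device.
 */
static dma_addr_t example_swiotlb_map(struct device *dev,
				      phys_addr_t phys, size_t size)
{
	dma_addr_t dma_addr;

	if (dma_map_range(dev, dma_get_mask(dev), phys, size, &dma_addr))
		return dma_addr;	/* device can reach it directly */

	/* copy through the swiotlb pool and map the bounce buffer */
	return example_bounce_map(dev, phys, size);
}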