
[B,v2,1/2] UBUNTU: SAUCE: Revert "drm/msm: Use the correct dma_sync calls in msm_gem"

Message ID 20200508210437.31193-2-kelsey.skunberg@canonical.com
State New
Series Revert drm/msm patches that cause certification testing failure

Commit Message

Kelsey Skunberg May 8, 2020, 9:04 p.m. UTC
BugLink: https://bugs.launchpad.net/bugs/1877657

This reverts commit 3de433c5b38af49a5fc7602721e2ab5d39f1e69c, which is upstream
commit 9f614197c744 ("drm/msm: Use the correct dma_sync calls harder").

The commit contributes to certification test failures and should be reverted
until a fix or alternative solution for the dma_sync calls in msm_gem can be
applied.

Signed-off-by: Kelsey Skunberg <kelsey.skunberg@canonical.com>
---
 drivers/gpu/drm/msm/msm_gem.c | 47 ++++-------------------------------
 1 file changed, 5 insertions(+), 42 deletions(-)
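
Reviewer note (not part of the commit message): a quick sketch of how the
affected block in get_pages() reads once the revert is applied, pieced
together from the hunk below; the msm_obj and dev locals already exist in
that function and are not introduced by this patch.

	/* Newly allocated pages are synced for the device directly,
	 * rather than through the removed sync_for_device() helper:
	 */
	if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
		dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);

put_pages() gets the matching dma_sync_sg_for_cpu() call before the
scatterlist is freed, as shown in the second hunk.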

Patch

diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index ea59eb5eb556..21502afbcddc 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -43,46 +43,6 @@  static bool use_pages(struct drm_gem_object *obj)
 	return !msm_obj->vram_node;
 }
 
-/*
- * Cache sync.. this is a bit over-complicated, to fit dma-mapping
- * API.  Really GPU cache is out of scope here (handled on cmdstream)
- * and all we need to do is invalidate newly allocated pages before
- * mapping to CPU as uncached/writecombine.
- *
- * On top of this, we have the added headache, that depending on
- * display generation, the display's iommu may be wired up to either
- * the toplevel drm device (mdss), or to the mdp sub-node, meaning
- * that here we either have dma-direct or iommu ops.
- *
- * Let this be a cautionary tail of abstraction gone wrong.
- */
-
-static void sync_for_device(struct msm_gem_object *msm_obj)
-{
-	struct device *dev = msm_obj->base.dev->dev;
-
-	if (get_dma_ops(dev)) {
-		dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		dma_map_sg(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	}
-}
-
-static void sync_for_cpu(struct msm_gem_object *msm_obj)
-{
-	struct device *dev = msm_obj->base.dev->dev;
-
-	if (get_dma_ops(dev)) {
-		dma_sync_sg_for_cpu(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	} else {
-		dma_unmap_sg(dev, msm_obj->sgt->sgl,
-			msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
-	}
-}
-
 /* allocate pages from VRAM carveout, used when no IOMMU: */
 static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
 {
@@ -148,7 +108,8 @@  static struct page **get_pages(struct drm_gem_object *obj)
 		 * because display controller, GPU, etc. are not coherent:
 		 */
 		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-			sync_for_device(msm_obj);
+			dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
 	}
 
 	return msm_obj->pages;
@@ -177,7 +138,9 @@  static void put_pages(struct drm_gem_object *obj)
 			 * GPU, etc. are not coherent:
 			 */
 			if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
-				sync_for_cpu(msm_obj);
+				dma_sync_sg_for_cpu(obj->dev->dev, msm_obj->sgt->sgl,
+					     msm_obj->sgt->nents,
+					     DMA_BIDIRECTIONAL);
 
 			sg_free_table(msm_obj->sgt);
 			kfree(msm_obj->sgt);
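
Background note (not part of the patch): the deleted sync_for_device() /
sync_for_cpu() helpers used get_dma_ops() to pick between dma_sync_sg_*()
for IOMMU-backed devices and dma_map_sg()/dma_unmap_sg() in the dma-direct
case. A minimal annotated sketch of that shape, restating the removed code
purely for illustration:

	static void sync_for_device(struct msm_gem_object *msm_obj)
	{
		struct device *dev = msm_obj->base.dev->dev;

		if (get_dma_ops(dev)) {
			/* IOMMU-backed dma_ops: cache maintenance only,
			 * on an already-mapped scatterlist.
			 */
			dma_sync_sg_for_device(dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		} else {
			/* dma-direct: dma_map_sg() is used purely for its
			 * cache-maintenance side effect; unlike
			 * dma_sync_sg_for_device() it also assigns DMA
			 * addresses to the scatterlist.
			 */
			dma_map_sg(dev, msm_obj->sgt->sgl,
				msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		}
	}

The revert drops this dispatch and goes back to calling
dma_sync_sg_for_device()/dma_sync_sg_for_cpu() unconditionally at the two
call sites, until a replacement for the msm_gem cache handling is worked out.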