
[SRU,F,PULL,v2,01/23] Revert "UBUNTU: SAUCE: mlxbf_gige: syncup with v1.23 content"

Message ID 20210709190830.5405-2-asmaa@nvidia.com
State New
Series Cherry-pick the upstreamed mlxbf-gige driver

Commit Message

Asmaa Mnebhi July 9, 2021, 7:08 p.m. UTC
BugLink: https://bugs.launchpad.net/bugs/1934923

This reverts commit 119e2a48b345d600b896a5125b1c1f74c308c052.
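
Reverting the out-of-tree v1.23 syncup gives the rest of this series a
clean base onto which the upstreamed mlxbf-gige driver can be
cherry-picked.

Among the changes undone here is the "valid polarity" ownership check
on RX completion queue entries: hardware inverts the bit under
MLXBF_GIGE_RX_CQE_VALID_MASK each time it wraps the completion ring,
and software tracks the expected phase in priv->valid_polarity,
toggling it whenever the producer index wraps back to entry zero. As a
minimal sketch of that pattern, reconstructed from the hunks below for
illustration only (the helper name is hypothetical, not driver code):

	/* Does this CQE belong to software yet?  Hardware flips the
	 * valid bit on every wrap of the ring, so compare against the
	 * phase software expects rather than against a fixed value.
	 */
	static bool rx_cqe_ready(struct mlxbf_gige *priv, u64 rx_cqe)
	{
		return !!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK) ==
		       priv->valid_polarity;
	}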

Signed-off-by: Asmaa Mnebhi <asmaa@nvidia.com>
---
 .../ethernet/mellanox/mlxbf_gige/mlxbf_gige.h |  2 -
 .../mellanox/mlxbf_gige/mlxbf_gige_main.c     | 27 ++++-------
 .../mellanox/mlxbf_gige/mlxbf_gige_rx.c       | 45 ++++++-------------
 .../mellanox/mlxbf_gige/mlxbf_gige_tx.c       | 20 ++++-----
 4 files changed, 30 insertions(+), 64 deletions(-)

Patch

diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
index 2c049bf114b1..1c59cad682c7 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige.h
@@ -105,7 +105,6 @@  struct mlxbf_gige {
 	int llu_plu_irq;
 	int phy_irq;
 	bool promisc_enabled;
-	u8 valid_polarity;
 	struct napi_struct napi;
 	struct mlxbf_gige_stats stats;
 	u32 tx_pause;
@@ -172,7 +171,6 @@  bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv);
 netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
 				  struct net_device *netdev);
 struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
-				     unsigned int map_len,
 				     dma_addr_t *buf_dma,
 				     enum dma_data_direction dir);
 int mlxbf_gige_request_irqs(struct mlxbf_gige *priv);
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index a17a346baf98..2513c3547ea0 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -21,7 +21,7 @@ 
 #include "mlxbf_gige_regs.h"
 
 #define DRV_NAME    "mlxbf_gige"
-#define DRV_VERSION 1.23
+#define DRV_VERSION 1.21
 
 /* This setting defines the version of the ACPI table
  * content that is compatible with this driver version.
@@ -36,7 +36,6 @@ 
  * naturally aligned to a 2KB boundary.
  */
 struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
-				     unsigned int map_len,
 				     dma_addr_t *buf_dma,
 				     enum dma_data_direction dir)
 {
@@ -61,7 +60,8 @@  struct sk_buff *mlxbf_gige_alloc_skb(struct mlxbf_gige *priv,
 		skb_reserve(skb, offset);
 
 	/* Return streaming DMA mapping to caller */
-	*buf_dma = dma_map_single(priv->dev, skb->data, map_len, dir);
+	*buf_dma = dma_map_single(priv->dev, skb->data,
+				  MLXBF_GIGE_DEFAULT_BUF_SZ, dir);
 	if (dma_mapping_error(priv->dev, *buf_dma)) {
 		dev_kfree_skb(skb);
 		*buf_dma = (dma_addr_t)0;
@@ -120,9 +120,6 @@  static int mlxbf_gige_clean_port(struct mlxbf_gige *priv)
 	control |= MLXBF_GIGE_CONTROL_CLEAN_PORT_EN;
 	writeq(control, priv->base + MLXBF_GIGE_CONTROL);
 
-	/* Ensure completion of "clean port" write before polling status */
-	mb();
-
 	err = readq_poll_timeout_atomic(priv->base + MLXBF_GIGE_STATUS, temp,
 					(temp & MLXBF_GIGE_STATUS_READY),
 					100, 100000);
@@ -148,13 +145,13 @@  static int mlxbf_gige_open(struct net_device *netdev)
 	mlxbf_gige_cache_stats(priv);
 	err = mlxbf_gige_clean_port(priv);
 	if (err)
-		goto free_irqs;
+		return err;
 	err = mlxbf_gige_rx_init(priv);
 	if (err)
-		goto free_irqs;
+		return err;
 	err = mlxbf_gige_tx_init(priv);
 	if (err)
-		goto rx_deinit;
+		return err;
 
 	phy_start(phydev);
 
@@ -170,17 +167,9 @@  static int mlxbf_gige_open(struct net_device *netdev)
 		 MLXBF_GIGE_INT_EN_SW_CONFIG_ERROR |
 		 MLXBF_GIGE_INT_EN_SW_ACCESS_ERROR |
 		 MLXBF_GIGE_INT_EN_RX_RECEIVE_PACKET;
-	mb();
 	writeq(int_en, priv->base + MLXBF_GIGE_INT_EN);
 
 	return 0;
-
-rx_deinit:
-	mlxbf_gige_rx_deinit(priv);
-
-free_irqs:
-	mlxbf_gige_free_irqs(priv);
-	return err;
 }
 
 static int mlxbf_gige_stop(struct net_device *netdev)
@@ -229,8 +218,8 @@  static void mlxbf_gige_set_rx_mode(struct net_device *netdev)
 			mlxbf_gige_enable_promisc(priv);
 		else
 			mlxbf_gige_disable_promisc(priv);
+		}
 	}
-}
 
 static void mlxbf_gige_get_stats64(struct net_device *netdev,
 				   struct rtnl_link_stats64 *stats)
@@ -241,7 +230,7 @@  static void mlxbf_gige_get_stats64(struct net_device *netdev,
 
 	stats->rx_length_errors = priv->stats.rx_truncate_errors;
 	stats->rx_fifo_errors = priv->stats.rx_din_dropped_pkts +
-				readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
+		                readq(priv->base + MLXBF_GIGE_RX_DIN_DROP_COUNTER);
 	stats->rx_crc_errors = priv->stats.rx_mac_errors;
 	stats->rx_errors = stats->rx_length_errors +
 			   stats->rx_fifo_errors +
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
index 12cb5beb29f9..9e4c507497a1 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_rx.c
@@ -103,8 +103,7 @@  int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
 	rx_wqe_ptr = priv->rx_wqe_base;
 
 	for (i = 0; i < priv->rx_q_entries; i++) {
-		priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
-						       &rx_buf_dma, DMA_FROM_DEVICE);
+		priv->rx_skb[i] = mlxbf_gige_alloc_skb(priv, &rx_buf_dma, DMA_FROM_DEVICE);
 		if (!priv->rx_skb[i])
 			goto free_wqe_and_skb;
 		*rx_wqe_ptr++ = rx_buf_dma;
@@ -120,9 +119,6 @@  int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
 	if (!priv->rx_cqe_base)
 		goto free_wqe_and_skb;
 
-	for (i = 0; i < priv->rx_q_entries; i++)
-		priv->rx_cqe_base[i] |= MLXBF_GIGE_RX_CQE_VALID_MASK;
-
 	/* Write RX CQE base address into MMIO reg */
 	writeq(priv->rx_cqe_base_dma, priv->base + MLXBF_GIGE_RX_CQ_BASE);
 
@@ -148,9 +144,7 @@  int mlxbf_gige_rx_init(struct mlxbf_gige *priv)
 	writeq(data, priv->base + MLXBF_GIGE_INT_MASK);
 
 	/* Enable RX DMA to write new packets to memory */
-	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
-	data |= MLXBF_GIGE_RX_DMA_EN;
-	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
+	writeq(MLXBF_GIGE_RX_DMA_EN, priv->base + MLXBF_GIGE_RX_DMA);
 
 	writeq(ilog2(priv->rx_q_entries),
 	       priv->base + MLXBF_GIGE_RX_WQE_SIZE_LOG2);
@@ -178,14 +172,8 @@  void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
 {
 	dma_addr_t *rx_wqe_ptr;
 	size_t size;
-	u64 data;
 	int i;
 
-	/* Disable RX DMA to prevent packet transfers to memory */
-	data = readq(priv->base + MLXBF_GIGE_RX_DMA);
-	data &= ~MLXBF_GIGE_RX_DMA_EN;
-	writeq(data, priv->base + MLXBF_GIGE_RX_DMA);
-
 	rx_wqe_ptr = priv->rx_wqe_base;
 
 	for (i = 0; i < priv->rx_q_entries; i++) {
@@ -214,10 +202,10 @@  void mlxbf_gige_rx_deinit(struct mlxbf_gige *priv)
 static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
 {
 	struct net_device *netdev = priv->netdev;
-	struct sk_buff *skb = NULL, *rx_skb;
 	u16 rx_pi_rem, rx_ci_rem;
 	dma_addr_t *rx_wqe_addr;
 	dma_addr_t rx_buf_dma;
+	struct sk_buff *skb;
 	u64 *rx_cqe_addr;
 	u64 datalen;
 	u64 rx_cqe;
@@ -227,14 +215,14 @@  static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
 	/* Index into RX buffer array is rx_pi w/wrap based on RX_CQE_SIZE */
 	rx_pi = readq(priv->base + MLXBF_GIGE_RX_WQE_PI);
 	rx_pi_rem = rx_pi % priv->rx_q_entries;
-
 	rx_wqe_addr = priv->rx_wqe_base + rx_pi_rem;
+
+	dma_unmap_single(priv->dev, *rx_wqe_addr,
+			 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+
 	rx_cqe_addr = priv->rx_cqe_base + rx_pi_rem;
 	rx_cqe = *rx_cqe_addr;
 
-	if ((!!(rx_cqe & MLXBF_GIGE_RX_CQE_VALID_MASK)) != priv->valid_polarity)
-		return false;
-
 	if ((rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MASK) == 0) {
 		/* Packet is OK, increment stats */
 		datalen = rx_cqe & MLXBF_GIGE_RX_CQE_PKT_LEN_MASK;
@@ -248,15 +236,16 @@  static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
 		skb->ip_summed = CHECKSUM_NONE; /* device did not checksum packet */
 
 		skb->protocol = eth_type_trans(skb, netdev);
+		netif_receive_skb(skb);
 
 		/* Alloc another RX SKB for this same index */
-		rx_skb = mlxbf_gige_alloc_skb(priv, MLXBF_GIGE_DEFAULT_BUF_SZ,
-					      &rx_buf_dma, DMA_FROM_DEVICE);
-		if (!rx_skb)
+		priv->rx_skb[rx_pi_rem] = mlxbf_gige_alloc_skb(priv, &rx_buf_dma,
+							       DMA_FROM_DEVICE);
+		if (!priv->rx_skb[rx_pi_rem]) {
+			netdev->stats.rx_dropped++;
 			return false;
-		priv->rx_skb[rx_pi_rem] = rx_skb;
-		dma_unmap_single(priv->dev, *rx_wqe_addr,
-				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_FROM_DEVICE);
+		}
+
 		*rx_wqe_addr = rx_buf_dma;
 	} else if (rx_cqe & MLXBF_GIGE_RX_CQE_PKT_STATUS_MAC_ERR) {
 		priv->stats.rx_mac_errors++;
@@ -266,20 +255,14 @@  static bool mlxbf_gige_rx_packet(struct mlxbf_gige *priv, int *rx_pkts)
 
 	/* Let hardware know we've replenished one buffer */
 	rx_pi++;
-	wmb();
 	writeq(rx_pi, priv->base + MLXBF_GIGE_RX_WQE_PI);
 
 	(*rx_pkts)++;
 
 	rx_pi_rem = rx_pi % priv->rx_q_entries;
-	if (rx_pi_rem == 0)
-		priv->valid_polarity ^= 1;
 	rx_ci = readq(priv->base + MLXBF_GIGE_RX_CQE_PACKET_CI);
 	rx_ci_rem = rx_ci % priv->rx_q_entries;
 
-	if (skb)
-		netif_receive_skb(skb);
-
 	return rx_pi_rem != rx_ci_rem;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
index 4efa5eff5a98..0c35b2f2dfcd 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_tx.c
@@ -66,7 +66,7 @@  void mlxbf_gige_tx_deinit(struct mlxbf_gige *priv)
 	for (i = 0; i < priv->tx_q_entries; i++) {
 		if (priv->tx_skb[i]) {
 			dma_unmap_single(priv->dev, *tx_wqe_addr,
-					 priv->tx_skb[i]->len, DMA_TO_DEVICE);
+					 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_TO_DEVICE);
 			dev_kfree_skb(priv->tx_skb[i]);
 			priv->tx_skb[i] = NULL;
 		}
@@ -156,11 +156,9 @@  bool mlxbf_gige_handle_tx_complete(struct mlxbf_gige *priv)
 		stats->tx_bytes += MLXBF_GIGE_TX_WQE_PKT_LEN(tx_wqe_addr);
 
 		dma_unmap_single(priv->dev, *tx_wqe_addr,
-				 priv->tx_skb[tx_wqe_index]->len, DMA_TO_DEVICE);
+				 MLXBF_GIGE_DEFAULT_BUF_SZ, DMA_TO_DEVICE);
 		dev_consume_skb_any(priv->tx_skb[tx_wqe_index]);
 		priv->tx_skb[tx_wqe_index] = NULL;
-
-		mb();
 	}
 
 	/* Since the TX ring was likely just drained, check if TX queue
@@ -194,12 +192,11 @@  netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
 	long buff_addr, start_dma_page, end_dma_page;
 	struct sk_buff *tx_skb;
 	dma_addr_t tx_buf_dma;
-	unsigned long flags;
 	u64 *tx_wqe_addr;
 	u64 word2;
 
 	/* If needed, linearize TX SKB as hardware DMA expects this */
-	if ((skb->len > MLXBF_GIGE_DEFAULT_BUF_SZ) || skb_linearize(skb)) {
+	if (skb_linearize(skb)) {
 		dev_kfree_skb(skb);
 		netdev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
@@ -214,8 +211,7 @@  netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
 	 */
 	if (start_dma_page != end_dma_page) {
 		/* DMA operation would fail as-is, alloc new aligned SKB */
-		tx_skb = mlxbf_gige_alloc_skb(priv, skb->len,
-					      &tx_buf_dma, DMA_TO_DEVICE);
+		tx_skb = mlxbf_gige_alloc_skb(priv, &tx_buf_dma, DMA_TO_DEVICE);
 		if (!tx_skb) {
 			/* Free original skb, could not alloc new aligned SKB */
 			dev_kfree_skb(skb);
@@ -230,7 +226,8 @@  netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
 	} else {
 		tx_skb = skb;
 		tx_buf_dma = dma_map_single(priv->dev, skb->data,
-					    skb->len, DMA_TO_DEVICE);
+					    MLXBF_GIGE_DEFAULT_BUF_SZ,
+					    DMA_TO_DEVICE);
 		if (dma_mapping_error(priv->dev, tx_buf_dma)) {
 			dev_kfree_skb(skb);
 			netdev->stats.tx_dropped++;
@@ -238,6 +235,8 @@  netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
 		}
 	}
 
+	priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
+
 	/* Get address of TX WQE */
 	tx_wqe_addr = priv->tx_wqe_next;
 
@@ -255,10 +254,7 @@  netdev_tx_t mlxbf_gige_start_xmit(struct sk_buff *skb,
 	/* Write entire 2nd word of TX WQE */
 	*(tx_wqe_addr + 1) = word2;
 
-	spin_lock_irqsave(&priv->lock, flags);
-	priv->tx_skb[priv->tx_pi % priv->tx_q_entries] = tx_skb;
 	priv->tx_pi++;
-	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (!netdev_xmit_more()) {
 		/* Create memory barrier before write to TX PI */