@@ -117,8 +117,8 @@ static void gfar_reset_task(struct work_struct *work);
static void gfar_timeout(struct net_device *dev);
static int gfar_close(struct net_device *dev);
struct sk_buff *gfar_new_skb(struct net_device *dev);
-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb);
+static int gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb);
static int gfar_set_mac_address(struct net_device *dev);
static int gfar_change_mtu(struct net_device *dev, int new_mtu);
static irqreturn_t gfar_error(int irq, void *dev_id);
@@ -219,9 +219,13 @@ static int gfar_init_bds(struct net_device *ndev)
netdev_err(ndev, "Can't allocate RX buffers\n");
return -ENOMEM;
}
- rx_queue->rx_skbuff[j] = skb;
- gfar_new_rxbdp(rx_queue, rxbdp, skb);
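+ /* free the skb and leave this ring slot empty if mapping fails */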
+ if (gfar_new_rxbdp(rx_queue, rxbdp, skb)) {
+ dev_kfree_skb_any(skb);
+ skb = NULL;
+ }
+
+ rx_queue->rx_skbuff[j] = skb;
}

rxbdp++;
@@ -2290,6 +2294,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
0,
frag_len,
DMA_TO_DEVICE);
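+ /* check the frag mapping; on failure unwind everything mapped so far */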
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;

/* set the TxBD length and buffer pointer */
txbdp->bufPtr = bufaddr;
@@ -2339,8 +2345,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
fcb->ptp = 1;
}

- txbdp_start->bufPtr = dma_map_single(priv->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
+ bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
+ goto dma_map_err;
+
+ txbdp_start->bufPtr = bufaddr;

/* If time stamping is requested one additional TxBD must be set up. The
* first TxBD points to the FCB and must have a data length of
@@ -2406,6 +2416,25 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&tx_queue->txlock, flags);

return NETDEV_TX_OK;
+
+dma_map_err:
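+ /* Unwind: skip the first BD (and the timestamp BD, if one was used),
+ * then unmap every frag BD that was marked ready above.
+ */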
+ txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
+ if (do_tstamp)
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ for (i = 0; i < nr_frags; i++) {
+ lstatus = txbdp->lstatus;
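+ /* BDs that were never marked ready were never mapped; stop there */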
+ if (!(lstatus & BD_LFLAG(TXBD_READY)))
+ break;
+
+ txbdp->lstatus = lstatus & ~BD_LFLAG(TXBD_READY);
+ bufaddr = txbdp->bufPtr;
+ dma_unmap_page(priv->dev, bufaddr, txbdp->length,
+ DMA_TO_DEVICE);
+ txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
+ }
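+ /* force the BD status writes out before the skb is released */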
+ gfar_wmb();
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}

/* Stops the kernel queue, and halts the controller */
@@ -2606,8 +2635,8 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
netdev_tx_completed_queue(txq, howmany, bytes_sent);
}

-static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
- struct sk_buff *skb)
+static int gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
+ struct sk_buff *skb)
{
struct net_device *dev = rx_queue->dev;
struct gfar_private *priv = netdev_priv(dev);
@@ -2615,7 +2644,11 @@ static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
buf = dma_map_single(priv->dev, skb->data,
priv->rx_buffer_size, DMA_FROM_DEVICE);
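+ /* report a mapping failure so the caller can drop the skb */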
+ if (unlikely(dma_mapping_error(priv->dev, buf)))
+ return -ENOMEM;
+
gfar_init_rxbdp(rx_queue, bdp, buf);
+ return 0;
}

static struct sk_buff *gfar_alloc_skb(struct net_device *dev)
@@ -2851,10 +2884,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
}

- rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
-
/* Setup the new bdp */
- gfar_new_rxbdp(rx_queue, bdp, newskb);
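+ /* a fresh skb that can't be mapped is dropped; the slot is left empty */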
+ if (unlikely(gfar_new_rxbdp(rx_queue, bdp, newskb))) {
+ dev_kfree_skb_any(newskb);
+ newskb = NULL;
+ }
+
+ rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;

/* Update to the next pointer */
bdp = next_bd(bdp, base, rx_queue->rx_ring_size);