@@ -209,6 +209,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq,
}
if (page_ref_count(cache->page_cache[cache->head].page) != 1) {
+ WARN_ON(page_ref_count(cache->page_cache[cache->head].page) <= 0);
rq->stats.cache_busy++;
return false;
}
@@ -292,6 +293,13 @@ static inline void mlx5e_add_skb_frag_mpwqe(struct mlx5e_rq *rq,
wi->umr.dma_info[page_idx].addr + frag_offset,
len, DMA_FROM_DEVICE);
wi->skbs_frags[page_idx]++;
+ WARN_ON(wi->skbs_frags[page_idx] > mlx5e_mpwqe_strides_per_page(rq));
+
+	/* Take a page reference every time we give a page to an skb
+	 * (alternative to the original mlx upstream approach).
+	 */
+ get_page(wi->umr.dma_info[page_idx].page);
+
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
wi->umr.dma_info[page_idx].page, frag_offset,
len, truesize);
@@ -372,7 +380,6 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
if (unlikely(err))
goto err_unmap;
wi->umr.mtt[i] = cpu_to_be64(dma_info->addr | MLX5_EN_WR);
- page_ref_add(dma_info->page, pg_strides);
wi->skbs_frags[i] = 0;
}
@@ -385,7 +392,6 @@ static int mlx5e_alloc_rx_umr_mpwqe(struct mlx5e_rq *rq,
while (--i >= 0) {
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
- page_ref_sub(dma_info->page, pg_strides);
mlx5e_page_release(rq, dma_info, true);
}
@@ -400,7 +406,7 @@ void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[i];
- page_ref_sub(dma_info->page, pg_strides - wi->skbs_frags[i]);
+ WARN_ON(pg_strides - wi->skbs_frags[i] < 0);
mlx5e_page_release(rq, dma_info, true);
}
}
@@ -565,12 +571,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
+#if 0
+ /* Disabled since we are seeing checksum faults occurring. This should
+ * not have any noticeable impact (in the short term).
+ */
if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
rq->stats.csum_complete++;
return;
}
+#endif
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
(cqe->hds_ip_ext & CQE_L4_OK))) {
@@ -929,6 +940,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
return;
+ WARN_ON(wi->consumed_strides > rq->mpwqe_num_strides);
mlx5e_free_rx_mpwqe(rq, wi);
mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}