From d46d132cc0212ef08c22b9179dfa5fe21d07d253 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Mon, 10 Dec 2012 12:16:06 +0000
Subject: bnx2x: use netdev_alloc_frag()

Using netdev_alloc_frag() instead of kmalloc() permits better GRO or
TCP coalescing behavior, as skb_gro_receive() doesn't have to fall back
to frag_list overhead.

Signed-off-by: Eric Dumazet
Cc: Dmitry Kravkov
Cc: Eilon Greenstein
Acked-by: Dmitry Kravkov
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 43 ++++++++++++++++++-------
 1 file changed, 31 insertions(+), 12 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 67baddd13a6e..a2998bea5d4b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -552,6 +552,23 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	return 0;
 }
 
+static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
+{
+	if (fp->rx_frag_size)
+		put_page(virt_to_head_page(data));
+	else
+		kfree(data);
+}
+
+static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp)
+{
+	if (fp->rx_frag_size)
+		return netdev_alloc_frag(fp->rx_frag_size);
+
+	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+}
+
+
 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 			   struct bnx2x_agg_info *tpa_info,
 			   u16 pages,
@@ -574,15 +591,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		goto drop;
 
 	/* Try to allocate the new data */
-	new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
-
+	new_data = bnx2x_frag_alloc(fp);
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
 			 fp->rx_buf_size, DMA_FROM_DEVICE);
 	if (likely(new_data))
-		skb = build_skb(data, 0);
+		skb = build_skb(data, fp->rx_frag_size);
 
 	if (likely(skb)) {
 #ifdef BNX2X_STOP_ON_ERROR
@@ -619,7 +635,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		return;
 	}
-	kfree(new_data);
+	bnx2x_frag_free(fp, new_data);
 drop:
 	/* drop the packet and keep the buffer in the bin */
 	DP(NETIF_MSG_RX_STATUS,
@@ -635,7 +651,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
 	dma_addr_t mapping;
 
-	data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+	data = bnx2x_frag_alloc(fp);
 	if (unlikely(data == NULL))
 		return -ENOMEM;
 
@@ -643,7 +659,7 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
 				 fp->rx_buf_size,
 				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
-		kfree(data);
+		bnx2x_frag_free(fp, data);
 		BNX2X_ERR("Can't map rx data\n");
 		return -ENOMEM;
 	}
@@ -845,9 +861,9 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 						 dma_unmap_addr(rx_buf, mapping),
 						 fp->rx_buf_size,
 						 DMA_FROM_DEVICE);
-				skb = build_skb(data, 0);
+				skb = build_skb(data, fp->rx_frag_size);
 				if (unlikely(!skb)) {
-					kfree(data);
+					bnx2x_frag_free(fp, data);
 					bnx2x_fp_qstats(bp, fp)->
 							rx_skb_alloc_failed++;
 					goto next_rx;
@@ -1145,7 +1161,7 @@ static void bnx2x_free_tpa_pool(struct bnx2x *bp,
 			dma_unmap_single(&bp->pdev->dev,
 					 dma_unmap_addr(first_buf, mapping),
 					 fp->rx_buf_size, DMA_FROM_DEVICE);
-		kfree(data);
+		bnx2x_frag_free(fp, data);
 		first_buf->data = NULL;
 	}
 }
@@ -1190,8 +1206,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
 			struct sw_rx_bd *first_buf =
 				&tpa_info->first_buf;
 
-			first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
-						  GFP_ATOMIC);
+			first_buf->data = bnx2x_frag_alloc(fp);
 			if (!first_buf->data) {
 				BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
 					  j);
@@ -1323,7 +1338,7 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
 				 fp->rx_buf_size, DMA_FROM_DEVICE);
 
 		rx_buf->data = NULL;
-		kfree(data);
+		bnx2x_frag_free(fp, data);
 	}
 }
 
@@ -1782,6 +1797,10 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
 			  mtu +
 			  BNX2X_FW_RX_ALIGN_END;
 		/* Note : rx_buf_size doesnt take into account NET_SKB_PAD */
+		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
+			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
+		else
+			fp->rx_frag_size = 0;
 	}
 }
-- 
cgit v1.2.3-55-g7522
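
Note on the mechanism: the patch hinges on one decision made per fastpath in
bnx2x_set_rx_buf_size(). If the receive buffer plus NET_SKB_PAD headroom fits in a
single page, rx_frag_size records that total and buffers come from the per-CPU
page-frag cache via netdev_alloc_frag(); otherwise rx_frag_size stays 0 and the
driver keeps using kmalloc(). The same value is then passed as the frag_size
argument of build_skb(), which marks the skb head as page-backed when nonzero, so
skb_gro_receive() can merge payloads as page fragments rather than chaining whole
skbs onto frag_list. Below is a minimal sketch of that pattern; struct rx_ring and
the rx_buf_*/rx_ring_* names are hypothetical stand-ins for bnx2x_fastpath and the
bnx2x_frag_* helpers, not driver or kernel API.

#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

struct rx_ring {				/* hypothetical, stands in for bnx2x_fastpath */
	unsigned int rx_buf_size;	/* bytes the NIC may DMA into one buffer */
	unsigned int rx_frag_size;	/* buf + NET_SKB_PAD if it fits a page, else 0 */
};

static void rx_ring_pick_alloc_mode(struct rx_ring *r)
{
	/* Page frags only work when one page holds the buffer plus headroom. */
	if (r->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
		r->rx_frag_size = r->rx_buf_size + NET_SKB_PAD;
	else
		r->rx_frag_size = 0;
}

static void *rx_buf_alloc(const struct rx_ring *r)
{
	if (r->rx_frag_size)
		return netdev_alloc_frag(r->rx_frag_size);
	/* Too large for a page frag: fall back to kmalloc as before. */
	return kmalloc(r->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
}

static void rx_buf_free(const struct rx_ring *r, void *data)
{
	if (r->rx_frag_size)
		put_page(virt_to_head_page(data));	/* page frag: drop the page ref */
	else
		kfree(data);
}

Whichever branch allocated the buffer, the same rx_frag_size is what gets handed to
build_skb(data, rx_frag_size), so allocation and freeing stay consistent: a zero
frag_size tells the skb layer the head is kmalloc'd and must be kfree'd, a nonzero
one that it is page-backed and released with put_page().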