author     Michael Chan       2017-02-06 22:55:36 +0100
committer  David S. Miller    2017-02-07 19:30:58 +0100
commit     c61fb99cae51958a9096d8540c8c05e74cfa7e59 (patch)
tree       cc6709a1220c71163a11029b7c0b61d0217e39a2 /drivers/net/ethernet/broadcom/bnxt/bnxt.c
parent     bnxt_en: Parameterize RX buffer offsets. (diff)
bnxt_en: Add RX page mode support.
This mode is to support XDP. In this mode, each rx ring is configured with page sized buffers for linear placement of each packet. MTU will be restricted to what the page sized buffers can support.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
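A rough way to see where that MTU restriction comes from (a hedged sketch: the actual bound is the BNXT_MAX_PAGE_MODE_MTU macro in bnxt.h, which is outside this diff, so the constants below are illustrative assumptions for a 4 KiB page): in page mode each packet must fit linearly in a single page, offset by NET_IP_ALIGN (bp->rx_offset in bnxt_init_rx_rings() below), and the MTU does not count the Ethernet/VLAN header.

    /* Hedged, self-contained illustration of the page-mode MTU bound.
     * Assumed values (not taken from this diff): PAGE_SIZE = 4096,
     * NET_IP_ALIGN = 2, VLAN_ETH_HLEN = 18.
     */
    #include <stdio.h>

    #define PAGE_SIZE      4096u  /* one page holds one whole packet in page mode */
    #define NET_IP_ALIGN      2u  /* bp->rx_offset used in page mode (see diff)   */
    #define VLAN_ETH_HLEN    18u  /* Ethernet + VLAN header, not counted in MTU   */

    int main(void)
    {
            /* everything must be placed linearly in one page, so: */
            unsigned int max_mtu = PAGE_SIZE - NET_IP_ALIGN - VLAN_ETH_HLEN;

            printf("page-mode MTU bound: %u\n", max_mtu);  /* prints 4076 */
            return 0;
    }

This is consistent with bnxt_set_rx_skb_mode() in the diff below, which refuses page mode with -EOPNOTSUPP when the current MTU already exceeds BNXT_MAX_PAGE_MODE_MTU and caps dev->max_mtu to that value.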
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnxt/bnxt.c')
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 135
1 file changed, 117 insertions(+), 18 deletions(-)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c0f2167bbbb9..0bcd46576415 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -573,6 +573,25 @@ next_tx_int:
}
}
+static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
+ gfp_t gfp)
+{
+ struct device *dev = &bp->pdev->dev;
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (!page)
+ return NULL;
+
+ *mapping = dma_map_page(dev, page, 0, PAGE_SIZE, bp->rx_dir);
+ if (dma_mapping_error(dev, *mapping)) {
+ __free_page(page);
+ return NULL;
+ }
+ *mapping += bp->rx_dma_offset;
+ return page;
+}
+
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
gfp_t gfp)
{
@@ -599,19 +618,28 @@ static inline int bnxt_alloc_rx_data(struct bnxt *bp,
{
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
- u8 *data;
dma_addr_t mapping;
- data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
- if (!data)
- return -ENOMEM;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
- rx_buf->data = data;
- rx_buf->data_ptr = data + bp->rx_offset;
+ if (!page)
+ return -ENOMEM;
+
+ rx_buf->data = page;
+ rx_buf->data_ptr = page_address(page) + bp->rx_offset;
+ } else {
+ u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
+
+ if (!data)
+ return -ENOMEM;
+
+ rx_buf->data = data;
+ rx_buf->data_ptr = data + bp->rx_offset;
+ }
rx_buf->mapping = mapping;
rxbd->rx_bd_haddr = cpu_to_le64(mapping);
-
return 0;
}
@@ -754,6 +782,51 @@ static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
rxr->rx_sw_agg_prod = sw_prod;
}
+static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
+ struct bnxt_rx_ring_info *rxr,
+ u16 cons, void *data, u8 *data_ptr,
+ dma_addr_t dma_addr,
+ unsigned int offset_and_len)
+{
+ unsigned int payload = offset_and_len >> 16;
+ unsigned int len = offset_and_len & 0xffff;
+ struct skb_frag_struct *frag;
+ struct page *page = data;
+ u16 prod = rxr->rx_prod;
+ struct sk_buff *skb;
+ int off, err;
+
+ err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
+ if (unlikely(err)) {
+ bnxt_reuse_rx_data(rxr, cons, data);
+ return NULL;
+ }
+ dma_addr -= bp->rx_dma_offset;
+ dma_unmap_page(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir);
+
+ if (unlikely(!payload))
+ payload = eth_get_headlen(data_ptr, len);
+
+ skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
+ if (!skb) {
+ __free_page(page);
+ return NULL;
+ }
+
+ off = (void *)data_ptr - page_address(page);
+ skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
+ memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
+ payload + NET_IP_ALIGN);
+
+ frag = &skb_shinfo(skb)->frags[0];
+ skb_frag_size_sub(frag, payload);
+ frag->page_offset += payload;
+ skb->data_len -= payload;
+ skb->tail += payload;
+
+ return skb;
+}
+
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
struct bnxt_rx_ring_info *rxr, u16 cons,
void *data, u8 *data_ptr,
@@ -1329,6 +1402,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
struct sk_buff *skb;
void *data;
int rc = 0;
+ u32 misc;
rxcmp = (struct rx_cmp *)
&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
@@ -1381,8 +1455,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
prefetch(data_ptr);
- agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
- RX_CMP_AGG_BUFS_SHIFT;
+ misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
+ agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
if (agg_bufs) {
if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
@@ -1413,8 +1487,11 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
goto next_rx;
}
} else {
+ u32 payload;
+
+ payload = misc & RX_CMP_PAYLOAD_OFFSET;
skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
- len);
+ payload | len);
if (!skb) {
rc = -ENOMEM;
goto next_rx;
@@ -1899,7 +1976,10 @@ static void bnxt_free_rx_skbs(struct bnxt *bp)
rx_buf->data = NULL;
- kfree(data);
+ if (BNXT_RX_PAGE_MODE(bp))
+ __free_page(data);
+ else
+ kfree(data);
}
for (j = 0; j < max_agg_idx; j++) {
@@ -2348,8 +2428,13 @@ static int bnxt_init_rx_rings(struct bnxt *bp)
{
int i, rc = 0;
- bp->rx_offset = BNXT_RX_OFFSET;
- bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+ if (BNXT_RX_PAGE_MODE(bp)) {
+ bp->rx_offset = NET_IP_ALIGN;
+ bp->rx_dma_offset = 0;
+ } else {
+ bp->rx_offset = BNXT_RX_OFFSET;
+ bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
+ }
for (i = 0; i < bp->rx_nr_rings; i++) {
rc = bnxt_init_one_rx_ring(bp, i);
@@ -2560,10 +2645,24 @@ void bnxt_set_ring_params(struct bnxt *bp)
bp->cp_ring_mask = bp->cp_bit - 1;
}
-static int bnxt_set_rx_skb_mode(struct bnxt *bp)
+int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
{
- bp->rx_dir = DMA_FROM_DEVICE;
- bp->rx_skb_func = bnxt_rx_skb;
+ if (page_mode) {
+ if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
+ return -EOPNOTSUPP;
+ bp->dev->max_mtu = BNXT_MAX_PAGE_MODE_MTU;
+ bp->flags &= ~BNXT_FLAG_AGG_RINGS;
+ bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
+ bp->dev->hw_features &= ~NETIF_F_LRO;
+ bp->dev->features &= ~NETIF_F_LRO;
+ bp->rx_dir = DMA_BIDIRECTIONAL;
+ bp->rx_skb_func = bnxt_rx_page_skb;
+ } else {
+ bp->dev->max_mtu = BNXT_MAX_MTU;
+ bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
+ bp->rx_dir = DMA_FROM_DEVICE;
+ bp->rx_skb_func = bnxt_rx_skb;
+ }
return 0;
}
@@ -7275,7 +7374,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* MTU range: 60 - 9500 */
dev->min_mtu = ETH_ZLEN;
- dev->max_mtu = 9500;
+ dev->max_mtu = BNXT_MAX_MTU;
bnxt_dcb_init(bp);
@@ -7316,7 +7415,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_hwrm_func_qcfg(bp);
bnxt_hwrm_port_led_qcaps(bp);
- bnxt_set_rx_skb_mode(bp);
+ bnxt_set_rx_skb_mode(bp, false);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_set_max_func_irqs(bp, max_irqs);