author    Alexander Duyck <alexander.h.duyck@intel.com>  2017-01-17 17:36:14 +0100
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>     2017-02-16 13:02:44 +0100
commit    4f4542bfb3b539bef118578ffafcc98e4ce91979
tree      582eed63703e811db83ca3e4ad7fe25c2b151312 /drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
parent    ixgbe: Update code to better handle incrementing page count
ixgbe: Make use of order 1 pages and 3K buffers independent of FCoE
In order to support build_skb with jumbo frames it will be necessary to use
3K buffers for the Rx path, with 8K pages backing them. This is needed on
architectures that implement 4K pages, because we can't fit a 2K buffer plus
padding in half of a 4K page. On systems that support page sizes larger than
4K, the 3K attribute will only be applied to FCoE, as we can fall back to
using 2K buffers and adding the padding.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
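The sizing argument above is easy to sanity-check with arithmetic. Below is a
minimal user-space sketch, not driver code; SKB_PAD and SHARED_INFO are rough
stand-ins for the kernel's NET_SKB_PAD and sizeof(struct skb_shared_info)
(both values are assumptions here and vary by architecture and config):

/* Illustrative only: each Rx page is split into two buffers, so half a
 * page must hold the buffer plus the headroom and tailroom build_skb()
 * needs.
 */
#include <stdio.h>

#define RXBUFFER_2K   2048
#define RXBUFFER_3K   3072
#define SKB_PAD         64   /* assumed stand-in for NET_SKB_PAD */
#define SHARED_INFO    320   /* assumed approx. sizeof(struct skb_shared_info) */

static void check(const char *name, int page_size, int bufsz)
{
	int half = page_size / 2;
	int need = bufsz + SKB_PAD + SHARED_INFO;

	printf("%s: half page %d, buffer+padding %d -> %s\n",
	       name, half, need, need <= half ? "fits" : "does not fit");
}

int main(void)
{
	check("4K page, 2K buffer", 4096, RXBUFFER_2K);  /* 2432 > 2048: does not fit */
	check("8K page, 3K buffer", 8192, RXBUFFER_3K);  /* 3456 <= 4096: fits */
	return 0;
}

The leftover space in each 8K half page is what makes build_skb viable with
jumbo frames, while on systems with pages larger than 4K a 2K buffer plus
padding already fits, which is why the 3K size stays FCoE-only there.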
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe_main.c')
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19 +++++++++++++------
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index e4487109292a..415e5ad1485a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1604,6 +1604,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	u16 i = rx_ring->next_to_use;
+	u16 bufsz;
 
 	/* nothing to do */
 	if (!cleaned_count)
@@ -1613,14 +1614,15 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 	bi = &rx_ring->rx_buffer_info[i];
 	i -= rx_ring->count;
 
+	bufsz = ixgbe_rx_bufsz(rx_ring);
+
 	do {
 		if (!ixgbe_alloc_mapped_page(rx_ring, bi))
 			break;
 
 		/* sync the buffer for use by the device */
 		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
-						 bi->page_offset,
-						 ixgbe_rx_bufsz(rx_ring),
+						 bi->page_offset, bufsz,
 						 DMA_FROM_DEVICE);
 
 		/*
@@ -2000,9 +2002,9 @@ static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring,
 	struct page *page = rx_buffer->page;
 	unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
-	unsigned int truesize = ixgbe_rx_bufsz(rx_ring);
+	unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2;
 #else
-	unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+	unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
 
 	if (unlikely(skb_is_nonlinear(skb)))
@@ -3866,10 +3868,15 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 	 */
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		rx_ring = adapter->rx_ring[i];
+
+		clear_ring_rsc_enabled(rx_ring);
+		clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
+
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
 			set_ring_rsc_enabled(rx_ring);
-		else
-			clear_ring_rsc_enabled(rx_ring);
+
+		if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state))
+			set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state);
 	}
 }
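The diffstat here is limited to ixgbe_main.c, so the helpers that consume the
new __IXGBE_RX_3K_BUFFER ring-state bit are not shown. Based on how
ixgbe_rx_bufsz() and ixgbe_rx_pg_size() are used in the hunks above, a
plausible sketch of those ixgbe.h helpers follows; the constant values and
exact shape are assumptions, not the verbatim kernel code:

/* Hypothetical reconstruction of the ixgbe.h side of this change: the
 * ring-state bit picks a 3K buffer backed by an order 1 (8K) page,
 * otherwise a 2K buffer in an order 0 page.
 */
#define IXGBE_RXBUFFER_2K	2048
#define IXGBE_RXBUFFER_3K	3072	/* assumed values */

static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return IXGBE_RXBUFFER_3K;
	return IXGBE_RXBUFFER_2K;
}

static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
		return 1;	/* order 1 page: two 3K buffers plus padding */
	return 0;
}

#define ixgbe_rx_pg_size(_ring)	(PAGE_SIZE << ixgbe_rx_pg_order(_ring))

With that shape, the truesize change in ixgbe_add_rx_frag() (half of
ixgbe_rx_pg_size() rather than ixgbe_rx_bufsz()) keeps the memory accounting
honest: on small-page systems the skb is charged for the full half page the
buffer occupies, padding included.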