path: root/drivers/net/ethernet/intel/ixgbe/ixgbe.h
author      Alexander Duyck        2012-02-08 08:49:59 +0100
committer   Jeff Kirsher           2012-03-13 04:52:48 +0100
commit      de88eeeb16b164b615a5d71ad5fa0b7d51b14435 (patch)
tree        64a52ffcfb4107bd240bbf67a783c0d0dcd508b2 /drivers/net/ethernet/intel/ixgbe/ixgbe.h
parent      ixgbe: Drop unnecessary napi_schedule_prep and spare blank line from ixgbe_intr (diff)
download    kernel-qcow2-linux-de88eeeb16b164b615a5d71ad5fa0b7d51b14435.tar.gz
            kernel-qcow2-linux-de88eeeb16b164b615a5d71ad5fa0b7d51b14435.tar.xz
            kernel-qcow2-linux-de88eeeb16b164b615a5d71ad5fa0b7d51b14435.zip
ixgbe: Allocate rings as part of the q_vector
This patch makes the rings a part of the q_vector directly instead of indirectly. Specifically, on x86 systems this helps to avoid cache set conflicts between the q_vector, the tx_rings, and the rx_rings, since the critical stride is 4K and crossing that boundary would require more than 15 rings on a single q_vector.

In addition, this allows for smarter allocations when Flow Director is enabled. Previously, Flow Director set the irq_affinity hints based on the CPU but still used a node-interleaving approach, which on some systems left the two values mismatched. With the new approach we can set the affinity for the irq_vector and use that vector's CPU to determine the node value for the q_vector and its rings.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
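For context, the sketch below illustrates the allocation pattern the message describes: the rings sit in a zero-length array at the end of the q_vector, so one node-aware allocation covers the vector and all of its rings, and the node is derived from the CPU chosen for the vector's IRQ affinity. This is a minimal illustration, not the driver's actual allocation path; the example_* names, the ring_count parameter, and the simplified fields are hypothetical.

#include <linux/cache.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/topology.h>

/* simplified stand-ins for the driver structures */
struct example_ring {
	void *desc;			/* descriptor memory */
} ____cacheline_internodealigned_in_smp;

struct example_q_vector {
	int numa_node;
	cpumask_t affinity_mask;
	/* rings are carved out of the same allocation as the q_vector */
	struct example_ring ring[0] ____cacheline_internodealigned_in_smp;
};

static struct example_q_vector *example_alloc_q_vector(int cpu,
							unsigned int ring_count)
{
	struct example_q_vector *q_vector;
	int node = cpu_to_node(cpu);	/* node follows the affinity CPU */

	/* one node-local allocation holds the q_vector and its rings */
	q_vector = kzalloc_node(sizeof(*q_vector) +
				ring_count * sizeof(struct example_ring),
				GFP_KERNEL, node);
	if (!q_vector)
		return NULL;

	q_vector->numa_node = node;
	cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	return q_vector;
}

In the patch itself the same idea appears as the ring[0] member added at the end of struct ixgbe_q_vector, with the numa_node and rcu fields moving from the ring into the q_vector, as the diff below shows.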
Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe/ixgbe.h')
-rw-r--r--    drivers/net/ethernet/intel/ixgbe/ixgbe.h    10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index f25b4e2f8c57..699899ac85d0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -254,10 +254,8 @@ struct ixgbe_ring {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
-	int numa_node;
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
-	struct rcu_head rcu;
 	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
@@ -317,8 +315,13 @@ struct ixgbe_q_vector {
 	struct ixgbe_ring_container rx, tx;
 	struct napi_struct napi;
-	cpumask_var_t affinity_mask;
+	cpumask_t affinity_mask;
+	int numa_node;
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
 	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
 };
/*
@@ -514,7 +517,6 @@ struct ixgbe_adapter {
 	u16 eeprom_verl;
 	u16 eeprom_cap;
-	int node;
 	u32 interrupt_event;
 	u32 led_reg;