Diffstat (limited to 'drivers/net/e1000/e1000_main.c')
-rw-r--r--  drivers/net/e1000/e1000_main.c  332
1 file changed, 202 insertions(+), 130 deletions(-)
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 68a80893dce1..4686c3983fc3 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -123,8 +123,10 @@ static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
static void e1000_set_rx_mode(struct net_device *netdev);
static void e1000_update_phy_info(unsigned long data);
+static void e1000_update_phy_info_task(struct work_struct *work);
static void e1000_watchdog(unsigned long data);
static void e1000_82547_tx_fifo_stall(unsigned long data);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
struct net_device *netdev);
static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
@@ -275,7 +277,7 @@ static int e1000_request_irq(struct e1000_adapter *adapter)
err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
netdev);
if (err) {
- e_err("Unable to allocate interrupt Error: %d\n", err);
+ e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
}
return err;
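
Throughout the patch, the single-argument e_err()/e_warn()/e_info() calls gain a message-class argument (probe, drv, tx_err, rx_err, ...) so output can be filtered per category through the adapter's msg_enable bits. A minimal sketch of how such a wrapper is commonly layered on netif_err(); the exact definition lives in the driver's private header, so treat this as an approximation rather than the e1000 macro itself.

#include <linux/netdevice.h>

/* Approximation of a per-class logging wrapper.  'adapter' is assumed to
 * be in scope at the call site and to contain 'netdev' and 'msg_enable'
 * members, as the e1000 adapter struct does.
 */
#define e_err(msglvl, format, arg...) \
        netif_err(adapter, msglvl, adapter->netdev, format, ## arg)

/* e.g.  e_err(probe, "Unable to allocate interrupt Error: %d\n", err);
 * only prints when the 'probe' bit is set in adapter->msg_enable,
 * because netif_err() gates on netif_msg_probe(adapter).
 */
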
@@ -519,8 +521,21 @@ void e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_rx_rings(adapter);
}
+static void e1000_reinit_safe(struct e1000_adapter *adapter)
+{
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+ rtnl_lock();
+ e1000_down(adapter);
+ e1000_up(adapter);
+ rtnl_unlock();
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
void e1000_reinit_locked(struct e1000_adapter *adapter)
{
+ /* if rtnl_lock is not held the call path is bogus */
+ ASSERT_RTNL();
WARN_ON(in_interrupt());
while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
msleep(1);
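
e1000_reinit_locked() now documents its locking contract with ASSERT_RTNL(), which warns if the caller does not hold the RTNL mutex, while the new e1000_reinit_safe() takes RTNL itself for callers that cannot (see e1000_reset_task further down). A sketch of the two kinds of call sites, using hypothetical my_* names and a pared-down adapter struct; the reinit bodies are assumed to mirror the two functions above.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_adapter {
        struct net_device *netdev;
        struct work_struct reset_task;
        unsigned long flags;
};

static void my_reinit_locked(struct my_adapter *adapter);  /* asserts RTNL */
static void my_reinit_safe(struct my_adapter *adapter);    /* takes RTNL */

/* ndo_* and ethtool hooks already run under rtnl_lock(), so they may call
 * the *_locked variant directly and ASSERT_RTNL() stays silent.
 */
static int my_ndo_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct my_adapter *adapter = netdev_priv(netdev);

        netdev->mtu = new_mtu;
        if (netif_running(netdev))
                my_reinit_locked(adapter);      /* rtnl held by the stack */
        return 0;
}

/* Deferred work runs with no locks held, so it must use the *_safe
 * variant, which wraps down/up in rtnl_lock()/rtnl_unlock() itself.
 */
static void my_reset_task(struct work_struct *work)
{
        struct my_adapter *adapter =
                container_of(work, struct my_adapter, reset_task);

        my_reinit_safe(adapter);
}
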
@@ -657,7 +672,7 @@ void e1000_reset(struct e1000_adapter *adapter)
ew32(WUC, 0);
if (e1000_init_hw(hw))
- e_err("Hardware Error\n");
+ e_dev_err("Hardware Error\n");
e1000_update_mng_vlan(adapter);
/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
@@ -790,6 +805,70 @@ static const struct net_device_ops e1000_netdev_ops = {
};
/**
+ * e1000_init_hw_struct - initialize members of hw struct
+ * @adapter: board private struct
+ * @hw: structure used by e1000_hw.c
+ *
+ * Factors out initialization of the e1000_hw struct to its own function
+ * that can be called very early at init (just after struct allocation).
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ * Returns negative error codes if MAC type setup fails.
+ */
+static int e1000_init_hw_struct(struct e1000_adapter *adapter,
+ struct e1000_hw *hw)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* PCI config space info */
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+ hw->revision_id = pdev->revision;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+ hw->max_frame_size = adapter->netdev->mtu +
+ ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+ hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+ /* identify the MAC */
+ if (e1000_set_mac_type(hw)) {
+ e_err(probe, "Unknown MAC Type\n");
+ return -EIO;
+ }
+
+ switch (hw->mac_type) {
+ default:
+ break;
+ case e1000_82541:
+ case e1000_82547:
+ case e1000_82541_rev_2:
+ case e1000_82547_rev_2:
+ hw->phy_init_script = 1;
+ break;
+ }
+
+ e1000_set_media_type(hw);
+ e1000_get_bus_info(hw);
+
+ hw->wait_autoneg_complete = false;
+ hw->tbi_compatibility_en = true;
+ hw->adaptive_ifs = true;
+
+ /* Copper options */
+
+ if (hw->media_type == e1000_media_type_copper) {
+ hw->mdix = AUTO_ALL_MODES;
+ hw->disable_polarity_correction = false;
+ hw->master_slave = E1000_MASTER_SLAVE;
+ }
+
+ return 0;
+}
+
+/**
* e1000_probe - Device Initialization Routine
* @pdev: PCI device information struct
* @ent: entry in e1000_pci_tbl
@@ -826,22 +905,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (err)
return err;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = 1;
- } else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- pr_err("No usable DMA config, aborting\n");
- goto err_dma;
- }
- }
- pci_using_dac = 0;
- }
-
err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
if (err)
goto err_pci_reg;
@@ -885,6 +948,32 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
}
}
+ /* make ready for any if (hw->...) below */
+ err = e1000_init_hw_struct(adapter, hw);
+ if (err)
+ goto err_sw_init;
+
+ /*
+ * there is a workaround being applied below that limits
+ * 64-bit DMA addresses to 64-bit hardware. There are some
+ * 32-bit adapters that Tx hang when given 64-bit DMA addresses
+ */
+ pci_using_dac = 0;
+ if ((hw->bus_type == e1000_bus_type_pcix) &&
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ /*
+ * according to DMA-API-HOWTO, coherent calls will always
+ * succeed if the set call did
+ */
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ pci_using_dac = 1;
+ } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ } else {
+ pr_err("No usable DMA config, aborting\n");
+ goto err_dma;
+ }
+
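
The DMA mask setup moves after e1000_init_hw_struct() because the decision now depends on hw->bus_type: only PCI-X parts are offered the 64-bit mask, working around 32-bit adapters that hang on transmit when handed 64-bit DMA addresses. The 64-bit-then-32-bit fallback itself is the standard probe-time idiom of this era; a generic sketch with my_* naming and the bus check reduced to a boolean parameter.

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct pci_dev *pdev, bool want_64bit,
                            int *using_dac)
{
        *using_dac = 0;

        if (want_64bit && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
                /* per DMA-API-HOWTO, the coherent mask will not fail if
                 * the streaming mask was accepted */
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
                *using_dac = 1;
                return 0;
        }

        if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
                dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
                return 0;
        }

        dev_err(&pdev->dev, "No usable DMA configuration\n");
        return -EIO;
}

Newer kernels fold the two calls into dma_set_mask_and_coherent(), but that helper postdates this patch.
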
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
@@ -914,8 +1003,10 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
(hw->mac_type != e1000_82547))
netdev->features |= NETIF_F_TSO;
- if (pci_using_dac)
+ if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
netdev->vlan_features |= NETIF_F_TSO;
netdev->vlan_features |= NETIF_F_HW_CSUM;
@@ -925,7 +1016,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize eeprom parameters */
if (e1000_init_eeprom_params(hw)) {
- e_err("EEPROM initialization failed\n");
+ e_err(probe, "EEPROM initialization failed\n");
goto err_eeprom;
}
@@ -936,7 +1027,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* make sure the EEPROM is good */
if (e1000_validate_eeprom_checksum(hw) < 0) {
- e_err("The EEPROM Checksum Is Not Valid\n");
+ e_err(probe, "The EEPROM Checksum Is Not Valid\n");
e1000_dump_eeprom(adapter);
/*
* set MAC address to all zeroes to invalidate and temporary
@@ -950,30 +1041,30 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
} else {
/* copy the MAC address out of the EEPROM */
if (e1000_read_mac_addr(hw))
- e_err("EEPROM Read Error\n");
+ e_err(probe, "EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr))
- e_err("Invalid MAC Address\n");
-
- e1000_get_bus_info(hw);
+ e_err(probe, "Invalid MAC Address\n");
init_timer(&adapter->tx_fifo_stall_timer);
- adapter->tx_fifo_stall_timer.function = &e1000_82547_tx_fifo_stall;
+ adapter->tx_fifo_stall_timer.function = e1000_82547_tx_fifo_stall;
adapter->tx_fifo_stall_timer.data = (unsigned long)adapter;
init_timer(&adapter->watchdog_timer);
- adapter->watchdog_timer.function = &e1000_watchdog;
+ adapter->watchdog_timer.function = e1000_watchdog;
adapter->watchdog_timer.data = (unsigned long) adapter;
init_timer(&adapter->phy_info_timer);
- adapter->phy_info_timer.function = &e1000_update_phy_info;
+ adapter->phy_info_timer.function = e1000_update_phy_info;
adapter->phy_info_timer.data = (unsigned long)adapter;
+ INIT_WORK(&adapter->fifo_stall_task, e1000_82547_tx_fifo_stall_task);
INIT_WORK(&adapter->reset_task, e1000_reset_task);
+ INIT_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
e1000_check_options(adapter);
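
probe now pairs the 82547 FIFO-stall and PHY-info timers with work items, and drops the redundant '&' from the timer function assignments (a function name already decays to a function pointer). The field-by-field init_timer()/.function/.data setup is the pre-timer_setup() API this kernel uses. A compressed sketch of the pairing, with hypothetical names that the sketch after the FIFO-stall conversion below continues.

#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_adapter {
        struct timer_list poll_timer;
        struct work_struct poll_task;
};

static void my_poll_timer_fn(unsigned long data);       /* timer callback */
static void my_poll_task(struct work_struct *work);     /* does the real work */

static void my_init_deferrals(struct my_adapter *adapter)
{
        init_timer(&adapter->poll_timer);
        adapter->poll_timer.function = my_poll_timer_fn; /* no '&' needed */
        adapter->poll_timer.data = (unsigned long)adapter;

        INIT_WORK(&adapter->poll_task, my_poll_task);
}
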
@@ -1047,7 +1138,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_register;
/* print bus type/speed/width info */
- e_info("(PCI%s:%dMHz:%d-bit) %pM\n",
+ e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
((hw->bus_speed == e1000_bus_speed_133) ? 133 :
(hw->bus_speed == e1000_bus_speed_120) ? 120 :
@@ -1059,7 +1150,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- e_info("Intel(R) PRO/1000 Network Connection\n");
+ e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
cards_found++;
return 0;
@@ -1072,6 +1163,7 @@ err_eeprom:
iounmap(hw->flash_address);
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
+err_dma:
err_sw_init:
iounmap(hw->hw_addr);
err_ioremap:
@@ -1079,7 +1171,6 @@ err_ioremap:
err_alloc_etherdev:
pci_release_selected_regions(pdev, bars);
err_pci_reg:
-err_dma:
pci_disable_device(pdev);
return err;
}
@@ -1131,68 +1222,18 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
* @adapter: board private structure to initialize
*
* e1000_sw_init initializes the Adapter private data structure.
- * Fields are initialized based on PCI device information and
- * OS network device settings (MTU size).
+ * e1000_init_hw_struct MUST be called before this function
**/
static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
{
- struct e1000_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
-
- /* PCI config space info */
-
- hw->vendor_id = pdev->vendor;
- hw->device_id = pdev->device;
- hw->subsystem_vendor_id = pdev->subsystem_vendor;
- hw->subsystem_id = pdev->subsystem_device;
- hw->revision_id = pdev->revision;
-
- pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
-
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
- hw->max_frame_size = netdev->mtu +
- ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
- hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
-
- /* identify the MAC */
-
- if (e1000_set_mac_type(hw)) {
- e_err("Unknown MAC Type\n");
- return -EIO;
- }
-
- switch (hw->mac_type) {
- default:
- break;
- case e1000_82541:
- case e1000_82547:
- case e1000_82541_rev_2:
- case e1000_82547_rev_2:
- hw->phy_init_script = 1;
- break;
- }
-
- e1000_set_media_type(hw);
-
- hw->wait_autoneg_complete = false;
- hw->tbi_compatibility_en = true;
- hw->adaptive_ifs = true;
-
- /* Copper options */
-
- if (hw->media_type == e1000_media_type_copper) {
- hw->mdix = AUTO_ALL_MODES;
- hw->disable_polarity_correction = false;
- hw->master_slave = E1000_MASTER_SLAVE;
- }
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
if (e1000_alloc_queues(adapter)) {
- e_err("Unable to allocate memory for queues\n");
+ e_err(probe, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -1386,7 +1427,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- e_err("Unable to allocate memory for the Tx descriptor ring\n");
+ e_err(probe, "Unable to allocate memory for the Tx descriptor "
+ "ring\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -1401,7 +1443,8 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
if (!txdr->desc) {
setup_tx_desc_die:
vfree(txdr->buffer_info);
- e_err("Unable to allocate memory for the Tx descriptor ring\n");
+ e_err(probe, "Unable to allocate memory for the Tx descriptor "
+ "ring\n");
return -ENOMEM;
}
@@ -1409,7 +1452,7 @@ setup_tx_desc_die:
if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
void *olddesc = txdr->desc;
dma_addr_t olddma = txdr->dma;
- e_err("txdr align check failed: %u bytes at %p\n",
+ e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
txdr->size, txdr->desc);
/* Try again, without freeing the previous */
txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
@@ -1427,7 +1470,7 @@ setup_tx_desc_die:
txdr->dma);
dma_free_coherent(&pdev->dev, txdr->size, olddesc,
olddma);
- e_err("Unable to allocate aligned memory "
+ e_err(probe, "Unable to allocate aligned memory "
"for the transmit descriptor ring\n");
vfree(txdr->buffer_info);
return -ENOMEM;
@@ -1460,7 +1503,7 @@ int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) {
- e_err("Allocation for Tx Queue %u failed\n", i);
+ e_err(probe, "Allocation for Tx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_tx_resources(adapter,
&adapter->tx_ring[i]);
@@ -1580,7 +1623,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
size = sizeof(struct e1000_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- e_err("Unable to allocate memory for the Rx descriptor ring\n");
+ e_err(probe, "Unable to allocate memory for the Rx descriptor "
+ "ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -1596,7 +1640,8 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
GFP_KERNEL);
if (!rxdr->desc) {
- e_err("Unable to allocate memory for the Rx descriptor ring\n");
+ e_err(probe, "Unable to allocate memory for the Rx descriptor "
+ "ring\n");
setup_rx_desc_die:
vfree(rxdr->buffer_info);
return -ENOMEM;
@@ -1606,7 +1651,7 @@ setup_rx_desc_die:
if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
void *olddesc = rxdr->desc;
dma_addr_t olddma = rxdr->dma;
- e_err("rxdr align check failed: %u bytes at %p\n",
+ e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
rxdr->size, rxdr->desc);
/* Try again, without freeing the previous */
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
@@ -1615,8 +1660,8 @@ setup_rx_desc_die:
if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma);
- e_err("Unable to allocate memory for the Rx descriptor "
- "ring\n");
+ e_err(probe, "Unable to allocate memory for the Rx "
+ "descriptor ring\n");
goto setup_rx_desc_die;
}
@@ -1626,8 +1671,8 @@ setup_rx_desc_die:
rxdr->dma);
dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma);
- e_err("Unable to allocate aligned memory for the Rx "
- "descriptor ring\n");
+ e_err(probe, "Unable to allocate aligned memory for "
+ "the Rx descriptor ring\n");
goto setup_rx_desc_die;
} else {
/* Free old allocation, new allocation was successful */
@@ -1659,7 +1704,7 @@ int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) {
- e_err("Allocation for Rx Queue %u failed\n", i);
+ e_err(probe, "Allocation for Rx Queue %u failed\n", i);
for (i-- ; i >= 0; i--)
e1000_free_rx_resources(adapter,
&adapter->rx_ring[i]);
@@ -2110,7 +2155,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
if (!mcarray) {
- e_err("memory allocation failed\n");
+ e_err(probe, "memory allocation failed\n");
return;
}
@@ -2206,22 +2251,45 @@ static void e1000_set_rx_mode(struct net_device *netdev)
static void e1000_update_phy_info(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ schedule_work(&adapter->phy_info_task);
+}
+
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ phy_info_task);
struct e1000_hw *hw = &adapter->hw;
+
+ rtnl_lock();
e1000_phy_get_info(hw, &adapter->phy_info);
+ rtnl_unlock();
}
/**
* e1000_82547_tx_fifo_stall - Timer Call-back
* @data: pointer to adapter cast into an unsigned long
**/
-
static void e1000_82547_tx_fifo_stall(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *)data;
+ schedule_work(&adapter->fifo_stall_task);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - task to complete work
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+ struct e1000_adapter *adapter = container_of(work,
+ struct e1000_adapter,
+ fifo_stall_task);
struct e1000_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
u32 tctl;
+ rtnl_lock();
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((er32(TDT) == er32(TDH)) &&
(er32(TDFT) == er32(TDFH)) &&
@@ -2242,6 +2310,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
mod_timer(&adapter->tx_fifo_stall_timer, jiffies + 1);
}
}
+ rtnl_unlock();
}
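
The shape of the conversion is the same for both handlers: the timer callback runs in softirq context, where sleeping and rtnl_lock() are forbidden, so it only schedules work; the work handler recovers the adapter with container_of() and, running in process context, may take RTNL around the hardware access. Continuing the hypothetical names from the probe-time sketch above.

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

static void my_poll_timer_fn(unsigned long data)
{
        struct my_adapter *adapter = (struct my_adapter *)data;

        /* softirq context: defer everything that might sleep */
        schedule_work(&adapter->poll_task);
}

static void my_poll_task(struct work_struct *work)
{
        struct my_adapter *adapter = container_of(work, struct my_adapter,
                                                  poll_task);

        /* process context: sleeping and rtnl_lock() are allowed */
        rtnl_lock();
        /* ... read PHY state / poke Tx FIFO registers ... */
        rtnl_unlock();
}
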
bool e1000_has_link(struct e1000_adapter *adapter)
@@ -2648,7 +2717,8 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit()))
- e_warn("checksum_partial proto=%x!\n", skb->protocol);
+ e_warn(drv, "checksum_partial proto=%x!\n",
+ skb->protocol);
break;
}
@@ -2992,7 +3062,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* fall through */
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
- e_err("__pskb_pull_tail failed.\n");
+ e_err(drv, "__pskb_pull_tail "
+ "failed.\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -3048,7 +3119,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
}
}
- if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+ if (unlikely(vlan_tx_tag_present(skb))) {
tx_flags |= E1000_TX_FLAGS_VLAN;
tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
}
@@ -3107,7 +3178,7 @@ static void e1000_reset_task(struct work_struct *work)
struct e1000_adapter *adapter =
container_of(work, struct e1000_adapter, reset_task);
- e1000_reinit_locked(adapter);
+ e1000_reinit_safe(adapter);
}
/**
@@ -3140,7 +3211,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- e_err("Invalid MTU setting\n");
+ e_err(probe, "Invalid MTU setting\n");
return -EINVAL;
}
@@ -3148,7 +3219,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
switch (hw->mac_type) {
case e1000_undefined ... e1000_82542_rev2_1:
if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
- e_err("Jumbo Frames not supported.\n");
+ e_err(probe, "Jumbo Frames not supported.\n");
return -EINVAL;
}
break;
@@ -3448,6 +3519,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+ rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
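
The added rmb() enforces load ordering between the descriptor status check and the subsequent reads of buffer_info: without it, the CPU could speculatively load buffer_info before the DD (descriptor done) bit is observed, and act on bookkeeping that does not yet match what the hardware wrote back. The skeletal shape, with hypothetical ring/descriptor types standing in for the e1000 ones and endianness handling elided.

#include <linux/types.h>
#include <asm/barrier.h>

#define MY_STAT_DD 0x01

struct my_buffer { void *skb; dma_addr_t dma; };
struct my_desc { u32 status; };
struct my_ring {
        struct my_desc *desc;
        struct my_buffer *buffer_info;
};

static bool my_clean_one(struct my_ring *ring, unsigned int i)
{
        struct my_desc *desc = &ring->desc[i];
        struct my_buffer *info;

        if (!(desc->status & MY_STAT_DD))
                return false;           /* hardware not finished yet */

        rmb();          /* read descriptor status before buffer_info */

        info = &ring->buffer_info[i];
        /* ... unmap info->dma, free info->skb, advance the ring ... */
        return true;
}
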
@@ -3500,7 +3572,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
!(er32(STATUS) & E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */
- e_err("Detected Tx Unit Hang\n"
+ e_err(drv, "Detected Tx Unit Hang\n"
" Tx Queue <%lu>\n"
" TDH <%x>\n"
" TDT <%x>\n"
@@ -3528,7 +3600,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
adapter->total_tx_packets += total_tx_packets;
netdev->stats.tx_bytes += total_tx_bytes;
netdev->stats.tx_packets += total_tx_packets;
- return (count < tx_ring->count);
+ return count < tx_ring->count;
}
/**
@@ -3545,7 +3617,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
struct e1000_hw *hw = &adapter->hw;
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
- skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
/* 82543 or newer only */
if (unlikely(hw->mac_type < e1000_82543)) return;
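
skb_checksum_none_assert() was introduced around this time as a replacement for explicitly storing CHECKSUM_NONE: freshly allocated skbs already carry CHECKSUM_NONE, so the helper only documents (and, in debug builds, checks) that expectation instead of writing the field again. Roughly its shape, from <linux/skbuff.h>; treat the details as approximate.

/* Approximate definition; the in-tree version may differ slightly. */
static inline void skb_checksum_none_assert(struct sk_buff *skb)
{
#ifdef DEBUG
        BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}
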
@@ -3591,13 +3664,14 @@ static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
__le16 vlan, struct sk_buff *skb)
{
- if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
- vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
- le16_to_cpu(vlan) &
- E1000_RXD_SPC_VLAN_MASK);
- } else {
- netif_receive_skb(skb);
- }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ if ((unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))))
+ vlan_gro_receive(&adapter->napi, adapter->vlgrp,
+ le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK,
+ skb);
+ else
+ napi_gro_receive(&adapter->napi, skb);
}
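
e1000_receive_skb() now hands packets to the GRO layer instead of plain netif_receive_skb()/vlan_hwaccel_receive_skb(), which is why eth_type_trans() moves here: GRO wants skb->protocol set once, in one place, before submission. A sketch of the receive-completion shape for a NAPI driver of this vintage; vlan_gro_receive() and struct vlan_group belong to kernels of this generation (they were removed later in favour of hardware-accelerated VLAN tags), and the my_* names are illustrative.

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

struct my_adapter {
        struct net_device *netdev;
        struct napi_struct napi;
        struct vlan_group *vlgrp;
};

static void my_receive_skb(struct my_adapter *adapter, bool vlan_valid,
                           u16 vlan_tci, struct sk_buff *skb)
{
        /* set skb->protocol exactly once, before handing off to GRO */
        skb->protocol = eth_type_trans(skb, adapter->netdev);

        if (adapter->vlgrp && vlan_valid)
                vlan_gro_receive(&adapter->napi, adapter->vlgrp,
                                 vlan_tci, skb);
        else
                napi_gro_receive(&adapter->napi, skb);
}
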
/**
@@ -3637,6 +3711,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
if (*work_done >= work_to_do)
break;
(*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
skb = buffer_info->skb;
@@ -3749,13 +3824,11 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
/* eth type trans needs skb->data to point to something */
if (!pskb_may_pull(skb, ETH_HLEN)) {
- e_err("pskb_may_pull failed.\n");
+ e_err(drv, "pskb_may_pull failed.\n");
dev_kfree_skb(skb);
goto next_desc;
}
- skb->protocol = eth_type_trans(skb, netdev);
-
e1000_receive_skb(adapter, status, rx_desc->special, skb);
next_desc:
@@ -3843,6 +3916,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (*work_done >= work_to_do)
break;
(*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
status = rx_desc->status;
skb = buffer_info->skb;
@@ -3874,7 +3948,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
if (adapter->discarding) {
/* All receives must fit into a single buffer */
- e_info("Receive packet consumed multiple buffers\n");
+ e_dbg("Receive packet consumed multiple buffers\n");
/* recycle */
buffer_info->skb = skb;
if (status & E1000_RXD_STAT_EOP)
@@ -3917,8 +3991,6 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
((u32)(rx_desc->errors) << 24),
le16_to_cpu(rx_desc->csum), skb);
- skb->protocol = eth_type_trans(skb, netdev);
-
e1000_receive_skb(adapter, status, rx_desc->special, skb);
next_desc:
@@ -3986,8 +4058,8 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- e_err("skb align check failed: %u bytes at %p\n",
- bufsz, skb->data);
+ e_err(rx_err, "skb align check failed: %u bytes at "
+ "%p\n", bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -4095,8 +4167,8 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
/* Fix for errata 23, can't cross 64kB boundary */
if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
struct sk_buff *oldskb = skb;
- e_err("skb align check failed: %u bytes at %p\n",
- bufsz, skb->data);
+ e_err(rx_err, "skb align check failed: %u bytes at "
+ "%p\n", bufsz, skb->data);
/* Try again, without freeing the previous */
skb = netdev_alloc_skb_ip_align(netdev, bufsz);
/* Failed allocation, critical failure */
@@ -4141,8 +4213,8 @@ map_skb:
if (!e1000_check_64k_bound(adapter,
(void *)(unsigned long)buffer_info->dma,
adapter->rx_buffer_len)) {
- e_err("dma align check failed: %u bytes at %p\n",
- adapter->rx_buffer_len,
+ e_err(rx_err, "dma align check failed: %u bytes at "
+ "%p\n", adapter->rx_buffer_len,
(void *)(unsigned long)buffer_info->dma);
dev_kfree_skb(skb);
buffer_info->skb = NULL;
@@ -4355,7 +4427,7 @@ void e1000_pci_set_mwi(struct e1000_hw *hw)
int ret_val = pci_set_mwi(adapter->pdev);
if (ret_val)
- e_err("Error in setting MWI\n");
+ e_err(probe, "Error in setting MWI\n");
}
void e1000_pci_clear_mwi(struct e1000_hw *hw)
@@ -4469,7 +4541,7 @@ static void e1000_restore_vlan(struct e1000_adapter *adapter)
if (adapter->vlgrp) {
u16 vid;
- for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ for (vid = 0; vid < VLAN_N_VID; vid++) {
if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
e1000_vlan_rx_add_vid(adapter->netdev, vid);
@@ -4486,7 +4558,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((hw->media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
- e_err("Unsupported Speed/Duplex configuration\n");
+ e_err(probe, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
@@ -4509,7 +4581,7 @@ int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
- e_err("Unsupported Speed/Duplex configuration\n");
+ e_err(probe, "Unsupported Speed/Duplex configuration\n");
return -EINVAL;
}
return 0;