Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/3c509.c | 6
-rw-r--r--  drivers/net/8139cp.c | 5
-rw-r--r--  drivers/net/8139too.c | 5
-rw-r--r--  drivers/net/Kconfig | 43
-rw-r--r--  drivers/net/Makefile | 4
-rw-r--r--  drivers/net/amd8111e.c | 23
-rw-r--r--  drivers/net/arm/at91_ether.c | 6
-rw-r--r--  drivers/net/atl1e/atl1e.h | 1
-rw-r--r--  drivers/net/atl1e/atl1e_hw.c | 4
-rw-r--r--  drivers/net/atlx/atl1.c | 24
-rw-r--r--  drivers/net/atlx/atl1.h | 2
-rw-r--r--  drivers/net/atlx/atl2.c | 8
-rw-r--r--  drivers/net/ax88796.c | 6
-rw-r--r--  drivers/net/bnx2.c | 50
-rw-r--r--  drivers/net/bnx2.h | 6
-rw-r--r--  drivers/net/bnx2x_init.h | 9
-rw-r--r--  drivers/net/bnx2x_main.c | 10
-rw-r--r--  drivers/net/bonding/bond_alb.c | 13
-rw-r--r--  drivers/net/bonding/bond_main.c | 68
-rw-r--r--  drivers/net/chelsio/sge.c | 4
-rw-r--r--  drivers/net/cris/eth_v10.c | 4
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 4
-rw-r--r--  drivers/net/cxgb3/l2t.c | 1
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 8
-rw-r--r--  drivers/net/dm9000.c | 9
-rw-r--r--  drivers/net/e100.c | 20
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 8
-rw-r--r--  drivers/net/e1000/e1000_main.c | 1
-rw-r--r--  drivers/net/e1000e/e1000.h | 5
-rw-r--r--  drivers/net/e1000e/ethtool.c | 8
-rw-r--r--  drivers/net/e1000e/ich8lan.c | 9
-rw-r--r--  drivers/net/e1000e/netdev.c | 26
-rw-r--r--  drivers/net/e1000e/param.c | 25
-rw-r--r--  drivers/net/ehea/ehea.h | 2
-rw-r--r--  drivers/net/ehea/ehea_main.c | 25
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 176
-rw-r--r--  drivers/net/ehea/ehea_qmr.h | 5
-rw-r--r--  drivers/net/enc28j60.c | 18
-rw-r--r--  drivers/net/fec_mpc52xx.c | 18
-rw-r--r--  drivers/net/fec_mpc52xx_phy.c | 55
-rw-r--r--  drivers/net/fs_enet/fs_enet-main.c | 6
-rw-r--r--  drivers/net/gianfar.c | 55
-rw-r--r--  drivers/net/gianfar_mii.c | 21
-rw-r--r--  drivers/net/gianfar_mii.h | 3
-rw-r--r--  drivers/net/hp-plus.c | 2
-rw-r--r--  drivers/net/ibm_newemac/core.c | 10
-rw-r--r--  drivers/net/ibm_newemac/mal.c | 15
-rw-r--r--  drivers/net/igb/igb_ethtool.c | 8
-rw-r--r--  drivers/net/igb/igb_main.c | 64
-rw-r--r--  drivers/net/ipg.c | 8
-rw-r--r--  drivers/net/irda/ks959-sir.c | 1
-rw-r--r--  drivers/net/irda/ksdazzle-sir.c | 1
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 68
-rw-r--r--  drivers/net/jme.c | 21
-rw-r--r--  drivers/net/jme.h | 2
-rw-r--r--  drivers/net/loopback.c | 9
-rw-r--r--  drivers/net/macvlan.c | 3
-rw-r--r--  drivers/net/mlx4/Makefile | 7
-rw-r--r--  drivers/net/mlx4/alloc.c | 97
-rw-r--r--  drivers/net/mlx4/cq.c | 2
-rw-r--r--  drivers/net/mlx4/en_cq.c | 146
-rw-r--r--  drivers/net/mlx4/en_main.c | 253
-rw-r--r--  drivers/net/mlx4/en_netdev.c | 1088
-rw-r--r--  drivers/net/mlx4/en_params.c | 482
-rw-r--r--  drivers/net/mlx4/en_port.c | 261
-rw-r--r--  drivers/net/mlx4/en_port.h | 570
-rw-r--r--  drivers/net/mlx4/en_resources.c | 96
-rw-r--r--  drivers/net/mlx4/en_rx.c | 1080
-rw-r--r--  drivers/net/mlx4/en_tx.c | 820
-rw-r--r--  drivers/net/mlx4/eq.c | 2
-rw-r--r--  drivers/net/mlx4/fw.c | 20
-rw-r--r--  drivers/net/mlx4/fw.h | 7
-rw-r--r--  drivers/net/mlx4/main.c | 295
-rw-r--r--  drivers/net/mlx4/mcg.c | 4
-rw-r--r--  drivers/net/mlx4/mlx4.h | 55
-rw-r--r--  drivers/net/mlx4/mlx4_en.h | 561
-rw-r--r--  drivers/net/mlx4/mr.c | 2
-rw-r--r--  drivers/net/mlx4/pd.c | 4
-rw-r--r--  drivers/net/mlx4/port.c | 319
-rw-r--r--  drivers/net/mlx4/qp.c | 81
-rw-r--r--  drivers/net/mlx4/srq.c | 2
-rw-r--r--  drivers/net/mv643xx_eth.c | 14
-rw-r--r--  drivers/net/myri10ge/myri10ge.c | 10
-rw-r--r--  drivers/net/netx-eth.c | 2
-rw-r--r--  drivers/net/niu.c | 299
-rw-r--r--  drivers/net/niu.h | 13
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/ibmtr_cs.c | 2
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 1
-rw-r--r--  drivers/net/phy/marvell.c | 66
-rw-r--r--  drivers/net/phy/mdio_bus.c | 7
-rw-r--r--  drivers/net/phy/phy_device.c | 47
-rw-r--r--  drivers/net/phy/vitesse.c | 64
-rw-r--r--  drivers/net/pppoe.c | 6
-rw-r--r--  drivers/net/pppol2tp.c | 1
-rw-r--r--  drivers/net/qla3xxx.c | 19
-rw-r--r--  drivers/net/qlge/qlge.h | 5
-rw-r--r--  drivers/net/qlge/qlge_main.c | 89
-rw-r--r--  drivers/net/r8169.c | 74
-rw-r--r--  drivers/net/sfc/ethtool.c | 4
-rw-r--r--  drivers/net/sh_eth.c | 4
-rw-r--r--  drivers/net/sis190.c | 1
-rw-r--r--  drivers/net/sis900.c | 1
-rw-r--r--  drivers/net/smc911x.c | 56
-rw-r--r--  drivers/net/smc911x.h | 6
-rw-r--r--  drivers/net/smc91x.c | 14
-rw-r--r--  drivers/net/spider_net.c | 4
-rw-r--r--  drivers/net/starfire.c | 5
-rw-r--r--  drivers/net/sungem.c | 144
-rw-r--r--  drivers/net/tlan.c | 23
-rw-r--r--  drivers/net/tulip/dmfe.c | 12
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/ucc_geth_ethtool.c | 7
-rw-r--r--  drivers/net/usb/asix.c | 8
-rw-r--r--  drivers/net/usb/dm9601.c | 15
-rw-r--r--  drivers/net/usb/hso.c | 12
-rw-r--r--  drivers/net/via-velocity.c | 13
-rw-r--r--  drivers/net/wan/syncppp.c | 5
-rw-r--r--  drivers/net/wan/z85230.c | 1
-rw-r--r--  drivers/net/wireless/ath5k/base.c | 95
-rw-r--r--  drivers/net/wireless/ath5k/base.h | 4
-rw-r--r--  drivers/net/wireless/ath5k/debug.c | 12
-rw-r--r--  drivers/net/wireless/ath5k/desc.c | 16
-rw-r--r--  drivers/net/wireless/ath5k/initvals.c | 2
-rw-r--r--  drivers/net/wireless/ath5k/reset.c | 22
-rw-r--r--  drivers/net/wireless/ath9k/beacon.c | 10
-rw-r--r--  drivers/net/wireless/ath9k/recv.c | 19
-rw-r--r--  drivers/net/wireless/hostap/hostap_wlan.h | 5
-rw-r--r--  drivers/net/wireless/ipw2200.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn.c | 19
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-dev.h | 3
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-rx.c | 26
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-scan.c | 7
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-sta.c | 24
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl3945-base.c | 14
-rw-r--r--  drivers/net/wireless/libertas/cmd.c | 4
-rw-r--r--  drivers/net/wireless/libertas/rx.c | 2
-rw-r--r--  drivers/net/wireless/libertas/scan.c | 4
-rw-r--r--  drivers/net/wireless/libertas_tf/if_usb.c | 2
-rw-r--r--  drivers/net/wireless/orinoco.c | 42
-rw-r--r--  drivers/net/wireless/p54/p54common.c | 31
-rw-r--r--  drivers/net/wireless/p54/p54pci.c | 132
-rw-r--r--  drivers/net/wireless/rt2x00/Kconfig | 2
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_mac.c | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 2
-rw-r--r--  drivers/net/xen-netfront.c | 6
-rw-r--r--  drivers/net/xtsonic.c | 319
149 files changed, 8277 insertions, 892 deletions
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 3a7bc524af33..c7a4f3bcc2bc 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -94,7 +94,7 @@
#include <asm/io.h>
#include <asm/irq.h>
-static char version[] __initdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
+static char version[] __devinitdata = DRV_NAME ".c:" DRV_VERSION " " DRV_RELDATE " becker@scyld.com\n";
#ifdef EL3_DEBUG
static int el3_debug = EL3_DEBUG;
@@ -186,7 +186,7 @@ static int max_interrupt_work = 10;
static int nopnp;
#endif
-static int __init el3_common_init(struct net_device *dev);
+static int __devinit el3_common_init(struct net_device *dev);
static void el3_common_remove(struct net_device *dev);
static ushort id_read_eeprom(int index);
static ushort read_eeprom(int ioaddr, int index);
@@ -537,7 +537,7 @@ static struct mca_driver el3_mca_driver = {
static int mca_registered;
#endif /* CONFIG_MCA */
-static int __init el3_common_init(struct net_device *dev)
+static int __devinit el3_common_init(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
int err;
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 85fa40a0a667..9ba1f0b46429 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -1836,10 +1836,9 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
- dev_err(&pdev->dev,
- "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip\n",
+ dev_info(&pdev->dev,
+ "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
pdev->vendor, pdev->device, pdev->revision);
- dev_err(&pdev->dev, "Try the \"8139too\" driver instead.\n");
return -ENODEV;
}
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 0daf8c15e381..63f906b04899 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -946,10 +946,9 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision >= 0x20) {
dev_info(&pdev->dev,
- "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip\n",
+ "This (id %04x:%04x rev %02x) is an enhanced 8139C+ chip, use 8139cp\n",
pdev->vendor, pdev->device, pdev->revision);
- dev_info(&pdev->dev,
- "Use the \"8139cp\" driver for improved performance and stability.\n");
+ return -ENODEV;
}
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index ad301ace6085..231eeaf1d552 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -464,6 +464,12 @@ config MIPS_JAZZ_SONIC
This is the driver for the onboard card of MIPS Magnum 4000,
Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
+config XTENSA_XT2000_SONIC
+ tristate "Xtensa XT2000 onboard SONIC Ethernet support"
+ depends on XTENSA_PLATFORM_XT2000
+ help
+ This is the driver for the onboard card of the Xtensa XT2000 board.
+
config MIPS_AU1X00_ENET
bool "MIPS AU1000 Ethernet support"
depends on SOC_AU1X00
@@ -888,7 +894,7 @@ config SMC91X
select CRC32
select MII
depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \
- SOC_AU1X00 || BLACKFIN || MN10300
+ MIPS || BLACKFIN || MN10300
help
This is a driver for SMC's 91x series of Ethernet chipsets,
including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -960,7 +966,7 @@ config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
select MII
- depends on ARCH_PXA || SUPERH
+ depends on ARM || SUPERH
help
This is a driver for SMSC's LAN911x series of Ethernet chipsets
including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1819,9 +1825,10 @@ config FEC2
config FEC_MPC52xx
tristate "MPC52xx FEC driver"
- depends on PPC_MPC52xx && PPC_BESTCOMM_FEC
+ depends on PPC_MPC52xx && PPC_BESTCOMM
select CRC32
select PHYLIB
+ select PPC_BESTCOMM_FEC
---help---
This option enables support for the MPC5200's on-chip
Fast Ethernet Controller
@@ -2003,6 +2010,15 @@ config IGB_LRO
If in doubt, say N.
+config IGB_DCA
+ bool "Direct Cache Access (DCA) Support"
+ default y
+ depends on IGB && DCA && !(IGB=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
+
source "drivers/net/ixp2000/Kconfig"
config MYRI_SBUS
@@ -2426,9 +2442,13 @@ config IXGBE
will be called ixgbe.
config IXGBE_DCA
- bool
+ bool "Direct Cache Access (DCA) Support"
default y
depends on IXGBE && DCA && !(IXGBE=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
config IXGB
tristate "Intel(R) PRO/10GbE support"
@@ -2478,9 +2498,13 @@ config MYRI10GE
will be called myri10ge.
config MYRI10GE_DCA
- bool
+ bool "Direct Cache Access (DCA) Support"
default y
depends on MYRI10GE && DCA && !(MYRI10GE=y && DCA=m)
+ ---help---
+ Say Y here if you want to use Direct Cache Access (DCA) in the
+ driver. DCA is a method for warming the CPU cache before data
+ is used, with the intent of lessening the impact of cache misses.
config NETXEN_NIC
tristate "NetXen Multi port (1/10) Gigabit Ethernet NIC"
@@ -2504,6 +2528,15 @@ config PASEMI_MAC
This driver supports the on-chip 1/10Gbit Ethernet controller on
PA Semi's PWRficient line of chips.
+config MLX4_EN
+ tristate "Mellanox Technologies 10Gbit Ethernet support"
+ depends on PCI && INET
+ select MLX4_CORE
+ select INET_LRO
+ help
+ This driver supports Mellanox Technologies ConnectX Ethernet
+ devices.
+
config MLX4_CORE
tristate
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa2510b2e609..017383ad5ec6 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -114,7 +114,7 @@ obj-$(CONFIG_EL2) += 3c503.o 8390p.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2_MCA) += ne2.o 8390p.o
obj-$(CONFIG_HPLAN) += hp.o 8390p.o
-obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390.o
+obj-$(CONFIG_HPLAN_PLUS) += hp-plus.o 8390p.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
obj-$(CONFIG_ULTRAMCA) += smc-mca.o 8390.o
obj-$(CONFIG_ULTRA32) += smc-ultra32.o 8390.o
@@ -227,6 +227,8 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_ENC28J60) += enc28j60.o
+obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
+
obj-$(CONFIG_MACB) += macb.o
obj-$(CONFIG_ARM) += arm/
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index c54967f7942a..07a6697e3635 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
This function frees the transmitter and receiver descriptor rings.
*/
static void amd8111e_free_ring(struct amd8111e_priv* lp)
{
-
- /* Free transmit and receive skbs */
- amd8111e_free_skbs(lp->amd8111e_net_dev);
-
/* Free transmit and receive descriptor rings */
if(lp->rx_ring){
pci_free_consistent(lp->pci_dev,
@@ -833,12 +829,14 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
} while(intr0 & RINT0);
- /* Receive descriptor is empty now */
- spin_lock_irqsave(&lp->lock, flags);
- __netif_rx_complete(dev, napi);
- writel(VAL0|RINTEN0, mmio + INTEN0);
- writel(VAL2 | RDMD0, mmio + CMD0);
- spin_unlock_irqrestore(&lp->lock, flags);
+ if (rx_pkt_limit > 0) {
+ /* Receive descriptor is empty now */
+ spin_lock_irqsave(&lp->lock, flags);
+ __netif_rx_complete(dev, napi);
+ writel(VAL0|RINTEN0, mmio + INTEN0);
+ writel(VAL2 | RDMD0, mmio + CMD0);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
rx_not_empty:
return num_rx_pkt;
@@ -1231,7 +1229,9 @@ static int amd8111e_close(struct net_device * dev)
amd8111e_disable_interrupt(lp);
amd8111e_stop_chip(lp);
- amd8111e_free_ring(lp);
+
+ /* Free transmit and receive skbs */
+ amd8111e_free_skbs(lp->amd8111e_net_dev);
netif_carrier_off(lp->amd8111e_net_dev);
@@ -1241,6 +1241,7 @@ static int amd8111e_close(struct net_device * dev)
spin_unlock_irq(&lp->lock);
free_irq(dev->irq, dev);
+ amd8111e_free_ring(lp);
/* Update the statistics before closing */
amd8111e_get_stats(dev);
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index 0fa53464efb2..6f431a887e7e 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -1080,7 +1080,8 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
init_timer(&lp->check_timer);
lp->check_timer.data = (unsigned long)dev;
lp->check_timer.function = at91ether_check_link;
- }
+ } else if (lp->board_data.phy_irq_pin >= 32)
+ gpio_request(lp->board_data.phy_irq_pin, "ethernet_phy");
/* Display ethernet banner */
printk(KERN_INFO "%s: AT91 ethernet at 0x%08x int=%d %s%s (%s)\n",
@@ -1167,6 +1168,9 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(dev);
+ if (lp->board_data.phy_irq_pin >= 32)
+ gpio_free(lp->board_data.phy_irq_pin);
+
unregister_netdev(dev);
free_irq(dev->irq, dev);
dma_free_coherent(NULL, sizeof(struct recv_desc_bufs), lp->dlist, (dma_addr_t)lp->dlist_phys);
diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h
index b645fa0f3f64..c49550d507a0 100644
--- a/drivers/net/atl1e/atl1e.h
+++ b/drivers/net/atl1e/atl1e.h
@@ -46,7 +46,6 @@
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/tcp.h>
-#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/workqueue.h>
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 8cbc1b59bd62..4a7700620119 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -163,9 +163,6 @@ int atl1e_read_mac_addr(struct atl1e_hw *hw)
* atl1e_hash_mc_addr
* purpose
* set hash value for a multicast address
- * hash calcu processing :
- * 1. calcu 32bit CRC for multicast address
- * 2. reverse crc with MSB to LSB
*/
u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
{
@@ -174,7 +171,6 @@ u32 atl1e_hash_mc_addr(struct atl1e_hw *hw, u8 *mc_addr)
int i;
crc32 = ether_crc_le(6, mc_addr);
- crc32 = ~crc32;
for (i = 0; i < 32; i++)
value |= (((crc32 >> i) & 1) << (31 - i));
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 3cf59a7f5a1c..aef403d299ee 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2310,7 +2310,8 @@ static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
if (tpd != ptpd)
memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
- tpd->word2 = (cpu_to_le16(buffer_info->length) &
+ tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
+ tpd->word2 |= (cpu_to_le16(buffer_info->length) &
TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
/*
@@ -2409,8 +2410,8 @@ static int atl1_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
((vlan_tag >> 9) & 0x8);
ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
- ptpd->word3 |= (vlan_tag & TPD_VL_TAGGED_MASK) <<
- TPD_VL_TAGGED_SHIFT;
+ ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
+ TPD_VLANTAG_SHIFT;
}
tso = atl1_tso(adapter, skb, ptpd);
@@ -3403,14 +3404,8 @@ static void atl1_get_wol(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
+ wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- if (adapter->wol & ATLX_WUFC_EX)
- wol->wolopts |= WAKE_UCAST;
- if (adapter->wol & ATLX_WUFC_MC)
- wol->wolopts |= WAKE_MCAST;
- if (adapter->wol & ATLX_WUFC_BC)
- wol->wolopts |= WAKE_BCAST;
if (adapter->wol & ATLX_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
return;
@@ -3421,15 +3416,10 @@ static int atl1_set_wol(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
+ WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
adapter->wol = 0;
- if (wol->wolopts & WAKE_UCAST)
- adapter->wol |= ATLX_WUFC_EX;
- if (wol->wolopts & WAKE_MCAST)
- adapter->wol |= ATLX_WUFC_MC;
- if (wol->wolopts & WAKE_BCAST)
- adapter->wol |= ATLX_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATLX_WUFC_MAG;
return 0;
diff --git a/drivers/net/atlx/atl1.h b/drivers/net/atlx/atl1.h
index a5015b14a429..ffa73fc8d95e 100644
--- a/drivers/net/atlx/atl1.h
+++ b/drivers/net/atlx/atl1.h
@@ -504,7 +504,7 @@ struct rx_free_desc {
#define TPD_PKTNT_MASK 0x0001
#define TPD_PKTINT_SHIFT 15
#define TPD_VLANTAG_MASK 0xFFFF
-#define TPD_VLAN_SHIFT 16
+#define TPD_VLANTAG_SHIFT 16
/* tpd word 3 bits 0:13 */
#define TPD_EOP_MASK 0x0001
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index f5bdc92c1a65..8571e8c0bc67 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1690,9 +1690,11 @@ static int atl2_resume(struct pci_dev *pdev)
ATL2_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
- err = atl2_request_irq(adapter);
- if (netif_running(netdev) && err)
- return err;
+ if (netif_running(netdev)) {
+ err = atl2_request_irq(adapter);
+ if (err)
+ return err;
+ }
atl2_reset_hw(&adapter->hw);
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index 4207d6efddc0..9a314d88e7b6 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -838,12 +838,12 @@ static int ax_probe(struct platform_device *pdev)
/* find the platform resources */
- dev->irq = platform_get_irq(pdev, 0);
- if (dev->irq < 0) {
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
dev_err(&pdev->dev, "no IRQ specified\n");
- ret = -ENXIO;
goto exit_mem;
}
+ dev->irq = ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 430d430bce29..9e8222f9e90e 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -543,9 +543,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_pg_ring; j++) {
if (rxr->rx_pg_desc_ring[j])
pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
- rxr->rx_pg_desc_ring[i],
- rxr->rx_pg_desc_mapping[i]);
- rxr->rx_pg_desc_ring[i] = NULL;
+ rxr->rx_pg_desc_ring[j],
+ rxr->rx_pg_desc_mapping[j]);
+ rxr->rx_pg_desc_ring[j] = NULL;
}
if (rxr->rx_pg_ring)
vfree(rxr->rx_pg_ring);
@@ -3144,6 +3144,28 @@ bnx2_has_work(struct bnx2_napi *bnapi)
return 0;
}
+static void
+bnx2_chk_missed_msi(struct bnx2 *bp)
+{
+ struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+ u32 msi_ctrl;
+
+ if (bnx2_has_work(bnapi)) {
+ msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
+ if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
+ return;
+
+ if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
+ REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
+ ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
+ REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
+ bnx2_msi(bp->irq_tbl[0].vector, bnapi);
+ }
+ }
+
+ bp->idle_chk_status_idx = bnapi->last_status_idx;
+}
+
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
struct status_block *sblk = bnapi->status_blk.msi;
@@ -3218,14 +3240,15 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
- if (unlikely(work_done >= budget))
- break;
-
/* bnapi->last_status_idx is used below to tell the hw how
* much work has been processed, so we must read it before
* checking for more work.
*/
bnapi->last_status_idx = sblk->status_idx;
+
+ if (unlikely(work_done >= budget))
+ break;
+
rmb();
if (likely(!bnx2_has_work(bnapi))) {
netif_rx_complete(bp->dev, napi);
@@ -4570,6 +4593,8 @@ bnx2_init_chip(struct bnx2 *bp)
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
bp->bnx2_napi[i].last_status_idx = 0;
+ bp->idle_chk_status_idx = 0xffff;
+
bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
/* Set up how to generate a link change interrupt. */
@@ -5718,6 +5743,10 @@ bnx2_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnx2_restart_timer;
+ if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
+ BNX2_FLAG_USING_MSI)
+ bnx2_chk_missed_msi(bp);
+
bnx2_send_heart_beat(bp);
bp->stats_blk->stat_FwRxDrop =
@@ -7204,10 +7233,13 @@ static void
poll_bnx2(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
+ int i;
- disable_irq(bp->pdev->irq);
- bnx2_interrupt(bp->pdev->irq, dev);
- enable_irq(bp->pdev->irq);
+ for (i = 0; i < bp->irq_nvecs; i++) {
+ disable_irq(bp->irq_tbl[i].vector);
+ bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
+ enable_irq(bp->irq_tbl[i].vector);
+ }
}
#endif
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 617d95340160..0b032c3c7b61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -378,6 +378,9 @@ struct l2_fhdr {
* pci_config_l definition
* offset: 0000
*/
+#define BNX2_PCICFG_MSI_CONTROL 0x00000058
+#define BNX2_PCICFG_MSI_CONTROL_ENABLE (1L<<16)
+
#define BNX2_PCICFG_MISC_CONFIG 0x00000068
#define BNX2_PCICFG_MISC_CONFIG_TARGET_BYTE_SWAP (1L<<2)
#define BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP (1L<<3)
@@ -6863,6 +6866,9 @@ struct bnx2 {
u8 num_tx_rings;
u8 num_rx_rings;
+
+ u32 idle_chk_status_idx;
+
};
#define REG_RD(bp, offset) \
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index 130927cfc75b..a6c0b3abba29 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -564,14 +564,15 @@ static const struct arb_line write_arb_addr[NUM_WR_Q-1] = {
static void bnx2x_init_pxp(struct bnx2x *bp)
{
+ u16 devctl;
int r_order, w_order;
u32 val, i;
pci_read_config_word(bp->pdev,
- bp->pcie_cap + PCI_EXP_DEVCTL, (u16 *)&val);
- DP(NETIF_MSG_HW, "read 0x%x from devctl\n", (u16)val);
- w_order = ((val & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
- r_order = ((val & PCI_EXP_DEVCTL_READRQ) >> 12);
+ bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
+ DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
+ w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
+ r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
if (r_order > MAX_RD_ORD) {
DP(NETIF_MSG_HW, "read order of %d order adjusted to %d\n",
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index fce745148ff9..600210d7eff9 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -59,8 +59,8 @@
#include "bnx2x.h"
#include "bnx2x_init.h"
-#define DRV_MODULE_VERSION "1.45.22"
-#define DRV_MODULE_RELDATE "2008/09/09"
+#define DRV_MODULE_VERSION "1.45.23"
+#define DRV_MODULE_RELDATE "2008/11/03"
#define BNX2X_BC_VER 0x040200
/* Time in jiffies before concluding the transmitter is hung */
@@ -6481,6 +6481,7 @@ load_int_disable:
bnx2x_free_irq(bp);
load_error:
bnx2x_free_mem(bp);
+ bp->port.pmf = 0;
/* TBD we really need to reset the chip
if we want to recover from this */
@@ -6791,6 +6792,7 @@ unload_error:
/* Report UNLOAD_DONE to MCP */
if (!BP_NOMCP(bp))
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
+ bp->port.pmf = 0;
/* Free SKBs, SGEs, TPA pool and driver internals */
bnx2x_free_skbs(bp);
@@ -10204,8 +10206,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return -ENOMEM;
}
- netif_carrier_off(dev);
-
bp = netdev_priv(dev);
bp->msglevel = debug;
@@ -10229,6 +10229,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
goto init_one_exit;
}
+ netif_carrier_off(dev);
+
bp->common.name = board_info[ent->driver_data].name;
printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
" IRQ %d, ", dev->name, bp->common.name,
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index ade5f3f6693b..87437c788476 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -169,11 +169,14 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
/* clear slave from tx_hashtbl */
tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
- index = SLAVE_TLB_INFO(slave).head;
- while (index != TLB_NULL_INDEX) {
- u32 next_index = tx_hash_table[index].next;
- tlb_init_table_entry(&tx_hash_table[index], save_load);
- index = next_index;
+ /* skip this if we've already freed the tx hash table */
+ if (tx_hash_table) {
+ index = SLAVE_TLB_INFO(slave).head;
+ while (index != TLB_NULL_INDEX) {
+ u32 next_index = tx_hash_table[index].next;
+ tlb_init_table_entry(&tx_hash_table[index], save_load);
+ index = next_index;
+ }
}
tlb_init_slave(slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8e2be24f3fe4..a3efba59eee9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1341,18 +1341,24 @@ static int bond_compute_features(struct bonding *bond)
int i;
features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
- features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
- NETIF_F_GSO_MASK | NETIF_F_NO_CSUM;
+ features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM;
+
+ if (!bond->first_slave)
+ goto done;
+
+ features &= ~NETIF_F_ONE_FOR_ALL;
bond_for_each_slave(bond, slave, i) {
- features = netdev_compute_features(features,
- slave->dev->features);
+ features = netdev_increment_features(features,
+ slave->dev->features,
+ NETIF_F_ONE_FOR_ALL);
if (slave->dev->hard_header_len > max_hard_header_len)
max_hard_header_len = slave->dev->hard_header_len;
}
+done:
features |= (bond_dev->features & BOND_VLAN_FEATURES);
- bond_dev->features = features;
+ bond_dev->features = netdev_fix_features(features, NULL);
bond_dev->hard_header_len = max_hard_header_len;
return 0;
@@ -1973,6 +1979,20 @@ void bond_destroy(struct bonding *bond)
unregister_netdevice(bond->dev);
}
+static void bond_destructor(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+
+ if (bond->wq)
+ destroy_workqueue(bond->wq);
+
+ netif_addr_lock_bh(bond_dev);
+ bond_mc_list_destroy(bond);
+ netif_addr_unlock_bh(bond_dev);
+
+ free_netdev(bond_dev);
+}
+
/*
* First release a slave and then destroy the bond if no more slaves are left.
* Must be under rtnl_lock when this function is called.
@@ -2370,6 +2390,9 @@ static void bond_miimon_commit(struct bonding *bond)
continue;
case BOND_LINK_DOWN:
+ if (slave->link_failure_count < UINT_MAX)
+ slave->link_failure_count++;
+
slave->link = BOND_LINK_DOWN;
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
@@ -4544,7 +4567,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
bond_set_mode_ops(bond, bond->params.mode);
- bond_dev->destructor = free_netdev;
+ bond_dev->destructor = bond_destructor;
/* Initialize the device options */
bond_dev->tx_queue_len = 0;
@@ -4583,20 +4606,6 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
return 0;
}
-/* De-initialize device specific data.
- * Caller must hold rtnl_lock.
- */
-static void bond_deinit(struct net_device *bond_dev)
-{
- struct bonding *bond = bond_dev->priv;
-
- list_del(&bond->bond_list);
-
-#ifdef CONFIG_PROC_FS
- bond_remove_proc_entry(bond);
-#endif
-}
-
static void bond_work_cancel_all(struct bonding *bond)
{
write_lock_bh(&bond->lock);
@@ -4618,6 +4627,22 @@ static void bond_work_cancel_all(struct bonding *bond)
cancel_delayed_work(&bond->ad_work);
}
+/* De-initialize device specific data.
+ * Caller must hold rtnl_lock.
+ */
+static void bond_deinit(struct net_device *bond_dev)
+{
+ struct bonding *bond = bond_dev->priv;
+
+ list_del(&bond->bond_list);
+
+ bond_work_cancel_all(bond);
+
+#ifdef CONFIG_PROC_FS
+ bond_remove_proc_entry(bond);
+#endif
+}
+
/* Unregister and free all bond devices.
* Caller must hold rtnl_lock.
*/
@@ -4629,9 +4654,6 @@ static void bond_free_all(void)
struct net_device *bond_dev = bond->dev;
bond_work_cancel_all(bond);
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_destroy(bond);
- netif_addr_unlock_bh(bond_dev);
/* Release the bonded slaves */
bond_release_all(bond_dev);
bond_destroy(bond);
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index d6c7d2aa761b..7092df50ff78 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -1035,10 +1035,6 @@ MODULE_PARM_DESC(copybreak, "Receive copy threshold");
* @pdev: the PCI device that received the packet
* @fl: the SGE free list holding the packet
* @len: the actual packet length, excluding any SGE padding
- * @dma_pad: padding at beginning of buffer left by SGE DMA
- * @skb_pad: padding to be used if the packet is copied
- * @copy_thres: length threshold under which a packet should be copied
- * @drop_thres: # of remaining buffers before we start dropping packets
*
* Get the next packet from a free list and complete setup of the
* sk_buff. If the packet is small we make a copy and recycle the
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 65d0a9103297..7e8a63106bdf 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -32,14 +32,14 @@
#include <linux/skbuff.h>
#include <linux/ethtool.h>
-#include <asm/arch/svinto.h>/* DMA and register descriptions */
+#include <arch/svinto.h>/* DMA and register descriptions */
#include <asm/io.h> /* CRIS_LED_* I/O functions */
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/system.h>
#include <asm/ethernet.h>
#include <asm/cache.h>
-#include <asm/arch/io_interface_mux.h>
+#include <arch/io_interface_mux.h>
//#define ETHDEBUG
#define D(x)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 1ace41a13ac3..2c341f83d327 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1307,8 +1307,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
u32 fw_vers = 0;
u32 tp_vers = 0;
+ spin_lock(&adapter->stats_lock);
t3_get_fw_version(adapter, &fw_vers);
t3_get_tp_version(adapter, &tp_vers);
+ spin_unlock(&adapter->stats_lock);
strcpy(info->driver, DRV_NAME);
strcpy(info->version, DRV_VERSION);
@@ -2699,7 +2701,7 @@ static void set_nqsets(struct adapter *adap)
int hwports = adap->params.nports;
int nqsets = SGE_QSETS;
- if (adap->params.rev > 0) {
+ if (adap->params.rev > 0 && adap->flags & USING_MSIX) {
if (hwports == 2 &&
(hwports * nqsets > SGE_QSETS ||
num_cpus >= nqsets / hwports))
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c
index 4407ac9bb555..ff1611f90e7a 100644
--- a/drivers/net/cxgb3/l2t.c
+++ b/drivers/net/cxgb3/l2t.c
@@ -431,6 +431,7 @@ struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
for (i = 0; i < l2t_capacity; ++i) {
d->l2tab[i].idx = i;
d->l2tab[i].state = L2T_STATE_UNUSED;
+ __skb_queue_head_init(&d->l2tab[i].arpq);
spin_lock_init(&d->l2tab[i].lock);
atomic_set(&d->l2tab[i].refcnt, 0);
}
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 968f64be3743..9a0898b0dbce 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -572,7 +572,7 @@ struct t3_vpd {
u32 pad; /* for multiple-of-4 sizing and alignment */
};
-#define EEPROM_MAX_POLL 4
+#define EEPROM_MAX_POLL 40
#define EEPROM_STAT_ADDR 0x4000
#define VPD_BASE 0xc00
@@ -3690,6 +3690,12 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
;
pti = &port_types[adapter->params.vpd.port_type[j]];
+ if (!pti->phy_prep) {
+ CH_ALERT(adapter, "Invalid port type index %d\n",
+ adapter->params.vpd.port_type[j]);
+ return -EINVAL;
+ }
+
ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
ai->mdio_ops);
if (ret)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index f42c23f42652..5a9083e3f443 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -47,15 +47,6 @@
#define CARDNAME "dm9000"
#define DRV_VERSION "1.31"
-#ifdef CONFIG_BLACKFIN
-#define readsb insb
-#define readsw insw
-#define readsl insl
-#define writesb outsb
-#define writesw outsw
-#define writesl outsl
-#endif
-
/*
* Transmit timeout, default 5 seconds.
*/
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 3d69fae781cf..e8bfcce6b319 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -166,7 +166,7 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
-#define DRV_VERSION "3.5.23-k4"DRV_EXT
+#define DRV_VERSION "3.5.23-k6"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
#define PFX DRV_NAME ": "
@@ -1804,7 +1804,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
pci_dma_sync_single_for_device(nic->pdev, rx->prev->dma_addr,
- sizeof(struct rfd), PCI_DMA_TODEVICE);
+ sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
}
return 0;
@@ -1823,7 +1823,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
/* Need to sync before taking a peek at cb_complete bit */
pci_dma_sync_single_for_cpu(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_FROMDEVICE);
+ sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
rfd_status = le16_to_cpu(rfd->status);
DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status);
@@ -1850,7 +1850,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
/* Get data */
pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
/* If this buffer has the el bit, but we think the receiver
* is still running, check to see if it really stopped while
@@ -1943,7 +1943,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
new_before_last_rfd->command |= cpu_to_le16(cb_el);
pci_dma_sync_single_for_device(nic->pdev,
new_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_TODEVICE);
+ PCI_DMA_BIDIRECTIONAL);
/* Now that we have a new stopping point, we can clear the old
* stopping point. We must sync twice to get the proper
@@ -1951,11 +1951,11 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
old_before_last_rfd->command &= ~cpu_to_le16(cb_el);
pci_dma_sync_single_for_device(nic->pdev,
old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_TODEVICE);
+ PCI_DMA_BIDIRECTIONAL);
old_before_last_rfd->size = cpu_to_le16(VLAN_ETH_FRAME_LEN);
pci_dma_sync_single_for_device(nic->pdev,
old_before_last_rx->dma_addr, sizeof(struct rfd),
- PCI_DMA_TODEVICE);
+ PCI_DMA_BIDIRECTIONAL);
}
if(restart_required) {
@@ -1978,7 +1978,7 @@ static void e100_rx_clean_list(struct nic *nic)
for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
if(rx->skb) {
pci_unmap_single(nic->pdev, rx->dma_addr,
- RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
dev_kfree_skb(rx->skb);
}
}
@@ -2021,7 +2021,7 @@ static int e100_rx_alloc_list(struct nic *nic)
before_last->command |= cpu_to_le16(cb_el);
before_last->size = 0;
pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
- sizeof(struct rfd), PCI_DMA_TODEVICE);
+ sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
@@ -2222,7 +2222,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
msleep(10);
pci_dma_sync_single_for_cpu(nic->pdev, nic->rx_to_clean->dma_addr,
- RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
+ RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
if(memcmp(nic->rx_to_clean->skb->data + sizeof(struct rfd),
skb->data, ETH_DATA_LEN))
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 6a3893acfe04..c854c96f5ab3 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1774,7 +1774,8 @@ static void e1000_get_wol(struct net_device *netdev,
/* this function will set ->supported = 0 and return 1 if wol is not
* supported by this hardware */
- if (e1000_wol_exclusion(adapter, wol))
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
return;
/* apply any specific unsupported masks here */
@@ -1811,7 +1812,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (e1000_wol_exclusion(adapter, wol))
+ if (e1000_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
return wol->wolopts ? -EOPNOTSUPP : 0;
switch (hw->device_id) {
@@ -1838,6 +1840,8 @@ static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index fac82152e4c8..872799b746f5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1179,6 +1179,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* print bus type/speed/width info */
DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c55de1c027af..c55fd6fdb91c 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -299,6 +299,7 @@ struct e1000_adapter {
unsigned long led_status;
unsigned int flags;
+ unsigned int flags2;
struct work_struct downshift_task;
struct work_struct update_phy_task;
};
@@ -306,6 +307,7 @@ struct e1000_adapter {
struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
+ unsigned int flags2;
u32 pba;
s32 (*get_variants)(struct e1000_adapter *);
struct e1000_mac_operations *mac_ops;
@@ -347,6 +349,9 @@ struct e1000_info {
#define FLAG_RX_RESTART_NOW (1 << 30)
#define FLAG_MSI_TEST_FAILED (1 << 31)
+/* CRC Stripping defines */
+#define FLAG2_CRC_STRIPPING (1 << 0)
+
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 70c11c811a08..62421ce96311 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -1713,7 +1713,8 @@ static void e1000_get_wol(struct net_device *netdev,
wol->supported = 0;
wol->wolopts = 0;
- if (!(adapter->flags & FLAG_HAS_WOL))
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev))
return;
wol->supported = WAKE_UCAST | WAKE_MCAST |
@@ -1751,7 +1752,8 @@ static int e1000_set_wol(struct net_device *netdev,
if (wol->wolopts & WAKE_MAGICSECURE)
return -EOPNOTSUPP;
- if (!(adapter->flags & FLAG_HAS_WOL))
+ if (!(adapter->flags & FLAG_HAS_WOL) ||
+ !device_can_wakeup(&adapter->pdev->dev))
return wol->wolopts ? -EOPNOTSUPP : 0;
/* these settings will always override what we currently have */
@@ -1770,6 +1772,8 @@ static int e1000_set_wol(struct net_device *netdev,
if (wol->wolopts & WAKE_ARP)
adapter->wol |= E1000_WUFC_ARP;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 523b9716a543..d115a6d30f29 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1893,12 +1893,17 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl |= E1000_CTRL_PHY_RST;
}
ret_val = e1000_acquire_swflag_ich8lan(hw);
+ /* Whether or not the swflag was acquired, we need to reset the part */
hw_dbg(hw, "Issuing a global reset to ich8lan");
ew32(CTRL, (ctrl | E1000_CTRL_RST));
msleep(20);
- /* release the swflag because it is not reset by hardware reset */
- e1000_release_swflag_ich8lan(hw);
+ if (!ret_val) {
+ /* release the swflag because it is not reset by
+ * hardware reset
+ */
+ e1000_release_swflag_ich8lan(hw);
+ }
ret_val = e1000e_get_auto_rd_done(hw);
if (ret_val) {
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index abd492b7336d..122539a0e1fe 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -345,7 +345,6 @@ no_buffers:
/**
* e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
* @adapter: address of board private structure
- * @rx_ring: pointer to receive ring structure
* @cleaned_count: number of buffers to allocate this pass
**/
@@ -499,6 +498,10 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
goto next_desc;
}
+ /* adjust length to remove Ethernet CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ length -= 4;
+
total_rx_bytes += length;
total_rx_packets++;
@@ -804,6 +807,10 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
pci_dma_sync_single_for_device(pdev, ps_page->dma,
PAGE_SIZE, PCI_DMA_FROMDEVICE);
+ /* remove the CRC */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ l1 -= 4;
+
skb_put(skb, l1);
goto copydone;
} /* if */
@@ -825,6 +832,12 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
skb->truesize += length;
}
+ /* strip the ethernet crc, problem is we're using pages now so
+ * this whole operation can get a little cpu intensive
+ */
+ if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+ pskb_trim(skb, skb->len - 4);
+
copydone:
total_rx_bytes += skb->len;
total_rx_packets++;
@@ -2301,8 +2314,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
else
rctl |= E1000_RCTL_LPE;
- /* Enable hardware CRC frame stripping */
- rctl |= E1000_RCTL_SECRC;
+ /* Some systems expect that the CRC is included in SMBUS traffic. The
+ * hardware strips the CRC before sending to both SMBUS (BMC) and to
+ * host memory when this is enabled
+ */
+ if (adapter->flags2 & FLAG2_CRC_STRIPPING)
+ rctl |= E1000_RCTL_SECRC;
/* Setup buffer sizes */
rctl &= ~E1000_RCTL_SZ_4096;
@@ -4766,6 +4783,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
adapter->ei = ei;
adapter->pba = ei->pba;
adapter->flags = ei->flags;
+ adapter->flags2 = ei->flags2;
adapter->hw.adapter = adapter;
adapter->hw.mac.type = ei->mac;
adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
@@ -4970,6 +4988,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -5008,6 +5027,7 @@ err_hw_init:
err_sw_init:
if (adapter->hw.flash_address)
iounmap(adapter->hw.flash_address);
+ e1000e_reset_interrupt_capability(adapter);
err_flashmap:
iounmap(adapter->hw.hw_addr);
err_ioremap:
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 77a3d7207a5f..e909f96698e8 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -151,6 +151,16 @@ E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
*/
E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+/*
+ * Enable CRC Stripping
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
+ "the CRC");
+
struct e1000_option {
enum { enable_option, range_option, list_option } type;
const char *name;
@@ -404,6 +414,21 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
adapter->flags |= FLAG_SMART_POWER_DOWN;
}
}
+ { /* CRC Stripping */
+ const struct e1000_option opt = {
+ .type = enable_option,
+ .name = "CRC Stripping",
+ .err = "defaulting to enabled",
+ .def = OPTION_ENABLED
+ };
+
+ if (num_CrcStripping > bd) {
+ unsigned int crc_stripping = CrcStripping[bd];
+ e1000_validate_option(&crc_stripping, &opt, adapter);
+ if (crc_stripping == OPTION_ENABLED)
+ adapter->flags2 |= FLAG2_CRC_STRIPPING;
+ }
+ }
{ /* Kumeran Lock Loss Workaround */
const struct e1000_option opt = {
.type = enable_option,
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 5524271eedca..002d918fb4c7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0093"
+#define DRV_VERSION "EHEA_0095"
/* eHEA capability flags */
#define DLPAR_PORT_ADD_REM 1
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b70c5314f537..422fcb93e2c3 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2863,7 +2863,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
struct ehea_adapter *adapter;
mutex_lock(&dlpar_mem_lock);
- ehea_info("LPAR memory enlarged - re-initializing driver");
+ ehea_info("LPAR memory changed - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
if (adapter->active_ports) {
@@ -2900,13 +2900,6 @@ static void ehea_rereg_mrs(struct work_struct *work)
}
}
- ehea_destroy_busmap();
- ret = ehea_create_busmap();
- if (ret) {
- ehea_error("creating ehea busmap failed");
- goto out;
- }
-
clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
list_for_each_entry(adapter, &adapter_list, list)
@@ -3519,9 +3512,21 @@ void ehea_crash_handler(void)
static int ehea_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
+ struct memory_notify *arg = data;
switch (action) {
- case MEM_OFFLINE:
- ehea_info("memory has been removed");
+ case MEM_CANCEL_OFFLINE:
+ ehea_info("memory offlining canceled");
+ /* Readd canceled memory block */
+ case MEM_ONLINE:
+ ehea_info("memory is going online");
+ if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
+ return NOTIFY_BAD;
+ ehea_rereg_mrs(NULL);
+ break;
+ case MEM_GOING_OFFLINE:
+ ehea_info("memory is going offline");
+ if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
+ return NOTIFY_BAD;
ehea_rereg_mrs(NULL);
break;
default:
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index db8a9257e680..9d006878f045 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -567,7 +567,7 @@ static inline int ehea_calc_index(unsigned long i, unsigned long s)
static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
int dir)
{
- if(!ehea_top_bmap->dir[dir]) {
+ if (!ehea_top_bmap->dir[dir]) {
ehea_top_bmap->dir[dir] =
kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
if (!ehea_top_bmap->dir[dir])
@@ -578,7 +578,7 @@ static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
- if(!ehea_bmap->top[top]) {
+ if (!ehea_bmap->top[top]) {
ehea_bmap->top[top] =
kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
if (!ehea_bmap->top[top])
@@ -587,53 +587,171 @@ static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}
-static int ehea_create_busmap_callback(unsigned long pfn,
- unsigned long nr_pages, void *arg)
+static DEFINE_MUTEX(ehea_busmap_mutex);
+static unsigned long ehea_mr_len;
+
+#define EHEA_BUSMAP_ADD_SECT 1
+#define EHEA_BUSMAP_REM_SECT 0
+
+static void ehea_rebuild_busmap(void)
{
- unsigned long i, mr_len, start_section, end_section;
- start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
- end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
- mr_len = *(unsigned long *)arg;
+ u64 vaddr = EHEA_BUSMAP_START;
+ int top, dir, idx;
- if (!ehea_bmap)
- ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
- if (!ehea_bmap)
- return -ENOMEM;
+ for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
+ struct ehea_top_bmap *ehea_top;
+ int valid_dir_entries = 0;
- for (i = start_section; i < end_section; i++) {
- int ret;
- int top, dir, idx;
- u64 vaddr;
+ if (!ehea_bmap->top[top])
+ continue;
+ ehea_top = ehea_bmap->top[top];
+ for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
+ struct ehea_dir_bmap *ehea_dir;
+ int valid_entries = 0;
- top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
- dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+ if (!ehea_top->dir[dir])
+ continue;
+ valid_dir_entries++;
+ ehea_dir = ehea_top->dir[dir];
+ for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
+ if (!ehea_dir->ent[idx])
+ continue;
+ valid_entries++;
+ ehea_dir->ent[idx] = vaddr;
+ vaddr += EHEA_SECTSIZE;
+ }
+ if (!valid_entries) {
+ ehea_top->dir[dir] = NULL;
+ kfree(ehea_dir);
+ }
+ }
+ if (!valid_dir_entries) {
+ ehea_bmap->top[top] = NULL;
+ kfree(ehea_top);
+ }
+ }
+}
- ret = ehea_init_bmap(ehea_bmap, top, dir);
- if(ret)
- return ret;
+static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
+{
+ unsigned long i, start_section, end_section;
- idx = i & EHEA_INDEX_MASK;
- vaddr = EHEA_BUSMAP_START + mr_len + i * EHEA_SECTSIZE;
+ if (!nr_pages)
+ return 0;
- ehea_bmap->top[top]->dir[dir]->ent[idx] = vaddr;
+ if (!ehea_bmap) {
+ ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
+ if (!ehea_bmap)
+ return -ENOMEM;
}
- mr_len += nr_pages * PAGE_SIZE;
- *(unsigned long *)arg = mr_len;
+ start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
+ end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
+ /* Mark entries as valid or invalid only; address is assigned later */
+ for (i = start_section; i < end_section; i++) {
+ u64 flag;
+ int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
+ int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
+ int idx = i & EHEA_INDEX_MASK;
+
+ if (add) {
+ int ret = ehea_init_bmap(ehea_bmap, top, dir);
+ if (ret)
+ return ret;
+ flag = 1; /* valid */
+ ehea_mr_len += EHEA_SECTSIZE;
+ } else {
+ if (!ehea_bmap->top[top])
+ continue;
+ if (!ehea_bmap->top[top]->dir[dir])
+ continue;
+ flag = 0; /* invalid */
+ ehea_mr_len -= EHEA_SECTSIZE;
+ }
+ ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
+ }
+ ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
return 0;
}
-static unsigned long ehea_mr_len;
+int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
+{
+ int ret;
-static DEFINE_MUTEX(ehea_busmap_mutex);
+ mutex_lock(&ehea_busmap_mutex);
+ ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+ mutex_unlock(&ehea_busmap_mutex);
+ return ret;
+}
+
+int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
+{
+ int ret;
+
+ mutex_lock(&ehea_busmap_mutex);
+ ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
+ mutex_unlock(&ehea_busmap_mutex);
+ return ret;
+}
+
+static int ehea_is_hugepage(unsigned long pfn)
+{
+ int page_order;
+
+ if (pfn & EHEA_HUGEPAGE_PFN_MASK)
+ return 0;
+
+ page_order = compound_order(pfn_to_page(pfn));
+ if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
+ return 0;
+
+ return 1;
+}
+
+static int ehea_create_busmap_callback(unsigned long initial_pfn,
+ unsigned long total_nr_pages, void *arg)
+{
+ int ret;
+ unsigned long pfn, start_pfn, end_pfn, nr_pages;
+
+ if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
+ return ehea_update_busmap(initial_pfn, total_nr_pages,
+ EHEA_BUSMAP_ADD_SECT);
+
+ /* Given chunk is >= 16GB -> check for hugepages */
+ start_pfn = initial_pfn;
+ end_pfn = initial_pfn + total_nr_pages;
+ pfn = start_pfn;
+
+ while (pfn < end_pfn) {
+ if (ehea_is_hugepage(pfn)) {
+ /* Add mem found in front of the hugepage */
+ nr_pages = pfn - start_pfn;
+ ret = ehea_update_busmap(start_pfn, nr_pages,
+ EHEA_BUSMAP_ADD_SECT);
+ if (ret)
+ return ret;
+
+ /* Skip the hugepage */
+ pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
+ start_pfn = pfn;
+ } else
+ pfn += (EHEA_SECTSIZE / PAGE_SIZE);
+ }
+
+ /* Add mem found behind the hugepage(s) */
+ nr_pages = pfn - start_pfn;
+ return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
+}
int ehea_create_busmap(void)
{
int ret;
+
mutex_lock(&ehea_busmap_mutex);
ehea_mr_len = 0;
- ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, &ehea_mr_len,
+ ret = walk_memory_resource(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
ehea_create_busmap_callback);
mutex_unlock(&ehea_busmap_mutex);
return ret;
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 0bb6f92fa2f8..0817c1e74a19 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -40,6 +40,9 @@
#define EHEA_PAGESIZE (1UL << EHEA_PAGESHIFT)
#define EHEA_SECTSIZE (1UL << 24)
#define EHEA_PAGES_PER_SECTION (EHEA_SECTSIZE >> EHEA_PAGESHIFT)
+#define EHEA_HUGEPAGESHIFT 34
+#define EHEA_HUGEPAGE_SIZE (1UL << EHEA_HUGEPAGESHIFT)
+#define EHEA_HUGEPAGE_PFN_MASK ((EHEA_HUGEPAGE_SIZE - 1) >> PAGE_SHIFT)
#if ((1UL << SECTION_SIZE_BITS) < EHEA_SECTSIZE)
#error eHEA module cannot work if kernel sectionsize < ehea sectionsize
@@ -378,6 +381,8 @@ int ehea_rem_mr(struct ehea_mr *mr);
void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages);
+int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages);
int ehea_create_busmap(void);
void ehea_destroy_busmap(void);
u64 ehea_map_vaddr(void *caddr);
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index e1b441effbbe..36cb6e95b465 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -568,6 +568,17 @@ static u16 erxrdpt_workaround(u16 next_packet_ptr, u16 start, u16 end)
return erxrdpt;
}
+/*
+ * Calculate wrap around when reading beyond the end of the RX buffer
+ */
+static u16 rx_packet_start(u16 ptr)
+{
+ if (ptr + RSV_SIZE > RXEND_INIT)
+ return (ptr + RSV_SIZE) - (RXEND_INIT - RXSTART_INIT + 1);
+ else
+ return ptr + RSV_SIZE;
+}
+
static void nolock_rxfifo_init(struct enc28j60_net *priv, u16 start, u16 end)
{
u16 erxrdpt;
@@ -938,8 +949,9 @@ static void enc28j60_hw_rx(struct net_device *ndev)
skb->dev = ndev;
skb_reserve(skb, NET_IP_ALIGN);
/* copy the packet from the receive buffer */
- enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv),
- len, skb_put(skb, len));
+ enc28j60_mem_read(priv,
+ rx_packet_start(priv->next_pk_ptr),
+ len, skb_put(skb, len));
if (netif_msg_pktdata(priv))
dump_packet(__func__, skb->len, skb->data);
skb->protocol = eth_type_trans(skb, ndev);
@@ -947,7 +959,7 @@ static void enc28j60_hw_rx(struct net_device *ndev)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += len;
ndev->last_rx = jiffies;
- netif_rx(skb);
+ netif_rx_ni(skb);
}
}
/*
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4e4f68304e82..aec3b97e794d 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -401,6 +401,21 @@ static int mpc52xx_fec_hard_start_xmit(struct sk_buff *skb, struct net_device *d
return 0;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mpc52xx_fec_poll_controller(struct net_device *dev)
+{
+ struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+
+ disable_irq(priv->t_irq);
+ mpc52xx_fec_tx_interrupt(priv->t_irq, dev);
+ enable_irq(priv->t_irq);
+ disable_irq(priv->r_irq);
+ mpc52xx_fec_rx_interrupt(priv->r_irq, dev);
+ enable_irq(priv->r_irq);
+}
+#endif
+
+
/* This handles BestComm transmit task interrupts
*/
static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
@@ -926,6 +941,9 @@ mpc52xx_fec_probe(struct of_device *op, const struct of_device_id *match)
ndev->tx_timeout = mpc52xx_fec_tx_timeout;
ndev->watchdog_timeo = FEC_WATCHDOG_TIMEOUT;
ndev->base_addr = mem.start;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ ndev->poll_controller = mpc52xx_fec_poll_controller;
+#endif
priv->t_irq = priv->r_irq = ndev->irq = NO_IRQ; /* IRQ are free for now */
diff --git a/drivers/net/fec_mpc52xx_phy.c b/drivers/net/fec_mpc52xx_phy.c
index 08e18bcb970f..45dd9bdc5d62 100644
--- a/drivers/net/fec_mpc52xx_phy.c
+++ b/drivers/net/fec_mpc52xx_phy.c
@@ -2,6 +2,7 @@
* Driver for the MPC5200 Fast Ethernet Controller - MDIO bus driver
*
* Copyright (C) 2007 Domen Puncer, Telargo, Inc.
+ * Copyright (C) 2008 Wolfram Sang, Pengutronix
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
@@ -21,58 +22,45 @@ struct mpc52xx_fec_mdio_priv {
struct mpc52xx_fec __iomem *regs;
};
-static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+static int mpc52xx_fec_mdio_transfer(struct mii_bus *bus, int phy_id,
+ int reg, u32 value)
{
struct mpc52xx_fec_mdio_priv *priv = bus->priv;
struct mpc52xx_fec __iomem *fec;
int tries = 100;
- u32 request = FEC_MII_READ_FRAME;
+
+ value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
+ value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
fec = priv->regs;
out_be32(&fec->ievent, FEC_IEVENT_MII);
-
- request |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
- request |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
-
- out_be32(&priv->regs->mii_data, request);
+ out_be32(&priv->regs->mii_data, value);
/* wait for it to finish, this takes about 23 us on lite5200b */
while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
udelay(5);
- if (tries == 0)
+ if (!tries)
return -ETIMEDOUT;
- return in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK;
+ return value & FEC_MII_DATA_OP_RD ?
+ in_be32(&priv->regs->mii_data) & FEC_MII_DATA_DATAMSK : 0;
}
-static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 data)
+static int mpc52xx_fec_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
- struct mpc52xx_fec_mdio_priv *priv = bus->priv;
- struct mpc52xx_fec __iomem *fec;
- u32 value = data;
- int tries = 100;
-
- fec = priv->regs;
- out_be32(&fec->ievent, FEC_IEVENT_MII);
-
- value |= FEC_MII_WRITE_FRAME;
- value |= (phy_id << FEC_MII_DATA_PA_SHIFT) & FEC_MII_DATA_PA_MSK;
- value |= (reg << FEC_MII_DATA_RA_SHIFT) & FEC_MII_DATA_RA_MSK;
-
- out_be32(&priv->regs->mii_data, value);
-
- /* wait for request to finish */
- while (!(in_be32(&fec->ievent) & FEC_IEVENT_MII) && --tries)
- udelay(5);
-
- if (tries == 0)
- return -ETIMEDOUT;
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg, FEC_MII_READ_FRAME);
+}
- return 0;
+static int mpc52xx_fec_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 data)
+{
+ return mpc52xx_fec_mdio_transfer(bus, phy_id, reg,
+ data | FEC_MII_WRITE_FRAME);
}
-static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_id *match)
+static int mpc52xx_fec_mdio_probe(struct of_device *of,
+ const struct of_device_id *match)
{
struct device *dev = &of->dev;
struct device_node *np = of->node;
@@ -131,7 +119,8 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
dev_set_drvdata(dev, bus);
/* set MII speed */
- out_be32(&priv->regs->mii_speed, ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
+ out_be32(&priv->regs->mii_speed,
+ ((mpc52xx_find_ipb_freq(of->node) >> 20) / 5) << 1);
/* enable MII interrupt */
out_be32(&priv->regs->imask, in_be32(&priv->regs->imask) | FEC_IMASK_MII);
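
The mpc52xx MDIO rework folds the duplicated read and write busy-wait loops into one transfer helper keyed by the opcode bits already present in the frame word. A user-space sketch of the same consolidation; the bit values and the fake PHY register file are illustrative, not the FEC's real layout.

#include <stdio.h>
#include <stdint.h>

/* Illustrative opcode bits, not the real FEC register layout */
#define MII_READ_FRAME	0x60000000u
#define MII_WRITE_FRAME	0x50000000u
#define MII_OP_RD	0x20000000u
#define MII_DATA_MASK	0x0000FFFFu

static uint16_t phy_regs[32];	/* fake PHY register file */

/* One helper serves both directions; the opcode bit in the frame
 * decides whether the data field is returned, mirroring the
 * consolidated mpc52xx_fec_mdio_transfer(). */
static int mii_transfer(int phy_id, int reg, uint32_t value)
{
	(void)phy_id;	/* a real frame also encodes the PHY address */
	if (value & MII_OP_RD)
		return phy_regs[reg & 31];
	phy_regs[reg & 31] = (uint16_t)(value & MII_DATA_MASK);
	return 0;
}

static int mii_read(int phy_id, int reg)
{
	return mii_transfer(phy_id, reg, MII_READ_FRAME);
}

static int mii_write(int phy_id, int reg, uint16_t data)
{
	return mii_transfer(phy_id, reg, data | MII_WRITE_FRAME);
}

int main(void)
{
	mii_write(0, 0, 0x1140);
	printf("reg 0 reads back 0x%04x\n", mii_read(0, 0));
	return 0;
}
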
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index cb51c1fb0338..a6f49d025787 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -1099,7 +1099,9 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
ndev->stop = fs_enet_close;
ndev->get_stats = fs_enet_get_stats;
ndev->set_multicast_list = fs_set_multicast_list;
-
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ ndev->poll_controller = fs_enet_netpoll;
+#endif
if (fpi->use_napi)
netif_napi_add(ndev, &fep->napi, fs_enet_rx_napi,
fpi->napi_weight);
@@ -1209,7 +1211,7 @@ static void __exit fs_cleanup(void)
static void fs_enet_netpoll(struct net_device *dev)
{
disable_irq(dev->irq);
- fs_enet_interrupt(dev->irq, dev, NULL);
+ fs_enet_interrupt(dev->irq, dev);
enable_irq(dev->irq);
}
#endif
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b5bb7ae2817f..c4af949bf860 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -161,7 +161,7 @@ static int gfar_probe(struct platform_device *pdev)
struct gfar_private *priv = NULL;
struct gianfar_platform_data *einfo;
struct resource *r;
- int err = 0;
+ int err = 0, irq;
DECLARE_MAC_BUF(mac);
einfo = (struct gianfar_platform_data *) pdev->dev.platform_data;
@@ -187,15 +187,25 @@ static int gfar_probe(struct platform_device *pdev)
/* fill out IRQ fields */
if (einfo->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- priv->interruptTransmit = platform_get_irq_byname(pdev, "tx");
- priv->interruptReceive = platform_get_irq_byname(pdev, "rx");
- priv->interruptError = platform_get_irq_byname(pdev, "error");
- if (priv->interruptTransmit < 0 || priv->interruptReceive < 0 || priv->interruptError < 0)
+ irq = platform_get_irq_byname(pdev, "tx");
+ if (irq < 0)
+ goto regs_fail;
+ priv->interruptTransmit = irq;
+
+ irq = platform_get_irq_byname(pdev, "rx");
+ if (irq < 0)
+ goto regs_fail;
+ priv->interruptReceive = irq;
+
+ irq = platform_get_irq_byname(pdev, "error");
+ if (irq < 0)
goto regs_fail;
+ priv->interruptError = irq;
} else {
- priv->interruptTransmit = platform_get_irq(pdev, 0);
- if (priv->interruptTransmit < 0)
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
goto regs_fail;
+ priv->interruptTransmit = irq;
}
/* get a pointer to the register memory */
@@ -576,6 +586,18 @@ static void gfar_configure_serdes(struct net_device *dev)
struct gfar_mii __iomem *regs =
(void __iomem *)&priv->regs->gfar_mii_regs;
int tbipa = gfar_read(&priv->regs->tbipa);
+ struct mii_bus *bus = gfar_get_miibus(priv);
+
+ if (bus)
+ mutex_lock(&bus->mdio_lock);
+
+ /* If the link is already up, we must already be ok, and don't need to
+ * configure and reset the TBI<->SerDes link. Maybe U-Boot configured
+ * everything for us? Resetting it takes the link down and requires
+ * several seconds for it to come back.
+ */
+ if (gfar_local_mdio_read(regs, tbipa, MII_BMSR) & BMSR_LSTATUS)
+ goto done;
/* Single clk mode, mii mode off(for serdes communication) */
gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
@@ -586,6 +608,10 @@ static void gfar_configure_serdes(struct net_device *dev)
gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
+
+ done:
+ if (bus)
+ mutex_unlock(&bus->mdio_lock);
}
static void init_registers(struct net_device *dev)
@@ -1381,6 +1407,10 @@ static int gfar_clean_tx_ring(struct net_device *dev)
if (bdp->status & TXBD_DEF)
dev->stats.collisions++;
+ /* Unmap the DMA memory */
+ dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
+ bdp->length, DMA_TO_DEVICE);
+
/* Free the sk buffer associated with this TxBD */
dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
@@ -1640,6 +1670,9 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
skb = priv->rx_skbuff[priv->skb_currx];
+ dma_unmap_single(&priv->dev->dev, bdp->bufPtr,
+ priv->rx_buffer_size, DMA_FROM_DEVICE);
+
/* We drop the frame if we failed to allocate a new buffer */
if (unlikely(!newskb || !(bdp->status & RXBD_LAST) ||
bdp->status & RXBD_ERR)) {
@@ -1648,14 +1681,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
if (unlikely(!newskb))
newskb = skb;
- if (skb) {
- dma_unmap_single(&priv->dev->dev,
- bdp->bufPtr,
- priv->rx_buffer_size,
- DMA_FROM_DEVICE);
-
+ if (skb)
dev_kfree_skb_any(skb);
- }
} else {
/* Increment the number of packets */
dev->stats.rx_packets++;
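
The gianfar probe change routes each platform_get_irq*() result through a signed temporary and tests it before storing it, which matters when the destination field is unsigned and a `< 0` check on it can never fire. A small sketch of that pitfall and the fixed shape; that the priv field is unsigned is an assumption made here to show why the temporary helps.

#include <stdio.h>
#include <errno.h>

struct priv { unsigned int tx_irq; };	/* assumed unsigned, as many IRQ fields are */

static int get_irq(int ok) { return ok ? 42 : -ENXIO; }

int main(void)
{
	struct priv p;
	int irq;

	/* Buggy shape: store first, then test the unsigned field.
	 * The comparison below is always false. */
	p.tx_irq = get_irq(0);
	if (p.tx_irq < 0)
		printf("never reached\n");

	/* Fixed shape, as in the gianfar probe: test the signed
	 * return value before committing it to the field. */
	irq = get_irq(0);
	if (irq < 0) {
		printf("probe fails cleanly (%d)\n", irq);
		return 1;
	}
	p.tx_irq = irq;
	return 0;
}
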
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index bf73eea98010..0e2595d24933 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -269,6 +269,27 @@ static struct device_driver gianfar_mdio_driver = {
.remove = gfar_mdio_remove,
};
+static int match_mdio_bus(struct device *dev, void *data)
+{
+ const struct gfar_private *priv = data;
+ const struct platform_device *pdev = to_platform_device(dev);
+
+ return !strcmp(pdev->name, gianfar_mdio_driver.name) &&
+ pdev->id == priv->einfo->mdio_bus;
+}
+
+/* Given a gfar_priv structure, find the mii_bus controlled by this device (not
+ * necessarily the same as the bus the gfar's PHY is on), if one exists.
+ * Normally only the first gianfar controls a mii_bus. */
+struct mii_bus *gfar_get_miibus(const struct gfar_private *priv)
+{
+ /*const*/ struct device *d;
+
+ d = bus_find_device(gianfar_mdio_driver.bus, NULL, (void *)priv,
+ match_mdio_bus);
+ return d ? dev_get_drvdata(d) : NULL;
+}
+
int __init gfar_mdio_init(void)
{
return driver_register(&gianfar_mdio_driver);
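
gfar_get_miibus() uses the driver-model idiom of scanning a bus with a match callback and pulling drvdata off the first hit. A user-space analogue of bus_find_device() with a predicate; the device list, key type, and names are mocked.

#include <stdio.h>
#include <string.h>

struct device { const char *name; int id; void *drvdata; };

/* Mock of bus_find_device(): scan, return the first device the
 * predicate accepts, or NULL. */
static struct device *bus_find_device(struct device *devs, int n,
				      void *data,
				      int (*match)(struct device *, void *))
{
	int i;
	for (i = 0; i < n; i++)
		if (match(&devs[i], data))
			return &devs[i];
	return NULL;
}

struct key { const char *name; int id; };

static int match_mdio_bus(struct device *dev, void *data)
{
	struct key *k = data;
	return !strcmp(dev->name, k->name) && dev->id == k->id;
}

int main(void)
{
	int mii0 = 100, mii1 = 101;
	struct device devs[] = {
		{ "fsl-gianfar_mdio", 0, &mii0 },
		{ "fsl-gianfar_mdio", 1, &mii1 },
	};
	struct key k = { "fsl-gianfar_mdio", 1 };
	struct device *d = bus_find_device(devs, 2, &k, match_mdio_bus);

	printf("found bus drvdata: %d\n", d ? *(int *)d->drvdata : -1);
	return 0;
}
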
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h
index 2af28b16a0e2..02dc970ca1ff 100644
--- a/drivers/net/gianfar_mii.h
+++ b/drivers/net/gianfar_mii.h
@@ -18,6 +18,8 @@
#ifndef __GIANFAR_MII_H
#define __GIANFAR_MII_H
+struct gfar_private; /* forward ref */
+
#define MIIMIND_BUSY 0x00000001
#define MIIMIND_NOTVALID 0x00000004
@@ -44,6 +46,7 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
int regnum, u16 value);
int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
+struct mii_bus *gfar_get_miibus(const struct gfar_private *priv);
int __init gfar_mdio_init(void);
void gfar_mdio_exit(void);
#endif /* GIANFAR_PHY_H */
diff --git a/drivers/net/hp-plus.c b/drivers/net/hp-plus.c
index fbbd3e660c27..c01e290d09d2 100644
--- a/drivers/net/hp-plus.c
+++ b/drivers/net/hp-plus.c
@@ -230,7 +230,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
dev->open = &hpp_open;
dev->stop = &hpp_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
- dev->poll_controller = ei_poll;
+ dev->poll_controller = eip_poll;
#endif
ei_status.name = name;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index efcf21c9f5c7..901212aa37cb 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2604,8 +2604,16 @@ static int __devinit emac_init_config(struct emac_instance *dev)
if (of_device_is_compatible(np, "ibm,emac-440ep") ||
of_device_is_compatible(np, "ibm,emac-440gr"))
dev->features |= EMAC_FTR_440EP_PHY_CLK_FIX;
- if (of_device_is_compatible(np, "ibm,emac-405ez"))
+ if (of_device_is_compatible(np, "ibm,emac-405ez")) {
+#ifdef CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL
dev->features |= EMAC_FTR_NO_FLOW_CONTROL_40x;
+#else
+ printk(KERN_ERR "%s: Flow control not disabled!\n",
+ np->full_name);
+ return -ENXIO;
+#endif
+ }
+
}
/* Fixup some feature bits based on the device tree */
diff --git a/drivers/net/ibm_newemac/mal.c b/drivers/net/ibm_newemac/mal.c
index 1839d3f154a3..ecf9798987fa 100644
--- a/drivers/net/ibm_newemac/mal.c
+++ b/drivers/net/ibm_newemac/mal.c
@@ -280,9 +280,11 @@ static irqreturn_t mal_txeob(int irq, void *dev_instance)
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_TXEOBISR, r);
+#ifdef CONFIG_PPC_DCR_NATIVE
if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICTX));
+#endif
return IRQ_HANDLED;
}
@@ -298,9 +300,11 @@ static irqreturn_t mal_rxeob(int irq, void *dev_instance)
mal_schedule_poll(mal);
set_mal_dcrn(mal, MAL_RXEOBISR, r);
+#ifdef CONFIG_PPC_DCR_NATIVE
if (mal_has_feature(mal, MAL_FTR_CLEAR_ICINTSTAT))
mtdcri(SDR0, DCRN_SDR_ICINTSTAT,
(mfdcri(SDR0, DCRN_SDR_ICINTSTAT) | ICINTSTAT_ICRX));
+#endif
return IRQ_HANDLED;
}
@@ -572,9 +576,18 @@ static int __devinit mal_probe(struct of_device *ofdev,
goto fail;
}
- if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez"))
+ if (of_device_is_compatible(ofdev->node, "ibm,mcmal-405ez")) {
+#if defined(CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT) && \
+ defined(CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR)
mal->features |= (MAL_FTR_CLEAR_ICINTSTAT |
MAL_FTR_COMMON_ERR_INT);
+#else
+ printk(KERN_ERR "%s: Support for 405EZ not enabled!\n",
+ ofdev->node->full_name);
+ err = -ENODEV;
+ goto fail;
+#endif
+ }
mal->txeob_irq = irq_of_parse_and_map(ofdev->node, 0);
mal->rxeob_irq = irq_of_parse_and_map(ofdev->node, 1);
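
The MAL probe now refuses 405EZ hardware outright when the required Kconfig options are compiled out, instead of continuing without the workarounds. A tiny sketch of that fail-early pattern; CONFIG_FOO is a made-up option standing in for the real pair of symbols.

#include <stdio.h>

/* Uncomment to simulate the Kconfig option being enabled:
 * #define CONFIG_FOO 1 */

static int probe(int is_405ez)
{
	if (is_405ez) {
#ifdef CONFIG_FOO
		printf("enabling 405EZ workarounds\n");
#else
		/* Fail the probe loudly rather than run degraded. */
		fprintf(stderr, "support for 405EZ not enabled!\n");
		return -19;	/* -ENODEV */
#endif
	}
	return 0;
}

int main(void)
{
	return probe(1) ? 1 : 0;
}
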
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 58906c984be9..89964fa739a0 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -1776,7 +1776,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
/* this function will set ->supported = 0 and return 1 if wol is not
* supported by this hardware */
- if (igb_wol_exclusion(adapter, wol))
+ if (igb_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
return;
/* apply any specific unsupported masks here */
@@ -1805,7 +1806,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
- if (igb_wol_exclusion(adapter, wol))
+ if (igb_wol_exclusion(adapter, wol) ||
+ !device_can_wakeup(&adapter->pdev->dev))
return wol->wolopts ? -EOPNOTSUPP : 0;
switch (hw->device_id) {
@@ -1825,6 +1827,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
return 0;
}
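
The igb ethtool hooks now gate WoL on device_can_wakeup() and mirror the chosen setting into the PM core with device_set_wakeup_enable(), so the adapter flag and the wakeup state cannot drift apart. A user-space sketch that loosely mirrors that flow; the two helpers are stubs for the kernel's PM API.

#include <stdio.h>
#include <stdbool.h>

struct device { bool can_wakeup; bool wakeup_enabled; };

static bool device_can_wakeup(struct device *d) { return d->can_wakeup; }
static void device_set_wakeup_enable(struct device *d, bool on)
{
	d->wakeup_enabled = on;
}

struct adapter { struct device dev; unsigned int wol; };

/* Refuse WoL the device cannot deliver; otherwise keep the PM core's
 * wakeup state in sync with the driver flag. */
static int set_wol(struct adapter *a, unsigned int wolopts)
{
	if (!device_can_wakeup(&a->dev))
		return wolopts ? -95 /* -EOPNOTSUPP */ : 0;
	a->wol = wolopts;
	device_set_wakeup_enable(&a->dev, a->wol != 0);
	return 0;
}

int main(void)
{
	struct adapter a = { .dev = { .can_wakeup = true } };
	printf("ret=%d enabled=%d\n", set_wol(&a, 0x2),
	       (int)a.dev.wakeup_enabled);
	return 0;
}
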
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 93d02efa9a0a..20d27e622ec1 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -38,10 +38,11 @@
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
+#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include "igb.h"
@@ -106,11 +107,11 @@ static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_rx(int irq, void *);
static irqreturn_t igb_msix_tx(int irq, void *);
static int igb_clean_rx_ring_msix(struct napi_struct *, int);
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *);
static void igb_update_tx_dca(struct igb_ring *);
static void igb_setup_dca(struct igb_adapter *);
-#endif /* CONFIG_DCA */
+#endif /* CONFIG_IGB_DCA */
static bool igb_clean_tx_irq(struct igb_ring *);
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int);
@@ -131,7 +132,7 @@ static int igb_suspend(struct pci_dev *, pm_message_t);
static int igb_resume(struct pci_dev *);
#endif
static void igb_shutdown(struct pci_dev *);
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
.notifier_call = igb_notify_dca,
@@ -207,7 +208,7 @@ static int __init igb_init_module(void)
global_quad_port_a = 0;
ret = pci_register_driver(&igb_driver);
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
#endif
return ret;
@@ -223,7 +224,7 @@ module_init(igb_init_module);
**/
static void __exit igb_exit_module(void)
{
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
dca_unregister_notify(&dca_notifier);
#endif
pci_unregister_driver(&igb_driver);
@@ -966,10 +967,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
struct net_device *netdev;
struct igb_adapter *adapter;
struct e1000_hw *hw;
+ struct pci_dev *us_dev;
const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
unsigned long mmio_start, mmio_len;
- int i, err, pci_using_dac;
- u16 eeprom_data = 0;
+ int i, err, pci_using_dac, pos;
+ u16 eeprom_data = 0, state = 0;
u16 eeprom_apme_mask = IGB_EEPROM_APME;
u32 part_num;
int bars, need_ioport;
@@ -1004,6 +1006,27 @@ static int __devinit igb_probe(struct pci_dev *pdev,
}
}
+ /* 82575 requires that the pci-e link partner disable the L0s state */
+ switch (pdev->device) {
+ case E1000_DEV_ID_82575EB_COPPER:
+ case E1000_DEV_ID_82575EB_FIBER_SERDES:
+ case E1000_DEV_ID_82575GB_QUAD_COPPER:
+ us_dev = pdev->bus->self;
+ pos = pci_find_capability(us_dev, PCI_CAP_ID_EXP);
+ if (pos) {
+ pci_read_config_word(us_dev, pos + PCI_EXP_LNKCTL,
+ &state);
+ state &= ~PCIE_LINK_STATE_L0S;
+ pci_write_config_word(us_dev, pos + PCI_EXP_LNKCTL,
+ state);
+ dev_info(&pdev->dev,
+ "Disabling ASPM L0s upstream switch port %s\n",
+ pci_name(us_dev));
+ }
+ default:
+ break;
+ }
+
err = pci_request_selected_regions(pdev, bars, igb_driver_name);
if (err)
goto err_pci_reg;
@@ -1220,6 +1243,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
/* initialize the wol settings based on the eeprom settings */
adapter->wol = adapter->eeprom_wol;
+ device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
/* reset the hardware with the new settings */
igb_reset(adapter);
@@ -1237,7 +1261,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
if (err)
goto err_register;
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if ((adapter->flags & IGB_FLAG_HAS_DCA) &&
(dca_add_requester(&pdev->dev) == 0)) {
adapter->flags |= IGB_FLAG_DCA_ENABLED;
@@ -1311,7 +1335,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
struct e1000_hw *hw = &adapter->hw;
#endif
@@ -1323,7 +1347,7 @@ static void __devexit igb_remove(struct pci_dev *pdev)
flush_scheduled_work();
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
dev_info(&pdev->dev, "DCA disabled\n");
dca_remove_requester(&pdev->dev);
@@ -1956,7 +1980,6 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/**
* igb_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
* @tx_ring: Tx descriptor ring for a specific queue
*
* Free all transmit software resources
@@ -2009,7 +2032,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
/**
* igb_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
* @tx_ring: ring to be cleaned
**/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
@@ -2056,7 +2078,6 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
/**
* igb_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
* @rx_ring: ring to clean the resources from
*
* Free all receive software resources
@@ -2096,7 +2117,6 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
/**
* igb_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
* @rx_ring: ring to free buffers from
**/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
@@ -3271,7 +3291,7 @@ static irqreturn_t igb_msix_tx(int irq, void *data)
struct igb_adapter *adapter = tx_ring->adapter;
struct e1000_hw *hw = &adapter->hw;
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_tx_dca(tx_ring);
#endif
@@ -3323,14 +3343,14 @@ static irqreturn_t igb_msix_rx(int irq, void *data)
if (netif_rx_schedule_prep(adapter->netdev, &rx_ring->napi))
__netif_rx_schedule(adapter->netdev, &rx_ring->napi);
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_rx_dca(rx_ring);
#endif
return IRQ_HANDLED;
}
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
static void igb_update_rx_dca(struct igb_ring *rx_ring)
{
u32 dca_rxctrl;
@@ -3450,7 +3470,7 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
-#endif /* CONFIG_DCA */
+#endif /* CONFIG_IGB_DCA */
/**
* igb_intr_msi - Interrupt Handler
@@ -3529,13 +3549,13 @@ static int igb_poll(struct napi_struct *napi, int budget)
int tx_clean_complete, work_done = 0;
/* this poll routine only supports one tx and one rx queue */
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_tx_dca(&adapter->tx_ring[0]);
#endif
tx_clean_complete = igb_clean_tx_irq(&adapter->tx_ring[0]);
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_rx_dca(&adapter->rx_ring[0]);
#endif
@@ -3563,7 +3583,7 @@ static int igb_clean_rx_ring_msix(struct napi_struct *napi, int budget)
struct net_device *netdev = adapter->netdev;
int work_done = 0;
-#ifdef CONFIG_DCA
+#ifdef CONFIG_IGB_DCA
if (adapter->flags & IGB_FLAG_DCA_ENABLED)
igb_update_rx_dca(rx_ring);
#endif
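
The 82575 workaround in igb_probe() clears the L0s enable bit in the upstream port's PCIe Link Control register with a read-modify-write through config space. A sketch of just that read-modify-write on a mocked register; the bit position here is illustrative, while the real code uses PCI_EXP_LNKCTL and PCIE_LINK_STATE_L0S.

#include <stdio.h>
#include <stdint.h>

#define LNKCTL_ASPM_L0S	0x0001	/* illustrative bit position */

static uint16_t lnkctl = 0x0043;	/* mocked Link Control register */

static void pci_read_config_word(uint16_t *v)  { *v = lnkctl; }
static void pci_write_config_word(uint16_t v)  { lnkctl = v; }

int main(void)
{
	uint16_t state;

	/* Read-modify-write: clear only the L0s enable bit, keep the
	 * remaining link-control settings untouched. */
	pci_read_config_word(&state);
	state &= ~LNKCTL_ASPM_L0S;
	pci_write_config_word(state);

	printf("lnkctl: 0x%04x\n", lnkctl);	/* 0x0042 */
	return 0;
}
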
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 7373dafbb3f7..059369885be1 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -1112,7 +1112,7 @@ static void ipg_nic_rx_free_skb(struct net_device *dev)
struct ipg_rx *rxfd = sp->rxd + entry;
pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(sp->rx_buff[entry]);
sp->rx_buff[entry] = NULL;
@@ -1179,7 +1179,7 @@ static int ipg_nic_rx_check_error(struct net_device *dev)
*/
if (sp->rx_buff[entry]) {
pci_unmap_single(sp->pdev,
- le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
+ le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
dev_kfree_skb_irq(sp->rx_buff[entry]);
@@ -1246,7 +1246,7 @@ static void ipg_nic_rx_with_start(struct net_device *dev,
if (jumbo->found_start)
dev_kfree_skb_irq(jumbo->skb);
- pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
+ pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
skb_put(skb, sp->rxfrag_size);
@@ -1349,7 +1349,7 @@ static int ipg_nic_rx_jumbo(struct net_device *dev)
unsigned int entry = curr % IPG_RFDLIST_LENGTH;
struct ipg_rx *rxfd = sp->rxd + entry;
- if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE)))
+ if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
break;
switch (ipg_nic_rx_check_frame_type(dev)) {
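
The ipg fixes all move the mask outside le64_to_cpu(): ~IPG_RFI_FRAGLEN is a CPU-order constant, so it must be applied after conversion, not to the raw little-endian word. The sketch below simulates a big-endian CPU with an explicit byte swap to show how the two orderings diverge; the mask value is illustrative.

#include <stdio.h>
#include <stdint.h>

/* Simulate le64_to_cpu() as a big-endian CPU would see it: a full
 * byte swap (GCC/Clang builtin). */
static uint64_t le64_to_cpu_be(uint64_t x) { return __builtin_bswap64(x); }

#define FRAGLEN_MASK 0xffffULL	/* illustrative stand-in for IPG_RFI_FRAGLEN */

int main(void)
{
	/* A descriptor word (little-endian on the wire) holding
	 * address | length, built by swapping a CPU-order value. */
	uint64_t frag_info_le = le64_to_cpu_be(0x12345678abcd0040ULL);

	/* Wrong: the CPU-order mask is applied to the raw LE word,
	 * so it clears part of the address instead of the length. */
	uint64_t bad  = le64_to_cpu_be(frag_info_le & ~FRAGLEN_MASK);
	/* Right, as in the fix: convert first, then mask. */
	uint64_t good = le64_to_cpu_be(frag_info_le) & ~FRAGLEN_MASK;

	printf("bad:  0x%016llx\n", (unsigned long long)bad);
	printf("good: 0x%016llx\n", (unsigned long long)good);
	return 0;
}

On a little-endian host the swap is the identity and both expressions agree, which is why this class of bug tends to surface only on big-endian machines.
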
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 2482d61662a2..2e67ae015d91 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 1e0de93fd618..3843b5faba8b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/module.h>
#include <linux/kref.h>
#include <linux/usb.h>
#include <linux/device.h>
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 7548fb7360d9..5236f633ee36 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1287,13 +1287,39 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
return;
}
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter);
+/**
+ * ixgbe_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
+{
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+ } else {
+ synchronize_irq(adapter->pdev->irq);
+ }
+}
+
+/**
+ * ixgbe_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
+{
+ u32 mask;
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
+ IXGBE_WRITE_FLUSH(&adapter->hw);
+}
/**
* ixgbe_intr - legacy mode Interrupt Handler
* @irq: interrupt number
* @data: pointer to a network interface device structure
- * @pt_regs: CPU registers structure
**/
static irqreturn_t ixgbe_intr(int irq, void *data)
{
@@ -1394,35 +1420,6 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_irq_disable - Mask off interrupt generation on the NIC
- * @adapter: board private structure
- **/
-static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
-{
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
- IXGBE_WRITE_FLUSH(&adapter->hw);
- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
- int i;
- for (i = 0; i < adapter->num_msix_vectors; i++)
- synchronize_irq(adapter->msix_entries[i].vector);
- } else {
- synchronize_irq(adapter->pdev->irq);
- }
-}
-
-/**
- * ixgbe_irq_enable - Enable default interrupt generation settings
- * @adapter: board private structure
- **/
-static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
-{
- u32 mask;
- mask = IXGBE_EIMS_ENABLE_MASK;
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
- IXGBE_WRITE_FLUSH(&adapter->hw);
-}
-
-/**
* ixgbe_configure_msi_and_legacy - Initialize PIN (INTA...) and MSI interrupts
*
**/
@@ -2332,7 +2329,7 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* Once we know the feature-set enabled for the device, we'll cache
* the register offset the descriptor ring is assigned to.
**/
-static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
+static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
int feature_mask = 0, rss_i;
int i, txr_idx, rxr_idx;
@@ -2369,7 +2366,7 @@ static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
* number of queues at compile-time. The polling_netdev array is
* intended for Multiqueue, but should work fine with a single queue.
**/
-static int __devinit ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
+static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
@@ -2410,8 +2407,7 @@ err_tx_ring_allocation:
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
-static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter
- *adapter)
+static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
int err = 0;
int vector, v_budget;
@@ -2503,7 +2499,7 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
* - Hardware queue count (num_*_queues)
* - defined by miscellaneous hardware support/features (RSS, etc.)
**/
-static int __devinit ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
+static int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
int err;
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 81c6cdc3851f..665e70d620fc 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -912,23 +912,23 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx)
skb_put(skb, framesize);
skb->protocol = eth_type_trans(skb, jme->dev);
- if (jme_rxsum_ok(jme, rxdesc->descwb.flags))
+ if (jme_rxsum_ok(jme, le16_to_cpu(rxdesc->descwb.flags)))
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
- if (rxdesc->descwb.flags & RXWBFLAG_TAGON) {
+ if (rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_TAGON)) {
if (jme->vlgrp) {
jme->jme_vlan_rx(skb, jme->vlgrp,
- le32_to_cpu(rxdesc->descwb.vlan));
+ le16_to_cpu(rxdesc->descwb.vlan));
NET_STAT(jme).rx_bytes += 4;
}
} else {
jme->jme_rx(skb);
}
- if ((le16_to_cpu(rxdesc->descwb.flags) & RXWBFLAG_DEST) ==
- RXWBFLAG_DEST_MUL)
+ if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_DEST)) ==
+ cpu_to_le16(RXWBFLAG_DEST_MUL))
++(NET_STAT(jme).multicast);
jme->dev->last_rx = jiffies;
@@ -961,7 +961,7 @@ jme_process_receive(struct jme_adapter *jme, int limit)
rxdesc = rxring->desc;
rxdesc += i;
- if ((rxdesc->descwb.flags & RXWBFLAG_OWN) ||
+ if ((rxdesc->descwb.flags & cpu_to_le16(RXWBFLAG_OWN)) ||
!(rxdesc->descwb.desccnt & RXWBDCNT_WBCPL))
goto out;
@@ -1763,10 +1763,9 @@ jme_expand_header(struct jme_adapter *jme, struct sk_buff *skb)
}
static int
-jme_tx_tso(struct sk_buff *skb,
- u16 *mss, u8 *flags)
+jme_tx_tso(struct sk_buff *skb, __le16 *mss, u8 *flags)
{
- *mss = skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT;
+ *mss = cpu_to_le16(skb_shinfo(skb)->gso_size << TXDESC_MSS_SHIFT);
if (*mss) {
*flags |= TXFLAG_LSEN;
@@ -1826,11 +1825,11 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
}
static inline void
-jme_tx_vlan(struct sk_buff *skb, u16 *vlan, u8 *flags)
+jme_tx_vlan(struct sk_buff *skb, __le16 *vlan, u8 *flags)
{
if (vlan_tx_tag_present(skb)) {
*flags |= TXFLAG_TAGON;
- *vlan = vlan_tx_tag_get(skb);
+ *vlan = cpu_to_le16(vlan_tx_tag_get(skb));
}
}
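
The jme fixes convert in the cheaper direction: rather than calling le16_to_cpu() on every descriptor field, constant flags are converted once with cpu_to_le16() and the comparison happens in device byte order, where the swap folds away at compile time. A sketch of both styles; the flag value and the hand-rolled swap are illustrative.

#include <stdio.h>
#include <stdint.h>

/* Hand-rolled helpers: identity on a little-endian host, byte swap on
 * a big-endian one.  Compile with -DBIG_ENDIAN_HOST to simulate. */
#define swap16(x) ((uint16_t)((((x) & 0xff) << 8) | (((x) >> 8) & 0xff)))
#ifdef BIG_ENDIAN_HOST
#define cpu_to_le16(x) swap16(x)
#define le16_to_cpu(x) swap16(x)
#else
#define cpu_to_le16(x) ((uint16_t)(x))
#define le16_to_cpu(x) ((uint16_t)(x))
#endif

#define FLAG_TAGON 0x0008	/* illustrative flag bit */

int main(void)
{
	uint16_t descwb_flags = cpu_to_le16(FLAG_TAGON);	/* device order */

	/* Style 1: convert the field each time, test in CPU order. */
	if (le16_to_cpu(descwb_flags) & FLAG_TAGON)
		printf("tagged (converted field)\n");

	/* Style 2, as in the jme fix: convert the constant once and
	 * compare in device order. */
	if (descwb_flags & cpu_to_le16(FLAG_TAGON))
		printf("tagged (converted constant)\n");
	return 0;
}
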
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index f863aee6648b..3f5d91543246 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -22,7 +22,7 @@
*/
#ifndef __JME_H_INCLUDED__
-#define __JME_H_INCLUDEE__
+#define __JME_H_INCLUDED__
#define DRV_NAME "jme"
#define DRV_VERSION "1.0.3"
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 3b43bfd85a0f..b1ac63ab8c16 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -76,15 +76,6 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev)
skb->protocol = eth_type_trans(skb,dev);
-#ifdef LOOPBACK_TSO
- if (skb_is_gso(skb)) {
- BUG_ON(skb->protocol != htons(ETH_P_IP));
- BUG_ON(ip_hdr(skb)->protocol != IPPROTO_TCP);
-
- emulate_large_send_offload(skb);
- return 0;
- }
-#endif
dev->last_rx = jiffies;
/* it's OK to use per_cpu_ptr() because BHs are off */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 42394505bb50..590039cbb146 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -70,6 +70,9 @@ static void macvlan_broadcast(struct sk_buff *skb,
struct sk_buff *nskb;
unsigned int i;
+ if (skb->protocol == htons(ETH_P_PAUSE))
+ return;
+
for (i = 0; i < MACVLAN_HASH_SIZE; i++) {
hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) {
dev = vlan->dev;
diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile
index 0952a6528f58..a7a97bf998f8 100644
--- a/drivers/net/mlx4/Makefile
+++ b/drivers/net/mlx4/Makefile
@@ -1,4 +1,9 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o profile.o qp.o reset.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o srq.o
+
+obj-$(CONFIG_MLX4_EN) += mlx4_en.o
+
+mlx4_en-y := en_main.o en_tx.o en_rx.o en_params.o en_port.o en_cq.o \
+ en_resources.o en_netdev.o
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index b411b79d72ad..ad95d5f7b630 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -48,13 +48,16 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
obj = find_next_zero_bit(bitmap->table, bitmap->max, bitmap->last);
if (obj >= bitmap->max) {
- bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
obj = find_first_zero_bit(bitmap->table, bitmap->max);
}
if (obj < bitmap->max) {
set_bit(obj, bitmap->table);
- bitmap->last = (obj + 1) & (bitmap->max - 1);
+ bitmap->last = (obj + 1);
+ if (bitmap->last == bitmap->max)
+ bitmap->last = 0;
obj |= bitmap->top;
} else
obj = -1;
@@ -66,16 +69,90 @@ u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap)
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj)
{
- obj &= bitmap->max - 1;
+ mlx4_bitmap_free_range(bitmap, obj, 1);
+}
+
+static unsigned long find_aligned_range(unsigned long *bitmap,
+ u32 start, u32 nbits,
+ int len, int align)
+{
+ unsigned long end, i;
+
+again:
+ start = ALIGN(start, align);
+
+ while ((start < nbits) && test_bit(start, bitmap))
+ start += align;
+
+ if (start >= nbits)
+ return -1;
+
+ end = start+len;
+ if (end > nbits)
+ return -1;
+
+ for (i = start + 1; i < end; i++) {
+ if (test_bit(i, bitmap)) {
+ start = i + 1;
+ goto again;
+ }
+ }
+
+ return start;
+}
+
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align)
+{
+ u32 obj, i;
+
+ if (likely(cnt == 1 && align == 1))
+ return mlx4_bitmap_alloc(bitmap);
+
+ spin_lock(&bitmap->lock);
+
+ obj = find_aligned_range(bitmap->table, bitmap->last,
+ bitmap->max, cnt, align);
+ if (obj >= bitmap->max) {
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
+ obj = find_aligned_range(bitmap->table, 0, bitmap->max,
+ cnt, align);
+ }
+
+ if (obj < bitmap->max) {
+ for (i = 0; i < cnt; i++)
+ set_bit(obj + i, bitmap->table);
+ if (obj == bitmap->last) {
+ bitmap->last = (obj + cnt);
+ if (bitmap->last >= bitmap->max)
+ bitmap->last = 0;
+ }
+ obj |= bitmap->top;
+ } else
+ obj = -1;
+
+ spin_unlock(&bitmap->lock);
+
+ return obj;
+}
+
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt)
+{
+ u32 i;
+
+ obj &= bitmap->max + bitmap->reserved_top - 1;
spin_lock(&bitmap->lock);
- clear_bit(obj, bitmap->table);
+ for (i = 0; i < cnt; i++)
+ clear_bit(obj + i, bitmap->table);
bitmap->last = min(bitmap->last, obj);
- bitmap->top = (bitmap->top + bitmap->max) & bitmap->mask;
+ bitmap->top = (bitmap->top + bitmap->max + bitmap->reserved_top)
+ & bitmap->mask;
spin_unlock(&bitmap->lock);
}
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved)
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top)
{
int i;
@@ -85,14 +162,16 @@ int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved
bitmap->last = 0;
bitmap->top = 0;
- bitmap->max = num;
+ bitmap->max = num - reserved_top;
bitmap->mask = mask;
+ bitmap->reserved_top = reserved_top;
spin_lock_init(&bitmap->lock);
- bitmap->table = kzalloc(BITS_TO_LONGS(num) * sizeof (long), GFP_KERNEL);
+ bitmap->table = kzalloc(BITS_TO_LONGS(bitmap->max) *
+ sizeof (long), GFP_KERNEL);
if (!bitmap->table)
return -ENOMEM;
- for (i = 0; i < reserved; ++i)
+ for (i = 0; i < reserved_bot; ++i)
set_bit(i, bitmap->table);
return 0;
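
find_aligned_range() in the new allocator scans for cnt consecutive clear bits starting at an aligned offset, restarting past any set bit it collides with. A compact user-space version over a single 32-bit word; the kernel version works on unsigned long arrays via test_bit(), but the control flow is the same.

#include <stdio.h>
#include <stdint.h>

#define NBITS 32

static int test_bit(uint32_t map, int i) { return (map >> i) & 1; }

/* Find `len` consecutive clear bits starting at an `align`-aligned
 * offset; returns the start bit or -1.  Mirrors find_aligned_range(). */
static int find_aligned_range(uint32_t map, int start, int len, int align)
{
	int i, end;

again:
	start = (start + align - 1) / align * align;	/* ALIGN() */
	while (start < NBITS && test_bit(map, start))
		start += align;
	end = start + len;
	if (end > NBITS)
		return -1;
	for (i = start + 1; i < end; i++)
		if (test_bit(map, i)) {
			start = i + 1;
			goto again;
		}
	return start;
}

int main(void)
{
	uint32_t map = 0x0000001F;	/* bits 0..4 already allocated */

	/* 4 free bits at 4-bit alignment: bit 4 blocks the 4..7 slot,
	 * so the first fit is at bit 8. */
	printf("start = %d\n", find_aligned_range(map, 0, 4, 4));
	return 0;
}
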
diff --git a/drivers/net/mlx4/cq.c b/drivers/net/mlx4/cq.c
index 9bb50e3f8974..b7ad2829d67e 100644
--- a/drivers/net/mlx4/cq.c
+++ b/drivers/net/mlx4/cq.c
@@ -300,7 +300,7 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
- dev->caps.num_cqs - 1, dev->caps.reserved_cqs);
+ dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/en_cq.c b/drivers/net/mlx4/en_cq.c
new file mode 100644
index 000000000000..1368a8010af4
--- /dev/null
+++ b/drivers/net/mlx4/en_cq.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+static void mlx4_en_cq_event(struct mlx4_cq *cq, enum mlx4_event event)
+{
+ return;
+}
+
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv,
+ struct mlx4_en_cq *cq,
+ int entries, int ring, enum cq_type mode)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ cq->size = entries;
+ if (mode == RX)
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
+ else
+ cq->buf_size = sizeof(struct mlx4_cqe);
+
+ cq->ring = ring;
+ cq->is_tx = mode;
+ spin_lock_init(&cq->lock);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
+ cq->buf_size, 2 * PAGE_SIZE);
+ if (err)
+ return err;
+
+ err = mlx4_en_map_buffer(&cq->wqres.buf);
+ if (err)
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+
+ return err;
+}
+
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ cq->dev = mdev->pndev[priv->port];
+ cq->mcq.set_ci_db = cq->wqres.db.db;
+ cq->mcq.arm_db = cq->wqres.db.db + 1;
+ *cq->mcq.set_ci_db = 0;
+ *cq->mcq.arm_db = 0;
+ cq->buf = (struct mlx4_cqe *) cq->wqres.buf.direct.buf;
+ memset(cq->buf, 0, cq->buf_size);
+
+ err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
+ cq->wqres.db.dma, &cq->mcq, cq->is_tx);
+ if (err)
+ return err;
+
+ cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
+ cq->mcq.event = mlx4_en_cq_event;
+
+ if (cq->is_tx) {
+ init_timer(&cq->timer);
+ cq->timer.function = mlx4_en_poll_tx_cq;
+ cq->timer.data = (unsigned long) cq;
+ } else {
+ netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
+ napi_enable(&cq->napi);
+ }
+
+ return 0;
+}
+
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_en_unmap_buffer(&cq->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
+ cq->buf_size = 0;
+ cq->buf = NULL;
+}
+
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (cq->is_tx)
+ del_timer(&cq->timer);
+ else
+ napi_disable(&cq->napi);
+
+ mlx4_cq_free(mdev->dev, &cq->mcq);
+}
+
+/* Set rx cq moderation parameters */
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ return mlx4_cq_modify(priv->mdev->dev, &cq->mcq,
+ cq->moder_cnt, cq->moder_time);
+}
+
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
+{
+ cq->armed = 1;
+ mlx4_cq_arm(&cq->mcq, MLX4_CQ_DB_REQ_NOT, priv->mdev->uar_map,
+ &priv->mdev->uar_lock);
+
+ return 0;
+}
+
+
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
new file mode 100644
index 000000000000..4b9794e97a79
--- /dev/null
+++ b/drivers/net/mlx4/en_main.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4_en.h"
+
+MODULE_AUTHOR("Liran Liss, Yevgeny Petrilin");
+MODULE_DESCRIPTION("Mellanox ConnectX HCA Ethernet driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION " ("DRV_RELDATE")");
+
+static const char mlx4_en_version[] =
+ DRV_NAME ": Mellanox ConnectX HCA Ethernet driver v"
+ DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
+ enum mlx4_dev_event event, int port)
+{
+ struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
+ struct mlx4_en_priv *priv;
+
+ if (!mdev->pndev[port])
+ return;
+
+ priv = netdev_priv(mdev->pndev[port]);
+ switch (event) {
+ case MLX4_DEV_EVENT_PORT_UP:
+ case MLX4_DEV_EVENT_PORT_DOWN:
+ /* To prevent races, we poll the link state in a separate
+ task rather than changing it here */
+ priv->link_state = event;
+ queue_work(mdev->workqueue, &priv->linkstate_task);
+ break;
+
+ case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+ mlx4_err(mdev, "Internal error detected, restarting device\n");
+ break;
+
+ default:
+ mlx4_warn(mdev, "Unhandled event: %d\n", event);
+ }
+}
+
+static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
+{
+ struct mlx4_en_dev *mdev = endev_ptr;
+ int i;
+
+ mutex_lock(&mdev->state_lock);
+ mdev->device_up = false;
+ mutex_unlock(&mdev->state_lock);
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ if (mdev->pndev[i])
+ mlx4_en_destroy_netdev(mdev->pndev[i]);
+
+ flush_workqueue(mdev->workqueue);
+ destroy_workqueue(mdev->workqueue);
+ mlx4_mr_free(dev, &mdev->mr);
+ mlx4_uar_free(dev, &mdev->priv_uar);
+ mlx4_pd_free(dev, mdev->priv_pdn);
+ kfree(mdev);
+}
+
+static void *mlx4_en_add(struct mlx4_dev *dev)
+{
+ static int mlx4_en_version_printed;
+ struct mlx4_en_dev *mdev;
+ int i;
+ int err;
+
+ if (!mlx4_en_version_printed) {
+ printk(KERN_INFO "%s", mlx4_en_version);
+ mlx4_en_version_printed++;
+ }
+
+ mdev = kzalloc(sizeof *mdev, GFP_KERNEL);
+ if (!mdev) {
+ dev_err(&dev->pdev->dev, "Device struct alloc failed, "
+ "aborting.\n");
+ err = -ENOMEM;
+ goto err_free_res;
+ }
+
+ if (mlx4_pd_alloc(dev, &mdev->priv_pdn))
+ goto err_free_dev;
+
+ if (mlx4_uar_alloc(dev, &mdev->priv_uar))
+ goto err_pd;
+
+ mdev->uar_map = ioremap(mdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!mdev->uar_map)
+ goto err_uar;
+ spin_lock_init(&mdev->uar_lock);
+
+ mdev->dev = dev;
+ mdev->dma_device = &(dev->pdev->dev);
+ mdev->pdev = dev->pdev;
+ mdev->device_up = false;
+
+ mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
+ if (!mdev->LSO_support)
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later "
+ "FW version to enable LSO\n");
+
+ if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
+ MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
+ 0, 0, &mdev->mr)) {
+ mlx4_err(mdev, "Failed allocating memory region\n");
+ goto err_uar;
+ }
+ if (mlx4_mr_enable(mdev->dev, &mdev->mr)) {
+ mlx4_err(mdev, "Failed enabling memory region\n");
+ goto err_mr;
+ }
+
+ /* Build device profile according to supplied module parameters */
+ err = mlx4_en_get_profile(mdev);
+ if (err) {
+ mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ goto err_mr;
+ }
+
+ /* Configure which ports to start according to module parameters */
+ mdev->port_cnt = 0;
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
+ mdev->port_cnt++;
+
+ /* If we did not receive an explicit number of Rx rings, default to
+ * the number of completion vectors populated by the mlx4_core */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Using %d tx rings for port:%d\n",
+ mdev->profile.prof[i].tx_ring_num, i);
+ if (!mdev->profile.prof[i].rx_ring_num) {
+ mdev->profile.prof[i].rx_ring_num = 1;
+ mlx4_info(mdev, "Defaulting to %d rx rings for port:%d\n",
+ 1, i);
+ } else
+ mlx4_info(mdev, "Using %d rx rings for port:%d\n",
+ mdev->profile.prof[i].rx_ring_num, i);
+ }
+
+ /* Create our own workqueue for reset/multicast tasks
+ * Note: we cannot use the shared workqueue because of deadlocks caused
+ * by the rtnl lock */
+ mdev->workqueue = create_singlethread_workqueue("mlx4_en");
+ if (!mdev->workqueue) {
+ err = -ENOMEM;
+ goto err_close_nic;
+ }
+
+ /* At this stage all non-port specific tasks are complete:
+ * mark the card state as up */
+ mutex_init(&mdev->state_lock);
+ mdev->device_up = true;
+
+ /* Setup ports */
+
+ /* Create a netdev for each port */
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ mlx4_info(mdev, "Activating port:%d\n", i);
+ if (mlx4_en_init_netdev(mdev, i, &mdev->profile.prof[i])) {
+ mdev->pndev[i] = NULL;
+ goto err_free_netdev;
+ }
+ }
+ return mdev;
+
+
+err_free_netdev:
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (mdev->pndev[i])
+ mlx4_en_destroy_netdev(mdev->pndev[i]);
+ }
+
+ mutex_lock(&mdev->state_lock);
+ mdev->device_up = false;
+ mutex_unlock(&mdev->state_lock);
+ flush_workqueue(mdev->workqueue);
+
+ /* Stop event queue before we drop down to release shared SW state */
+
+err_close_nic:
+ destroy_workqueue(mdev->workqueue);
+err_mr:
+ mlx4_mr_free(dev, &mdev->mr);
+err_uar:
+ mlx4_uar_free(dev, &mdev->priv_uar);
+err_pd:
+ mlx4_pd_free(dev, mdev->priv_pdn);
+err_free_dev:
+ kfree(mdev);
+err_free_res:
+ return NULL;
+}
+
+static struct mlx4_interface mlx4_en_interface = {
+ .add = mlx4_en_add,
+ .remove = mlx4_en_remove,
+ .event = mlx4_en_event,
+};
+
+static int __init mlx4_en_init(void)
+{
+ return mlx4_register_interface(&mlx4_en_interface);
+}
+
+static void __exit mlx4_en_cleanup(void)
+{
+ mlx4_unregister_interface(&mlx4_en_interface);
+}
+
+module_init(mlx4_en_init);
+module_exit(mlx4_en_cleanup);
+
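
mlx4_en_add() unwinds failures with the canonical goto ladder: each allocation jumps to a label that releases only what was acquired before it, in reverse order. A skeleton of the pattern with stand-in resources.

#include <stdio.h>
#include <stdlib.h>

static void *alloc_res(const char *name, int fail)
{
	if (fail) { fprintf(stderr, "%s failed\n", name); return NULL; }
	printf("%s acquired\n", name);
	return malloc(1);
}

static int setup(void)
{
	void *pd, *uar, *mr;

	pd = alloc_res("pd", 0);
	if (!pd)
		goto err_out;
	uar = alloc_res("uar", 0);
	if (!uar)
		goto err_pd;
	mr = alloc_res("mr", 1);	/* simulate a late failure */
	if (!mr)
		goto err_uar;
	return 0;

	/* Unwind in strict reverse order of acquisition. */
err_uar:
	free(uar);
err_pd:
	free(pd);
err_out:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
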
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
new file mode 100644
index 000000000000..96e709d6440a
--- /dev/null
+++ b/drivers/net/mlx4/en_netdev.c
@@ -0,0 +1,1088 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/delay.h>
+
+#include <linux/mlx4/driver.h>
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/cq.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+
+static void mlx4_en_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ mlx4_dbg(HW, priv, "Registering VLAN group:%p\n", grp);
+ priv->vlgrp = grp;
+
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, grp);
+ if (err)
+ mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ if (!priv->vlgrp)
+ return;
+
+ mlx4_dbg(HW, priv, "adding VLAN:%d (vlgrp entry:%p)\n",
+ vid, vlan_group_get_device(priv->vlgrp, vid));
+
+ /* Add VID to port VLAN filter */
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ if (err)
+ mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ if (!priv->vlgrp)
+ return;
+
+ mlx4_dbg(HW, priv, "Killing VID:%d (vlgrp:%p vlgrp "
+ "entry:%p)\n", vid, priv->vlgrp,
+ vlan_group_get_device(priv->vlgrp, vid));
+ vlan_group_set_device(priv->vlgrp, vid, NULL);
+
+ /* Remove VID from port VLAN filter */
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up && priv->port_up) {
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ if (err)
+ mlx4_err(mdev, "Failed configuring VLAN filter\n");
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static u64 mlx4_en_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
+
+static int mlx4_en_set_mac(struct net_device *dev, void *addr)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ priv->mac = mlx4_en_mac_to_u64(dev->dev_addr);
+ queue_work(mdev->workqueue, &priv->mac_task);
+ return 0;
+}
+
+static void mlx4_en_do_set_mac(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ mac_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ /* Remove old MAC and insert the new one */
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+ err = mlx4_register_mac(mdev->dev, priv->port,
+ priv->mac, &priv->mac_index);
+ if (err)
+ mlx4_err(mdev, "Failed changing HW MAC address\n");
+ } else
+ mlx4_dbg(HW, priv, "Port is down, exiting...\n");
+
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_clear_list(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct dev_mc_list *plist = priv->mc_list;
+ struct dev_mc_list *next;
+
+ while (plist) {
+ next = plist->next;
+ kfree(plist);
+ plist = next;
+ }
+ priv->mc_list = NULL;
+}
+
+static void mlx4_en_cache_mclist(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct dev_mc_list *mclist;
+ struct dev_mc_list *tmp;
+ struct dev_mc_list *plist = NULL;
+
+ for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
+ tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
+ if (!tmp) {
+ mlx4_err(mdev, "failed to allocate multicast list\n");
+ mlx4_en_clear_list(dev);
+ return;
+ }
+ memcpy(tmp, mclist, sizeof(struct dev_mc_list));
+ tmp->next = NULL;
+ if (plist)
+ plist->next = tmp;
+ else
+ priv->mc_list = tmp;
+ plist = tmp;
+ }
+}
+
+
+static void mlx4_en_set_multicast(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->port_up)
+ return;
+
+ queue_work(priv->mdev->workqueue, &priv->mcast_task);
+}
+
+static void mlx4_en_do_set_multicast(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ mcast_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ struct dev_mc_list *mclist;
+ u64 mcast_addr = 0;
+ int err;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ mlx4_dbg(HW, priv, "Card is not up, ignoring "
+ "multicast change.\n");
+ goto out;
+ }
+ if (!priv->port_up) {
+ mlx4_dbg(HW, priv, "Port is down, ignoring "
+ "multicast change.\n");
+ goto out;
+ }
+
+ /*
+ * Promiscuous mode: disable all filters
+ */
+
+ if (dev->flags & IFF_PROMISC) {
+ if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
+ if (netif_msg_rx_status(priv))
+ mlx4_warn(mdev, "Port:%d entering promiscuous mode\n",
+ priv->port);
+ priv->flags |= MLX4_EN_FLAG_PROMISC;
+
+ /* Enable promiscuous mode */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 1);
+ if (err)
+ mlx4_err(mdev, "Failed enabling "
+ "promiscous mode\n");
+
+ /* Disable port multicast filter (unconditionally) */
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ mlx4_err(mdev, "Failed disabling "
+ "multicast filter\n");
+
+ /* Disable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, NULL);
+ if (err)
+ mlx4_err(mdev, "Failed disabling "
+ "VLAN filter\n");
+ }
+ goto out;
+ }
+
+ /*
+ * Not in promiscuous mode
+ */
+
+ if (priv->flags & MLX4_EN_FLAG_PROMISC) {
+ if (netif_msg_rx_status(priv))
+ mlx4_warn(mdev, "Port:%d leaving promiscuous mode\n",
+ priv->port);
+ priv->flags &= ~MLX4_EN_FLAG_PROMISC;
+
+ /* Disable promiscuous mode */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
+ priv->base_qpn, 0);
+ if (err)
+ mlx4_err(mdev, "Failed disabling promiscous mode\n");
+
+ /* Enable port VLAN filter */
+ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv->port, priv->vlgrp);
+ if (err)
+ mlx4_err(mdev, "Failed enabling VLAN filter\n");
+ }
+
+ /* Enable/disable the multicast filter according to IFF_ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ mlx4_err(mdev, "Failed disabling multicast filter\n");
+ } else {
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_DISABLE);
+ if (err)
+ mlx4_err(mdev, "Failed disabling multicast filter\n");
+
+ /* Flush mcast filter and init it with broadcast address */
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
+ 1, MLX4_MCAST_CONFIG);
+
+ /* Update multicast list - we cache all addresses so they won't
+ * change while HW is updated holding the command semaphore */
+ netif_tx_lock_bh(dev);
+ mlx4_en_cache_mclist(dev);
+ netif_tx_unlock_bh(dev);
+ for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
+ mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
+ mcast_addr, 0, MLX4_MCAST_CONFIG);
+ }
+ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
+ 0, MLX4_MCAST_ENABLE);
+ if (err)
+ mlx4_err(mdev, "Failed enabling multicast filter\n");
+
+ mlx4_en_clear_list(dev);
+ }
+out:
+ mutex_unlock(&mdev->state_lock);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mlx4_en_netpoll(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_cq *cq;
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ spin_lock_irqsave(&cq->lock, flags);
+ napi_synchronize(&cq->napi);
+ mlx4_en_process_rx_cq(dev, cq, 0);
+ spin_unlock_irqrestore(&cq->lock, flags);
+ }
+}
+#endif
+
+static void mlx4_en_tx_timeout(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (netif_msg_timer(priv))
+ mlx4_warn(mdev, "Tx timeout called on port:%d\n", priv->port);
+
+ if (netif_carrier_ok(dev)) {
+ priv->port_stats.tx_timeout++;
+ mlx4_dbg(DRV, priv, "Scheduling watchdog\n");
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+}
+
+
+static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ spin_lock_bh(&priv->stats_lock);
+ memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ spin_unlock_bh(&priv->stats_lock);
+
+ return &priv->ret_stats;
+}
+
+static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ int i;
+
+ /* If we haven't received a specific coalescing setting
+ * (module param), we set the moderation parameters as follows:
+ * - moder_cnt is set to the number of MTU-sized packets to
+ * satisfy our coalescing target.
+ * - moder_time is set to a fixed value.
+ */
+ priv->rx_frames = (mdev->profile.rx_moder_cnt ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TARGET /
+ priv->dev->mtu + 1 :
+ mdev->profile.rx_moder_cnt;
+ priv->rx_usecs = (mdev->profile.rx_moder_time ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TIME :
+ mdev->profile.rx_moder_time;
+ mlx4_dbg(INTR, priv, "Default coalesing params for mtu:%d - "
+ "rx_frames:%d rx_usecs:%d\n",
+ priv->dev->mtu, priv->rx_frames, priv->rx_usecs);
+
+ /* Setup cq moderation params */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ cq->moder_cnt = priv->rx_frames;
+ cq->moder_time = priv->rx_usecs;
+ }
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = &priv->tx_cq[i];
+ cq->moder_cnt = MLX4_EN_TX_COAL_PKTS;
+ cq->moder_time = MLX4_EN_TX_COAL_TIME;
+ }
+
+ /* Reset auto-moderation params */
+ priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
+ priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
+ priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
+ priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
+ priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
+ priv->adaptive_rx_coal = mdev->profile.auto_moder;
+ priv->last_moder_time = MLX4_EN_AUTO_CONF;
+ priv->last_moder_jiffies = 0;
+ priv->last_moder_packets = 0;
+ priv->last_moder_tx_packets = 0;
+ priv->last_moder_bytes = 0;
+}
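+
+/* Editor's illustration of the auto-configured rx_frames value above:
+ * with a hypothetical coalescing target of 64 KB (the real constant
+ * is MLX4_EN_RX_COAL_TARGET, defined in mlx4_en.h), an MTU of 1500
+ * gives 65536 / 1500 + 1 = 44 frames per interrupt, while a
+ * 9000-byte MTU gives 65536 / 9000 + 1 = 8, so larger MTUs coalesce
+ * fewer frames to cover roughly the same number of bytes. */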
+
+static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
+{
+ unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ unsigned long packets;
+ unsigned long rate;
+ unsigned long avg_pkt_size;
+ unsigned long rx_packets;
+ unsigned long rx_bytes;
+ unsigned long tx_packets;
+ unsigned long tx_pkt_diff;
+ unsigned long rx_pkt_diff;
+ int moder_time;
+ int i, err;
+
+ if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
+ return;
+
+ spin_lock_bh(&priv->stats_lock);
+ rx_packets = priv->stats.rx_packets;
+ rx_bytes = priv->stats.rx_bytes;
+ tx_packets = priv->stats.tx_packets;
+ spin_unlock_bh(&priv->stats_lock);
+
+ if (!priv->last_moder_jiffies || !period)
+ goto out;
+
+ tx_pkt_diff = ((unsigned long) (tx_packets -
+ priv->last_moder_tx_packets));
+ rx_pkt_diff = ((unsigned long) (rx_packets -
+ priv->last_moder_packets));
+ packets = max(tx_pkt_diff, rx_pkt_diff);
+ rate = packets * HZ / period;
+ avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
+ priv->last_moder_bytes)) / packets : 0;
+
+ /* Apply auto-moderation only when the packet rate exceeds a rate at
+ * which moderation matters */
+ if (rate > MLX4_EN_RX_RATE_THRESH) {
+ /* If tx and rx packet rates are not balanced, assume that
+ * traffic is mainly BW bound and apply maximum moderation.
+ * Otherwise, moderate according to packet rate */
+ if (2 * tx_pkt_diff > 3 * rx_pkt_diff ||
+ 2 * rx_pkt_diff > 3 * tx_pkt_diff) {
+ moder_time = priv->rx_usecs_high;
+ } else {
+ if (rate < priv->pkt_rate_low)
+ moder_time = priv->rx_usecs_low;
+ else if (rate > priv->pkt_rate_high)
+ moder_time = priv->rx_usecs_high;
+ else
+ moder_time = (rate - priv->pkt_rate_low) *
+ (priv->rx_usecs_high - priv->rx_usecs_low) /
+ (priv->pkt_rate_high - priv->pkt_rate_low) +
+ priv->rx_usecs_low;
+ }
+ } else {
+ /* When packet rate is low, use default moderation rather than
+ * 0 to prevent interrupt storms if traffic suddenly increases */
+ moder_time = priv->rx_usecs;
+ }
+
+ mlx4_dbg(INTR, priv, "tx rate:%lu rx_rate:%lu\n",
+ tx_pkt_diff * HZ / period, rx_pkt_diff * HZ / period);
+
+ mlx4_dbg(INTR, priv, "Rx moder_time changed from:%d to %d period:%lu "
+ "[jiff] packets:%lu avg_pkt_size:%lu rate:%lu [p/s])\n",
+ priv->last_moder_time, moder_time, period, packets,
+ avg_pkt_size, rate);
+
+ if (moder_time != priv->last_moder_time) {
+ priv->last_moder_time = moder_time;
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ cq->moder_time = moder_time;
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed modifying moderation for cq:%d "
+ "on port:%d\n", i, priv->port);
+ break;
+ }
+ }
+ }
+
+out:
+ priv->last_moder_packets = rx_packets;
+ priv->last_moder_tx_packets = tx_packets;
+ priv->last_moder_bytes = rx_bytes;
+ priv->last_moder_jiffies = jiffies;
+}
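+
+/* Editor's sketch of the mid-range branch above in isolation: the
+ * moderation time is a linear interpolation between the low and high
+ * settings as the packet rate moves between the two thresholds.
+ * For example, with rx_usecs_low = 0, rx_usecs_high = 128,
+ * pkt_rate_low = 400000 and pkt_rate_high = 450000 (made-up numbers),
+ * a rate of 425000 p/s yields (25000 * 128) / 50000 = 64 usec. */
+static int example_interpolate_moder_time(unsigned long rate,
+ unsigned long rate_low, unsigned long rate_high,
+ int usecs_low, int usecs_high)
+{
+ if (rate <= rate_low)
+ return usecs_low;
+ if (rate >= rate_high)
+ return usecs_high;
+ return (rate - rate_low) * (usecs_high - usecs_low) /
+ (rate_high - rate_low) + usecs_low;
+}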
+
+static void mlx4_en_do_get_stats(struct work_struct *work)
+{
+ struct delayed_work *delay = container_of(work, struct delayed_work, work);
+ struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ stats_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
+ if (err)
+ mlx4_dbg(HW, priv, "Could not update stats for "
+ "port:%d\n", priv->port);
+
+ mutex_lock(&mdev->state_lock);
+ if (mdev->device_up) {
+ if (priv->port_up)
+ mlx4_en_auto_moderation(priv);
+
+ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ }
+ mutex_unlock(&mdev->state_lock);
+}
+
+static void mlx4_en_linkstate(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ linkstate_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int linkstate = priv->link_state;
+
+ mutex_lock(&mdev->state_lock);
+ /* If the observable port state changed, set the carrier state and
+ * report to the system log */
+ if (priv->last_link_state != linkstate) {
+ if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
+ if (netif_msg_link(priv))
+ mlx4_info(mdev, "Port %d - link down\n", priv->port);
+ netif_carrier_off(priv->dev);
+ } else {
+ if (netif_msg_link(priv))
+ mlx4_info(mdev, "Port %d - link up\n", priv->port);
+ netif_carrier_on(priv->dev);
+ }
+ }
+ priv->last_link_state = linkstate;
+ mutex_unlock(&mdev->state_lock);
+}
+
+
+static int mlx4_en_start_port(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_cq *cq;
+ struct mlx4_en_tx_ring *tx_ring;
+ struct mlx4_en_rx_ring *rx_ring;
+ int rx_index = 0;
+ int tx_index = 0;
+ u16 stride;
+ int err = 0;
+ int i;
+ int j;
+
+ if (priv->port_up) {
+ mlx4_dbg(DRV, priv, "start port called while port already up\n");
+ return 0;
+ }
+
+ /* Calculate Rx buf size */
+ dev->mtu = min(dev->mtu, priv->max_mtu);
+ mlx4_en_calc_rx_buf(dev);
+ mlx4_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);
+ stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * priv->num_frags);
+ /* Configure rx cq's and rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ cq = &priv->rx_cq[i];
+ rx_ring = &priv->rx_ring[i];
+
+ err = mlx4_en_activate_cq(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed activating Rx CQ\n");
+ goto rx_err;
+ }
+ for (j = 0; j < cq->size; j++)
+ cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed setting cq moderation parameters");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto cq_err;
+ }
+ mlx4_en_arm_cq(priv, cq);
+
+ ++rx_index;
+ }
+
+ err = mlx4_en_activate_rx_rings(priv);
+ if (err) {
+ mlx4_err(mdev, "Failed to activate RX rings\n");
+ goto cq_err;
+ }
+
+ err = mlx4_en_config_rss_steer(priv);
+ if (err) {
+ mlx4_err(mdev, "Failed configuring rss steering\n");
+ goto rx_err;
+ }
+
+ /* Configure tx cq's and rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ /* Configure cq */
+ cq = &priv->tx_cq[i];
+ err = mlx4_en_activate_cq(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating Tx CQ\n");
+ goto tx_err;
+ }
+ err = mlx4_en_set_cq_moder(priv, cq);
+ if (err) {
+ mlx4_err(mdev, "Failed setting cq moderation parameters");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ mlx4_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
+ cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+ /* Configure ring */
+ tx_ring = &priv->tx_ring[i];
+ err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
+ priv->rx_ring[0].srq.srqn);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating Tx ring\n");
+ mlx4_en_deactivate_cq(priv, cq);
+ goto tx_err;
+ }
+ /* Set initial ownership of all Tx TXBBs to SW (1) */
+ for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+ *((u32 *) (tx_ring->buf + j)) = 0xffffffff;
+ ++tx_index;
+ }
+
+ /* Configure port */
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp);
+ if (err) {
+ mlx4_err(mdev, "Failed setting port general configurations"
+ " for port %d, with error %d\n", priv->port, err);
+ goto tx_err;
+ }
+ /* Set default qp number */
+ err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
+ if (err) {
+ mlx4_err(mdev, "Failed setting default qp numbers\n");
+ goto tx_err;
+ }
+ /* Set port MAC address */
+ mlx4_dbg(DRV, priv, "Setting MAC for port %d\n", priv->port);
+ err = mlx4_register_mac(mdev->dev, priv->port,
+ priv->mac, &priv->mac_index);
+ if (err) {
+ mlx4_err(mdev, "Failed setting port mac\n");
+ goto tx_err;
+ }
+
+ /* Init port */
+ mlx4_dbg(HW, priv, "Initializing port\n");
+ err = mlx4_INIT_PORT(mdev->dev, priv->port);
+ if (err) {
+ mlx4_err(mdev, "Failed Initializing port\n");
+ goto mac_err;
+ }
+
+ /* Schedule multicast task to populate multicast list */
+ queue_work(mdev->workqueue, &priv->mcast_task);
+
+ priv->port_up = true;
+ netif_start_queue(dev);
+ return 0;
+
+mac_err:
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+tx_err:
+ while (tx_index--) {
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
+ }
+
+ mlx4_en_release_rss_steer(priv);
+rx_err:
+ for (i = 0; i < priv->rx_ring_num; i++)
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+cq_err:
+ while (rx_index--)
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
+
+ return err; /* need to close devices */
+}
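+
+/* Editor's note: the error path above uses the kernel's stacked
+ * unwind idiom - each label releases only what was set up before the
+ * failing step.  Schematically (names purely illustrative):
+ *
+ * if (setup_a()) goto out;
+ * if (setup_b()) goto undo_a;
+ * if (setup_c()) goto undo_b;
+ * return 0;
+ * undo_b: teardown_b();
+ * undo_a: teardown_a();
+ * out: return err;
+ */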
+
+
+static void mlx4_en_stop_port(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int i;
+
+ if (!priv->port_up) {
+ mlx4_dbg(DRV, priv, "stop port (%d) called while port already down\n",
+ priv->port);
+ return;
+ }
+ netif_stop_queue(dev);
+
+ /* Synchronize with tx routine */
+ netif_tx_lock_bh(dev);
+ priv->port_up = false;
+ netif_tx_unlock_bh(dev);
+
+ /* Close port */
+ mlx4_CLOSE_PORT(mdev->dev, priv->port);
+
+ /* Unregister MAC address for the port */
+ mlx4_unregister_mac(mdev->dev, priv->port, priv->mac_index);
+
+ /* Free TX Rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
+ mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
+ }
+ msleep(10);
+
+ for (i = 0; i < priv->tx_ring_num; i++)
+ mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);
+
+ /* Free RSS qps */
+ mlx4_en_release_rss_steer(priv);
+
+ /* Free RX Rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
+ while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
+ msleep(1);
+ mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
+ }
+}
+
+static void mlx4_en_restart(struct work_struct *work)
+{
+ struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
+ watchdog_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+
+ mlx4_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+ mlx4_en_stop_port(dev);
+ if (mlx4_en_start_port(dev))
+ mlx4_err(mdev, "Failed restarting port %d\n", priv->port);
+}
+
+
+static int mlx4_en_open(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int i;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+
+ if (!mdev->device_up) {
+ mlx4_err(mdev, "Cannot open - device down/disabled\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Reset HW statistics and performance counters */
+ if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
+ mlx4_dbg(HW, priv, "Failed dumping statistics\n");
+
+ memset(&priv->stats, 0, sizeof(priv->stats));
+ memset(&priv->pstats, 0, sizeof(priv->pstats));
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ priv->tx_ring[i].bytes = 0;
+ priv->tx_ring[i].packets = 0;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_ring[i].bytes = 0;
+ priv->rx_ring[i].packets = 0;
+ }
+
+ mlx4_en_set_default_moderation(priv);
+ err = mlx4_en_start_port(dev);
+ if (err)
+ mlx4_err(mdev, "Failed starting port:%d\n", priv->port);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
+
+static int mlx4_en_close(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (netif_msg_ifdown(priv))
+ mlx4_info(mdev, "Close called for port:%d\n", priv->port);
+
+ mutex_lock(&mdev->state_lock);
+
+ mlx4_en_stop_port(dev);
+ netif_carrier_off(dev);
+
+ mutex_unlock(&mdev->state_lock);
+ return 0;
+}
+
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (priv->tx_ring[i].tx_info)
+ mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
+ if (priv->tx_cq[i].buf)
+ mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+ }
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (priv->rx_ring[i].rx_info)
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ if (priv->rx_cq[i].buf)
+ mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
+ }
+}
+
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile *prof = priv->prof;
+ int i;
+
+ /* Create tx Rings */
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
+ prof->tx_ring_size, i, TX))
+ goto err;
+
+ if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
+ prof->tx_ring_size, TXBB_SIZE))
+ goto err;
+ }
+
+ /* Create rx Rings */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
+ prof->rx_ring_size, i, RX))
+ goto err;
+
+ if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
+ prof->rx_ring_size, priv->stride))
+ goto err;
+ }
+
+ return 0;
+
+err:
+ mlx4_err(mdev, "Failed to allocate NIC resources\n");
+ return -ENOMEM;
+}
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
+
+ /* Unregister device - this will close the port if it was up */
+ if (priv->registered)
+ unregister_netdev(dev);
+
+ if (priv->allocated)
+ mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);
+
+ cancel_delayed_work(&priv->stats_task);
+ cancel_delayed_work(&priv->refill_task);
+ /* flush any pending task for this netdev */
+ flush_workqueue(mdev->workqueue);
+
+ /* Detach the netdev so that tasks do not attempt to access it */
+ mutex_lock(&mdev->state_lock);
+ mdev->pndev[priv->port] = NULL;
+ mutex_unlock(&mdev->state_lock);
+
+ mlx4_en_free_resources(priv);
+ free_netdev(dev);
+}
+
+static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mlx4_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
+ dev->mtu, new_mtu);
+
+ if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
+ mlx4_err(mdev, "Bad MTU size:%d.\n", new_mtu);
+ return -EINVAL;
+ }
+ dev->mtu = new_mtu;
+
+ if (netif_running(dev)) {
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up) {
+ /* NIC is probably restarting - let watchdog task reset
+ * the port */
+ mlx4_dbg(DRV, priv, "Change MTU called with card down!?\n");
+ } else {
+ mlx4_en_stop_port(dev);
+ mlx4_en_set_default_moderation(priv);
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ mlx4_err(mdev, "Failed restarting port:%d\n",
+ priv->port);
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+ }
+ mutex_unlock(&mdev->state_lock);
+ }
+ return 0;
+}
+
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct mlx4_en_port_profile *prof)
+{
+ struct net_device *dev;
+ struct mlx4_en_priv *priv;
+ int i;
+ int err;
+
+ dev = alloc_etherdev(sizeof(struct mlx4_en_priv));
+ if (dev == NULL) {
+ mlx4_err(mdev, "Net device allocation failed\n");
+ return -ENOMEM;
+ }
+
+ SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
+
+ /*
+ * Initialize driver private data
+ */
+
+ priv = netdev_priv(dev);
+ memset(priv, 0, sizeof(struct mlx4_en_priv));
+ priv->dev = dev;
+ priv->mdev = mdev;
+ priv->prof = prof;
+ priv->port = port;
+ priv->port_up = false;
+ priv->rx_csum = 1;
+ priv->flags = prof->flags;
+ priv->tx_ring_num = prof->tx_ring_num;
+ priv->rx_ring_num = prof->rx_ring_num;
+ priv->mc_list = NULL;
+ priv->mac_index = -1;
+ priv->msg_enable = MLX4_EN_MSG_LEVEL;
+ spin_lock_init(&priv->stats_lock);
+ INIT_WORK(&priv->mcast_task, mlx4_en_do_set_multicast);
+ INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
+ INIT_DELAYED_WORK(&priv->refill_task, mlx4_en_rx_refill);
+ INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
+ INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
+ INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
+
+ /* Query for default MAC and max MTU */
+ priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
+ priv->mac = mdev->dev->caps.def_mac[priv->port];
+ if (ILLEGAL_MAC(priv->mac)) {
+ mlx4_err(mdev, "Port: %d, invalid mac burned: 0x%llx, quiting\n",
+ priv->port, priv->mac);
+ err = -EINVAL;
+ goto out;
+ }
+
+ priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+ err = mlx4_en_alloc_resources(priv);
+ if (err)
+ goto out;
+
+ /* Populate Rx default RSS mappings */
+ mlx4_en_set_default_rss_map(priv, &priv->rss_map, priv->rx_ring_num *
+ RSS_FACTOR, priv->rx_ring_num);
+ /* Allocate page for receive rings */
+ err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
+ MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate page for rx qps\n");
+ goto out;
+ }
+ priv->allocated = 1;
+
+ /* Populate Tx priority mappings */
+ mlx4_en_set_prio_map(priv, priv->tx_prio_map, prof->tx_ring_num);
+
+ /*
+ * Initialize netdev entry points
+ */
+
+ dev->open = &mlx4_en_open;
+ dev->stop = &mlx4_en_close;
+ dev->hard_start_xmit = &mlx4_en_xmit;
+ dev->get_stats = &mlx4_en_get_stats;
+ dev->set_multicast_list = &mlx4_en_set_multicast;
+ dev->set_mac_address = &mlx4_en_set_mac;
+ dev->change_mtu = &mlx4_en_change_mtu;
+ dev->tx_timeout = &mlx4_en_tx_timeout;
+ dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
+ dev->vlan_rx_register = mlx4_en_vlan_rx_register;
+ dev->vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = mlx4_en_netpoll;
+#endif
+ SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+
+ /* Set default MAC */
+ dev->addr_len = ETH_ALEN;
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[ETH_ALEN - 1 - i] =
+ (u8) (priv->mac >> (8 * i));
+
+ /*
+ * Set driver features
+ */
+ dev->features |= NETIF_F_SG;
+ dev->features |= NETIF_F_HW_CSUM;
+ dev->features |= NETIF_F_HIGHDMA;
+ dev->features |= NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+ if (mdev->profile.num_lro)
+ dev->features |= NETIF_F_LRO;
+ if (mdev->LSO_support) {
+ dev->features |= NETIF_F_TSO;
+ dev->features |= NETIF_F_TSO6;
+ }
+
+ mdev->pndev[port] = dev;
+
+ netif_carrier_off(dev);
+ err = register_netdev(dev);
+ if (err) {
+ mlx4_err(mdev, "Netdev registration failed\n");
+ goto out;
+ }
+ priv->registered = 1;
+ queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
+ return 0;
+
+out:
+ mlx4_en_destroy_netdev(dev);
+ return err;
+}
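+
+/* Editor's sketch of the dev_addr expansion done in the function
+ * above: the 48-bit MAC held in a u64 is written out so that its
+ * most significant byte lands in dev_addr[0].  Standalone
+ * illustration, not driver code. */
+static void example_mac_u64_to_addr(u64 mac, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ addr[ETH_ALEN - 1 - i] = (u8) (mac >> (8 * i));
+}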
+
diff --git a/drivers/net/mlx4/en_params.c b/drivers/net/mlx4/en_params.c
new file mode 100644
index 000000000000..95706ee1c019
--- /dev/null
+++ b/drivers/net/mlx4/en_params.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "mlx4_en.h"
+#include "en_port.h"
+
+#define MLX4_EN_PARM_INT(X, def_val, desc) \
+ static unsigned int X = def_val;\
+ module_param(X , uint, 0444); \
+ MODULE_PARM_DESC(X, desc);
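+
+/* Editor's note: for instance, the rss_xor parameter declared below
+ * expands to approximately the following (0444 makes the value
+ * world-readable under /sys/module/.../parameters): */
+#if 0 /* expansion shown for illustration only */
+static unsigned int rss_xor = 0;
+module_param(rss_xor, uint, 0444);
+MODULE_PARM_DESC(rss_xor, "Use XOR hash function for RSS");
+#endif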
+
+
+/*
+ * Device scope module parameters
+ */
+
+
+/* Use an XOR rather than a Toeplitz hash function for RSS */
+MLX4_EN_PARM_INT(rss_xor, 0, "Use XOR hash function for RSS");
+
+/* RSS hash type mask - default to <saddr, daddr, sport, dport> */
+MLX4_EN_PARM_INT(rss_mask, 0xf, "RSS hash type bitmask");
+
+/* Number of LRO sessions per Rx ring (rounded up to a power of two) */
+MLX4_EN_PARM_INT(num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS,
+ "Number of LRO sessions per ring or disabled (0)");
+
+/* Priority pausing */
+MLX4_EN_PARM_INT(pptx, MLX4_EN_DEF_TX_PAUSE,
+ "Pause policy on TX: 0 never generate pause frames "
+ "1 generate pause frames according to RX buffer threshold");
+MLX4_EN_PARM_INT(pprx, MLX4_EN_DEF_RX_PAUSE,
+ "Pause policy on RX: 0 ignore received pause frames "
+ "1 respect received pause frames");
+MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
+ " Per priority bit mask");
+MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
+ " Per priority bit mask");
+
+/* Interrupt moderation tuning */
+MLX4_EN_PARM_INT(rx_moder_cnt, MLX4_EN_AUTO_CONF,
+ "Max coalesced descriptors for Rx interrupt moderation");
+MLX4_EN_PARM_INT(rx_moder_time, MLX4_EN_AUTO_CONF,
+ "Timeout following last packet for Rx interrupt moderation");
+MLX4_EN_PARM_INT(auto_moder, 1, "Enable dynamic interrupt moderation");
+
+MLX4_EN_PARM_INT(rx_ring_num1, 0, "Number or Rx rings for port 1 (0 = #cores)");
+MLX4_EN_PARM_INT(rx_ring_num2, 0, "Number or Rx rings for port 2 (0 = #cores)");
+
+MLX4_EN_PARM_INT(tx_ring_size1, MLX4_EN_AUTO_CONF, "Tx ring size for port 1");
+MLX4_EN_PARM_INT(tx_ring_size2, MLX4_EN_AUTO_CONF, "Tx ring size for port 2");
+MLX4_EN_PARM_INT(rx_ring_size1, MLX4_EN_AUTO_CONF, "Rx ring size for port 1");
+MLX4_EN_PARM_INT(rx_ring_size2, MLX4_EN_AUTO_CONF, "Rx ring size for port 2");
+
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
+{
+ struct mlx4_en_profile *params = &mdev->profile;
+ int i;
+
+ params->rx_moder_cnt = min_t(int, rx_moder_cnt, MLX4_EN_AUTO_CONF);
+ params->rx_moder_time = min_t(int, rx_moder_time, MLX4_EN_AUTO_CONF);
+ params->auto_moder = auto_moder;
+ params->rss_xor = (rss_xor != 0);
+ params->rss_mask = rss_mask & 0x1f;
+ params->num_lro = min_t(int, num_lro, MLX4_EN_MAX_LRO_DESCRIPTORS);
+ for (i = 1; i <= MLX4_MAX_PORTS; i++) {
+ params->prof[i].rx_pause = pprx;
+ params->prof[i].rx_ppp = pfcrx;
+ params->prof[i].tx_pause = pptx;
+ params->prof[i].tx_ppp = pfctx;
+ }
+ if (pfcrx || pfctx) {
+ params->prof[1].tx_ring_num = MLX4_EN_TX_RING_NUM;
+ params->prof[2].tx_ring_num = MLX4_EN_TX_RING_NUM;
+ } else {
+ params->prof[1].tx_ring_num = 1;
+ params->prof[2].tx_ring_num = 1;
+ }
+ params->prof[1].rx_ring_num = min_t(int, rx_ring_num1, MAX_RX_RINGS);
+ params->prof[2].rx_ring_num = min_t(int, rx_ring_num2, MAX_RX_RINGS);
+
+ if (tx_ring_size1 == MLX4_EN_AUTO_CONF)
+ tx_ring_size1 = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[1].tx_ring_size =
+ (tx_ring_size1 < MLX4_EN_MIN_TX_SIZE) ?
+ MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size1);
+
+ if (tx_ring_size2 == MLX4_EN_AUTO_CONF)
+ tx_ring_size2 = MLX4_EN_DEF_TX_RING_SIZE;
+ params->prof[2].tx_ring_size =
+ (tx_ring_size2 < MLX4_EN_MIN_TX_SIZE) ?
+ MLX4_EN_MIN_TX_SIZE : roundup_pow_of_two(tx_ring_size2);
+
+ if (rx_ring_size1 == MLX4_EN_AUTO_CONF)
+ rx_ring_size1 = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[1].rx_ring_size =
+ (rx_ring_size1 < MLX4_EN_MIN_RX_SIZE) ?
+ MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size1);
+
+ if (rx_ring_size2 == MLX4_EN_AUTO_CONF)
+ rx_ring_size2 = MLX4_EN_DEF_RX_RING_SIZE;
+ params->prof[2].rx_ring_size =
+ (rx_ring_size2 < MLX4_EN_MIN_RX_SIZE) ?
+ MLX4_EN_MIN_RX_SIZE : roundup_pow_of_two(rx_ring_size2);
+ return 0;
+}
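+
+/* Editor's sketch: the four ring-size blocks above all apply the same
+ * policy - substitute the default for AUTO_CONF, then clamp to the
+ * minimum or round up to a power of two.  A hedged helper form (the
+ * driver open-codes this per port and direction): */
+static inline u32 example_ring_size(u32 req, u32 def, u32 min)
+{
+ if (req == MLX4_EN_AUTO_CONF)
+ req = def;
+ return (req < min) ? min : roundup_pow_of_two(req);
+}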
+
+
+/*
+ * Ethtool support
+ */
+
+static void mlx4_en_update_lro_stats(struct mlx4_en_priv *priv)
+{
+ int i;
+
+ priv->port_stats.lro_aggregated = 0;
+ priv->port_stats.lro_flushed = 0;
+ priv->port_stats.lro_no_desc = 0;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->port_stats.lro_aggregated += priv->rx_ring[i].lro.stats.aggregated;
+ priv->port_stats.lro_flushed += priv->rx_ring[i].lro.stats.flushed;
+ priv->port_stats.lro_no_desc += priv->rx_ring[i].lro.stats.no_desc;
+ }
+}
+
+static void
+mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ sprintf(drvinfo->driver, DRV_NAME " (%s)", mdev->dev->board_id);
+ strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
+ sprintf(drvinfo->fw_version, "%d.%d.%d",
+ (u16) (mdev->dev->caps.fw_ver >> 32),
+ (u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
+ (u16) (mdev->dev->caps.fw_ver & 0xffff));
+ strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ drvinfo->n_stats = 0;
+ drvinfo->regdump_len = 0;
+ drvinfo->eedump_len = 0;
+}
+
+static u32 mlx4_en_get_tso(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+
+static int mlx4_en_set_tso(struct net_device *dev, u32 data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (data) {
+ if (!priv->mdev->LSO_support)
+ return -EPERM;
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ } else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+ return 0;
+}
+
+static u32 mlx4_en_get_rx_csum(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ return priv->rx_csum;
+}
+
+static int mlx4_en_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ priv->rx_csum = (data != 0);
+ return 0;
+}
+
+static const char main_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+ "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+ "rx_length_errors", "rx_over_errors", "rx_crc_errors",
+ "rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+ "tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+ "tx_heartbeat_errors", "tx_window_errors",
+
+ /* port statistics */
+ "lro_aggregated", "lro_flushed", "lro_no_desc", "tso_packets",
+ "queue_stopped", "wake_queue", "tx_timeout", "rx_alloc_failed",
+ "rx_csum_good", "rx_csum_none", "tx_chksum_offload",
+
+ /* packet statistics */
+ "broadcast", "rx_prio_0", "rx_prio_1", "rx_prio_2", "rx_prio_3",
+ "rx_prio_4", "rx_prio_5", "rx_prio_6", "rx_prio_7", "tx_prio_0",
+ "tx_prio_1", "tx_prio_2", "tx_prio_3", "tx_prio_4", "tx_prio_5",
+ "tx_prio_6", "tx_prio_7",
+};
+#define NUM_MAIN_STATS 21
+#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + NUM_PERF_STATS)
+
+static u32 mlx4_en_get_msglevel(struct net_device *dev)
+{
+ return ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable;
+}
+
+static void mlx4_en_set_msglevel(struct net_device *dev, u32 val)
+{
+ ((struct mlx4_en_priv *) netdev_priv(dev))->msg_enable = val;
+}
+
+static void mlx4_en_get_wol(struct net_device *netdev,
+ struct ethtool_wolinfo *wol)
+{
+ wol->supported = 0;
+ wol->wolopts = 0;
+}
+
+static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return NUM_ALL_STATS + (priv->tx_ring_num + priv->rx_ring_num) * 2;
+}
+
+static void mlx4_en_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, uint64_t *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int index = 0;
+ int i;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ mlx4_en_update_lro_stats(priv);
+
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->stats)[i];
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->port_stats)[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ data[index++] = priv->tx_ring[i].packets;
+ data[index++] = priv->tx_ring[i].bytes;
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ data[index++] = priv->rx_ring[i].packets;
+ data[index++] = priv->rx_ring[i].bytes;
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ data[index++] = ((unsigned long *) &priv->pkstats)[i];
+ spin_unlock_bh(&priv->stats_lock);
+
+}
+
+static void mlx4_en_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int index = 0;
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ /* Add main counters */
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i + NUM_MAIN_STATS]);
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "tx%d_bytes", i);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_packets", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_bytes", i);
+ }
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
+}
+
+static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = AUTONEG_DISABLE;
+ cmd->supported = SUPPORTED_10000baseT_Full;
+ cmd->advertising = ADVERTISED_10000baseT_Full;
+ if (netif_carrier_ok(dev)) {
+ cmd->speed = SPEED_10000;
+ cmd->duplex = DUPLEX_FULL;
+ } else {
+ cmd->speed = -1;
+ cmd->duplex = -1;
+ }
+ return 0;
+}
+
+static int mlx4_en_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ if ((cmd->autoneg == AUTONEG_ENABLE) ||
+ (cmd->speed != SPEED_10000) || (cmd->duplex != DUPLEX_FULL))
+ return -EINVAL;
+
+ /* Nothing to change */
+ return 0;
+}
+
+static int mlx4_en_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ coal->tx_coalesce_usecs = 0;
+ coal->tx_max_coalesced_frames = 0;
+ coal->rx_coalesce_usecs = priv->rx_usecs;
+ coal->rx_max_coalesced_frames = priv->rx_frames;
+
+ coal->pkt_rate_low = priv->pkt_rate_low;
+ coal->rx_coalesce_usecs_low = priv->rx_usecs_low;
+ coal->pkt_rate_high = priv->pkt_rate_high;
+ coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
+ coal->rate_sample_interval = priv->sample_interval;
+ coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+ return 0;
+}
+
+static int mlx4_en_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int err, i;
+
+ priv->rx_frames = (coal->rx_max_coalesced_frames ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TARGET /
+ priv->dev->mtu + 1 :
+ coal->rx_max_coalesced_frames;
+ priv->rx_usecs = (coal->rx_coalesce_usecs ==
+ MLX4_EN_AUTO_CONF) ?
+ MLX4_EN_RX_COAL_TIME :
+ coal->rx_coalesce_usecs;
+
+ /* Set adaptive coalescing params */
+ priv->pkt_rate_low = coal->pkt_rate_low;
+ priv->rx_usecs_low = coal->rx_coalesce_usecs_low;
+ priv->pkt_rate_high = coal->pkt_rate_high;
+ priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
+ priv->sample_interval = coal->rate_sample_interval;
+ priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+ priv->last_moder_time = MLX4_EN_AUTO_CONF;
+ if (priv->adaptive_rx_coal)
+ return 0;
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ priv->rx_cq[i].moder_cnt = priv->rx_frames;
+ priv->rx_cq[i].moder_time = priv->rx_usecs;
+ err = mlx4_en_set_cq_moder(priv, &priv->rx_cq[i]);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlx4_en_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ priv->prof->tx_pause = pause->tx_pause != 0;
+ priv->prof->rx_pause = pause->rx_pause != 0;
+ err = mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp);
+ if (err)
+ mlx4_err(mdev, "Failed setting pause params to\n");
+
+ return err;
+}
+
+static void mlx4_en_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ pause->tx_pause = priv->prof->tx_pause;
+ pause->rx_pause = priv->prof->rx_pause;
+}
+
+static void mlx4_en_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ memset(param, 0, sizeof(*param));
+ param->rx_max_pending = mdev->dev->caps.max_rq_sg;
+ param->tx_max_pending = mdev->dev->caps.max_sq_sg;
+ param->rx_pending = mdev->profile.prof[priv->port].rx_ring_size;
+ param->tx_pending = mdev->profile.prof[priv->port].tx_ring_size;
+}
+
+const struct ethtool_ops mlx4_en_ethtool_ops = {
+ .get_drvinfo = mlx4_en_get_drvinfo,
+ .get_settings = mlx4_en_get_settings,
+ .set_settings = mlx4_en_set_settings,
+#ifdef NETIF_F_TSO
+ .get_tso = mlx4_en_get_tso,
+ .set_tso = mlx4_en_set_tso,
+#endif
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_link = ethtool_op_get_link,
+ .get_rx_csum = mlx4_en_get_rx_csum,
+ .set_rx_csum = mlx4_en_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_strings = mlx4_en_get_strings,
+ .get_sset_count = mlx4_en_get_sset_count,
+ .get_ethtool_stats = mlx4_en_get_ethtool_stats,
+ .get_wol = mlx4_en_get_wol,
+ .get_msglevel = mlx4_en_get_msglevel,
+ .set_msglevel = mlx4_en_set_msglevel,
+ .get_coalesce = mlx4_en_get_coalesce,
+ .set_coalesce = mlx4_en_set_coalesce,
+ .get_pauseparam = mlx4_en_get_pauseparam,
+ .set_pauseparam = mlx4_en_set_pauseparam,
+ .get_ringparam = mlx4_en_get_ringparam,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = ethtool_op_set_flags,
+};
+
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
new file mode 100644
index 000000000000..c5a4c0389752
--- /dev/null
+++ b/drivers/net/mlx4/en_port.c
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+
+#include <linux/if_vlan.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/cmd.h>
+
+#include "en_port.h"
+#include "mlx4_en.h"
+
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
+}
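+
+/* Editor's note on the in-parameter packed above: the 48-bit MAC sits
+ * in the low bits of the 64-bit argument, and bit 63, when set,
+ * requests a filter flush before the entry is written.  Sketch: */
+static inline u64 example_pack_mcast_fltr(u64 mac, u64 clear)
+{
+ return mac | (clear << 63); /* clear must be 0 or 1 */
+}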
+
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_vlan_fltr_mbox *filter;
+ int i;
+ int j;
+ int index = 0;
+ u32 entry;
+ int err = 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ filter = mailbox->buf;
+ if (grp) {
+ memset(filter, 0, sizeof *filter);
+ for (i = VLAN_FLTR_SIZE - 1; i >= 0; i--) {
+ entry = 0;
+ for (j = 0; j < 32; j++)
+ if (vlan_group_get_device(grp, index++))
+ entry |= 1 << j;
+ filter->entry[i] = cpu_to_be32(entry);
+ }
+ } else {
+ /* When no VLANs are configured, block all VLANs */
+ memset(filter, 0, sizeof(*filter));
+ }
+ err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_VLAN_FLTR,
+ MLX4_CMD_TIME_CLASS_B);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
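+
+/* Editor's sketch of the bitmap geometry implied by the loop above:
+ * 128 32-bit words cover all 4096 VLAN ids, and because the entry
+ * index walks down while ids are consumed in ascending order, id 0
+ * lands in entry[VLAN_FLTR_SIZE - 1], bit 0. */
+static inline void example_vlan_slot(int vid, int *entry, int *bit)
+{
+ *entry = (VLAN_FLTR_SIZE - 1) - (vid / 32);
+ *bit = vid & 31;
+}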
+
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
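+
+/* Editor's note: the (pptx * (!pfctx)) << 7 expressions above make
+ * global pause and per-priority flow control mutually exclusive -
+ * global pause is armed only when the PFC mask is zero, with the
+ * enable flag living in bit 7 of the context byte.  Equivalently
+ * (assuming the pause policy flags are 0 or 1): */
+static inline u8 example_encode_pause(u8 pause_en, u8 pfc_mask)
+{
+ return pfc_mask ? 0 : (pause_en ? 0x80 : 0);
+}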
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->mcast = cpu_to_be32(1 << SET_PORT_PROMISC_SHIFT | base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
+{
+ struct mlx4_en_stat_out_mbox *mlx4_en_stats;
+ struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
+ struct net_device_stats *stats = &priv->stats;
+ struct mlx4_cmd_mailbox *mailbox;
+ u64 in_mod = reset << 8 | port;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
+ err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ if (err)
+ goto out;
+
+ mlx4_en_stats = mailbox->buf;
+
+ spin_lock_bh(&priv->stats_lock);
+
+ stats->rx_packets = be32_to_cpu(mlx4_en_stats->RTOTFRMS) -
+ be32_to_cpu(mlx4_en_stats->RDROP);
+ stats->tx_packets = be64_to_cpu(mlx4_en_stats->TTOT_prio_0) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_1) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_2) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_3) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_4) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_5) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_6) +
+ be64_to_cpu(mlx4_en_stats->TTOT_prio_7) +
+ be64_to_cpu(mlx4_en_stats->TTOT_novlan) +
+ be64_to_cpu(mlx4_en_stats->TTOT_loopbk);
+ stats->rx_bytes = be64_to_cpu(mlx4_en_stats->ROCT_prio_0) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_1) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_2) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_3) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_4) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_5) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_6) +
+ be64_to_cpu(mlx4_en_stats->ROCT_prio_7) +
+ be64_to_cpu(mlx4_en_stats->ROCT_novlan);
+
+ stats->tx_bytes = be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_0) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_1) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_2) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_3) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_4) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_5) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_6) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_prio_7) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_novlan) +
+ be64_to_cpu(mlx4_en_stats->TTTLOCT_loopbk);
+
+ stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
+ be32_to_cpu(mlx4_en_stats->RdropLength) +
+ be32_to_cpu(mlx4_en_stats->RJBBR) +
+ be32_to_cpu(mlx4_en_stats->RCRC) +
+ be32_to_cpu(mlx4_en_stats->RRUNT);
+ stats->tx_errors = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->multicast = be64_to_cpu(mlx4_en_stats->MCAST_prio_0) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_1) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_2) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_3) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_4) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_5) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_6) +
+ be64_to_cpu(mlx4_en_stats->MCAST_prio_7) +
+ be64_to_cpu(mlx4_en_stats->MCAST_novlan);
+ stats->collisions = 0;
+ stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
+ stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
+ stats->rx_frame_errors = 0;
+ stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->tx_aborted_errors = 0;
+ stats->tx_carrier_errors = 0;
+ stats->tx_fifo_errors = 0;
+ stats->tx_heartbeat_errors = 0;
+ stats->tx_window_errors = 0;
+
+ priv->pkstats.broadcast =
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_0) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_1) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_2) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_3) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_4) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_5) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_6) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_prio_7) +
+ be64_to_cpu(mlx4_en_stats->RBCAST_novlan);
+ priv->pkstats.rx_prio[0] = be64_to_cpu(mlx4_en_stats->RTOT_prio_0);
+ priv->pkstats.rx_prio[1] = be64_to_cpu(mlx4_en_stats->RTOT_prio_1);
+ priv->pkstats.rx_prio[2] = be64_to_cpu(mlx4_en_stats->RTOT_prio_2);
+ priv->pkstats.rx_prio[3] = be64_to_cpu(mlx4_en_stats->RTOT_prio_3);
+ priv->pkstats.rx_prio[4] = be64_to_cpu(mlx4_en_stats->RTOT_prio_4);
+ priv->pkstats.rx_prio[5] = be64_to_cpu(mlx4_en_stats->RTOT_prio_5);
+ priv->pkstats.rx_prio[6] = be64_to_cpu(mlx4_en_stats->RTOT_prio_6);
+ priv->pkstats.rx_prio[7] = be64_to_cpu(mlx4_en_stats->RTOT_prio_7);
+ priv->pkstats.tx_prio[0] = be64_to_cpu(mlx4_en_stats->TTOT_prio_0);
+ priv->pkstats.tx_prio[1] = be64_to_cpu(mlx4_en_stats->TTOT_prio_1);
+ priv->pkstats.tx_prio[2] = be64_to_cpu(mlx4_en_stats->TTOT_prio_2);
+ priv->pkstats.tx_prio[3] = be64_to_cpu(mlx4_en_stats->TTOT_prio_3);
+ priv->pkstats.tx_prio[4] = be64_to_cpu(mlx4_en_stats->TTOT_prio_4);
+ priv->pkstats.tx_prio[5] = be64_to_cpu(mlx4_en_stats->TTOT_prio_5);
+ priv->pkstats.tx_prio[6] = be64_to_cpu(mlx4_en_stats->TTOT_prio_6);
+ priv->pkstats.tx_prio[7] = be64_to_cpu(mlx4_en_stats->TTOT_prio_7);
+ spin_unlock_bh(&priv->stats_lock);
+
+out:
+ mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+ return err;
+}
+
diff --git a/drivers/net/mlx4/en_port.h b/drivers/net/mlx4/en_port.h
new file mode 100644
index 000000000000..e6477f12beb5
--- /dev/null
+++ b/drivers/net/mlx4/en_port.h
@@ -0,0 +1,570 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_PORT_H_
+#define _MLX4_EN_PORT_H_
+
+
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+
+enum {
+ MLX4_CMD_SET_VLAN_FLTR = 0x47,
+ MLX4_CMD_SET_MCAST_FLTR = 0x48,
+ MLX4_CMD_DUMP_ETH_STATS = 0x49,
+};
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ __be32 flags;
+ u8 reserved[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved2[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
+#define VLAN_FLTR_SIZE 128
+struct mlx4_set_vlan_fltr_mbox {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+
+struct mlx4_en_stat_out_mbox {
+ /* Received frames with a length of 64 octets */
+ __be64 R64_prio_0;
+ __be64 R64_prio_1;
+ __be64 R64_prio_2;
+ __be64 R64_prio_3;
+ __be64 R64_prio_4;
+ __be64 R64_prio_5;
+ __be64 R64_prio_6;
+ __be64 R64_prio_7;
+ __be64 R64_novlan;
+ /* Received frames with a length of 127 octets */
+ __be64 R127_prio_0;
+ __be64 R127_prio_1;
+ __be64 R127_prio_2;
+ __be64 R127_prio_3;
+ __be64 R127_prio_4;
+ __be64 R127_prio_5;
+ __be64 R127_prio_6;
+ __be64 R127_prio_7;
+ __be64 R127_novlan;
+ /* Received frames with a length of 255 octets */
+ __be64 R255_prio_0;
+ __be64 R255_prio_1;
+ __be64 R255_prio_2;
+ __be64 R255_prio_3;
+ __be64 R255_prio_4;
+ __be64 R255_prio_5;
+ __be64 R255_prio_6;
+ __be64 R255_prio_7;
+ __be64 R255_novlan;
+ /* Received frames with a length of 511 octets */
+ __be64 R511_prio_0;
+ __be64 R511_prio_1;
+ __be64 R511_prio_2;
+ __be64 R511_prio_3;
+ __be64 R511_prio_4;
+ __be64 R511_prio_5;
+ __be64 R511_prio_6;
+ __be64 R511_prio_7;
+ __be64 R511_novlan;
+ /* Received frames with a length of 1023 octets */
+ __be64 R1023_prio_0;
+ __be64 R1023_prio_1;
+ __be64 R1023_prio_2;
+ __be64 R1023_prio_3;
+ __be64 R1023_prio_4;
+ __be64 R1023_prio_5;
+ __be64 R1023_prio_6;
+ __be64 R1023_prio_7;
+ __be64 R1023_novlan;
+ /* Received frames with a length of 1518 octets */
+ __be64 R1518_prio_0;
+ __be64 R1518_prio_1;
+ __be64 R1518_prio_2;
+ __be64 R1518_prio_3;
+ __be64 R1518_prio_4;
+ __be64 R1518_prio_5;
+ __be64 R1518_prio_6;
+ __be64 R1518_prio_7;
+ __be64 R1518_novlan;
+ /* Received frames with a length of 1522 octets */
+ __be64 R1522_prio_0;
+ __be64 R1522_prio_1;
+ __be64 R1522_prio_2;
+ __be64 R1522_prio_3;
+ __be64 R1522_prio_4;
+ __be64 R1522_prio_5;
+ __be64 R1522_prio_6;
+ __be64 R1522_prio_7;
+ __be64 R1522_novlan;
+ /* Received frames with a length of 1548 octets */
+ __be64 R1548_prio_0;
+ __be64 R1548_prio_1;
+ __be64 R1548_prio_2;
+ __be64 R1548_prio_3;
+ __be64 R1548_prio_4;
+ __be64 R1548_prio_5;
+ __be64 R1548_prio_6;
+ __be64 R1548_prio_7;
+ __be64 R1548_novlan;
+ /* Received frames with a length of 1548 < octets < MTU */
+ __be64 R2MTU_prio_0;
+ __be64 R2MTU_prio_1;
+ __be64 R2MTU_prio_2;
+ __be64 R2MTU_prio_3;
+ __be64 R2MTU_prio_4;
+ __be64 R2MTU_prio_5;
+ __be64 R2MTU_prio_6;
+ __be64 R2MTU_prio_7;
+ __be64 R2MTU_novlan;
+ /* Received frames with a length greater than MTU octets and a good CRC */
+ __be64 RGIANT_prio_0;
+ __be64 RGIANT_prio_1;
+ __be64 RGIANT_prio_2;
+ __be64 RGIANT_prio_3;
+ __be64 RGIANT_prio_4;
+ __be64 RGIANT_prio_5;
+ __be64 RGIANT_prio_6;
+ __be64 RGIANT_prio_7;
+ __be64 RGIANT_novlan;
+ /* Received broadcast frames with good CRC */
+ __be64 RBCAST_prio_0;
+ __be64 RBCAST_prio_1;
+ __be64 RBCAST_prio_2;
+ __be64 RBCAST_prio_3;
+ __be64 RBCAST_prio_4;
+ __be64 RBCAST_prio_5;
+ __be64 RBCAST_prio_6;
+ __be64 RBCAST_prio_7;
+ __be64 RBCAST_novlan;
+ /* Received multicast frames with good CRC */
+ __be64 MCAST_prio_0;
+ __be64 MCAST_prio_1;
+ __be64 MCAST_prio_2;
+ __be64 MCAST_prio_3;
+ __be64 MCAST_prio_4;
+ __be64 MCAST_prio_5;
+ __be64 MCAST_prio_6;
+ __be64 MCAST_prio_7;
+ __be64 MCAST_novlan;
+ /* Received unicast not short or GIANT frames with good CRC */
+ __be64 RTOTG_prio_0;
+ __be64 RTOTG_prio_1;
+ __be64 RTOTG_prio_2;
+ __be64 RTOTG_prio_3;
+ __be64 RTOTG_prio_4;
+ __be64 RTOTG_prio_5;
+ __be64 RTOTG_prio_6;
+ __be64 RTOTG_prio_7;
+ __be64 RTOTG_novlan;
+
+ /* Count of total octets of received frames, includes framing characters */
+ __be64 RTTLOCT_prio_0;
+ /* Count of total octets of received frames, not including framing
+ characters */
+ __be64 RTTLOCT_NOFRM_prio_0;
+ /* Count of Total number of octets received
+ (only for frames without errors) */
+ __be64 ROCT_prio_0;
+
+ __be64 RTTLOCT_prio_1;
+ __be64 RTTLOCT_NOFRM_prio_1;
+ __be64 ROCT_prio_1;
+
+ __be64 RTTLOCT_prio_2;
+ __be64 RTTLOCT_NOFRM_prio_2;
+ __be64 ROCT_prio_2;
+
+ __be64 RTTLOCT_prio_3;
+ __be64 RTTLOCT_NOFRM_prio_3;
+ __be64 ROCT_prio_3;
+
+ __be64 RTTLOCT_prio_4;
+ __be64 RTTLOCT_NOFRM_prio_4;
+ __be64 ROCT_prio_4;
+
+ __be64 RTTLOCT_prio_5;
+ __be64 RTTLOCT_NOFRM_prio_5;
+ __be64 ROCT_prio_5;
+
+ __be64 RTTLOCT_prio_6;
+ __be64 RTTLOCT_NOFRM_prio_6;
+ __be64 ROCT_prio_6;
+
+ __be64 RTTLOCT_prio_7;
+ __be64 RTTLOCT_NOFRM_prio_7;
+ __be64 ROCT_prio_7;
+
+ __be64 RTTLOCT_novlan;
+ __be64 RTTLOCT_NOFRM_novlan;
+ __be64 ROCT_novlan;
+
+ /* Count of Total received frames including bad frames */
+ __be64 RTOT_prio_0;
+ /* Count of Total number of received frames with 802.1Q encapsulation */
+ __be64 R1Q_prio_0;
+ __be64 reserved1;
+
+ __be64 RTOT_prio_1;
+ __be64 R1Q_prio_1;
+ __be64 reserved2;
+
+ __be64 RTOT_prio_2;
+ __be64 R1Q_prio_2;
+ __be64 reserved3;
+
+ __be64 RTOT_prio_3;
+ __be64 R1Q_prio_3;
+ __be64 reserved4;
+
+ __be64 RTOT_prio_4;
+ __be64 R1Q_prio_4;
+ __be64 reserved5;
+
+ __be64 RTOT_prio_5;
+ __be64 R1Q_prio_5;
+ __be64 reserved6;
+
+ __be64 RTOT_prio_6;
+ __be64 R1Q_prio_6;
+ __be64 reserved7;
+
+ __be64 RTOT_prio_7;
+ __be64 R1Q_prio_7;
+ __be64 reserved8;
+
+ __be64 RTOT_novlan;
+ __be64 R1Q_novlan;
+ __be64 reserved9;
+
+ /* Total number of Successfully Received Control Frames */
+ __be64 RCNTL;
+ __be64 reserved10;
+ __be64 reserved11;
+ __be64 reserved12;
+ /* Count of received frames with a length/type field value between 46
+ (42 for VLAN-tagged frames) and 1500 (also 1500 for VLAN-tagged frames),
+ inclusive */
+ __be64 RInRangeLengthErr;
+ /* Count of received frames with length/type field between 1501 and 1535
+ decimal, inclusive */
+ __be64 ROutRangeLengthErr;
+ /* Count of received frames that are longer than max allowed size for
+ 802.3 frames (1518/1522) */
+ __be64 RFrmTooLong;
+ /* Count frames received with PCS error */
+ __be64 PCS;
+
+ /* Transmit frames with a length of 64 octets */
+ __be64 T64_prio_0;
+ __be64 T64_prio_1;
+ __be64 T64_prio_2;
+ __be64 T64_prio_3;
+ __be64 T64_prio_4;
+ __be64 T64_prio_5;
+ __be64 T64_prio_6;
+ __be64 T64_prio_7;
+ __be64 T64_novlan;
+ __be64 T64_loopbk;
+ /* Transmit frames with a length of 65 to 127 octets. */
+ __be64 T127_prio_0;
+ __be64 T127_prio_1;
+ __be64 T127_prio_2;
+ __be64 T127_prio_3;
+ __be64 T127_prio_4;
+ __be64 T127_prio_5;
+ __be64 T127_prio_6;
+ __be64 T127_prio_7;
+ __be64 T127_novlan;
+ __be64 T127_loopbk;
+ /* Transmit frames with a length of 128 to 255 octets */
+ __be64 T255_prio_0;
+ __be64 T255_prio_1;
+ __be64 T255_prio_2;
+ __be64 T255_prio_3;
+ __be64 T255_prio_4;
+ __be64 T255_prio_5;
+ __be64 T255_prio_6;
+ __be64 T255_prio_7;
+ __be64 T255_novlan;
+ __be64 T255_loopbk;
+ /* Transmit frames with a length of 256 to 511 octets */
+ __be64 T511_prio_0;
+ __be64 T511_prio_1;
+ __be64 T511_prio_2;
+ __be64 T511_prio_3;
+ __be64 T511_prio_4;
+ __be64 T511_prio_5;
+ __be64 T511_prio_6;
+ __be64 T511_prio_7;
+ __be64 T511_novlan;
+ __be64 T511_loopbk;
+ /* Transmit frames with a length of 512 to 1023 octets */
+ __be64 T1023_prio_0;
+ __be64 T1023_prio_1;
+ __be64 T1023_prio_2;
+ __be64 T1023_prio_3;
+ __be64 T1023_prio_4;
+ __be64 T1023_prio_5;
+ __be64 T1023_prio_6;
+ __be64 T1023_prio_7;
+ __be64 T1023_novlan;
+ __be64 T1023_loopbk;
+ /* Transmit frames with a length of 1024 to 1518 octets */
+ __be64 T1518_prio_0;
+ __be64 T1518_prio_1;
+ __be64 T1518_prio_2;
+ __be64 T1518_prio_3;
+ __be64 T1518_prio_4;
+ __be64 T1518_prio_5;
+ __be64 T1518_prio_6;
+ __be64 T1518_prio_7;
+ __be64 T1518_novlan;
+ __be64 T1518_loopbk;
+ /* Counts transmit frames with a length of 1519 to 1522 bytes */
+ __be64 T1522_prio_0;
+ __be64 T1522_prio_1;
+ __be64 T1522_prio_2;
+ __be64 T1522_prio_3;
+ __be64 T1522_prio_4;
+ __be64 T1522_prio_5;
+ __be64 T1522_prio_6;
+ __be64 T1522_prio_7;
+ __be64 T1522_novlan;
+ __be64 T1522_loopbk;
+ /* Transmit frames with a length of 1523 to 1548 octets */
+ __be64 T1548_prio_0;
+ __be64 T1548_prio_1;
+ __be64 T1548_prio_2;
+ __be64 T1548_prio_3;
+ __be64 T1548_prio_4;
+ __be64 T1548_prio_5;
+ __be64 T1548_prio_6;
+ __be64 T1548_prio_7;
+ __be64 T1548_novlan;
+ __be64 T1548_loopbk;
+ /* Counts transmit frames with a length of 1549 to MTU bytes */
+ __be64 T2MTU_prio_0;
+ __be64 T2MTU_prio_1;
+ __be64 T2MTU_prio_2;
+ __be64 T2MTU_prio_3;
+ __be64 T2MTU_prio_4;
+ __be64 T2MTU_prio_5;
+ __be64 T2MTU_prio_6;
+ __be64 T2MTU_prio_7;
+ __be64 T2MTU_novlan;
+ __be64 T2MTU_loopbk;
+ /* Transmit frames with a length greater than MTU octets and a good CRC. */
+ __be64 TGIANT_prio_0;
+ __be64 TGIANT_prio_1;
+ __be64 TGIANT_prio_2;
+ __be64 TGIANT_prio_3;
+ __be64 TGIANT_prio_4;
+ __be64 TGIANT_prio_5;
+ __be64 TGIANT_prio_6;
+ __be64 TGIANT_prio_7;
+ __be64 TGIANT_novlan;
+ __be64 TGIANT_loopbk;
+ /* Transmit broadcast frames with a good CRC */
+ __be64 TBCAST_prio_0;
+ __be64 TBCAST_prio_1;
+ __be64 TBCAST_prio_2;
+ __be64 TBCAST_prio_3;
+ __be64 TBCAST_prio_4;
+ __be64 TBCAST_prio_5;
+ __be64 TBCAST_prio_6;
+ __be64 TBCAST_prio_7;
+ __be64 TBCAST_novlan;
+ __be64 TBCAST_loopbk;
+ /* Transmit multicast frames with a good CRC */
+ __be64 TMCAST_prio_0;
+ __be64 TMCAST_prio_1;
+ __be64 TMCAST_prio_2;
+ __be64 TMCAST_prio_3;
+ __be64 TMCAST_prio_4;
+ __be64 TMCAST_prio_5;
+ __be64 TMCAST_prio_6;
+ __be64 TMCAST_prio_7;
+ __be64 TMCAST_novlan;
+ __be64 TMCAST_loopbk;
+ /* Transmit good frames that are neither broadcast nor multicast */
+ __be64 TTOTG_prio_0;
+ __be64 TTOTG_prio_1;
+ __be64 TTOTG_prio_2;
+ __be64 TTOTG_prio_3;
+ __be64 TTOTG_prio_4;
+ __be64 TTOTG_prio_5;
+ __be64 TTOTG_prio_6;
+ __be64 TTOTG_prio_7;
+ __be64 TTOTG_novlan;
+ __be64 TTOTG_loopbk;
+
+ /* total octets of transmitted frames, including framing characters */
+ __be64 TTTLOCT_prio_0;
+ /* total octets of transmitted frames, not including framing characters */
+ __be64 TTTLOCT_NOFRM_prio_0;
+ /* ifOutOctets */
+ __be64 TOCT_prio_0;
+
+ __be64 TTTLOCT_prio_1;
+ __be64 TTTLOCT_NOFRM_prio_1;
+ __be64 TOCT_prio_1;
+
+ __be64 TTTLOCT_prio_2;
+ __be64 TTTLOCT_NOFRM_prio_2;
+ __be64 TOCT_prio_2;
+
+ __be64 TTTLOCT_prio_3;
+ __be64 TTTLOCT_NOFRM_prio_3;
+ __be64 TOCT_prio_3;
+
+ __be64 TTTLOCT_prio_4;
+ __be64 TTTLOCT_NOFRM_prio_4;
+ __be64 TOCT_prio_4;
+
+ __be64 TTTLOCT_prio_5;
+ __be64 TTTLOCT_NOFRM_prio_5;
+ __be64 TOCT_prio_5;
+
+ __be64 TTTLOCT_prio_6;
+ __be64 TTTLOCT_NOFRM_prio_6;
+ __be64 TOCT_prio_6;
+
+ __be64 TTTLOCT_prio_7;
+ __be64 TTTLOCT_NOFRM_prio_7;
+ __be64 TOCT_prio_7;
+
+ __be64 TTTLOCT_novlan;
+ __be64 TTTLOCT_NOFRM_novlan;
+ __be64 TOCT_novlan;
+
+ __be64 TTTLOCT_loopbk;
+ __be64 TTTLOCT_NOFRM_loopbk;
+ __be64 TOCT_loopbk;
+
+ /* Total frames transmitted with a good CRC that are not aborted */
+ __be64 TTOT_prio_0;
+ /* Total number of frames transmitted with 802.1Q encapsulation */
+ __be64 T1Q_prio_0;
+ __be64 reserved13;
+
+ __be64 TTOT_prio_1;
+ __be64 T1Q_prio_1;
+ __be64 reserved14;
+
+ __be64 TTOT_prio_2;
+ __be64 T1Q_prio_2;
+ __be64 reserved15;
+
+ __be64 TTOT_prio_3;
+ __be64 T1Q_prio_3;
+ __be64 reserved16;
+
+ __be64 TTOT_prio_4;
+ __be64 T1Q_prio_4;
+ __be64 reserved17;
+
+ __be64 TTOT_prio_5;
+ __be64 T1Q_prio_5;
+ __be64 reserved18;
+
+ __be64 TTOT_prio_6;
+ __be64 T1Q_prio_6;
+ __be64 reserved19;
+
+ __be64 TTOT_prio_7;
+ __be64 T1Q_prio_7;
+ __be64 reserved20;
+
+ __be64 TTOT_novlan;
+ __be64 T1Q_novlan;
+ __be64 reserved21;
+
+ __be64 TTOT_loopbk;
+ __be64 T1Q_loopbk;
+ __be64 reserved22;
+
+ /* Received frames with a length greater than MTU octets and a bad CRC */
+ __be32 RJBBR;
+ /* Received frames with a bad CRC that are not runts, jabbers,
+ or alignment errors */
+ __be32 RCRC;
+ /* Received frames with an SFD, a length of less than 64 octets,
+ and a bad CRC */
+ __be32 RRUNT;
+ /* Received frames with a length less than 64 octets and a good CRC */
+ __be32 RSHORT;
+ /* Total Number of Received Packets Dropped */
+ __be32 RDROP;
+ /* Drop due to overflow */
+ __be32 RdropOvflw;
+ /* Drop due to length error */
+ __be32 RdropLength;
+ /* Total of good frames. Does not include frames received with
+ frame-too-long, FCS, or length errors */
+ __be32 RTOTFRMS;
+ /* Total dropped transmitted packets */
+ __be32 TDROP;
+};
+
+
+#endif
diff --git a/drivers/net/mlx4/en_resources.c b/drivers/net/mlx4/en_resources.c
new file mode 100644
index 000000000000..a0545209e507
--- /dev/null
+++ b/drivers/net/mlx4/en_resources.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4_en.h"
+
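+/* Build the QP context shared by the Tx, Rx and RSS QPs: 'is_tx' selects
+ * which of the SQ/RQ size fields is meaningful, and 'rss' switches the
+ * context into its RSS variant. */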
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn, int srqn,
+ struct mlx4_qp_context *context)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ memset(context, 0, sizeof *context);
+ context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->pd = cpu_to_be32(mdev->priv_pdn);
+ context->mtu_msgmax = 0xff;
+ context->rq_size_stride = 0;
+ if (is_tx)
+ context->sq_size_stride = ilog2(size) << 3 | (ilog2(stride) - 4);
+ else
+ context->sq_size_stride = 1;
+ context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+ context->local_qpn = cpu_to_be32(qpn);
+ context->pri_path.ackto = 1 & 0x07;
+ context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
+ context->pri_path.counter_index = 0xff;
+ context->cqn_send = cpu_to_be32(cqn);
+ context->cqn_recv = cpu_to_be32(cqn);
+ context->db_rec_addr = cpu_to_be64(priv->res.db.dma << 2);
+ if (!rss)
+ context->srqn = cpu_to_be32(MLX4_EN_USE_SRQ | srqn);
+}
+
+
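+/* On 64-bit systems (or when the buffer was allocated in a single chunk)
+ * the ring buffer is already virtually contiguous and nothing needs to be
+ * done; otherwise vmap() the page list into one contiguous mapping. */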
+int mlx4_en_map_buffer(struct mlx4_buf *buf)
+{
+ struct page **pages;
+ int i;
+
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return 0;
+
+ pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+ if (!pages)
+ return -ENOMEM;
+
+ for (i = 0; i < buf->nbufs; ++i)
+ pages[i] = virt_to_page(buf->page_list[i].buf);
+
+ buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!buf->direct.buf)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
+{
+ if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+ return;
+
+ vunmap(buf->direct.buf);
+}
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
new file mode 100644
index 000000000000..6232227f56c3
--- /dev/null
+++ b/drivers/net/mlx4/en_rx.c
@@ -0,0 +1,1080 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+static void *get_wqe(struct mlx4_en_rx_ring *ring, int n)
+{
+ int offset = n << ring->srq.wqe_shift;
+ return ring->buf + offset;
+}
+
+static void mlx4_en_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
+{
+ return;
+}
+
+static int mlx4_en_get_frag_header(struct skb_frag_struct *frags, void **mac_hdr,
+ void **ip_hdr, void **tcpudp_hdr,
+ u64 *hdr_flags, void *priv)
+{
+ *mac_hdr = page_address(frags->page) + frags->page_offset;
+ *ip_hdr = *mac_hdr + ETH_HLEN;
+ *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
+ *hdr_flags = LRO_IPV4 | LRO_TCP;
+
+ return 0;
+}
+
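+/* Carve the next receive fragment out of the per-ring allocator page.
+ * Fragments share a high-order page; once the last usable offset has been
+ * handed out, the current page goes to the skb and a freshly allocated
+ * page takes its place in the allocator. */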
+static int mlx4_en_alloc_frag(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *ring_alloc,
+ int i)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_frag_info *frag_info = &priv->frag_info[i];
+ struct mlx4_en_rx_alloc *page_alloc = &ring_alloc[i];
+ struct page *page;
+ dma_addr_t dma;
+
+ if (page_alloc->offset == frag_info->last_offset) {
+ /* Allocate new page */
+ page = alloc_pages(GFP_ATOMIC | __GFP_COMP, MLX4_EN_ALLOC_ORDER);
+ if (!page)
+ return -ENOMEM;
+
+ skb_frags[i].page = page_alloc->page;
+ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->page = page;
+ page_alloc->offset = frag_info->frag_align;
+ } else {
+ page = page_alloc->page;
+ get_page(page);
+
+ skb_frags[i].page = page;
+ skb_frags[i].page_offset = page_alloc->offset;
+ page_alloc->offset += frag_info->frag_stride;
+ }
+ dma = pci_map_single(mdev->pdev, page_address(skb_frags[i].page) +
+ skb_frags[i].page_offset, frag_info->frag_size,
+ PCI_DMA_FROMDEVICE);
+ rx_desc->data[i].addr = cpu_to_be64(dma);
+ return 0;
+}
+
+static int mlx4_en_init_allocator(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_rx_alloc *page_alloc;
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++) {
+ page_alloc = &ring->page_alloc[i];
+ page_alloc->page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
+ MLX4_EN_ALLOC_ORDER);
+ if (!page_alloc->page)
+ goto out;
+
+ page_alloc->offset = priv->frag_info[i].frag_align;
+ mlx4_dbg(DRV, priv, "Initialized allocator:%d with page:%p\n",
+ i, page_alloc->page);
+ }
+ return 0;
+
+out:
+ while (i--) {
+ page_alloc = &ring->page_alloc[i];
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ }
+ return -ENOMEM;
+}
+
+static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_rx_alloc *page_alloc;
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++) {
+ page_alloc = &ring->page_alloc[i];
+ mlx4_dbg(DRV, priv, "Freeing allocator:%d count:%d\n",
+ i, page_count(page_alloc->page));
+
+ put_page(page_alloc->page);
+ page_alloc->page = NULL;
+ }
+}
+
+
+static void mlx4_en_init_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
+{
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
+ struct skb_frag_struct *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
+ int possible_frags;
+ int i;
+
+ /* Pre-link descriptor */
+ rx_desc->next.next_wqe_index = cpu_to_be16((index + 1) & ring->size_mask);
+
+ /* Set size and memtype fields */
+ for (i = 0; i < priv->num_frags; i++) {
+ skb_frags[i].size = priv->frag_info[i].frag_size;
+ rx_desc->data[i].byte_count =
+ cpu_to_be32(priv->frag_info[i].frag_size);
+ rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
+ }
+
+ /* If the number of used fragments does not fill up the ring stride,
+ * remaining (unused) fragments must be padded with null address/size
+ * and a special memory key */
+ possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
+ for (i = priv->num_frags; i < possible_frags; i++) {
+ rx_desc->data[i].byte_count = 0;
+ rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
+ rx_desc->data[i].addr = 0;
+ }
+}
+
+
+static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, int index)
+{
+ struct mlx4_en_rx_desc *rx_desc = ring->buf + (index * ring->stride);
+ struct skb_frag_struct *skb_frags = ring->rx_info +
+ (index << priv->log_rx_info);
+ int i;
+
+ for (i = 0; i < priv->num_frags; i++)
+ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, ring->page_alloc, i))
+ goto err;
+
+ return 0;
+
+err:
+ while (i--)
+ put_page(skb_frags[i].page);
+ return -ENOMEM;
+}
+
+static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
+{
+ *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
+}
+
+static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rx_ring *ring;
+ int ring_ind;
+ int buf_ind;
+
+ for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ if (mlx4_en_prepare_rx_desc(priv, ring,
+ ring->actual_size)) {
+ if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
+ mlx4_err(mdev, "Failed to allocate "
+ "enough rx buffers\n");
+ return -ENOMEM;
+ } else {
+ if (netif_msg_rx_err(priv))
+ mlx4_warn(mdev,
+ "Only %d buffers allocated\n",
+ ring->actual_size);
+ goto out;
+ }
+ }
+ ring->actual_size++;
+ ring->prod++;
+ }
+ }
+out:
+ return 0;
+}
+
+static int mlx4_en_fill_rx_buf(struct net_device *dev,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int num = 0;
+ int err;
+
+ while ((u32) (ring->prod - ring->cons) < ring->actual_size) {
+ err = mlx4_en_prepare_rx_desc(priv, ring, ring->prod &
+ ring->size_mask);
+ if (err) {
+ if (netif_msg_rx_err(priv))
+ mlx4_warn(priv->mdev,
+ "Failed preparing rx descriptor\n");
+ priv->port_stats.rx_alloc_failed++;
+ break;
+ }
+ ++num;
+ ++ring->prod;
+ }
+ if ((u32) (ring->prod - ring->cons) == ring->size)
+ ring->full = 1;
+
+ return num;
+}
+
+static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct skb_frag_struct *skb_frags;
+ struct mlx4_en_rx_desc *rx_desc;
+ dma_addr_t dma;
+ int index;
+ int nr;
+
+ mlx4_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
+ ring->cons, ring->prod);
+
+ /* Unmap and free Rx buffers */
+ BUG_ON((u32) (ring->prod - ring->cons) > ring->size);
+ while (ring->cons != ring->prod) {
+ index = ring->cons & ring->size_mask;
+ rx_desc = ring->buf + (index << ring->log_stride);
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ mlx4_dbg(DRV, priv, "Processing descriptor:%d\n", index);
+
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ mlx4_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ mlx4_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma);
+ pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ PCI_DMA_FROMDEVICE);
+ put_page(skb_frags[nr].page);
+ }
+ ++ring->cons;
+ }
+}
+
+
+void mlx4_en_rx_refill(struct work_struct *work)
+{
+ struct delayed_work *delay = container_of(work, struct delayed_work, work);
+ struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
+ refill_task);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct net_device *dev = priv->dev;
+ struct mlx4_en_rx_ring *ring;
+ int need_refill = 0;
+ int i;
+
+ mutex_lock(&mdev->state_lock);
+ if (!mdev->device_up || !priv->port_up)
+ goto out;
+
+ /* We only get here if there are no receive buffers, so we can't race
+ * with Rx interrupts while filling buffers */
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ ring = &priv->rx_ring[i];
+ if (ring->need_refill) {
+ if (mlx4_en_fill_rx_buf(dev, ring)) {
+ ring->need_refill = 0;
+ mlx4_en_update_rx_prod_db(ring);
+ } else
+ need_refill = 1;
+ }
+ }
+ if (need_refill)
+ queue_delayed_work(mdev->workqueue, &priv->refill_task, HZ);
+
+out:
+ mutex_unlock(&mdev->state_lock);
+}
+
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+ int tmp;
+
+ /* Sanity check SRQ size before proceeding */
+ if (size >= mdev->dev->caps.max_srq_wqes)
+ return -EINVAL;
+
+ ring->prod = 0;
+ ring->cons = 0;
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride;
+
+ tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
+ sizeof(struct skb_frag_struct));
+ ring->rx_info = vmalloc(tmp);
+ if (!ring->rx_info) {
+ mlx4_err(mdev, "Failed allocating rx_info ring\n");
+ return -ENOMEM;
+ }
+ mlx4_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
+ ring->rx_info, tmp);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
+ ring->buf_size, 2 * PAGE_SIZE);
+ if (err)
+ goto err_ring;
+
+ err = mlx4_en_map_buffer(&ring->wqres.buf);
+ if (err) {
+ mlx4_err(mdev, "Failed to map RX buffer\n");
+ goto err_hwq;
+ }
+ ring->buf = ring->wqres.buf.direct.buf;
+
+ /* Configure lro mngr */
+ memset(&ring->lro, 0, sizeof(struct net_lro_mgr));
+ ring->lro.dev = priv->dev;
+ ring->lro.features = LRO_F_NAPI;
+ ring->lro.frag_align_pad = NET_IP_ALIGN;
+ ring->lro.ip_summed = CHECKSUM_UNNECESSARY;
+ ring->lro.ip_summed_aggr = CHECKSUM_UNNECESSARY;
+ ring->lro.max_desc = mdev->profile.num_lro;
+ ring->lro.max_aggr = MAX_SKB_FRAGS;
+ ring->lro.lro_arr = kzalloc(mdev->profile.num_lro *
+ sizeof(struct net_lro_desc),
+ GFP_KERNEL);
+ if (!ring->lro.lro_arr) {
+ mlx4_err(mdev, "Failed to allocate lro array\n");
+ goto err_map;
+ }
+ ring->lro.get_frag_header = mlx4_en_get_frag_header;
+
+ return 0;
+
+err_map:
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq:
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_ring:
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+ return err;
+}
+
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_wqe_srq_next_seg *next;
+ struct mlx4_en_rx_ring *ring;
+ int i;
+ int ring_ind;
+ int err;
+ int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * priv->num_frags);
+ int max_gs = (stride - sizeof(struct mlx4_wqe_srq_next_seg)) / DS_SIZE;
+
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ ring->prod = 0;
+ ring->cons = 0;
+ ring->actual_size = 0;
+ ring->cqn = priv->rx_cq[ring_ind].mcq.cqn;
+
+ ring->stride = stride;
+ ring->log_stride = ffs(ring->stride) - 1;
+ ring->buf_size = ring->size * ring->stride;
+
+ memset(ring->buf, 0, ring->buf_size);
+ mlx4_en_update_rx_prod_db(ring);
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->size; i++)
+ mlx4_en_init_rx_desc(priv, ring, i);
+
+ /* Initialize page allocators */
+ err = mlx4_en_init_allocator(priv, ring);
+ if (err) {
+ mlx4_err(mdev, "Failed initializing ring allocator\n");
+ goto err_allocator;
+ }
+
+ /* Fill Rx buffers */
+ ring->full = 0;
+ }
+ if (mlx4_en_fill_rx_buffers(priv))
+ goto err_buffers;
+
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
+ ring = &priv->rx_ring[ring_ind];
+
+ mlx4_en_update_rx_prod_db(ring);
+
+ /* Configure SRQ representing the ring */
+ ring->srq.max = ring->size;
+ ring->srq.max_gs = max_gs;
+ ring->srq.wqe_shift = ilog2(ring->stride);
+
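+ /* Chain the SRQ WQEs into a circular free list for the HW to
+ * walk; each entry points at its successor. */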
+ for (i = 0; i < ring->srq.max; ++i) {
+ next = get_wqe(ring, i);
+ next->next_wqe_index =
+ cpu_to_be16((i + 1) & (ring->srq.max - 1));
+ }
+
+ err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,
+ ring->wqres.db.dma, &ring->srq);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate srq\n");
+ goto err_srq;
+ }
+ ring->srq.event = mlx4_en_srq_event;
+ }
+
+ return 0;
+
+err_srq:
+ while (--ring_ind >= 0) {
+ ring = &priv->rx_ring[ring_ind];
+ mlx4_srq_free(mdev->dev, &ring->srq);
+ }
+
+err_buffers:
+ for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
+ mlx4_en_free_rx_buf(priv, &priv->rx_ring[ring_ind]);
+
+ ring_ind = priv->rx_ring_num - 1;
+err_allocator:
+ while (ring_ind >= 0) {
+ mlx4_en_destroy_allocator(priv, &priv->rx_ring[ring_ind]);
+ ring_ind--;
+ }
+ return err;
+}
+
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ kfree(ring->lro.lro_arr);
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ vfree(ring->rx_info);
+ ring->rx_info = NULL;
+}
+
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_srq_free(mdev->dev, &ring->srq);
+ mlx4_en_free_rx_buf(priv, ring);
+ mlx4_en_destroy_allocator(priv, ring);
+}
+
+
+/* Unmap a completed descriptor and free unused pages */
+static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct skb_frag_struct *skb_frags_rx,
+ struct mlx4_en_rx_alloc *page_alloc,
+ int length)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_frag_info *frag_info;
+ int nr;
+ dma_addr_t dma;
+
+ /* Collect used fragments while replacing them in the HW descriptors */
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ frag_info = &priv->frag_info[nr];
+ if (length <= frag_info->frag_prefix_size)
+ break;
+
+ /* Save page reference in skb */
+ skb_frags_rx[nr].page = skb_frags[nr].page;
+ skb_frags_rx[nr].size = skb_frags[nr].size;
+ skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset;
+ dma = be64_to_cpu(rx_desc->data[nr].addr);
+
+ /* Allocate a replacement page */
+ if (mlx4_en_alloc_frag(priv, rx_desc, skb_frags, page_alloc, nr))
+ goto fail;
+
+ /* Unmap buffer */
+ pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Adjust size of last fragment to match actual length */
+ skb_frags_rx[nr - 1].size = length -
+ priv->frag_info[nr - 1].frag_prefix_size;
+ return nr;
+
+fail:
+ /* Drop all accumulated fragments (which have already been replaced in
+ * the descriptor) of this packet; remaining fragments are reused... */
+ while (nr > 0) {
+ nr--;
+ put_page(skb_frags_rx[nr].page);
+ }
+ return 0;
+}
+
+
+static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_desc *rx_desc,
+ struct skb_frag_struct *skb_frags,
+ struct mlx4_en_rx_alloc *page_alloc,
+ unsigned int length)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct sk_buff *skb;
+ void *va;
+ int used_frags;
+ dma_addr_t dma;
+
+ skb = dev_alloc_skb(SMALL_PACKET_SIZE + NET_IP_ALIGN);
+ if (!skb) {
+ mlx4_dbg(RX_ERR, priv, "Failed allocating skb\n");
+ return NULL;
+ }
+ skb->dev = priv->dev;
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->len = length;
+ skb->truesize = length + sizeof(struct sk_buff);
+
+ /* Get a pointer to the first fragment so we can copy the headers into
+ * the (linear part of the) skb */
+ va = page_address(skb_frags[0].page) + skb_frags[0].page_offset;
+
+ if (length <= SMALL_PACKET_SIZE) {
+ /* We are copying all relevant data to the skb - temporarily
+ * synch buffers for the copy */
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
+ length, DMA_FROM_DEVICE);
+ skb_copy_to_linear_data(skb, va, length);
+ dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
+ length, DMA_FROM_DEVICE);
+ skb->tail += length;
+ } else {
+
+ /* Move relevant fragments to skb */
+ used_frags = mlx4_en_complete_rx_desc(priv, rx_desc, skb_frags,
+ skb_shinfo(skb)->frags,
+ page_alloc, length);
+ skb_shinfo(skb)->nr_frags = used_frags;
+
+ /* Copy headers into the skb linear buffer */
+ memcpy(skb->data, va, HEADER_COPY_SIZE);
+ skb->tail += HEADER_COPY_SIZE;
+
+ /* Skip headers in first fragment */
+ skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE;
+
+ /* Adjust size of first fragment */
+ skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE;
+ skb->data_len = length - HEADER_COPY_SIZE;
+ }
+ return skb;
+}
+
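+/* Reuse the buffers of just-polled descriptors: copy their page references
+ * and DMA addresses from the consumed slots to the producer-side slots so
+ * they can be posted again without reallocation. */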
+static void mlx4_en_copy_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ int from, int to, int num)
+{
+ struct skb_frag_struct *skb_frags_from;
+ struct skb_frag_struct *skb_frags_to;
+ struct mlx4_en_rx_desc *rx_desc_from;
+ struct mlx4_en_rx_desc *rx_desc_to;
+ int from_index, to_index;
+ int nr, i;
+
+ for (i = 0; i < num; i++) {
+ from_index = (from + i) & ring->size_mask;
+ to_index = (to + i) & ring->size_mask;
+ skb_frags_from = ring->rx_info + (from_index << priv->log_rx_info);
+ skb_frags_to = ring->rx_info + (to_index << priv->log_rx_info);
+ rx_desc_from = ring->buf + (from_index << ring->log_stride);
+ rx_desc_to = ring->buf + (to_index << ring->log_stride);
+
+ for (nr = 0; nr < priv->num_frags; nr++) {
+ skb_frags_to[nr].page = skb_frags_from[nr].page;
+ skb_frags_to[nr].page_offset = skb_frags_from[nr].page_offset;
+ rx_desc_to->data[nr].addr = rx_desc_from->data[nr].addr;
+ }
+ }
+}
+
+
+int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_cqe *cqe;
+ struct mlx4_en_rx_ring *ring = &priv->rx_ring[cq->ring];
+ struct skb_frag_struct *skb_frags;
+ struct skb_frag_struct lro_frags[MLX4_EN_MAX_RX_FRAGS];
+ struct mlx4_en_rx_desc *rx_desc;
+ struct sk_buff *skb;
+ int index;
+ int nr;
+ unsigned int length;
+ int polled = 0;
+ int ip_summed;
+
+ if (!priv->port_up)
+ return 0;
+
+ /* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
+ * descriptor offset can be deduced from the CQE index instead of
+ * reading 'cqe->index' */
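+ /* CQE ownership alternates on every pass around the CQ: a CQE is
+ * software-owned when its owner bit matches the wrap bit of the
+ * consumer index, hence the XNOR in the loop condition below. */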
+ index = cq->mcq.cons_index & ring->size_mask;
+ cqe = &cq->buf[index];
+
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cq->mcq.cons_index & cq->size)) {
+
+ skb_frags = ring->rx_info + (index << priv->log_rx_info);
+ rx_desc = ring->buf + (index << ring->log_stride);
+
+ /*
+ * make sure we read the CQE after we read the ownership bit
+ */
+ rmb();
+
+ /* Drop packet on bad receive or bad checksum */
+ if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
+ MLX4_CQE_OPCODE_ERROR)) {
+ mlx4_err(mdev, "CQE completed in error - vendor "
+ "syndrome:%d syndrome:%d\n",
+ ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *) cqe)->syndrome);
+ goto next;
+ }
+ if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
+ mlx4_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
+ goto next;
+ }
+
+ /*
+ * Packet is OK - process it.
+ */
+ length = be32_to_cpu(cqe->byte_cnt);
+ ring->bytes += length;
+ ring->packets++;
+
+ if (likely(priv->rx_csum)) {
+ if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
+ (cqe->checksum == cpu_to_be16(0xffff))) {
+ priv->port_stats.rx_chksum_good++;
+ /* This packet is eligible for LRO if it is:
+ * - DIX Ethernet (type interpretation)
+ * - TCP/IP (v4)
+ * - without IP options
+ * - not an IP fragment */
+ if (mlx4_en_can_lro(cqe->status) &&
+ dev->features & NETIF_F_LRO) {
+
+ nr = mlx4_en_complete_rx_desc(
+ priv, rx_desc,
+ skb_frags, lro_frags,
+ ring->page_alloc, length);
+ if (!nr)
+ goto next;
+
+ if (priv->vlgrp && (cqe->vlan_my_qpn &
+ cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK))) {
+ lro_vlan_hwaccel_receive_frags(
+ &ring->lro, lro_frags,
+ length, length,
+ priv->vlgrp,
+ be16_to_cpu(cqe->sl_vid),
+ NULL, 0);
+ } else
+ lro_receive_frags(&ring->lro,
+ lro_frags,
+ length,
+ length,
+ NULL, 0);
+
+ goto next;
+ }
+
+ /* LRO not possible, complete processing here */
+ ip_summed = CHECKSUM_UNNECESSARY;
+ INC_PERF_COUNTER(priv->pstats.lro_misses);
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ priv->port_stats.rx_chksum_none++;
+ }
+ } else {
+ ip_summed = CHECKSUM_NONE;
+ priv->port_stats.rx_chksum_none++;
+ }
+
+ skb = mlx4_en_rx_skb(priv, rx_desc, skb_frags,
+ ring->page_alloc, length);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ goto next;
+ }
+
+ skb->ip_summed = ip_summed;
+ skb->protocol = eth_type_trans(skb, dev);
+
+ /* Push it up the stack */
+ if (priv->vlgrp && (be32_to_cpu(cqe->vlan_my_qpn) &
+ MLX4_CQE_VLAN_PRESENT_MASK)) {
+ vlan_hwaccel_receive_skb(skb, priv->vlgrp,
+ be16_to_cpu(cqe->sl_vid));
+ } else
+ netif_receive_skb(skb);
+
+ dev->last_rx = jiffies;
+
+next:
+ ++cq->mcq.cons_index;
+ index = (cq->mcq.cons_index) & ring->size_mask;
+ cqe = &cq->buf[index];
+ if (++polled == budget) {
+ /* We are here because we reached the NAPI budget -
+ * flush only pending LRO sessions */
+ lro_flush_all(&ring->lro);
+ goto out;
+ }
+ }
+
+ /* If CQ is empty flush all LRO sessions unconditionally */
+ lro_flush_all(&ring->lro);
+
+out:
+ AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
+ mlx4_cq_set_ci(&cq->mcq);
+ wmb(); /* ensure HW sees CQ consumer before we post new buffers */
+ ring->cons = cq->mcq.cons_index;
+ ring->prod += polled; /* Polled descriptors were reallocated in place */
+ if (unlikely(!ring->full)) {
+ mlx4_en_copy_desc(priv, ring, ring->cons - polled,
+ ring->prod - polled, polled);
+ mlx4_en_fill_rx_buf(dev, ring);
+ }
+ mlx4_en_update_rx_prod_db(ring);
+ return polled;
+}
+
+
+void mlx4_en_rx_irq(struct mlx4_cq *mcq)
+{
+ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+
+ if (priv->port_up)
+ netif_rx_schedule(cq->dev, &cq->napi);
+ else
+ mlx4_en_arm_cq(priv, cq);
+}
+
+/* Rx CQ polling - called by NAPI */
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
+{
+ struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
+ struct net_device *dev = cq->dev;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int done;
+
+ done = mlx4_en_process_rx_cq(dev, cq, budget);
+
+ /* If we used up all the quota - we're probably not done yet... */
+ if (done == budget)
+ INC_PERF_COUNTER(priv->pstats.napi_quota);
+ else {
+ /* Done for now */
+ netif_rx_complete(dev, napi);
+ mlx4_en_arm_cq(priv, cq);
+ }
+ return done;
+}
+
+
+/* Calculate the last offset position that accommodates a full fragment
+ * (assuming fragment size = stride-align) */
+static int mlx4_en_last_alloc_offset(struct mlx4_en_priv *priv, u16 stride, u16 align)
+{
+ u16 res = MLX4_EN_ALLOC_SIZE % stride;
+ u16 offset = MLX4_EN_ALLOC_SIZE - stride - res + align;
+
+ mlx4_dbg(DRV, priv, "Calculated last offset for stride:%d align:%d "
+ "res:%d offset:%d\n", stride, align, res, offset);
+ return offset;
+}
+
+
+static int frag_sizes[] = {
+ FRAG_SZ0,
+ FRAG_SZ1,
+ FRAG_SZ2,
+ FRAG_SZ3
+};
+
+void mlx4_en_calc_rx_buf(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+ int buf_size = 0;
+ int i = 0;
+
+ while (buf_size < eff_mtu) {
+ priv->frag_info[i].frag_size =
+ (eff_mtu > buf_size + frag_sizes[i]) ?
+ frag_sizes[i] : eff_mtu - buf_size;
+ priv->frag_info[i].frag_prefix_size = buf_size;
+ if (!i) {
+ priv->frag_info[i].frag_align = NET_IP_ALIGN;
+ priv->frag_info[i].frag_stride =
+ ALIGN(frag_sizes[i] + NET_IP_ALIGN, SMP_CACHE_BYTES);
+ } else {
+ priv->frag_info[i].frag_align = 0;
+ priv->frag_info[i].frag_stride =
+ ALIGN(frag_sizes[i], SMP_CACHE_BYTES);
+ }
+ priv->frag_info[i].last_offset = mlx4_en_last_alloc_offset(
+ priv, priv->frag_info[i].frag_stride,
+ priv->frag_info[i].frag_align);
+ buf_size += priv->frag_info[i].frag_size;
+ i++;
+ }
+
+ priv->num_frags = i;
+ priv->rx_skb_size = eff_mtu;
+ priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct skb_frag_struct));
+
+ mlx4_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
+ "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ for (i = 0; i < priv->num_frags; i++) {
+ mlx4_dbg(DRV, priv, " frag:%d - size:%d prefix:%d align:%d "
+ "stride:%d last_offset:%d\n", i,
+ priv->frag_info[i].frag_size,
+ priv->frag_info[i].frag_prefix_size,
+ priv->frag_info[i].frag_align,
+ priv->frag_info[i].frag_stride,
+ priv->frag_info[i].last_offset);
+ }
+}
+
+/* RSS related functions */
+
+/* Calculate rss size and map each entry in rss table to rx ring */
+void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+ struct mlx4_en_rss_map *rss_map,
+ int num_entries, int num_rings)
+{
+ int i;
+
+ rss_map->size = roundup_pow_of_two(num_entries);
+ mlx4_dbg(DRV, priv, "Setting default RSS map of %d entries\n",
+ rss_map->size);
+
+ for (i = 0; i < rss_map->size; i++) {
+ rss_map->map[i] = i % num_rings;
+ mlx4_dbg(DRV, priv, "Entry %d ---> ring %d\n", i, rss_map->map[i]);
+ }
+}
+
+static void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
+{
+ return;
+}
+
+
+static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv,
+ int qpn, int srqn, int cqn,
+ enum mlx4_qp_state *state,
+ struct mlx4_qp *qp)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_qp_context *context;
+ int err = 0;
+
+ context = kmalloc(sizeof *context, GFP_KERNEL);
+ if (!context) {
+ mlx4_err(mdev, "Failed to allocate qp context\n");
+ return -ENOMEM;
+ }
+
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate qp #%d\n", qpn);
+ goto out;
+ }
+ qp->event = mlx4_en_sqp_event;
+
+ memset(context, 0, sizeof *context);
+ mlx4_en_fill_qp_context(priv, 0, 0, 0, 0, qpn, cqn, srqn, context);
+
+ err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);
+ if (err) {
+ mlx4_qp_remove(mdev->dev, qp);
+ mlx4_qp_free(mdev->dev, qp);
+ }
+out:
+ kfree(context);
+ return err;
+}
+
+/* Allocate rx qp's and configure them according to rss map */
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ struct mlx4_qp_context context;
+ struct mlx4_en_rss_context *rss_context;
+ void *ptr;
+ int rss_xor = mdev->profile.rss_xor;
+ u8 rss_mask = mdev->profile.rss_mask;
+ int i, srqn, qpn, cqn;
+ int err = 0;
+ int good_qps = 0;
+
+ mlx4_dbg(DRV, priv, "Configuring rss steering for port %u\n", priv->port);
+ err = mlx4_qp_reserve_range(mdev->dev, rss_map->size,
+ rss_map->size, &rss_map->base_qpn);
+ if (err) {
+ mlx4_err(mdev, "Failed reserving %d qps for port %u\n",
+ rss_map->size, priv->port);
+ return err;
+ }
+
+ for (i = 0; i < rss_map->size; i++) {
+ cqn = priv->rx_ring[rss_map->map[i]].cqn;
+ srqn = priv->rx_ring[rss_map->map[i]].srq.srqn;
+ qpn = rss_map->base_qpn + i;
+ err = mlx4_en_config_rss_qp(priv, qpn, srqn, cqn,
+ &rss_map->state[i],
+ &rss_map->qps[i]);
+ if (err)
+ goto rss_err;
+
+ ++good_qps;
+ }
+
+ /* Configure RSS indirection qp */
+ err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &priv->base_qpn);
+ if (err) {
+ mlx4_err(mdev, "Failed to reserve range for RSS "
+ "indirection qp\n");
+ goto rss_err;
+ }
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, &rss_map->indir_qp);
+ if (err) {
+ mlx4_err(mdev, "Failed to allocate RSS indirection QP\n");
+ goto reserve_err;
+ }
+ rss_map->indir_qp.event = mlx4_en_sqp_event;
+ mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
+ priv->rx_ring[0].cqn, 0, &context);
+
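+ /* The RSS context sits at a fixed offset (0x3c) inside the QP
+ * context of the indirection QP. */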
+ ptr = ((void *) &context) + 0x3c;
+ rss_context = (struct mlx4_en_rss_context *) ptr;
+ rss_context->base_qpn = cpu_to_be32(ilog2(rss_map->size) << 24 |
+ (rss_map->base_qpn));
+ rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ rss_context->hash_fn = rss_xor & 0x3;
+ rss_context->flags = rss_mask << 2;
+
+ err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
+ &rss_map->indir_qp, &rss_map->indir_state);
+ if (err)
+ goto indir_err;
+
+ return 0;
+
+indir_err:
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+reserve_err:
+ mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
+rss_err:
+ for (i = 0; i < good_qps; i++) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+ mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+ mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+ }
+ mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+ return err;
+}
+
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ int i;
+
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->indir_qp);
+ mlx4_qp_remove(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_free(mdev->dev, &rss_map->indir_qp);
+ mlx4_qp_release_range(mdev->dev, priv->base_qpn, 1);
+
+ for (i = 0; i < rss_map->size; i++) {
+ mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
+ MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
+ mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
+ mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
+ }
+ mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, rss_map->size);
+}
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
new file mode 100644
index 000000000000..8592f8fb8475
--- /dev/null
+++ b/drivers/net/mlx4/en_tx.c
@@ -0,0 +1,820 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <asm/page.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/qp.h>
+#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
+
+#include "mlx4_en.h"
+
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+};
+
+static int inline_thold __read_mostly = MAX_INLINE;
+
+module_param_named(inline_thold, inline_thold, int, 0444);
+MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring, u32 size,
+ u16 stride)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int tmp;
+ int err;
+
+ ring->size = size;
+ ring->size_mask = size - 1;
+ ring->stride = stride;
+
+ inline_thold = min(inline_thold, MAX_INLINE);
+
+ spin_lock_init(&ring->comp_lock);
+
+ tmp = size * sizeof(struct mlx4_en_tx_info);
+ ring->tx_info = vmalloc(tmp);
+ if (!ring->tx_info) {
+ mlx4_err(mdev, "Failed allocating tx_info ring\n");
+ return -ENOMEM;
+ }
+ mlx4_dbg(DRV, priv, "Allocated tx_info ring at addr:%p size:%d\n",
+ ring->tx_info, tmp);
+
+ ring->bounce_buf = kmalloc(MAX_DESC_SIZE, GFP_KERNEL);
+ if (!ring->bounce_buf) {
+ mlx4_err(mdev, "Failed allocating bounce buffer\n");
+ err = -ENOMEM;
+ goto err_tx;
+ }
+ ring->buf_size = ALIGN(size * ring->stride, MLX4_EN_PAGE_SIZE);
+
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
+ 2 * PAGE_SIZE);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating hwq resources\n");
+ goto err_bounce;
+ }
+
+ err = mlx4_en_map_buffer(&ring->wqres.buf);
+ if (err) {
+ mlx4_err(mdev, "Failed to map TX buffer\n");
+ goto err_hwq_res;
+ }
+
+ ring->buf = ring->wqres.buf.direct.buf;
+
+ mlx4_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
+ "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
+ ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+
+ err = mlx4_qp_reserve_range(mdev->dev, 1, 1, &ring->qpn);
+ if (err) {
+ mlx4_err(mdev, "Failed reserving qp for tx ring.\n");
+ goto err_map;
+ }
+
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp);
+ if (err) {
+ mlx4_err(mdev, "Failed allocating qp %d\n", ring->qpn);
+ goto err_reserve;
+ }
+
+ return 0;
+
+err_reserve:
+ mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+err_map:
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+err_hwq_res:
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+err_bounce:
+ kfree(ring->bounce_buf);
+ ring->bounce_buf = NULL;
+err_tx:
+ vfree(ring->tx_info);
+ ring->tx_info = NULL;
+ return err;
+}
+
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ mlx4_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
+
+ mlx4_qp_remove(mdev->dev, &ring->qp);
+ mlx4_qp_free(mdev->dev, &ring->qp);
+ mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
+ mlx4_en_unmap_buffer(&ring->wqres.buf);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
+ kfree(ring->bounce_buf);
+ ring->bounce_buf = NULL;
+ vfree(ring->tx_info);
+ ring->tx_info = NULL;
+}
+
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int cq, int srqn)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err;
+
+ ring->cqn = cq;
+ ring->prod = 0;
+ ring->cons = 0xffffffff;
+ ring->last_nr_txbb = 1;
+ ring->poll_cnt = 0;
+ ring->blocked = 0;
+ memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
+ memset(ring->buf, 0, ring->buf_size);
+
+ ring->qp_state = MLX4_QP_STATE_RST;
+ ring->doorbell_qpn = swab32(ring->qp.qpn << 8);
+
+ mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
+ ring->cqn, srqn, &ring->context);
+
+ err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
+ &ring->qp, &ring->qp_state);
+
+ return err;
+}
+
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ mlx4_qp_modify(mdev->dev, NULL, ring->qp_state,
+ MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
+}
+
+
+static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner)
+{
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+ struct mlx4_wqe_data_seg *data = (void *) tx_desc + tx_info->data_offset;
+ struct sk_buff *skb = tx_info->skb;
+ struct skb_frag_struct *frag;
+ void *end = ring->buf + ring->buf_size;
+ int frags = skb_shinfo(skb)->nr_frags;
+ int i;
+ __be32 *ptr = (__be32 *)tx_desc;
+ __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+
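+ /* Freed TXBBs are stamped with an ownership pattern so that a later
+ * poll can tell recycled descriptors from newly completed ones. */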
+ /* Optimize the common case when there are no wraparounds */
+ if (likely((void *) tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+ if (tx_info->linear) {
+ pci_unmap_single(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ ++data;
+ }
+
+ for (i = 0; i < frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ pci_unmap_page(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data[i].addr),
+ frag->size, PCI_DMA_TODEVICE);
+ }
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ }
+
+ } else {
+ if ((void *) data >= end) {
+ data = (struct mlx4_wqe_data_seg *)
+ (ring->buf + ((void *) data - end));
+ }
+
+ if (tx_info->linear) {
+ pci_unmap_single(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ be32_to_cpu(data->byte_count),
+ PCI_DMA_TODEVICE);
+ ++data;
+ }
+
+ for (i = 0; i < frags; i++) {
+ /* Check for wraparound before unmapping */
+ if ((void *) data >= end)
+ data = (struct mlx4_wqe_data_seg *) ring->buf;
+ frag = &skb_shinfo(skb)->frags[i];
+ pci_unmap_page(mdev->pdev,
+ (dma_addr_t) be64_to_cpu(data->addr),
+ frag->size, PCI_DMA_TODEVICE);
+ }
+ /* Stamp the freed descriptor */
+ for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
+ *ptr = stamp;
+ ptr += STAMP_DWORDS;
+ if ((void *) ptr >= end) {
+ ptr = ring->buf;
+ stamp ^= cpu_to_be32(0x80000000);
+ }
+ }
+
+ }
+ dev_kfree_skb_any(skb);
+ return tx_info->nr_txbb;
+}
+
+
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int cnt = 0;
+
+ /* Skip last polled descriptor */
+ ring->cons += ring->last_nr_txbb;
+ mlx4_dbg(DRV, priv, "Freeing Tx buf - cons:0x%x prod:0x%x\n",
+ ring->cons, ring->prod);
+
+ if ((u32) (ring->prod - ring->cons) > ring->size) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(priv->mdev, "Tx consumer passed producer!\n");
+ return 0;
+ }
+
+ while (ring->cons != ring->prod) {
+ ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+ ring->cons & ring->size_mask,
+ !!(ring->cons & ring->size));
+ ring->cons += ring->last_nr_txbb;
+ cnt++;
+ }
+
+ if (cnt)
+ mlx4_dbg(DRV, priv, "Freed %d uncompleted tx descriptors\n", cnt);
+
+ return cnt;
+}
+
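+/* Spread the 8 VLAN priorities as evenly as possible over the available
+ * Tx rings; when more than one ring is in use, ring 0 is left to
+ * untagged traffic. */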
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num)
+{
+ int block = 8 / ring_num;
+ int extra = 8 - (block * ring_num);
+ int num = 0;
+ u16 ring = 1;
+ int prio;
+
+ if (ring_num == 1) {
+ for (prio = 0; prio < 8; prio++)
+ prio_map[prio] = 0;
+ return;
+ }
+
+ for (prio = 0; prio < 8; prio++) {
+ if (extra && (num == block + 1)) {
+ ring++;
+ num = 0;
+ extra--;
+ } else if (!extra && (num == block)) {
+ ring++;
+ num = 0;
+ }
+ prio_map[prio] = ring;
+ mlx4_dbg(DRV, priv, " prio:%d --> ring:%d\n", prio, ring);
+ num++;
+ }
+}
+
+static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_cq *mcq = &cq->mcq;
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ struct mlx4_cqe *cqe = cq->buf;
+ u16 index;
+ u16 new_index;
+ u32 txbbs_skipped = 0;
+ u32 cq_last_sav;
+
+ /* index always points to the first TXBB of the last polled descriptor */
+ index = ring->cons & ring->size_mask;
+ new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+ if (index == new_index)
+ return;
+
+ if (!priv->port_up)
+ return;
+
+ /*
+ * We use a two-stage loop:
+ * - the first samples the HW-updated CQE
+ * - the second frees TXBBs until the last sample
+ * This lets us amortize CQE cache misses, while still polling the CQ
+ * until it is quiescent.
+ */
+ cq_last_sav = mcq->cons_index;
+ do {
+ do {
+ /* Skip over last polled CQE */
+ index = (index + ring->last_nr_txbb) & ring->size_mask;
+ txbbs_skipped += ring->last_nr_txbb;
+
+ /* Poll next CQE */
+ ring->last_nr_txbb = mlx4_en_free_tx_desc(
+ priv, ring, index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ ++mcq->cons_index;
+
+ } while (index != new_index);
+
+ new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
+ } while (index != new_index);
+ AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
+ (u32) (mcq->cons_index - cq_last_sav));
+
+ /*
+ * To prevent CQ overflow we first update CQ consumer and only then
+ * the ring consumer.
+ */
+ mlx4_cq_set_ci(mcq);
+ wmb();
+ ring->cons += txbbs_skipped;
+
+ /* Wakeup Tx queue if this ring stopped it */
+ if (unlikely(ring->blocked)) {
+ if (((u32) (ring->prod - ring->cons) <=
+ ring->size - HEADROOM - MAX_DESC_TXBBS) && !cq->armed) {
+
+ /* TODO: support multiqueue netdevs. Currently, we block
+ * when *any* ring is full. Note that:
+ * - 2 Tx rings can unblock at the same time and call
+ * netif_wake_queue(), which is OK since this
+ * operation is idempotent.
+ * - We might wake the queue just after another ring
+ * stopped it. This is no big deal because the next
+ * transmission on that ring would stop the queue.
+ */
+ ring->blocked = 0;
+ netif_wake_queue(dev);
+ priv->port_stats.wake_queue++;
+ }
+ }
+}
+
+void mlx4_en_tx_irq(struct mlx4_cq *mcq)
+{
+ struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+
+ spin_lock_irq(&ring->comp_lock);
+ cq->armed = 0;
+ mlx4_en_process_tx_cq(cq->dev, cq);
+ if (ring->blocked)
+ mlx4_en_arm_cq(priv, cq);
+ else
+ mod_timer(&cq->timer, jiffies + 1);
+ spin_unlock_irq(&ring->comp_lock);
+}
+
+
+void mlx4_en_poll_tx_cq(unsigned long data)
+{
+ struct mlx4_en_cq *cq = (struct mlx4_en_cq *) data;
+ struct mlx4_en_priv *priv = netdev_priv(cq->dev);
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
+ u32 inflight;
+
+ INC_PERF_COUNTER(priv->pstats.tx_poll);
+
+ netif_tx_lock(priv->dev);
+ spin_lock_irq(&ring->comp_lock);
+ mlx4_en_process_tx_cq(cq->dev, cq);
+ inflight = (u32) (ring->prod - ring->cons - ring->last_nr_txbb);
+
+ /* If there are still packets in flight and the timer has not already
+ * been scheduled by the Tx routine then schedule it here to guarantee
+ * completion processing of these packets */
+ if (inflight && priv->port_up)
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+ spin_unlock_irq(&ring->comp_lock);
+ netif_tx_unlock(priv->dev);
+}
+
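+/* Copy a descriptor that was built in the bounce buffer back into the
+ * ring, highest TXBB first, issuing a barrier at every TXBB boundary so
+ * the HW never sees a partially written block. */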
+static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ u32 index,
+ unsigned int desc_size)
+{
+ u32 copy = (ring->size - index) * TXBB_SIZE;
+ int i;
+
+ for (i = desc_size - copy - 4; i >= 0; i -= 4) {
+ if ((i & (TXBB_SIZE - 1)) == 0)
+ wmb();
+
+ *((u32 *) (ring->buf + i)) =
+ *((u32 *) (ring->bounce_buf + copy + i));
+ }
+
+ for (i = copy - 4; i >= 4 ; i -= 4) {
+ if ((i & (TXBB_SIZE - 1)) == 0)
+ wmb();
+
+ *((u32 *) (ring->buf + index * TXBB_SIZE + i)) =
+ *((u32 *) (ring->bounce_buf + i));
+ }
+
+ /* Return real descriptor location */
+ return ring->buf + index * TXBB_SIZE;
+}
+
+static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
+{
+ struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
+ struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+
+ /* If we don't have a pending timer, set one up to catch our recent
+ post in case the interface becomes idle */
+ if (!timer_pending(&cq->timer))
+ mod_timer(&cq->timer, jiffies + MLX4_EN_TX_POLL_TIMEOUT);
+
+ /* Poll the CQ every MLX4_EN_TX_POLL_MODER packets */
+ if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
+ mlx4_en_process_tx_cq(priv->dev, cq);
+}
+
+static void *get_frag_ptr(struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ struct page *page = frag->page;
+ void *ptr;
+
+ ptr = page_address(page);
+ if (unlikely(!ptr))
+ return NULL;
+
+ return ptr + frag->page_offset;
+}
+
+static int is_inline(struct sk_buff *skb, void **pfrag)
+{
+ void *ptr;
+
+ if (inline_thold && !skb_is_gso(skb) && skb->len <= inline_thold) {
+ if (skb_shinfo(skb)->nr_frags == 1) {
+ ptr = get_frag_ptr(skb);
+ if (unlikely(!ptr))
+ return 0;
+
+ if (pfrag)
+ *pfrag = ptr;
+
+ return 1;
+ } else if (unlikely(skb_shinfo(skb)->nr_frags))
+ return 0;
+ else
+ return 1;
+ }
+
+ return 0;
+}
+
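+/* Descriptor size for an inline send; a second inline segment header is
+ * needed when the data crosses the inline alignment boundary. */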
+static int inline_size(struct sk_buff *skb)
+{
+ if (skb->len + CTRL_SIZE + sizeof(struct mlx4_wqe_inline_seg)
+ <= MLX4_INLINE_ALIGN)
+ return ALIGN(skb->len + CTRL_SIZE +
+ sizeof(struct mlx4_wqe_inline_seg), 16);
+ else
+ return ALIGN(skb->len + CTRL_SIZE + 2 *
+ sizeof(struct mlx4_wqe_inline_seg), 16);
+}
+
+static int get_real_size(struct sk_buff *skb, struct net_device *dev,
+ int *lso_header_size)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int real_size;
+
+ if (skb_is_gso(skb)) {
+ *lso_header_size = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ real_size = CTRL_SIZE + skb_shinfo(skb)->nr_frags * DS_SIZE +
+ ALIGN(*lso_header_size + 4, DS_SIZE);
+ if (unlikely(*lso_header_size != skb_headlen(skb))) {
+ /* We add a segment for the skb linear buffer only if
+ * it contains data */
+ if (*lso_header_size < skb_headlen(skb))
+ real_size += DS_SIZE;
+ else {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "Non-linear headers\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ }
+ if (unlikely(*lso_header_size > MAX_LSO_HDR_SIZE)) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "LSO header size too big\n");
+ dev_kfree_skb_any(skb);
+ return 0;
+ }
+ } else {
+ *lso_header_size = 0;
+ if (!is_inline(skb, NULL))
+ real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
+ else
+ real_size = inline_size(skb);
+ }
+
+ return real_size;
+}
+
+static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *skb,
+ int real_size, u16 *vlan_tag, int tx_ind, void *fragptr)
+{
+ struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
+ int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
+
+ if (skb->len <= spc) {
+ inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+ if (skb_shinfo(skb)->nr_frags)
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
+ skb_shinfo(skb)->frags[0].size);
+
+ } else {
+ inl->byte_count = cpu_to_be32(1 << 31 | spc);
+ if (skb_headlen(skb) <= spc) {
+ skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
+ if (skb_headlen(skb) < spc) {
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb),
+ fragptr, spc - skb_headlen(skb));
+ fragptr += spc - skb_headlen(skb);
+ }
+ inl = (void *) (inl + 1) + spc;
+ memcpy(((void *)(inl + 1)), fragptr, skb->len - spc);
+ } else {
+ skb_copy_from_linear_data(skb, inl + 1, spc);
+ inl = (void *) (inl + 1) + spc;
+ skb_copy_from_linear_data_offset(skb, spc, inl + 1,
+ skb_headlen(skb) - spc);
+ if (skb_shinfo(skb)->nr_frags)
+ memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc,
+ fragptr, skb_shinfo(skb)->frags[0].size);
+ }
+
+ wmb();
+ inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
+ }
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+}
+
+static int get_vlan_info(struct mlx4_en_priv *priv, struct sk_buff *skb,
+ u16 *vlan_tag)
+{
+ int tx_ind;
+
+ /* Obtain VLAN information if present */
+ if (priv->vlgrp && vlan_tx_tag_present(skb)) {
+ *vlan_tag = vlan_tx_tag_get(skb);
+ /* Set the Tx ring to use according to vlan priority */
+ tx_ind = priv->tx_prio_map[*vlan_tag >> 13];
+ } else {
+ *vlan_tag = 0;
+ tx_ind = 0;
+ }
+ return tx_ind;
+}
+
+int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_tx_ring *ring;
+ struct mlx4_en_cq *cq;
+ struct mlx4_en_tx_desc *tx_desc;
+ struct mlx4_wqe_data_seg *data;
+ struct skb_frag_struct *frag;
+ struct mlx4_en_tx_info *tx_info;
+ int tx_ind = 0;
+ int nr_txbb;
+ int desc_size;
+ int real_size;
+ dma_addr_t dma;
+ u32 index;
+ __be32 op_own;
+ u16 vlan_tag;
+ int i;
+ int lso_header_size;
+ void *fragptr;
+
+ if (unlikely(!skb->len)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ real_size = get_real_size(skb, dev, &lso_header_size);
+ if (unlikely(!real_size))
+ return NETDEV_TX_OK;
+
+ /* Align descriptor to TXBB size */
+ desc_size = ALIGN(real_size, TXBB_SIZE);
+ nr_txbb = desc_size / TXBB_SIZE;
+ if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "Oversized header or SG list\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ tx_ind = get_vlan_info(priv, skb, &vlan_tag);
+ ring = &priv->tx_ring[tx_ind];
+
+ /* Check available TXBBs And 2K spare for prefetch */
+ if (unlikely(((int)(ring->prod - ring->cons)) >
+ ring->size - HEADROOM - MAX_DESC_TXBBS)) {
+ /* every full Tx ring stops queue.
+ * TODO: implement multi-queue support (per-queue stop) */
+ netif_stop_queue(dev);
+ ring->blocked = 1;
+ priv->port_stats.queue_stopped++;
+
+ /* Use interrupts to find out when queue opened */
+ cq = &priv->tx_cq[tx_ind];
+ mlx4_en_arm_cq(priv, cq);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Now that we know which Tx ring to use */
+ if (unlikely(!priv->port_up)) {
+ if (netif_msg_tx_err(priv))
+ mlx4_warn(mdev, "xmit: port down!\n");
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Track current inflight packets for performance analysis */
+ AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+ (u32) (ring->prod - ring->cons - 1));
+
+ /* Packet is good - grab an index and transmit it */
+ index = ring->prod & ring->size_mask;
+
+ /* See if we have enough space for whole descriptor TXBB for setting
+ * SW ownership on next descriptor; if not, use a bounce buffer. */
+ if (likely(index + nr_txbb <= ring->size))
+ tx_desc = ring->buf + index * TXBB_SIZE;
+ else
+ tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
+
+ /* Save skb in tx_info ring */
+ tx_info = &ring->tx_info[index];
+ tx_info->skb = skb;
+ tx_info->nr_txbb = nr_txbb;
+
+ /* Prepare the ctrl segment, except for the opcode/ownership word,
+ * which depends on whether LSO is used */
+ tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
+ tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
+ MLX4_WQE_CTRL_TCP_UDP_CSUM);
+ priv->port_stats.tx_chksum_offload++;
+ }
+
+ /* Handle LSO (TSO) packets */
+ if (lso_header_size) {
+ /* Mark opcode as LSO */
+ op_own = cpu_to_be32(MLX4_OPCODE_LSO | (1 << 6)) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+ /* Fill in the LSO prefix */
+ tx_desc->lso.mss_hdr_size = cpu_to_be32(
+ skb_shinfo(skb)->gso_size << 16 | lso_header_size);
+
+ /* Copy headers;
+ * note that we already verified they fit in the linear part */
+ memcpy(tx_desc->lso.header, skb->data, lso_header_size);
+ data = ((void *) &tx_desc->lso +
+ ALIGN(lso_header_size + 4, DS_SIZE));
+
+ priv->port_stats.tso_packets++;
+ i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
+ !!((skb->len - lso_header_size) % skb_shinfo(skb)->gso_size);
+ ring->bytes += skb->len + (i - 1) * lso_header_size;
+ ring->packets += i;
+ } else {
+ /* Normal (non-LSO) packet */
+ op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+ data = &tx_desc->data;
+ ring->bytes += max(skb->len, (unsigned int) ETH_ZLEN);
+ ring->packets++;
+ }
+ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
+
+ /* valid only for non-inline segments */
+ tx_info->data_offset = (void *) data - (void *) tx_desc;
+
+ tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
+ data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+ if (!is_inline(skb, &fragptr)) {
+ /* Map fragments */
+ for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+ frag = &skb_shinfo(skb)->frags[i];
+ dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(frag->size);
+ --data;
+ }
+
+ /* Map linear part */
+ if (tx_info->linear) {
+ dma = pci_map_single(mdev->dev->pdev, skb->data + lso_header_size,
+ skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
+ data->addr = cpu_to_be64(dma);
+ data->lkey = cpu_to_be32(mdev->mr.key);
+ wmb();
+ data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
+ }
+ } else
+ build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
+
+ ring->prod += nr_txbb;
+
+ /* If we used a bounce buffer then copy descriptor back into place */
+ if (tx_desc == (struct mlx4_en_tx_desc *) ring->bounce_buf)
+ tx_desc = mlx4_en_bounce_to_desc(priv, ring, index, desc_size);
+
+ /* Run destructor before passing skb to HW */
+ if (likely(!skb_shared(skb)))
+ skb_orphan(skb);
+
+ /* Ensure the new descriptor hits memory
+ * before setting ownership of this descriptor to HW */
+ wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+
+ /* Ring doorbell! */
+ wmb();
+ writel(ring->doorbell_qpn, mdev->uar_map + MLX4_SEND_DOORBELL);
+ dev->trans_start = jiffies;
+
+ /* Poll CQ here */
+ mlx4_en_xmit_poll(priv, tx_ind);
+
+ return 0;
+}
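
Worth noting: the (ring->prod & ring->size) test above works because size is a power of two and prod counts TXBBs without masking, so the ownership bit that hardware expects in owner_opcode flips on every ring wrap. A hedged sketch of the same rule:

/* Sketch, assuming size is a power of two: the ownership bit that
 * HW expects alternates each time the producer index wraps the ring. */
static inline __be32 mlx4_en_owner_bit(u32 prod, u32 size)
{
	return (prod & size) ? cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0;
}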
+
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8a8b56135a58..de169338cd90 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -558,7 +558,7 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
int i;
err = mlx4_bitmap_init(&priv->eq_table.bitmap, dev->caps.num_eqs,
- dev->caps.num_eqs - 1, dev->caps.reserved_eqs);
+ dev->caps.num_eqs - 1, dev->caps.reserved_eqs, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7e32955da982..cee199ceba2f 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -88,6 +88,7 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
[ 8] = "P_Key violation counter",
[ 9] = "Q_Key violation counter",
[10] = "VMM",
+ [12] = "DPDP",
[16] = "MW support",
[17] = "APM support",
[18] = "Atomic ops support",
@@ -346,7 +347,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
MLX4_GET(field, outbox, QUERY_DEV_CAP_VL_PORT_OFFSET);
dev_cap->max_vl[i] = field >> 4;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MTU_WIDTH_OFFSET);
- dev_cap->max_mtu[i] = field >> 4;
+ dev_cap->ib_mtu[i] = field >> 4;
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GID_OFFSET);
dev_cap->max_gids[i] = 1 << (field & 0xf);
@@ -354,10 +355,14 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
}
} else {
+#define QUERY_PORT_SUPPORTED_TYPE_OFFSET 0x00
#define QUERY_PORT_MTU_OFFSET 0x01
+#define QUERY_PORT_ETH_MTU_OFFSET 0x02
#define QUERY_PORT_WIDTH_OFFSET 0x06
#define QUERY_PORT_MAX_GID_PKEY_OFFSET 0x07
+#define QUERY_PORT_MAX_MACVLAN_OFFSET 0x0a
#define QUERY_PORT_MAX_VL_OFFSET 0x0b
+#define QUERY_PORT_MAC_OFFSET 0x10
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
@@ -365,8 +370,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (err)
goto out;
+ MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ dev_cap->supported_port_types[i] = field & 3;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
- dev_cap->max_mtu[i] = field & 0xf;
+ dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
dev_cap->max_port_width[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_MAX_GID_PKEY_OFFSET);
@@ -374,6 +381,11 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->max_pkeys[i] = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
dev_cap->max_vl[i] = field & 0xf;
+ MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
+ dev_cap->log_max_macs[i] = field & 0xf;
+ dev_cap->log_max_vlans[i] = field >> 4;
+ MLX4_GET(dev_cap->eth_mtu[i], outbox, QUERY_PORT_ETH_MTU_OFFSET);
+ MLX4_GET(dev_cap->def_mac[i], outbox, QUERY_PORT_MAC_OFFSET);
}
}
@@ -407,7 +419,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
mlx4_dbg(dev, "Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n",
dev_cap->max_cq_sz, dev_cap->max_qp_sz, dev_cap->max_srq_sz);
mlx4_dbg(dev, "Local CA ACK delay: %d, max MTU: %d, port width cap: %d\n",
- dev_cap->local_ca_ack_delay, 128 << dev_cap->max_mtu[1],
+ dev_cap->local_ca_ack_delay, 128 << dev_cap->ib_mtu[1],
dev_cap->max_port_width[1]);
mlx4_dbg(dev, "Max SQ desc size: %d, max SQ S/G: %d\n",
dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
@@ -819,7 +831,7 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
flags |= (dev->caps.port_width_cap[port] & 0xf) << INIT_PORT_PORT_WIDTH_SHIFT;
MLX4_PUT(inbox, flags, INIT_PORT_FLAGS_OFFSET);
- field = 128 << dev->caps.mtu_cap[port];
+ field = 128 << dev->caps.ib_mtu_cap[port];
MLX4_PUT(inbox, field, INIT_PORT_MTU_OFFSET);
field = dev->caps.gid_table_len[port];
MLX4_PUT(inbox, field, INIT_PORT_MAX_GID_OFFSET);
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index decbb5c2ad41..526d7f30c041 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -66,11 +66,13 @@ struct mlx4_dev_cap {
int local_ca_ack_delay;
int num_ports;
u32 max_msg_sz;
- int max_mtu[MLX4_MAX_PORTS + 1];
+ int ib_mtu[MLX4_MAX_PORTS + 1];
int max_port_width[MLX4_MAX_PORTS + 1];
int max_vl[MLX4_MAX_PORTS + 1];
int max_gids[MLX4_MAX_PORTS + 1];
int max_pkeys[MLX4_MAX_PORTS + 1];
+ u64 def_mac[MLX4_MAX_PORTS + 1];
+ u16 eth_mtu[MLX4_MAX_PORTS + 1];
u16 stat_rate_support;
u32 flags;
int reserved_uars;
@@ -102,6 +104,9 @@ struct mlx4_dev_cap {
u32 reserved_lkey;
u64 max_icm_sz;
int max_gso_sz;
+ u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 log_max_macs[MLX4_MAX_PORTS + 1];
+ u8 log_max_vlans[MLX4_MAX_PORTS + 1];
};
struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 1252a919de2e..90a0281d15ea 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -85,6 +85,57 @@ static struct mlx4_profile default_profile = {
.num_mtt = 1 << 20,
};
+static int log_num_mac = 2;
+module_param_named(log_num_mac, log_num_mac, int, 0444);
+MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
+
+static int log_num_vlan;
+module_param_named(log_num_vlan, log_num_vlan, int, 0444);
+MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
+
+static int use_prio;
+module_param_named(use_prio, use_prio, bool, 0444);
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
+ "(0/1, default 0)");
+
+static int mlx4_check_port_params(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_type)
+{
+ int i;
+
+ for (i = 0; i < dev->caps.num_ports - 1; i++) {
+ if (port_type[i] != port_type[i+1] &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ mlx4_err(dev, "Only same port types supported "
+ "on this HCA, aborting.\n");
+ return -EINVAL;
+ }
+ }
+ if ((port_type[0] == MLX4_PORT_TYPE_ETH) &&
+ (port_type[1] == MLX4_PORT_TYPE_IB)) {
+ mlx4_err(dev, "eth-ib configuration is not supported.\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dev->caps.num_ports; i++) {
+ if (!(port_type[i] & dev->caps.supported_type[i+1])) {
+ mlx4_err(dev, "Requested port type for port %d is not "
+ "supported on this HCA\n", i + 1);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static void mlx4_set_port_mask(struct mlx4_dev *dev)
+{
+ int i;
+
+ dev->caps.port_mask = 0;
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
+ dev->caps.port_mask |= 1 << (i - 1);
+}
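
With the mask built this way, callers can test a port's type with a single bit operation instead of reading the caps array; a minimal sketch (helper name is illustrative):

/* Sketch: bit (port - 1) of caps.port_mask is set for IB ports. */
static inline int mlx4_port_is_ib(struct mlx4_dev *dev, int port)
{
	return !!(dev->caps.port_mask & (1 << (port - 1)));
}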
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
int err;
@@ -120,10 +171,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.num_ports = dev_cap->num_ports;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.vl_cap[i] = dev_cap->max_vl[i];
- dev->caps.mtu_cap[i] = dev_cap->max_mtu[i];
+ dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
dev->caps.gid_table_len[i] = dev_cap->max_gids[i];
dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
+ dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
+ dev->caps.def_mac[i] = dev_cap->def_mac[i];
+ dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
}
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
@@ -134,7 +188,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_rq_sg = dev_cap->max_rq_sg;
dev->caps.max_wqes = dev_cap->max_qp_sz;
dev->caps.max_qp_init_rdma = dev_cap->max_requester_per_qp;
- dev->caps.reserved_qps = dev_cap->reserved_qps;
dev->caps.max_srq_wqes = dev_cap->max_srq_sz;
dev->caps.max_srq_sge = dev_cap->max_rq_sg - 1;
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
@@ -163,9 +216,138 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ dev->caps.log_num_macs = log_num_mac;
+ dev->caps.log_num_vlans = log_num_vlan;
+ dev->caps.log_num_prios = use_prio ? 3 : 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i) {
+ if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+
+ if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
+ dev->caps.log_num_macs = dev_cap->log_max_macs[i];
+ mlx4_warn(dev, "Requested number of MACs is too much "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_macs);
+ }
+ if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
+ dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
+ mlx4_warn(dev, "Requested number of VLANs is too much "
+ "for port %d, reducing to %d.\n",
+ i, 1 << dev->caps.log_num_vlans);
+ }
+ }
+
+ mlx4_set_port_mask(dev);
+
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
+ (1 << dev->caps.log_num_macs) *
+ (1 << dev->caps.log_num_vlans) *
+ (1 << dev->caps.log_num_prios) *
+ dev->caps.num_ports;
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
+
+ dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
+
return 0;
}
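
As a worked example of the reservation math above, the module defaults (log_num_mac = 2, log_num_vlan = 0, use_prio off) on a two-port device give:

/* Worked example with default parameters and num_ports = 2:
 *   MACs per port   = 1 << 2 = 4
 *   VLANs per port  = 1 << 0 = 1
 *   priorities      = 1 << 0 = 1
 *   ETH_ADDR region = 4 * 1 * 1 * 2 = 8 QPs (FC_ADDR likewise)
 */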
+/*
+ * Change the port configuration of the device.
+ * Every user of this function must hold the port mutex.
+ */
+static int mlx4_change_port_types(struct mlx4_dev *dev,
+ enum mlx4_port_type *port_types)
+{
+ int err = 0;
+ int change = 0;
+ int port;
+
+ for (port = 0; port < dev->caps.num_ports; port++) {
+ if (port_types[port] != dev->caps.port_type[port + 1]) {
+ change = 1;
+ dev->caps.port_type[port + 1] = port_types[port];
+ }
+ }
+ if (change) {
+ mlx4_unregister_device(dev);
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ mlx4_CLOSE_PORT(dev, port);
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, "
+ "aborting\n", port);
+ goto out;
+ }
+ }
+ mlx4_set_port_mask(dev);
+ err = mlx4_register_device(dev);
+ }
+
+out:
+ return err;
+}
+
+static ssize_t show_port_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+
+ return sprintf(buf, "%s\n",
+ mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB ?
+ "ib" : "eth");
+}
+
+static ssize_t set_port_type(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+ port_attr);
+ struct mlx4_dev *mdev = info->dev;
+ struct mlx4_priv *priv = mlx4_priv(mdev);
+ enum mlx4_port_type types[MLX4_MAX_PORTS];
+ int i;
+ int err = 0;
+
+ if (!strcmp(buf, "ib\n"))
+ info->tmp_type = MLX4_PORT_TYPE_IB;
+ else if (!strcmp(buf, "eth\n"))
+ info->tmp_type = MLX4_PORT_TYPE_ETH;
+ else {
+ mlx4_err(mdev, "%s is not supported port type\n", buf);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->port_mutex);
+ for (i = 0; i < mdev->caps.num_ports; i++)
+ types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
+ mdev->caps.port_type[i+1];
+
+ err = mlx4_check_port_params(mdev, types);
+ if (err)
+ goto out;
+
+ for (i = 1; i <= mdev->caps.num_ports; i++)
+ priv->port[i].tmp_type = 0;
+
+ err = mlx4_change_port_types(mdev, types);
+
+out:
+ mutex_unlock(&priv->port_mutex);
+ return err ? err : count;
+}
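
Once the sysfs attribute is registered (see mlx4_init_port_info below), the port type can be switched at runtime with a plain write, e.g. "echo eth > /sys/bus/pci/devices/<pci-id>/mlx4_port1" (the PCI address varies per system); any other string is rejected with -EINVAL as parsed above.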
+
static int mlx4_load_fw(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -211,7 +393,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
((u64) (MLX4_CMPT_TYPE_QP *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err)
goto err;
@@ -336,7 +519,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->qpc_base,
dev_cap->qpc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
goto err_unmap_dmpt;
@@ -346,7 +530,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->auxc_base,
dev_cap->aux_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
goto err_unmap_qp;
@@ -356,7 +541,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->altc_base,
dev_cap->altc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
goto err_unmap_auxc;
@@ -366,7 +552,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->rdmarc_base,
dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0, 0);
+ dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
goto err_unmap_altc;
@@ -565,6 +752,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int port;
+ __be32 ib_port_default_caps;
err = mlx4_init_uar_table(dev);
if (err) {
@@ -663,8 +852,27 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_qp_table_free;
}
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing with "
+ "caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_mcg_table_free;
+ }
+ }
+
return 0;
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -728,11 +936,45 @@ no_msi:
priv->eq_table.eq[i].irq = dev->pdev->irq;
}
+static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ int err = 0;
+
+ info->dev = dev;
+ info->port = port;
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+
+ sprintf(info->dev_name, "mlx4_port%d", port);
+ info->port_attr.attr.name = info->dev_name;
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.show = show_port_type;
+ info->port_attr.store = set_port_type;
+
+ err = device_create_file(&dev->pdev->dev, &info->port_attr);
+ if (err) {
+ mlx4_err(dev, "Failed to create file for port %d\n", port);
+ info->port = -1;
+ }
+
+ return err;
+}
+
+static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
+{
+ if (info->port < 0)
+ return;
+
+ device_remove_file(&info->dev->pdev->dev, &info->port_attr);
+}
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
struct mlx4_dev *dev;
int err;
+ int port;
printk(KERN_INFO PFX "Initializing %s\n",
pci_name(pdev));
@@ -807,6 +1049,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_LIST_HEAD(&priv->ctx_list);
spin_lock_init(&priv->ctx_lock);
+ mutex_init(&priv->port_mutex);
+
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
@@ -842,15 +1086,24 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_close;
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ err = mlx4_init_port_info(dev, port);
+ if (err)
+ goto err_port;
+ }
+
err = mlx4_register_device(dev);
if (err)
- goto err_cleanup;
+ goto err_port;
pci_set_drvdata(pdev, dev);
return 0;
-err_cleanup:
+err_port:
+ for (port = 1; port <= dev->caps.num_ports; port++)
+ mlx4_cleanup_port_info(&priv->port[port]);
+
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
mlx4_cleanup_srq_table(dev);
@@ -907,8 +1160,10 @@ static void mlx4_remove_one(struct pci_dev *pdev)
if (dev) {
mlx4_unregister_device(dev);
- for (p = 1; p <= dev->caps.num_ports; ++p)
+ for (p = 1; p <= dev->caps.num_ports; p++) {
+ mlx4_cleanup_port_info(&priv->port[p]);
mlx4_CLOSE_PORT(dev, p);
+ }
mlx4_cleanup_mcg_table(dev);
mlx4_cleanup_qp_table(dev);
@@ -948,6 +1203,8 @@ static struct pci_device_id mlx4_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
{ 0, }
};
@@ -960,10 +1217,28 @@ static struct pci_driver mlx4_driver = {
.remove = __devexit_p(mlx4_remove_one)
};
+static int __init mlx4_verify_params(void)
+{
+ if ((log_num_mac < 0) || (log_num_mac > 7)) {
+ printk(KERN_WARNING "mlx4_core: bad num_mac: %d\n", log_num_mac);
+ return -1;
+ }
+
+ if ((log_num_vlan < 0) || (log_num_vlan > 7)) {
+ printk(KERN_WARNING "mlx4_core: bad num_vlan: %d\n", log_num_vlan);
+ return -1;
+ }
+
+ return 0;
+}
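
These checks mirror the ranges documented in the MODULE_PARM_DESC strings above, so, for example, loading with "modprobe mlx4_core log_num_mac=3" allows eight MACs per port, while an out-of-range value makes mlx4_init() fail with -EINVAL.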
+
static int __init mlx4_init(void)
{
int ret;
+ if (mlx4_verify_params())
+ return -EINVAL;
+
ret = mlx4_catas_init();
if (ret)
return ret;
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index c83f88ce0736..592c01ae2c5d 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -368,8 +368,8 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- err = mlx4_bitmap_init(&priv->mcg_table.bitmap,
- dev->caps.num_amgms, dev->caps.num_amgms - 1, 0);
+ err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
+ dev->caps.num_amgms - 1, 0, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 5337e3ac3e78..34c909deaff3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -87,6 +87,9 @@ enum {
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
+#else /* CONFIG_MLX4_DEBUG */
+#define mlx4_debug_level (0)
+#endif /* CONFIG_MLX4_DEBUG */
#define mlx4_dbg(mdev, format, arg...) \
do { \
@@ -94,12 +97,6 @@ extern int mlx4_debug_level;
dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ## arg); \
} while (0)
-#else /* CONFIG_MLX4_DEBUG */
-
-#define mlx4_dbg(mdev, format, arg...) do { (void) mdev; } while (0)
-
-#endif /* CONFIG_MLX4_DEBUG */
-
#define mlx4_err(mdev, format, arg...) \
dev_err(&mdev->pdev->dev, format, ## arg)
#define mlx4_info(mdev, format, arg...) \
@@ -111,6 +108,7 @@ struct mlx4_bitmap {
u32 last;
u32 top;
u32 max;
+ u32 reserved_top;
u32 mask;
spinlock_t lock;
unsigned long *table;
@@ -251,6 +249,38 @@ struct mlx4_catas_err {
struct list_head list;
};
+#define MLX4_MAX_MAC_NUM 128
+#define MLX4_MAC_TABLE_SIZE (MLX4_MAX_MAC_NUM << 3)
+
+struct mlx4_mac_table {
+ __be64 entries[MLX4_MAX_MAC_NUM];
+ int refs[MLX4_MAX_MAC_NUM];
+ struct mutex mutex;
+ int total;
+ int max;
+};
+
+#define MLX4_MAX_VLAN_NUM 128
+#define MLX4_VLAN_TABLE_SIZE (MLX4_MAX_VLAN_NUM << 2)
+
+struct mlx4_vlan_table {
+ __be32 entries[MLX4_MAX_VLAN_NUM];
+ int refs[MLX4_MAX_VLAN_NUM];
+ struct mutex mutex;
+ int total;
+ int max;
+};
+
+struct mlx4_port_info {
+ struct mlx4_dev *dev;
+ int port;
+ char dev_name[16];
+ struct device_attribute port_attr;
+ enum mlx4_port_type tmp_type;
+ struct mlx4_mac_table mac_table;
+ struct mlx4_vlan_table vlan_table;
+};
+
struct mlx4_priv {
struct mlx4_dev dev;
@@ -279,6 +309,8 @@ struct mlx4_priv {
struct mlx4_uar driver_uar;
void __iomem *kar;
+ struct mlx4_port_info port[MLX4_MAX_PORTS + 1];
+ struct mutex port_mutex;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -288,7 +320,10 @@ static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
u32 mlx4_bitmap_alloc(struct mlx4_bitmap *bitmap);
void mlx4_bitmap_free(struct mlx4_bitmap *bitmap, u32 obj);
-int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask, u32 reserved);
+u32 mlx4_bitmap_alloc_range(struct mlx4_bitmap *bitmap, int cnt, int align);
+void mlx4_bitmap_free_range(struct mlx4_bitmap *bitmap, u32 obj, int cnt);
+int mlx4_bitmap_init(struct mlx4_bitmap *bitmap, u32 num, u32 mask,
+ u32 reserved_bot, u32 reserved_top);
void mlx4_bitmap_cleanup(struct mlx4_bitmap *bitmap);
int mlx4_reset(struct mlx4_dev *dev);
@@ -346,4 +381,10 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_handle_catas_err(struct mlx4_dev *dev);
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
+
#endif /* MLX4_H */
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
new file mode 100644
index 000000000000..98ddc0811f93
--- /dev/null
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -0,0 +1,561 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _MLX4_EN_H_
+#define _MLX4_EN_H_
+
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/inet_lro.h>
+
+#include <linux/mlx4/device.h>
+#include <linux/mlx4/qp.h>
+#include <linux/mlx4/cq.h>
+#include <linux/mlx4/srq.h>
+#include <linux/mlx4/doorbell.h>
+
+#include "en_port.h"
+
+#define DRV_NAME "mlx4_en"
+#define DRV_VERSION "1.4.0"
+#define DRV_RELDATE "Sep 2008"
+
+
+#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
+
+#define mlx4_dbg(mlevel, priv, format, arg...) \
+ if (NETIF_MSG_##mlevel & priv->msg_enable) \
+ printk(KERN_DEBUG "%s %s: " format , DRV_NAME ,\
+ (&priv->mdev->pdev->dev)->bus_id , ## arg)
+
+#define mlx4_err(mdev, format, arg...) \
+ printk(KERN_ERR "%s %s: " format , DRV_NAME ,\
+ (&mdev->pdev->dev)->bus_id , ## arg)
+#define mlx4_info(mdev, format, arg...) \
+ printk(KERN_INFO "%s %s: " format , DRV_NAME ,\
+ (&mdev->pdev->dev)->bus_id , ## arg)
+#define mlx4_warn(mdev, format, arg...) \
+ printk(KERN_WARNING "%s %s: " format , DRV_NAME ,\
+ (&mdev->pdev->dev)->bus_id , ## arg)
+
+/*
+ * Device constants
+ */
+
+
+#define MLX4_EN_PAGE_SHIFT 12
+#define MLX4_EN_PAGE_SIZE (1 << MLX4_EN_PAGE_SHIFT)
+#define MAX_TX_RINGS 16
+#define MAX_RX_RINGS 16
+#define MAX_RSS_MAP_SIZE 64
+#define RSS_FACTOR 2
+#define TXBB_SIZE 64
+#define HEADROOM (2048 / TXBB_SIZE + 1)
+#define MAX_LSO_HDR_SIZE 92
+#define STAMP_STRIDE 64
+#define STAMP_DWORDS (STAMP_STRIDE / 4)
+#define STAMP_SHIFT 31
+#define STAMP_VAL 0x7fffffff
+#define STATS_DELAY (HZ / 4)
+
+/* Typical TSO descriptor with 16 gather entries is 352 bytes... */
+#define MAX_DESC_SIZE 512
+#define MAX_DESC_TXBBS (MAX_DESC_SIZE / TXBB_SIZE)
+
+/*
+ * OS related constants and tunables
+ */
+
+#define MLX4_EN_WATCHDOG_TIMEOUT (15 * HZ)
+
+#define MLX4_EN_ALLOC_ORDER 2
+#define MLX4_EN_ALLOC_SIZE (PAGE_SIZE << MLX4_EN_ALLOC_ORDER)
+
+#define MLX4_EN_MAX_LRO_DESCRIPTORS 32
+
+/* Receive fragment sizes; we use at most 4 fragments (for 9600 byte MTU
+ * and 4K allocations) */
+enum {
+ FRAG_SZ0 = 512 - NET_IP_ALIGN,
+ FRAG_SZ1 = 1024,
+ FRAG_SZ2 = 4096,
+ FRAG_SZ3 = MLX4_EN_ALLOC_SIZE
+};
+#define MLX4_EN_MAX_RX_FRAGS 4
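
With the typical NET_IP_ALIGN of 2, these sizes sum to roughly 510 + 1024 + 4096 = 5630 bytes across the first three fragments, so a 9600-byte frame spills its remaining ~4 KB into the final MLX4_EN_ALLOC_SIZE fragment.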
+
+/* Minimum ring size for our page-allocation scheme to work */
+#define MLX4_EN_MIN_RX_SIZE (MLX4_EN_ALLOC_SIZE / SMP_CACHE_BYTES)
+#define MLX4_EN_MIN_TX_SIZE (4096 / TXBB_SIZE)
+
+#define MLX4_EN_TX_RING_NUM 9
+#define MLX4_EN_DEF_TX_RING_SIZE 1024
+#define MLX4_EN_DEF_RX_RING_SIZE 1024
+
+/* Target number of bytes to coalesce with interrupt moderation */
+#define MLX4_EN_RX_COAL_TARGET 0x20000
+#define MLX4_EN_RX_COAL_TIME 0x10
+
+#define MLX4_EN_TX_COAL_PKTS 5
+#define MLX4_EN_TX_COAL_TIME 0x80
+
+#define MLX4_EN_RX_RATE_LOW 400000
+#define MLX4_EN_RX_COAL_TIME_LOW 0
+#define MLX4_EN_RX_RATE_HIGH 450000
+#define MLX4_EN_RX_COAL_TIME_HIGH 128
+#define MLX4_EN_RX_SIZE_THRESH 1024
+#define MLX4_EN_RX_RATE_THRESH (1000000 / MLX4_EN_RX_COAL_TIME_HIGH)
+#define MLX4_EN_SAMPLE_INTERVAL 0
+
+#define MLX4_EN_AUTO_CONF 0xffff
+
+#define MLX4_EN_DEF_RX_PAUSE 1
+#define MLX4_EN_DEF_TX_PAUSE 1
+
+/* Interval between successive polls in the Tx routine when polling is used
+ instead of interrupts (in per-core Tx rings) - should be a power of 2 */
+#define MLX4_EN_TX_POLL_MODER 16
+#define MLX4_EN_TX_POLL_TIMEOUT (HZ / 4)
+
+#define ETH_LLC_SNAP_SIZE 8
+
+#define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN)
+#define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
+
+#define MLX4_EN_MIN_MTU 46
+#define ETH_BCAST 0xffffffffffffULL
+
+#ifdef MLX4_EN_PERF_STAT
+/* Number of samples to 'average' */
+#define AVG_SIZE 128
+#define AVG_FACTOR 1024
+#define NUM_PERF_STATS NUM_PERF_COUNTERS
+
+#define INC_PERF_COUNTER(cnt) (++(cnt))
+#define ADD_PERF_COUNTER(cnt, add) ((cnt) += (add))
+#define AVG_PERF_COUNTER(cnt, sample) \
+ ((cnt) = ((cnt) * (AVG_SIZE - 1) + (sample) * AVG_FACTOR) / AVG_SIZE)
+#define GET_PERF_COUNTER(cnt) (cnt)
+#define GET_AVG_PERF_COUNTER(cnt) ((cnt) / AVG_FACTOR)
+
+#else
+
+#define NUM_PERF_STATS 0
+#define INC_PERF_COUNTER(cnt) do {} while (0)
+#define ADD_PERF_COUNTER(cnt, add) do {} while (0)
+#define AVG_PERF_COUNTER(cnt, sample) do {} while (0)
+#define GET_PERF_COUNTER(cnt) (0)
+#define GET_AVG_PERF_COUNTER(cnt) (0)
+#endif /* MLX4_EN_PERF_STAT */
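
AVG_PERF_COUNTER above implements an exponentially weighted moving average with weight 1/AVG_SIZE, storing the value pre-multiplied by AVG_FACTOR so integer arithmetic keeps fractional precision; a short usage sketch (sample values are illustrative):

/* Sketch: feed samples in, read the unscaled average back out. */
u64 avg = 0;
AVG_PERF_COUNTER(avg, 1500);	/* one 1500-byte packet */
AVG_PERF_COUNTER(avg, 9000);	/* one jumbo packet */
/* current estimate: GET_AVG_PERF_COUNTER(avg) bytes per packet */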
+
+/*
+ * Configurables
+ */
+
+enum cq_type {
+ RX = 0,
+ TX = 1,
+};
+
+
+/*
+ * Useful macros
+ */
+#define ROUNDUP_LOG2(x) ilog2(roundup_pow_of_two(x))
+#define XNOR(x, y) (!(x) == !(y))
+#define ILLEGAL_MAC(addr) (addr == 0xffffffffffffULL || addr == 0x0)
+
+
+struct mlx4_en_tx_info {
+ struct sk_buff *skb;
+ u32 nr_txbb;
+ u8 linear;
+ u8 data_offset;
+};
+
+
+#define MLX4_EN_BIT_DESC_OWN 0x80000000
+#define CTRL_SIZE sizeof(struct mlx4_wqe_ctrl_seg)
+#define MLX4_EN_MEMTYPE_PAD 0x100
+#define DS_SIZE sizeof(struct mlx4_wqe_data_seg)
+
+
+struct mlx4_en_tx_desc {
+ struct mlx4_wqe_ctrl_seg ctrl;
+ union {
+ struct mlx4_wqe_data_seg data; /* at least one data segment */
+ struct mlx4_wqe_lso_seg lso;
+ struct mlx4_wqe_inline_seg inl;
+ };
+};
+
+#define MLX4_EN_USE_SRQ 0x01000000
+
+struct mlx4_en_rx_alloc {
+ struct page *page;
+ u16 offset;
+};
+
+struct mlx4_en_tx_ring {
+ struct mlx4_hwq_resources wqres;
+ u32 size; /* number of TXBBs */
+ u32 size_mask;
+ u16 stride;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 prod;
+ u32 cons;
+ u32 buf_size;
+ u32 doorbell_qpn;
+ void *buf;
+ u16 poll_cnt;
+ int blocked;
+ struct mlx4_en_tx_info *tx_info;
+ u8 *bounce_buf;
+ u32 last_nr_txbb;
+ struct mlx4_qp qp;
+ struct mlx4_qp_context context;
+ int qpn;
+ enum mlx4_qp_state qp_state;
+ struct mlx4_srq dummy;
+ unsigned long bytes;
+ unsigned long packets;
+ spinlock_t comp_lock;
+};
+
+struct mlx4_en_rx_desc {
+ struct mlx4_wqe_srq_next_seg next;
+ /* actual number of entries depends on rx ring stride */
+ struct mlx4_wqe_data_seg data[0];
+};
+
+struct mlx4_en_rx_ring {
+ struct mlx4_srq srq;
+ struct mlx4_hwq_resources wqres;
+ struct mlx4_en_rx_alloc page_alloc[MLX4_EN_MAX_RX_FRAGS];
+ struct net_lro_mgr lro;
+ u32 size; /* number of Rx descs */
+ u32 actual_size;
+ u32 size_mask;
+ u16 stride;
+ u16 log_stride;
+ u16 cqn; /* index of port CQ associated with this ring */
+ u32 prod;
+ u32 cons;
+ u32 buf_size;
+ int need_refill;
+ int full;
+ void *buf;
+ void *rx_info;
+ unsigned long bytes;
+ unsigned long packets;
+};
+
+
+static inline int mlx4_en_can_lro(__be16 status)
+{
+ return (status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPV4F |
+ MLX4_CQE_STATUS_IPV6 |
+ MLX4_CQE_STATUS_IPV4OPT |
+ MLX4_CQE_STATUS_TCP |
+ MLX4_CQE_STATUS_UDP |
+ MLX4_CQE_STATUS_IPOK)) ==
+ cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
+ MLX4_CQE_STATUS_IPOK |
+ MLX4_CQE_STATUS_TCP);
+}
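
Put differently, a completion is LRO-eligible only for non-fragmented, option-free IPv4 TCP with a good IP checksum; setting any other bit inside the mask breaks the equality. An illustrative pair of status words:

/* Example: 'ok' satisfies the check, while 'frag' fails it because
 * the fragment bit lands inside the compared mask. */
__be16 ok   = cpu_to_be16(MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPOK |
			  MLX4_CQE_STATUS_TCP);
__be16 frag = ok | cpu_to_be16(MLX4_CQE_STATUS_IPV4F);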
+
+struct mlx4_en_cq {
+ struct mlx4_cq mcq;
+ struct mlx4_hwq_resources wqres;
+ int ring;
+ spinlock_t lock;
+ struct net_device *dev;
+ struct napi_struct napi;
+ /* Per-core Tx cq processing support */
+ struct timer_list timer;
+ int size;
+ int buf_size;
+ unsigned vector;
+ enum cq_type is_tx;
+ u16 moder_time;
+ u16 moder_cnt;
+ int armed;
+ struct mlx4_cqe *buf;
+#define MLX4_EN_OPCODE_ERROR 0x1e
+};
+
+struct mlx4_en_port_profile {
+ u32 flags;
+ u32 tx_ring_num;
+ u32 rx_ring_num;
+ u32 tx_ring_size;
+ u32 rx_ring_size;
+ u8 rx_pause;
+ u8 rx_ppp;
+ u8 tx_pause;
+ u8 tx_ppp;
+};
+
+struct mlx4_en_profile {
+ int rss_xor;
+ int num_lro;
+ u8 rss_mask;
+ u32 active_ports;
+ u32 small_pkt_int;
+ int rx_moder_cnt;
+ int rx_moder_time;
+ int auto_moder;
+ u8 no_reset;
+ struct mlx4_en_port_profile prof[MLX4_MAX_PORTS + 1];
+};
+
+struct mlx4_en_dev {
+ struct mlx4_dev *dev;
+ struct pci_dev *pdev;
+ struct mutex state_lock;
+ struct net_device *pndev[MLX4_MAX_PORTS + 1];
+ u32 port_cnt;
+ bool device_up;
+ struct mlx4_en_profile profile;
+ u32 LSO_support;
+ struct workqueue_struct *workqueue;
+ struct device *dma_device;
+ void __iomem *uar_map;
+ struct mlx4_uar priv_uar;
+ struct mlx4_mr mr;
+ u32 priv_pdn;
+ spinlock_t uar_lock;
+};
+
+
+struct mlx4_en_rss_map {
+ int size;
+ int base_qpn;
+ u16 map[MAX_RSS_MAP_SIZE];
+ struct mlx4_qp qps[MAX_RSS_MAP_SIZE];
+ enum mlx4_qp_state state[MAX_RSS_MAP_SIZE];
+ struct mlx4_qp indir_qp;
+ enum mlx4_qp_state indir_state;
+};
+
+struct mlx4_en_rss_context {
+ __be32 base_qpn;
+ __be32 default_qpn;
+ u16 reserved;
+ u8 hash_fn;
+ u8 flags;
+ __be32 rss_key[10];
+};
+
+struct mlx4_en_pkt_stats {
+ unsigned long broadcast;
+ unsigned long rx_prio[8];
+ unsigned long tx_prio[8];
+#define NUM_PKT_STATS 17
+};
+
+struct mlx4_en_port_stats {
+ unsigned long lro_aggregated;
+ unsigned long lro_flushed;
+ unsigned long lro_no_desc;
+ unsigned long tso_packets;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
+ unsigned long tx_timeout;
+ unsigned long rx_alloc_failed;
+ unsigned long rx_chksum_good;
+ unsigned long rx_chksum_none;
+ unsigned long tx_chksum_offload;
+#define NUM_PORT_STATS 11
+};
+
+struct mlx4_en_perf_stats {
+ u32 tx_poll;
+ u64 tx_pktsz_avg;
+ u32 inflight_avg;
+ u16 tx_coal_avg;
+ u16 rx_coal_avg;
+ u32 napi_quota;
+#define NUM_PERF_COUNTERS 6
+};
+
+struct mlx4_en_frag_info {
+ u16 frag_size;
+ u16 frag_prefix_size;
+ u16 frag_stride;
+ u16 frag_align;
+ u16 last_offset;
+};
+
+struct mlx4_en_priv {
+ struct mlx4_en_dev *mdev;
+ struct mlx4_en_port_profile *prof;
+ struct net_device *dev;
+ struct vlan_group *vlgrp;
+ struct net_device_stats stats;
+ struct net_device_stats ret_stats;
+ spinlock_t stats_lock;
+
+ unsigned long last_moder_packets;
+ unsigned long last_moder_tx_packets;
+ unsigned long last_moder_bytes;
+ unsigned long last_moder_jiffies;
+ int last_moder_time;
+ u16 rx_usecs;
+ u16 rx_frames;
+ u16 tx_usecs;
+ u16 tx_frames;
+ u32 pkt_rate_low;
+ u16 rx_usecs_low;
+ u32 pkt_rate_high;
+ u16 rx_usecs_high;
+ u16 sample_interval;
+ u16 adaptive_rx_coal;
+ u32 msg_enable;
+
+ struct mlx4_hwq_resources res;
+ int link_state;
+ int last_link_state;
+ bool port_up;
+ int port;
+ int registered;
+ int allocated;
+ int stride;
+ int rx_csum;
+ u64 mac;
+ int mac_index;
+ unsigned max_mtu;
+ int base_qpn;
+
+ struct mlx4_en_rss_map rss_map;
+ u16 tx_prio_map[8];
+ u32 flags;
+#define MLX4_EN_FLAG_PROMISC 0x1
+ u32 tx_ring_num;
+ u32 rx_ring_num;
+ u32 rx_skb_size;
+ struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
+ u16 num_frags;
+ u16 log_rx_info;
+
+ struct mlx4_en_tx_ring tx_ring[MAX_TX_RINGS];
+ struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS];
+ struct mlx4_en_cq tx_cq[MAX_TX_RINGS];
+ struct mlx4_en_cq rx_cq[MAX_RX_RINGS];
+ struct work_struct mcast_task;
+ struct work_struct mac_task;
+ struct delayed_work refill_task;
+ struct work_struct watchdog_task;
+ struct work_struct linkstate_task;
+ struct delayed_work stats_task;
+ struct mlx4_en_perf_stats pstats;
+ struct mlx4_en_pkt_stats pkstats;
+ struct mlx4_en_port_stats port_stats;
+ struct dev_mc_list *mc_list;
+ struct mlx4_en_stat_out_mbox hw_stats;
+};
+
+
+void mlx4_en_destroy_netdev(struct net_device *dev);
+int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
+ struct mlx4_en_port_profile *prof);
+
+int mlx4_en_get_profile(struct mlx4_en_dev *mdev);
+
+int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
+ int entries, int ring, enum cq_type mode);
+void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_set_cq_moder(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
+
+void mlx4_en_poll_tx_cq(unsigned long data);
+void mlx4_en_tx_irq(struct mlx4_cq *mcq);
+int mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+
+int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring,
+ u32 size, u16 stride);
+void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring);
+int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int cq, int srqn);
+void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring);
+
+int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
+void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring);
+int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
+void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
+ struct mlx4_en_rx_ring *ring);
+int mlx4_en_process_rx_cq(struct net_device *dev,
+ struct mlx4_en_cq *cq,
+ int budget);
+int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
+void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
+ int is_tx, int rss, int qpn, int cqn, int srqn,
+ struct mlx4_qp_context *context);
+int mlx4_en_map_buffer(struct mlx4_buf *buf);
+void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
+
+void mlx4_en_calc_rx_buf(struct net_device *dev);
+void mlx4_en_set_default_rss_map(struct mlx4_en_priv *priv,
+ struct mlx4_en_rss_map *rss_map,
+ int num_entries, int num_rings);
+void mlx4_en_set_prio_map(struct mlx4_en_priv *priv, u16 *prio_map, u32 ring_num);
+int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv);
+void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv);
+int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring);
+void mlx4_en_rx_refill(struct work_struct *work);
+void mlx4_en_rx_irq(struct mlx4_cq *mcq);
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port, u64 mac, u64 clear, u8 mode);
+int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, u8 port, struct vlan_group *grp);
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc);
+
+int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset);
+
+/*
+ * Globals
+ */
+extern const struct ethtool_ops mlx4_en_ethtool_ops;
+#endif
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index d1dd5b48dbd1..0caf74cae8bc 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -461,7 +461,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
int err;
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
- ~0, dev->caps.reserved_mrws);
+ ~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
diff --git a/drivers/net/mlx4/pd.c b/drivers/net/mlx4/pd.c
index aa616892d09c..26d1a7a9e375 100644
--- a/drivers/net/mlx4/pd.c
+++ b/drivers/net/mlx4/pd.c
@@ -62,7 +62,7 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds);
+ (1 << 24) - 1, dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -100,7 +100,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars));
+ max(128, dev->caps.reserved_uars), 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
new file mode 100644
index 000000000000..0a057e5dc63b
--- /dev/null
+++ b/drivers/net/mlx4/port.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/errno.h>
+#include <linux/if_ether.h>
+
+#include <linux/mlx4/cmd.h>
+
+#include "mlx4.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0xffffffffffffULL
+
+#define MLX4_VLAN_VALID (1u << 31)
+#define MLX4_VLAN_MASK 0xfff
+
+void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
+{
+ int i;
+
+ mutex_init(&table->mutex);
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_macs;
+ table->total = 0;
+}
+
+void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
+{
+ int i;
+
+ mutex_init(&table->mutex);
+ for (i = 0; i < MLX4_MAX_VLAN_NUM; i++) {
+ table->entries[i] = 0;
+ table->refs[i] = 0;
+ }
+ table->max = 1 << dev->caps.log_num_vlans;
+ table->total = 0;
+}
+
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *index)
+{
+ struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+ mutex_lock(&table->mutex);
+ for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
+ if (free < 0 && !table->refs[i]) {
+ free = i;
+ continue;
+ }
+
+ if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+ /* MAC already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+ mlx4_dbg(dev, "Free MAC index is %d\n", free);
+
+ if (table->total == table->max) {
+ /* No free mac entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new MAC */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
+
+ err = mlx4_set_port_mac_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
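
A hedged usage sketch of the reference-counted MAC API above (port number and address are illustrative):

int idx;
u64 mac = 0x0002c9123456ULL;	/* example address only */

if (!mlx4_register_mac(dev, 1, mac, &idx)) {
	/* ... program QPs using table index 'idx' ... */
	mlx4_unregister_mac(dev, 1, idx);	/* drops the reference */
}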
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_mac_table *table = &mlx4_priv(dev)->port[port].mac_table;
+
+ mutex_lock(&table->mutex);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No MAC entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_warn(dev, "Have more references for index %d,"
+ "no need to modify MAC table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
+out:
+ mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
+
+static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
+ __be32 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
+ in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+ int i, err = 0;
+ int free = -1;
+
+ mutex_lock(&table->mutex);
+ for (i = MLX4_VLAN_REGULAR; i < MLX4_MAX_VLAN_NUM; i++) {
+ if (free < 0 && (table->refs[i] == 0)) {
+ free = i;
+ continue;
+ }
+
+ if (table->refs[i] &&
+ (vlan == (MLX4_VLAN_MASK &
+ be32_to_cpu(table->entries[i])))) {
+ /* VLAN already registered, increase reference count */
+ *index = i;
+ ++table->refs[i];
+ goto out;
+ }
+ }
+
+ if (table->total == table->max) {
+ /* No free vlan entries */
+ err = -ENOSPC;
+ goto out;
+ }
+
+ /* Register new VLAN */
+ table->refs[free] = 1;
+ table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
+
+ err = mlx4_set_port_vlan_table(dev, port, table->entries);
+ if (unlikely(err)) {
+ mlx4_warn(dev, "Failed adding vlan: %u\n", vlan);
+ table->refs[free] = 0;
+ table->entries[free] = 0;
+ goto out;
+ }
+
+ *index = free;
+ ++table->total;
+out:
+ mutex_unlock(&table->mutex);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_register_vlan);
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
+
+ if (index < MLX4_VLAN_REGULAR) {
+ mlx4_warn(dev, "Trying to free special vlan index %d\n", index);
+ return;
+ }
+
+ mutex_lock(&table->mutex);
+ if (!table->refs[index]) {
+ mlx4_warn(dev, "No vlan entry for index %d\n", index);
+ goto out;
+ }
+ if (--table->refs[index]) {
+ mlx4_dbg(dev, "Have more references for index %d,"
+ "no need to modify vlan table\n", index);
+ goto out;
+ }
+ table->entries[index] = 0;
+ mlx4_set_port_vlan_table(dev, port, table->entries);
+ --table->total;
+out:
+ mutex_unlock(&table->mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
+
+int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
+{
+ struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
+ u8 *inbuf, *outbuf;
+ int err;
+
+ inmailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inmailbox))
+ return PTR_ERR(inmailbox);
+
+ outmailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outmailbox)) {
+ mlx4_free_cmd_mailbox(dev, inmailbox);
+ return PTR_ERR(outmailbox);
+ }
+
+ inbuf = inmailbox->buf;
+ outbuf = outmailbox->buf;
+ memset(inbuf, 0, 256);
+ memset(outbuf, 0, 256);
+ inbuf[0] = 1;
+ inbuf[1] = 1;
+ inbuf[2] = 1;
+ inbuf[3] = 1;
+ *(__be16 *) (&inbuf[16]) = cpu_to_be16(0x0015);
+ *(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
+
+ err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ if (!err)
+ *caps = *(__be32 *) (outbuf + 84);
+ mlx4_free_cmd_mailbox(dev, inmailbox);
+ mlx4_free_cmd_mailbox(dev, outmailbox);
+ return err;
+}
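
For context, the bytes written above appear to form a MAD header: base version 1, management class 1 (LID-routed subnet management), class version 1, method 1 (Get), with attribute ID 0x0015 (PortInfo) at offset 16 and the port number as the attribute modifier at offset 20; byte 84 of the reply then corresponds to the PortInfo CapabilityMask (64-byte MAD header plus offset 20 into the attribute data).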
+
+int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+ u8 is_eth = dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memset(mailbox->buf, 0, 256);
+ if (is_eth) {
+ ((u8 *) mailbox->buf)[3] = 6;
+ ((__be16 *) mailbox->buf)[4] = cpu_to_be16(1 << 15);
+ ((__be16 *) mailbox->buf)[6] = cpu_to_be16(1 << 15);
+ } else
+ ((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
+ err = mlx4_cmd(dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index c49a86044bf7..1c565ef8d179 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -147,19 +147,42 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp)
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int qpn;
+
+ qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (qpn == -1)
+ return -ENOMEM;
+
+ *base = qpn;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ if (base_qpn < dev->caps.sqp_start + 8)
+ return;
+
+ mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (sqpn)
- qp->qpn = sqpn;
- else {
- qp->qpn = mlx4_bitmap_alloc(&qp_table->bitmap);
- if (qp->qpn == -1)
- return -ENOMEM;
- }
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
if (err)
@@ -208,9 +231,6 @@ err_put_qp:
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
err_out:
- if (!sqpn)
- mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
-
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
@@ -239,9 +259,6 @@ void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
-
- if (qp->qpn >= dev->caps.sqp_start + 8)
- mlx4_bitmap_free(&qp_table->bitmap, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
@@ -255,6 +272,7 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
{
struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
int err;
+ int reserved_from_top = 0;
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
@@ -264,9 +282,40 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
* block of special QPs must be aligned to a multiple of 8, so
* round up.
*/
- dev->caps.sqp_start = ALIGN(dev->caps.reserved_qps, 8);
+ dev->caps.sqp_start =
+ ALIGN(dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW], 8);
+
+ {
+ int sort[MLX4_NUM_QP_REGION];
+ int i, j, tmp;
+ int last_base = dev->caps.num_qps;
+
+ for (i = 1; i < MLX4_NUM_QP_REGION; ++i)
+ sort[i] = i;
+
+ for (i = MLX4_NUM_QP_REGION; i > 0; --i) {
+ for (j = 2; j < i; ++j) {
+ if (dev->caps.reserved_qps_cnt[sort[j]] >
+ dev->caps.reserved_qps_cnt[sort[j - 1]]) {
+ tmp = sort[j];
+ sort[j] = sort[j - 1];
+ sort[j - 1] = tmp;
+ }
+ }
+ }
+
+ for (i = 1; i < MLX4_NUM_QP_REGION; ++i) {
+ last_base -= dev->caps.reserved_qps_cnt[sort[i]];
+ dev->caps.reserved_qps_base[sort[i]] = last_base;
+ reserved_from_top +=
+ dev->caps.reserved_qps_cnt[sort[i]];
+ }
+
+ }
+
err = mlx4_bitmap_init(&qp_table->bitmap, dev->caps.num_qps,
- (1 << 24) - 1, dev->caps.sqp_start + 8);
+ (1 << 23) - 1, dev->caps.sqp_start + 8,
+ reserved_from_top);
if (err)
return err;
diff --git a/drivers/net/mlx4/srq.c b/drivers/net/mlx4/srq.c
index 533eb6db24b3..fe9f218691f5 100644
--- a/drivers/net/mlx4/srq.c
+++ b/drivers/net/mlx4/srq.c
@@ -245,7 +245,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
- dev->caps.num_srqs - 1, dev->caps.reserved_srqs);
+ dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
if (err)
return err;
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index a9c8c08044b1..e513f76f2a9f 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -899,7 +899,8 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
if (skb != NULL) {
if (skb_queue_len(&mp->rx_recycle) <
mp->default_rx_ring_size &&
- skb_recycle_check(skb, mp->skb_size))
+ skb_recycle_check(skb, mp->skb_size +
+ dma_get_cache_alignment() - 1))
__skb_queue_head(&mp->rx_recycle, skb);
else
dev_kfree_skb(skb);
@@ -1066,9 +1067,12 @@ static int smi_wait_ready(struct mv643xx_eth_shared_private *msp)
return 0;
}
- if (!wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
- msecs_to_jiffies(100)))
- return -ETIMEDOUT;
+ if (!smi_is_done(msp)) {
+ wait_event_timeout(msp->smi_busy_wait, smi_is_done(msp),
+ msecs_to_jiffies(100));
+ if (!smi_is_done(msp))
+ return -ETIMEDOUT;
+ }
return 0;
}
@@ -2432,8 +2436,8 @@ static int mv643xx_eth_shared_remove(struct platform_device *pdev)
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
if (pd == NULL || pd->shared_smi == NULL) {
- mdiobus_free(msp->smi_bus);
mdiobus_unregister(msp->smi_bus);
+ mdiobus_free(msp->smi_bus);
}
if (msp->err_interrupt != NO_IRQ)
free_irq(msp->err_interrupt, msp);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index a9aebad52652..b37867097308 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -75,7 +75,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.4.3-1.369"
+#define MYRI10GE_VERSION_STR "1.4.3-1.378"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -1393,6 +1393,8 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
if (tx->req == tx->done) {
tx->queue_active = 0;
put_be32(htonl(1), tx->send_stop);
+ mb();
+ mmiowb();
}
__netif_tx_unlock(dev_queue);
}
@@ -2497,6 +2499,10 @@ static int myri10ge_open(struct net_device *dev)
return 0;
abort_with_rings:
+ while (slice) {
+ slice--;
+ napi_disable(&mgp->ss[slice].napi);
+ }
for (i = 0; i < mgp->num_slices; i++)
myri10ge_free_rings(&mgp->ss[i]);
@@ -2860,6 +2866,8 @@ again:
if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) {
tx->queue_active = 1;
put_be32(htonl(1), tx->send_go);
+ mb();
+ mmiowb();
}
tx->pkt_start++;
if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index b9bed82e1d21..b289a0a2b945 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -401,6 +401,8 @@ static int netx_eth_drv_probe(struct platform_device *pdev)
priv->xmac_base = priv->xc->xmac_base;
priv->sram_base = priv->xc->sram_base;
+ spin_lock_init(&priv->lock);
+
ret = pfifo_request(PFIFO_MASK(priv->id));
if (ret) {
printk("unable to request PFIFO\n");
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index ebc812702903..1b6f548c4411 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -33,8 +33,8 @@
#define DRV_MODULE_NAME "niu"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "0.9"
-#define DRV_MODULE_RELDATE "May 4, 2008"
+#define DRV_MODULE_VERSION "1.0"
+#define DRV_MODULE_RELDATE "Nov 14, 2008"
static char version[] __devinitdata =
DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -51,8 +51,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#ifndef readq
static u64 readq(void __iomem *reg)
{
- return (((u64)readl(reg + 0x4UL) << 32) |
- (u64)readl(reg));
+ return ((u64) readl(reg)) | (((u64) readl(reg + 4UL)) << 32);
}
static void writeq(u64 val, void __iomem *reg)
@@ -407,7 +406,7 @@ static int esr2_set_rx_cfg(struct niu *np, unsigned long channel, u32 val)
}
/* Mode is always 10G fiber. */
-static int serdes_init_niu(struct niu *np)
+static int serdes_init_niu_10g_fiber(struct niu *np)
{
struct niu_link_config *lp = &np->link_config;
u32 tx_cfg, rx_cfg;
@@ -444,6 +443,223 @@ static int serdes_init_niu(struct niu *np)
return 0;
}
+static int serdes_init_niu_1g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u16 pll_cfg, pll_sts;
+ int max_retry = 100;
+ u64 sig, mask, val;
+ u32 tx_cfg, rx_cfg;
+ unsigned long i;
+ int err;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV |
+ PLL_TX_CFG_RATE_HALF);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_RATE_HALF);
+
+ if (np->port == 0)
+ rx_cfg |= PLL_RX_CFG_EQ_LP_ADAPTIVE;
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize PLL for 1G */
+ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_8X);
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_CFG_L, pll_cfg);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_1g_serdes: "
+ "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ return err;
+ }
+
+ pll_sts = PLL_CFG_ENPLL;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_STS_L, pll_sts);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_1g_serdes: "
+ "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ return err;
+ }
+
+ udelay(200);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ switch (np->port) {
+ case 0:
+ val = (ESR_INT_SRDY0_P0 | ESR_INT_DET0_P0);
+ mask = val;
+ break;
+
+ case 1:
+ val = (ESR_INT_SRDY0_P1 | ESR_INT_DET0_P1);
+ mask = val;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (max_retry--) {
+ sig = nr64(ESR_INT_SIGNALS);
+ if ((sig & mask) == val)
+ break;
+
+ mdelay(500);
+ }
+
+ if ((sig & mask) != val) {
+ dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
+ "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int serdes_init_niu_10g_serdes(struct niu *np)
+{
+ struct niu_link_config *lp = &np->link_config;
+ u32 tx_cfg, rx_cfg, pll_cfg, pll_sts;
+ int max_retry = 100;
+ u64 sig, mask, val;
+ unsigned long i;
+ int err;
+
+ tx_cfg = (PLL_TX_CFG_ENTX | PLL_TX_CFG_SWING_1375MV);
+ rx_cfg = (PLL_RX_CFG_ENRX | PLL_RX_CFG_TERM_0P8VDDT |
+ PLL_RX_CFG_ALIGN_ENA | PLL_RX_CFG_LOS_LTHRESH |
+ PLL_RX_CFG_EQ_LP_ADAPTIVE);
+
+ if (lp->loopback_mode == LOOPBACK_PHY) {
+ u16 test_cfg = PLL_TEST_CFG_LOOPBACK_CML_DIS;
+
+ mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_TEST_CFG_L, test_cfg);
+
+ tx_cfg |= PLL_TX_CFG_ENTEST;
+ rx_cfg |= PLL_RX_CFG_ENTEST;
+ }
+
+ /* Initialize PLL for 10G */
+ pll_cfg = (PLL_CFG_ENPLL | PLL_CFG_MPY_10X);
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_10g_serdes: "
+ "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ return err;
+ }
+
+ pll_sts = PLL_CFG_ENPLL;
+
+ err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
+ ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
+ if (err) {
+ dev_err(np->device, PFX "NIU Port %d "
+ "serdes_init_niu_10g_serdes: "
+ "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ return err;
+ }
+
+ udelay(200);
+
+ /* Initialize all 4 lanes of the SERDES. */
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_tx_cfg(np, i, tx_cfg);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ err = esr2_set_rx_cfg(np, i, rx_cfg);
+ if (err)
+ return err;
+ }
+
+ /* check if serdes is ready */
+
+ switch (np->port) {
+ case 0:
+ mask = ESR_INT_SIGNALS_P0_BITS;
+ val = (ESR_INT_SRDY0_P0 |
+ ESR_INT_DET0_P0 |
+ ESR_INT_XSRDY_P0 |
+ ESR_INT_XDP_P0_CH3 |
+ ESR_INT_XDP_P0_CH2 |
+ ESR_INT_XDP_P0_CH1 |
+ ESR_INT_XDP_P0_CH0);
+ break;
+
+ case 1:
+ mask = ESR_INT_SIGNALS_P1_BITS;
+ val = (ESR_INT_SRDY0_P1 |
+ ESR_INT_DET0_P1 |
+ ESR_INT_XSRDY_P1 |
+ ESR_INT_XDP_P1_CH3 |
+ ESR_INT_XDP_P1_CH2 |
+ ESR_INT_XDP_P1_CH1 |
+ ESR_INT_XDP_P1_CH0);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ while (max_retry--) {
+ sig = nr64(ESR_INT_SIGNALS);
+ if ((sig & mask) == val)
+ break;
+
+ mdelay(500);
+ }
+
+ if ((sig & mask) != val) {
+ pr_info(PFX "NIU Port %u signal bits [%08x] are not "
+ "[%08x] for 10G...trying 1G\n",
+ np->port, (int) (sig & mask), (int) val);
+
+ /* 10G failed, try initializing at 1G */
+ err = serdes_init_niu_1g_serdes(np);
+ if (!err) {
+ np->flags &= ~NIU_FLAGS_10G;
+ np->mac_xcvr = MAC_XCVR_PCS;
+ } else {
+ dev_err(np->device, PFX "Port %u 10G/1G SERDES "
+ "Link Failed \n", np->port);
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
static int esr_read_rxtx_ctrl(struct niu *np, unsigned long chan, u32 *val)
{
int err;
@@ -1955,13 +2171,23 @@ static const struct niu_phy_ops phy_ops_10g_serdes = {
.link_status = link_status_10g_serdes,
};
+static const struct niu_phy_ops phy_ops_10g_serdes_niu = {
+ .serdes_init = serdes_init_niu_10g_serdes,
+ .link_status = link_status_10g_serdes,
+};
+
+static const struct niu_phy_ops phy_ops_1g_serdes_niu = {
+ .serdes_init = serdes_init_niu_1g_serdes,
+ .link_status = link_status_1g_serdes,
+};
+
static const struct niu_phy_ops phy_ops_1g_rgmii = {
.xcvr_init = xcvr_init_1g_rgmii,
.link_status = link_status_1g_rgmii,
};
static const struct niu_phy_ops phy_ops_10g_fiber_niu = {
- .serdes_init = serdes_init_niu,
+ .serdes_init = serdes_init_niu_10g_fiber,
.xcvr_init = xcvr_init_10g,
.link_status = link_status_10g,
};
@@ -1999,11 +2225,21 @@ struct niu_phy_template {
u32 phy_addr_base;
};
-static const struct niu_phy_template phy_template_niu = {
+static const struct niu_phy_template phy_template_niu_10g_fiber = {
.ops = &phy_ops_10g_fiber_niu,
.phy_addr_base = 16,
};
+static const struct niu_phy_template phy_template_niu_10g_serdes = {
+ .ops = &phy_ops_10g_serdes_niu,
+ .phy_addr_base = 0,
+};
+
+static const struct niu_phy_template phy_template_niu_1g_serdes = {
+ .ops = &phy_ops_1g_serdes_niu,
+ .phy_addr_base = 0,
+};
+
static const struct niu_phy_template phy_template_10g_fiber = {
.ops = &phy_ops_10g_fiber,
.phy_addr_base = 8,
@@ -2183,8 +2419,25 @@ static int niu_determine_phy_disposition(struct niu *np)
u32 phy_addr_off = 0;
if (plat_type == PLAT_TYPE_NIU) {
- tp = &phy_template_niu;
- phy_addr_off += np->port;
+ switch (np->flags &
+ (NIU_FLAGS_10G |
+ NIU_FLAGS_FIBER |
+ NIU_FLAGS_XCVR_SERDES)) {
+ case NIU_FLAGS_10G | NIU_FLAGS_XCVR_SERDES:
+ /* 10G Serdes */
+ tp = &phy_template_niu_10g_serdes;
+ break;
+ case NIU_FLAGS_XCVR_SERDES:
+ /* 1G Serdes */
+ tp = &phy_template_niu_1g_serdes;
+ break;
+ case NIU_FLAGS_10G | NIU_FLAGS_FIBER:
+ /* 10G Fiber */
+ default:
+ tp = &phy_template_niu_10g_fiber;
+ phy_addr_off += np->port;
+ break;
+ }
} else {
switch (np->flags &
(NIU_FLAGS_10G |
@@ -7214,6 +7467,12 @@ static int __devinit niu_phy_type_prop_decode(struct niu *np,
np->flags |= NIU_FLAGS_10G;
np->flags &= ~NIU_FLAGS_FIBER;
np->mac_xcvr = MAC_XCVR_XPCS;
+ } else if (!strcmp(phy_prop, "xgsd") || !strcmp(phy_prop, "gsd")) {
+ /* 10G Serdes or 1G Serdes, default to 10G */
+ np->flags |= NIU_FLAGS_10G;
+ np->flags &= ~NIU_FLAGS_FIBER;
+ np->flags |= NIU_FLAGS_XCVR_SERDES;
+ np->mac_xcvr = MAC_XCVR_XPCS;
} else {
return -EINVAL;
}
@@ -7742,6 +8001,8 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
u32 val;
int err;
+ num_10g = num_1g = 0;
+
if (!strcmp(np->vpd.model, NIU_ALONSO_MDL_STR) ||
!strcmp(np->vpd.model, NIU_KIMI_MDL_STR)) {
num_10g = 0;
@@ -7758,6 +8019,16 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
parent->num_ports = 2;
val = (phy_encode(PORT_TYPE_10G, 0) |
phy_encode(PORT_TYPE_10G, 1));
+ } else if ((np->flags & NIU_FLAGS_XCVR_SERDES) &&
+ (parent->plat_type == PLAT_TYPE_NIU)) {
+ /* this is the Monza case */
+ if (np->flags & NIU_FLAGS_10G) {
+ val = (phy_encode(PORT_TYPE_10G, 0) |
+ phy_encode(PORT_TYPE_10G, 1));
+ } else {
+ val = (phy_encode(PORT_TYPE_1G, 0) |
+ phy_encode(PORT_TYPE_1G, 1));
+ }
} else {
err = fill_phy_probe_info(np, parent, info);
if (err)
@@ -8657,7 +8928,9 @@ static void __devinit niu_device_announce(struct niu *np)
dev->name,
(np->flags & NIU_FLAGS_XMAC ? "XMAC" : "BMAC"),
(np->flags & NIU_FLAGS_10G ? "10G" : "1G"),
- (np->flags & NIU_FLAGS_FIBER ? "FIBER" : "COPPER"),
+ (np->flags & NIU_FLAGS_FIBER ? "FIBER" :
+ (np->flags & NIU_FLAGS_XCVR_SERDES ? "SERDES" :
+ "COPPER")),
(np->mac_xcvr == MAC_XCVR_MII ? "MII" :
(np->mac_xcvr == MAC_XCVR_PCS ? "PCS" : "XPCS")),
np->vpd.phy_type);
@@ -8667,7 +8940,6 @@ static void __devinit niu_device_announce(struct niu *np)
static int __devinit niu_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- unsigned long niureg_base, niureg_len;
union niu_parent_id parent_id;
struct net_device *dev;
struct niu *np;
@@ -8758,10 +9030,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM);
- niureg_base = pci_resource_start(pdev, 0);
- niureg_len = pci_resource_len(pdev, 0);
-
- np->regs = ioremap_nocache(niureg_base, niureg_len);
+ np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
dev_err(&pdev->dev, PFX "Cannot map device registers, "
"aborting.\n");
diff --git a/drivers/net/niu.h b/drivers/net/niu.h
index c6fa883daa22..180ca8ae93de 100644
--- a/drivers/net/niu.h
+++ b/drivers/net/niu.h
@@ -1048,6 +1048,13 @@
#define PLL_CFG_LD_SHIFT 8
#define PLL_CFG_MPY 0x0000001e
#define PLL_CFG_MPY_SHIFT 1
+#define PLL_CFG_MPY_4X 0x0
+#define PLL_CFG_MPY_5X 0x00000002
+#define PLL_CFG_MPY_6X 0x00000004
+#define PLL_CFG_MPY_8X 0x00000008
+#define PLL_CFG_MPY_10X 0x0000000a
+#define PLL_CFG_MPY_12X 0x0000000c
+#define PLL_CFG_MPY_12P5X 0x0000000e
#define PLL_CFG_ENPLL 0x00000001
#define ESR2_TI_PLL_STS_L (ESR2_BASE + 0x002)
@@ -1093,6 +1100,9 @@
#define PLL_TX_CFG_INVPAIR 0x00000080
#define PLL_TX_CFG_RATE 0x00000060
#define PLL_TX_CFG_RATE_SHIFT 5
+#define PLL_TX_CFG_RATE_FULL 0x0
+#define PLL_TX_CFG_RATE_HALF 0x20
+#define PLL_TX_CFG_RATE_QUAD 0x40
#define PLL_TX_CFG_BUSWIDTH 0x0000001c
#define PLL_TX_CFG_BUSWIDTH_SHIFT 2
#define PLL_TX_CFG_ENTEST 0x00000002
@@ -1132,6 +1142,9 @@
#define PLL_RX_CFG_INVPAIR 0x00000080
#define PLL_RX_CFG_RATE 0x00000060
#define PLL_RX_CFG_RATE_SHIFT 5
+#define PLL_RX_CFG_RATE_FULL 0x0
+#define PLL_RX_CFG_RATE_HALF 0x20
+#define PLL_RX_CFG_RATE_QUAD 0x40
#define PLL_RX_CFG_BUSWIDTH 0x0000001c
#define PLL_RX_CFG_BUSWIDTH_SHIFT 2
#define PLL_RX_CFG_ENTEST 0x00000002
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index b37a498939ae..0418045166c3 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -779,6 +779,7 @@ static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
PCMCIA_DEVICE_PROD_ID12("MELCO", "LPC3-TX", 0x481e0094, 0xf91af609),
+ PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "100BASE", 0x281f1c5d, 0x7c2add04),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FastEtherCard", 0x281f1c5d, 0x7ef26116),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "FEP501", 0x281f1c5d, 0x2e272058),
@@ -1174,7 +1175,6 @@ static int ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
* ax_interrupt - handle the interrupts from an 8390
* @irq: interrupt number
* @dev_id: a pointer to the net_device
- * @regs: unused
*
* Handle the ether interface interrupts. We pull packets from
* the 8390 via the card specific functions and fire them at the networking
diff --git a/drivers/net/pcmcia/ibmtr_cs.c b/drivers/net/pcmcia/ibmtr_cs.c
index cf3cca4642f2..f51944b28cfa 100644
--- a/drivers/net/pcmcia/ibmtr_cs.c
+++ b/drivers/net/pcmcia/ibmtr_cs.c
@@ -349,7 +349,7 @@ static int ibmtr_suspend(struct pcmcia_device *link)
return 0;
}
-static int ibmtr_resume(struct pcmcia_device *link)
+static int __devinit ibmtr_resume(struct pcmcia_device *link)
{
ibmtr_dev_t *info = link->priv;
struct net_device *dev = info->dev;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index e40d6301aa7a..ce486f094492 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1693,7 +1693,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("National Semiconductor", "InfoMover NE4100", 0x36e1191f, 0xa6617ec8),
PCMCIA_DEVICE_PROD_ID12("NEC", "PC-9801N-J12", 0x18df0ba0, 0xbc912d76),
PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA410TX", 0x9aa79dc3, 0x60e5bc0e),
- PCMCIA_DEVICE_PROD_ID12("NETGEAR", "FA411", 0x9aa79dc3, 0x40fad875),
PCMCIA_DEVICE_PROD_ID12("Network Everywhere", "Fast Ethernet 10/100 PC Card", 0x820a67b6, 0x31ed1a5f),
PCMCIA_DEVICE_PROD_ID12("NextCom K.K.", "Next Hawk", 0xaedaec74, 0xad050ef1),
PCMCIA_DEVICE_PROD_ID12("PCMCIA", "10/100Mbps Ethernet Card", 0x281f1c5d, 0x6e41773b),
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 4aa547947040..eb6411c4694f 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -227,6 +227,59 @@ static int m88e1111_config_init(struct phy_device *phydev)
return 0;
}
+static int m88e1118_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_M1011_PHY_SCR,
+ MII_M1011_PHY_SCR_AUTO_CROSS);
+ if (err < 0)
+ return err;
+
+ return genphy_config_aneg(phydev);
+}
+
+static int m88e1118_config_init(struct phy_device *phydev)
+{
+ int err;
+
+ /* Change address */
+ err = phy_write(phydev, 0x16, 0x0002);
+ if (err < 0)
+ return err;
+
+ /* Enable 1000 Mbit */
+ err = phy_write(phydev, 0x15, 0x1070);
+ if (err < 0)
+ return err;
+
+ /* Change address */
+ err = phy_write(phydev, 0x16, 0x0003);
+ if (err < 0)
+ return err;
+
+ /* Adjust LED Control */
+ err = phy_write(phydev, 0x10, 0x021e);
+ if (err < 0)
+ return err;
+
+ /* Reset address */
+ err = phy_write(phydev, 0x16, 0x0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int m88e1145_config_init(struct phy_device *phydev)
{
int err;
@@ -416,6 +469,19 @@ static struct phy_driver marvell_drivers[] = {
.driver = { .owner = THIS_MODULE },
},
{
+ .phy_id = 0x01410e10,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Marvell 88E1118",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &m88e1118_config_init,
+ .config_aneg = &m88e1118_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &marvell_ack_interrupt,
+ .config_intr = &marvell_config_intr,
+ .driver = {.owner = THIS_MODULE,},
+ },
+ {
.phy_id = 0x01410cd0,
.phy_id_mask = 0xfffffff0,
.name = "Marvell 88E1145",
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index d0ed1ef284a8..289fc267edf3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -105,8 +105,6 @@ int mdiobus_register(struct mii_bus *bus)
return -EINVAL;
}
- bus->state = MDIOBUS_REGISTERED;
-
mutex_init(&bus->mdio_lock);
if (bus->reset)
@@ -123,6 +121,9 @@ int mdiobus_register(struct mii_bus *bus)
}
}
+ if (!err)
+ bus->state = MDIOBUS_REGISTERED;
+
pr_info("%s: probed\n", bus->name);
return err;
@@ -136,7 +137,7 @@ void mdiobus_unregister(struct mii_bus *bus)
BUG_ON(bus->state != MDIOBUS_REGISTERED);
bus->state = MDIOBUS_UNREGISTERED;
- device_unregister(&bus->dev);
+ device_del(&bus->dev);
for (i = 0; i < PHY_MAX_ADDR; i++) {
if (bus->phy_map[i])
device_unregister(&bus->phy_map[i]->dev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index e11b03b2b25a..25acbbde4a60 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -227,8 +227,17 @@ struct phy_device * get_phy_device(struct mii_bus *bus, int addr)
if (r)
return ERR_PTR(r);
- /* If the phy_id is all Fs, there is no device there */
- if (0xffffffff == phy_id)
+ /* If the phy_id is mostly Fs, there is no device there */
+ if ((phy_id & 0x1fffffff) == 0x1fffffff)
+ return NULL;
+
+ /*
+ * Broken hardware is sometimes missing the pull down resistor on the
+ * MDIO line, which results in reads to non-existent devices returning
+ * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
+ * device as well.
+ */
+ if (phy_id == 0)
return NULL;
dev = phy_device_create(bus, addr, phy_id);
@@ -564,20 +573,32 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/
int genphy_config_aneg(struct phy_device *phydev)
{
- int result = 0;
+ int result;
- if (AUTONEG_ENABLE == phydev->autoneg) {
- int result = genphy_config_advert(phydev);
+ if (AUTONEG_ENABLE != phydev->autoneg)
+ return genphy_setup_forced(phydev);
- if (result < 0) /* error */
- return result;
+ result = genphy_config_advert(phydev);
+
+ if (result < 0) /* error */
+ return result;
+
+ if (result == 0) {
+ /* Advertisement hasn't changed, but maybe aneg was never on to
+ * begin with? Or maybe phy was isolated? */
+ int ctl = phy_read(phydev, MII_BMCR);
+
+ if (ctl < 0)
+ return ctl;
+
+ if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+ result = 1; /* do restart aneg */
+ }
- /* Only restart aneg if we are advertising something different
- * than we were before. */
- if (result > 0)
- result = genphy_restart_aneg(phydev);
- } else
- result = genphy_setup_forced(phydev);
+ /* Only restart aneg if we are advertising something different
+ * than we were before. */
+ if (result > 0)
+ result = genphy_restart_aneg(phydev);
return result;
}
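get_phy_device() above now treats two bus artifacts as "no PHY at this address": an ID that is mostly ones (a floating MDIO line reads back high) and an ID of zero (boards missing the pull-down read back low). The same checks, self-contained — the 0x1fffffff mask is copied from the hunk:

#include <stdint.h>
#include <stdio.h>

static int phy_id_is_valid(uint32_t phy_id)
{
	/* Mostly Fs: nothing drove the bus, reads floated high. */
	if ((phy_id & 0x1fffffff) == 0x1fffffff)
		return 0;
	/* All zeros: broken boards without the MDIO pull-down. */
	if (phy_id == 0)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       phy_id_is_valid(0xffffffff),  /* 0 */
	       phy_id_is_valid(0x00000000),  /* 0 */
	       phy_id_is_valid(0x01410e10)); /* 1: the Marvell 88E1118 ID */
	return 0;
}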
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 8874497b6bbf..dd3b2447e85a 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -34,6 +34,8 @@
#define MII_VSC8244_IMASK_DUPLEX 0x1000
#define MII_VSC8244_IMASK_MASK 0xf000
+#define MII_VSC8221_IMASK_MASK 0xa000
+
/* Vitesse Interrupt Status Register */
#define MII_VSC8244_ISTAT 0x1a
#define MII_VSC8244_ISTAT_STATUS 0x8000
@@ -49,6 +51,12 @@
#define MII_VSC8244_AUXCONSTAT_GBIT 0x0010
#define MII_VSC8244_AUXCONSTAT_100 0x0008
+#define MII_VSC8221_AUXCONSTAT_INIT 0x0004 /* need to set this bit? */
+#define MII_VSC8221_AUXCONSTAT_RESERVED 0x0004
+
+#define PHY_ID_VSC8244 0x000fc6c0
+#define PHY_ID_VSC8221 0x000fc550
+
MODULE_DESCRIPTION("Vitesse PHY driver");
MODULE_AUTHOR("Kriston Carson");
MODULE_LICENSE("GPL");
@@ -95,13 +103,15 @@ static int vsc824x_ack_interrupt(struct phy_device *phydev)
return (err < 0) ? err : 0;
}
-static int vsc824x_config_intr(struct phy_device *phydev)
+static int vsc82xx_config_intr(struct phy_device *phydev)
{
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, MII_VSC8244_IMASK,
- MII_VSC8244_IMASK_MASK);
+ phydev->drv->phy_id == PHY_ID_VSC8244 ?
+ MII_VSC8244_IMASK_MASK :
+ MII_VSC8221_IMASK_MASK);
else {
/*
* The Vitesse PHY cannot clear the interrupt
@@ -120,7 +130,7 @@ static int vsc824x_config_intr(struct phy_device *phydev)
/* Vitesse 824x */
static struct phy_driver vsc8244_driver = {
- .phy_id = 0x000fc6c0,
+ .phy_id = PHY_ID_VSC8244,
.name = "Vitesse VSC8244",
.phy_id_mask = 0x000fffc0,
.features = PHY_GBIT_FEATURES,
@@ -129,19 +139,55 @@ static struct phy_driver vsc8244_driver = {
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
- .config_intr = &vsc824x_config_intr,
+ .config_intr = &vsc82xx_config_intr,
.driver = { .owner = THIS_MODULE,},
};
-static int __init vsc8244_init(void)
+static int vsc8221_config_init(struct phy_device *phydev)
{
- return phy_driver_register(&vsc8244_driver);
+ int err;
+
+ /* Perhaps we should set EXT_CON1 based on the interface?
+    Options are 802.3Z SerDes or SGMII */
+ err = phy_write(phydev, MII_VSC8244_AUX_CONSTAT,
+ MII_VSC8221_AUXCONSTAT_INIT);
+ return err;
+}
+
+/* Vitesse 8221 */
+static struct phy_driver vsc8221_driver = {
+ .phy_id = PHY_ID_VSC8221,
+ .phy_id_mask = 0x000ffff0,
+ .name = "Vitesse VSC8221",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &vsc8221_config_init,
+ .config_aneg = &genphy_config_aneg,
+ .read_status = &genphy_read_status,
+ .ack_interrupt = &vsc824x_ack_interrupt,
+ .config_intr = &vsc82xx_config_intr,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init vsc82xx_init(void)
+{
+ int err;
+
+ err = phy_driver_register(&vsc8244_driver);
+ if (err < 0)
+ return err;
+ err = phy_driver_register(&vsc8221_driver);
+ if (err < 0)
+ phy_driver_unregister(&vsc8244_driver);
+ return err;
}
-static void __exit vsc8244_exit(void)
+static void __exit vsc82xx_exit(void)
{
phy_driver_unregister(&vsc8244_driver);
+ phy_driver_unregister(&vsc8221_driver);
}
-module_init(vsc8244_init);
-module_exit(vsc8244_exit);
+module_init(vsc82xx_init);
+module_exit(vsc82xx_exit);
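vsc82xx_init() above is the standard two-driver registration sequence: if registering the second driver fails, the first must be unregistered before the error is returned, or a failed module load leaves a driver behind. The skeleton, with stand-in register functions where the second one is forced to fail:

#include <stdio.h>

static int reg_a(void)    { puts("reg a");   return 0; }
static int reg_b(void)    { puts("reg b");   return -1; } /* forced failure */
static void unreg_a(void) { puts("unreg a"); }

static int mod_init(void)
{
	int err = reg_a();

	if (err < 0)
		return err;

	err = reg_b();
	if (err < 0)
		unreg_a(); /* undo the half-done init before failing */
	return err;
}

int main(void) { return mod_init() ? 1 : 0; }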
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index fc6f4b8c64b3..b646e92134dc 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -399,11 +399,11 @@ static int pppoe_rcv(struct sk_buff *skb,
if (skb->len < len)
goto drop;
- po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
- if (!po)
+ if (pskb_trim_rcsum(skb, len))
goto drop;
- if (pskb_trim_rcsum(skb, len))
+ po = get_item(ph->sid, eth_hdr(skb)->h_source, dev->ifindex);
+ if (!po)
goto drop;
return sk_receive_skb(sk_pppox(po), skb, 0);
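The pppoe_rcv() reorder is about reference counting: get_item() returns its socket with a reference held, so a check that can still jump to the drop label — here pskb_trim_rcsum() — must run before the lookup, or the reference leaks on every trimmed-and-dropped packet. A schematic version with a counter standing in for the sock refcount:

#include <stdio.h>

struct sock { int refs; };
static struct sock s0;

static int trim_ok(int len)      { return len > 0; }
static struct sock *lookup(void) { s0.refs++; return &s0; }

static int rcv(int len)
{
	struct sock *po;

	if (!trim_ok(len))      /* may fail: run it before taking a ref */
		goto drop;

	po = lookup();          /* from here on we hold a reference */
	if (!po)
		goto drop;

	/* deliver(po) would consume the reference here */
	return 0;
drop:
	return -1;
}

int main(void)
{
	rcv(0);
	printf("leaked refs after failed rcv: %d\n", s0.refs); /* 0 */
	return 0;
}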
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 185b1dff10a8..e98d9773158d 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -1353,6 +1353,7 @@ static int pppol2tp_release(struct socket *sock)
kfree_skb(skb);
sock_put(sk);
}
+ sock_put(sk);
}
release_sock(sk);
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 3cdd07c45b6d..508452c02151 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -1515,9 +1515,6 @@ static u32 ql_get_link_state(struct ql3_adapter *qdev)
linkState = LS_UP;
} else {
linkState = LS_DOWN;
- if (netif_msg_link(qdev))
- printk(KERN_WARNING PFX
- "%s: Link is down.\n", qdev->ndev->name);
}
return linkState;
}
@@ -1581,10 +1578,6 @@ static int ql_finish_auto_neg(struct ql3_adapter *qdev)
ql_mac_enable(qdev, 1);
}
- if (netif_msg_link(qdev))
- printk(KERN_DEBUG PFX
- "%s: Change port_link_state LS_DOWN to LS_UP.\n",
- qdev->ndev->name);
qdev->port_link_state = LS_UP;
netif_start_queue(qdev->ndev);
netif_carrier_on(qdev->ndev);
@@ -1655,14 +1648,9 @@ static void ql_link_state_machine_work(struct work_struct *work)
/* Fall Through */
case LS_DOWN:
- if (netif_msg_link(qdev))
- printk(KERN_DEBUG PFX
- "%s: port_link_state = LS_DOWN.\n",
- qdev->ndev->name);
if (curr_link_state == LS_UP) {
if (netif_msg_link(qdev))
- printk(KERN_DEBUG PFX
- "%s: curr_link_state = LS_UP.\n",
+ printk(KERN_INFO PFX "%s: Link is up.\n",
qdev->ndev->name);
if (ql_is_auto_neg_complete(qdev))
ql_finish_auto_neg(qdev);
@@ -1670,6 +1658,7 @@ static void ql_link_state_machine_work(struct work_struct *work)
if (qdev->port_link_state == LS_UP)
ql_link_down_detect_clear(qdev);
+ qdev->port_link_state = LS_UP;
}
break;
@@ -1678,12 +1667,14 @@ static void ql_link_state_machine_work(struct work_struct *work)
* See if the link is currently down or went down and came
* back up
*/
- if ((curr_link_state == LS_DOWN) || ql_link_down_detect(qdev)) {
+ if (curr_link_state == LS_DOWN) {
if (netif_msg_link(qdev))
printk(KERN_INFO PFX "%s: Link is down.\n",
qdev->ndev->name);
qdev->port_link_state = LS_DOWN;
}
+ if (ql_link_down_detect(qdev))
+ qdev->port_link_state = LS_DOWN;
break;
}
spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 38116f9d4163..ba2e1c5b6bcf 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1375,7 +1375,6 @@ struct ql_adapter {
spinlock_t adapter_lock;
spinlock_t hw_lock;
spinlock_t stats_lock;
- spinlock_t legacy_lock; /* used for maintaining legacy intr sync */
/* PCI Bus Relative Register Addresses */
void __iomem *reg_base;
@@ -1399,8 +1398,6 @@ struct ql_adapter {
struct msix_entry *msi_x_entry;
struct intr_context intr_context[MAX_RX_RINGS];
- int (*legacy_check) (struct ql_adapter *);
-
int tx_ring_count; /* One per online CPU. */
u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
u32 rss_ring_count; /* One per online CPU. */
@@ -1502,7 +1499,7 @@ void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 4b2caa6b7ac5..b83a9c9b6a97 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -577,41 +577,53 @@ static void ql_disable_interrupts(struct ql_adapter *qdev)
* incremented every time we queue a worker and decremented every time
* a worker finishes. Once it hits zero we enable the interrupt.
*/
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+ u32 var = 0;
+ unsigned long hw_flags = 0;
+ struct intr_context *ctx = qdev->intr_context + intr;
+
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+ /* Always enable if we're MSIX multi interrupts and
+ * it's not the default (zeroth) interrupt.
+ */
ql_write32(qdev, INTR_EN,
- qdev->intr_context[intr].intr_en_mask);
- else {
- if (qdev->legacy_check)
- spin_lock(&qdev->legacy_lock);
- if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
- QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
- intr);
- ql_write32(qdev, INTR_EN,
- qdev->intr_context[intr].intr_en_mask);
- } else {
- QPRINTK(qdev, INTR, ERR,
- "Skip enable, other queue(s) are active.\n");
- }
- if (qdev->legacy_check)
- spin_unlock(&qdev->legacy_lock);
+ ctx->intr_en_mask);
+ var = ql_read32(qdev, STS);
+ return var;
}
+
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (atomic_dec_and_test(&ctx->irq_cnt)) {
+ ql_write32(qdev, INTR_EN,
+ ctx->intr_en_mask);
+ var = ql_read32(qdev, STS);
+ }
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ return var;
}
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
u32 var = 0;
+ unsigned long hw_flags;
+ struct intr_context *ctx;
- if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
- goto exit;
- else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
+ /* HW disables for us if we're MSIX multi interrupts and
+ * it's not the default (zeroth) interrupt.
+ */
+ if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+ return 0;
+
+ ctx = qdev->intr_context + intr;
+ spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ if (!atomic_read(&ctx->irq_cnt)) {
ql_write32(qdev, INTR_EN,
- qdev->intr_context[intr].intr_dis_mask);
+ ctx->intr_dis_mask);
var = ql_read32(qdev, STS);
}
- atomic_inc(&qdev->intr_context[intr].irq_cnt);
-exit:
+ atomic_inc(&ctx->irq_cnt);
+ spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
return var;
}
@@ -623,7 +635,9 @@ static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
* and enables only if the result is zero.
* So we precharge it here.
*/
- atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+ if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+ i == 0))
+ atomic_set(&qdev->intr_context[i].irq_cnt, 1);
ql_enable_completion_interrupt(qdev, i);
}
@@ -1725,19 +1739,6 @@ static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-/* We check here to see if we're already handling a legacy
- * interrupt. If we are, then it must belong to another
- * chip with which we're sharing the interrupt line.
- */
-int ql_legacy_check(struct ql_adapter *qdev)
-{
- int err;
- spin_lock(&qdev->legacy_lock);
- err = atomic_read(&qdev->intr_context[0].irq_cnt);
- spin_unlock(&qdev->legacy_lock);
- return err;
-}
-
/* This handles a fatal error, MPI activity, and the default
* rx_ring in an MSI-X multiple vector environment.
* In MSI/Legacy environment it also process the rest of
@@ -1752,12 +1753,15 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
int i;
int work_done = 0;
- if (qdev->legacy_check && qdev->legacy_check(qdev)) {
- QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
- return IRQ_NONE; /* Not our interrupt */
+ spin_lock(&qdev->hw_lock);
+ if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+ QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+ spin_unlock(&qdev->hw_lock);
+ return IRQ_NONE;
}
+ spin_unlock(&qdev->hw_lock);
- var = ql_read32(qdev, STS);
+ var = ql_disable_completion_interrupt(qdev, intr_context->intr);
/*
* Check for fatal error.
@@ -1823,6 +1827,7 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
}
}
}
+ ql_enable_completion_interrupt(qdev, intr_context->intr);
return work_done ? IRQ_HANDLED : IRQ_NONE;
}
@@ -2701,8 +2706,6 @@ msi:
}
}
irq_type = LEG_IRQ;
- spin_lock_init(&qdev->legacy_lock);
- qdev->legacy_check = ql_legacy_check;
QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
}
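The qlge rework above collapses the legacy-interrupt bookkeeping into one atomic counter per vector: disable paths increment irq_cnt (masking on the 0-to-1 transition), enable paths decrement it and write INTR_EN only when it reaches zero, and ql_enable_all_completion_interrupts() precharges the counter to 1 so that first enable really fires. The counting discipline on its own, with register writes replaced by prints:

#include <stdio.h>

static int irq_cnt;

static void hw_enable(void)  { puts("INTR_EN <- enable mask"); }
static void hw_disable(void) { puts("INTR_EN <- disable mask"); }

static void disable_irq_cnt(void)
{
	if (irq_cnt == 0)
		hw_disable();
	irq_cnt++;
}

static void enable_irq_cnt(void)
{
	if (--irq_cnt == 0)
		hw_enable(); /* the last enabler flips the hardware bit */
}

int main(void)
{
	irq_cnt = 1;        /* precharge, as in the driver */
	enable_irq_cnt();   /* 1 -> 0: enables */
	disable_irq_cnt();  /* 0 -> 1: disables */
	disable_irq_cnt();  /* nested: no hw write */
	enable_irq_cnt();   /* 2 -> 1: still masked */
	enable_irq_cnt();   /* 1 -> 0: re-enables */
	return 0;
}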
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index c821da21d8eb..4b7cb389dc49 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -81,6 +81,10 @@ static const int multicast_filter_limit = 32;
#define RTL8169_TX_TIMEOUT (6*HZ)
#define RTL8169_PHY_TIMEOUT (10*HZ)
+#define RTL_EEPROM_SIG cpu_to_le32(0x8129)
+#define RTL_EEPROM_SIG_MASK cpu_to_le32(0xffff)
+#define RTL_EEPROM_SIG_ADDR 0x0000
+
/* write/read MMIO register */
#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
@@ -1911,74 +1915,6 @@ static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
}
}
-static int rtl_eeprom_read(struct pci_dev *pdev, int cap, int addr, __le32 *val)
-{
- int ret, count = 100;
- u16 status = 0;
- u32 value;
-
- ret = pci_write_config_word(pdev, cap + PCI_VPD_ADDR, addr);
- if (ret < 0)
- return ret;
-
- do {
- udelay(10);
- ret = pci_read_config_word(pdev, cap + PCI_VPD_ADDR, &status);
- if (ret < 0)
- return ret;
- } while (!(status & PCI_VPD_ADDR_F) && --count);
-
- if (!(status & PCI_VPD_ADDR_F))
- return -ETIMEDOUT;
-
- ret = pci_read_config_dword(pdev, cap + PCI_VPD_DATA, &value);
- if (ret < 0)
- return ret;
-
- *val = cpu_to_le32(value);
-
- return 0;
-}
-
-static void rtl_init_mac_address(struct rtl8169_private *tp,
- void __iomem *ioaddr)
-{
- struct pci_dev *pdev = tp->pci_dev;
- u8 cfg1;
- int vpd_cap;
- u8 mac[8];
- DECLARE_MAC_BUF(buf);
-
- cfg1 = RTL_R8(Config1);
- if (!(cfg1 & VPD)) {
- dprintk("VPD access not enabled, enabling\n");
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(Config1, cfg1 | VPD);
- RTL_W8(Cfg9346, Cfg9346_Lock);
- }
-
- vpd_cap = pci_find_capability(pdev, PCI_CAP_ID_VPD);
- if (!vpd_cap)
- return;
-
- /* MAC address is stored in EEPROM at offset 0x0e
- * Realtek says: "The VPD address does not have to be a DWORD-aligned
- * address as defined in the PCI 2.2 Specifications, but the VPD data
- * is always consecutive 4-byte data starting from the VPD address
- * specified."
- */
- if (rtl_eeprom_read(pdev, vpd_cap, 0x000e, (__le32*)&mac[0]) < 0 ||
- rtl_eeprom_read(pdev, vpd_cap, 0x0012, (__le32*)&mac[4]) < 0) {
- dprintk("Reading MAC address from EEPROM failed\n");
- return;
- }
-
- dprintk("MAC address found in EEPROM: %s\n", print_mac(buf, mac));
-
- /* Write MAC address */
- rtl_rar_set(tp, mac);
-}
-
static int __devinit
rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -2156,8 +2092,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->mmio_addr = ioaddr;
- rtl_init_mac_address(tp, ioaddr);
-
/* Get MAC address */
for (i = 0; i < MAC_ADDR_LEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index fa98af58223e..cd0d0873d978 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -174,8 +174,8 @@ static struct efx_ethtool_stat efx_ethtool_stats[] = {
/* EEPROM range with gPXE configuration */
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
-#define EFX_ETHTOOL_EEPROM_MIN 0x100U
-#define EFX_ETHTOOL_EEPROM_MAX 0x400U
+#define EFX_ETHTOOL_EEPROM_MIN 0x800U
+#define EFX_ETHTOOL_EEPROM_MAX 0x1800U
/**************************************************************************
*
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index a24bb68887ab..59f242a67714 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -927,7 +927,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_txdesc *txdesc;
u32 entry;
- int flags;
+ unsigned long flags;
spin_lock_irqsave(&mdp->lock, flags);
if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
@@ -1141,7 +1141,7 @@ static int sh_mdio_init(struct net_device *ndev, int id)
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
- mdp->mii_bus->id[0] = id;
+ snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
/* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 3fe01763760e..e6e3bf58a569 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -317,6 +317,7 @@ static struct mii_chip_info {
unsigned int type;
u32 feature;
} mii_chip_table[] = {
+ { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 },
{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
{ "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 },
{ "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 },
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index fa3a460f8e2f..8e8337e8b072 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1630,7 +1630,6 @@ sis900_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
* sis900_interrupt - sis900 interrupt handler
* @irq: the irq number
* @dev_instance: the client data object
- * @regs: snapshot of processor context
*
* The interrupt handler does all of the Rx thread work,
* and cleans up after the Tx thread
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 8aa7460ef0e3..9a16a79b67d0 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -155,23 +155,17 @@ static void PRINT_PKT(u_char *buf, int length)
/* this enables an interrupt in the interrupt mask register */
#define SMC_ENABLE_INT(lp, x) do { \
unsigned int __mask; \
- unsigned long __flags; \
- spin_lock_irqsave(&lp->lock, __flags); \
__mask = SMC_GET_INT_EN((lp)); \
__mask |= (x); \
SMC_SET_INT_EN((lp), __mask); \
- spin_unlock_irqrestore(&lp->lock, __flags); \
} while (0)
/* this disables an interrupt from the interrupt mask register */
#define SMC_DISABLE_INT(lp, x) do { \
unsigned int __mask; \
- unsigned long __flags; \
- spin_lock_irqsave(&lp->lock, __flags); \
__mask = SMC_GET_INT_EN((lp)); \
__mask &= ~(x); \
SMC_SET_INT_EN((lp), __mask); \
- spin_unlock_irqrestore(&lp->lock, __flags); \
} while (0)
/*
@@ -180,7 +174,7 @@ static void PRINT_PKT(u_char *buf, int length)
static void smc911x_reset(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
- unsigned int reg, timeout=0, resets=1;
+ unsigned int reg, timeout=0, resets=1, irq_cfg;
unsigned long flags;
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
@@ -252,7 +246,12 @@ static void smc911x_reset(struct net_device *dev)
* Deassert IRQ for 1*10us for edge type interrupts
* and drive IRQ pin push-pull
*/
- SMC_SET_IRQ_CFG(lp, (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_);
+ irq_cfg = (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ if (lp->cfg.irq_polarity)
+ irq_cfg |= INT_CFG_IRQ_POL_;
+#endif
+ SMC_SET_IRQ_CFG(lp, irq_cfg);
/* clear anything saved */
if (lp->pending_tx_skb != NULL) {
@@ -274,6 +273,8 @@ static void smc911x_enable(struct net_device *dev)
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__);
+ spin_lock_irqsave(&lp->lock, flags);
+
SMC_SET_MAC_ADDR(lp, dev->dev_addr);
/* Enable TX */
@@ -286,12 +287,10 @@ static void smc911x_enable(struct net_device *dev)
SMC_SET_FIFO_TSL(lp, 64);
SMC_SET_GPT_CFG(lp, GPT_CFG_TIMER_EN_ | 10000);
- spin_lock_irqsave(&lp->lock, flags);
SMC_GET_MAC_CR(lp, cr);
cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
SMC_SET_MAC_CR(lp, cr);
SMC_SET_TX_CFG(lp, TX_CFG_TX_ON_);
- spin_unlock_irqrestore(&lp->lock, flags);
/* Add 2 byte padding to start of packets */
SMC_SET_RX_CFG(lp, (2<<8) & RX_CFG_RXDOFF_);
@@ -300,9 +299,7 @@ static void smc911x_enable(struct net_device *dev)
if (cr & MAC_CR_RXEN_)
DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
- spin_lock_irqsave(&lp->lock, flags);
SMC_SET_MAC_CR(lp, cr | MAC_CR_RXEN_);
- spin_unlock_irqrestore(&lp->lock, flags);
/* Interrupt on every received packet */
SMC_SET_FIFO_RSA(lp, 0x01);
@@ -318,6 +315,8 @@ static void smc911x_enable(struct net_device *dev)
mask|=INT_EN_RDFO_EN_;
}
SMC_ENABLE_INT(lp, mask);
+
+ spin_unlock_irqrestore(&lp->lock, flags);
}
/*
@@ -458,7 +457,6 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
struct sk_buff *skb;
unsigned int cmdA, cmdB, len;
unsigned char *buf;
- unsigned long flags;
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__);
BUG_ON(lp->pending_tx_skb == NULL);
@@ -501,13 +499,11 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
#else
SMC_PUSH_DATA(lp, buf, len);
dev->trans_start = jiffies;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
#endif
- spin_lock_irqsave(&lp->lock, flags);
if (!lp->tx_throttle) {
netif_wake_queue(dev);
}
- spin_unlock_irqrestore(&lp->lock, flags);
SMC_ENABLE_INT(lp, INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
}
@@ -526,6 +522,8 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
dev->name, __func__);
+ spin_lock_irqsave(&lp->lock, flags);
+
BUG_ON(lp->pending_tx_skb != NULL);
free = SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TDFREE_;
@@ -535,12 +533,10 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
dev->name, free);
- spin_lock_irqsave(&lp->lock, flags);
/* Reenable when at least 1 packet of size MTU present */
SMC_SET_FIFO_TDA(lp, (SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
lp->tx_throttle = 1;
netif_stop_queue(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
}
/* Drop packets when we run out of space in TX FIFO
@@ -556,6 +552,7 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
lp->pending_tx_skb = NULL;
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
+ spin_unlock_irqrestore(&lp->lock, flags);
dev_kfree_skb(skb);
return 0;
}
@@ -565,7 +562,6 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* If the DMA is already running then defer this packet Tx until
* the DMA IRQ starts it
*/
- spin_lock_irqsave(&lp->lock, flags);
if (lp->txdma_active) {
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
lp->pending_tx_skb = skb;
@@ -576,11 +572,11 @@ static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
lp->txdma_active = 1;
}
- spin_unlock_irqrestore(&lp->lock, flags);
}
#endif
lp->pending_tx_skb = skb;
smc911x_hardware_send_pkt(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
return 0;
}
@@ -1242,7 +1238,7 @@ smc911x_rx_dma_irq(int dma, void *data)
netif_rx(skb);
spin_lock_irqsave(&lp->lock, flags);
- pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
+ pkts = (SMC_GET_RX_FIFO_INF(lp) & RX_FIFO_INF_RXSUSED_) >> 16;
if (pkts != 0) {
smc911x_rcv(dev);
}else {
@@ -1739,7 +1735,7 @@ static const struct ethtool_ops smc911x_ethtool_ops = {
* This routine has a simple purpose -- make the SMC chip generate an
* interrupt, so an auto-detect routine can detect it, and find the IRQ,
*/
-static int __init smc911x_findirq(struct net_device *dev)
+static int __devinit smc911x_findirq(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int timeout = 20;
@@ -1803,7 +1799,7 @@ static int __init smc911x_findirq(struct net_device *dev)
* o actually GRAB the irq.
* o GRAB the region
*/
-static int __init smc911x_probe(struct net_device *dev)
+static int __devinit smc911x_probe(struct net_device *dev)
{
struct smc911x_local *lp = netdev_priv(dev);
int i, retval;
@@ -1817,7 +1813,7 @@ static int __init smc911x_probe(struct net_device *dev)
val = SMC_GET_BYTE_TEST(lp);
DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
if (val != 0x87654321) {
- printk(KERN_ERR "Invalid chip endian 0x08%x\n",val);
+ printk(KERN_ERR "Invalid chip endian 0x%08x\n",val);
retval = -ENODEV;
goto err_out;
}
@@ -2052,9 +2048,11 @@ err_out:
* 0 --> there is a device
* anything else, error
*/
-static int smc911x_drv_probe(struct platform_device *pdev)
+static int __devinit smc911x_drv_probe(struct platform_device *pdev)
{
- struct smc91x_platdata *pd = pdev->dev.platform_data;
+#ifdef SMC_DYNAMIC_BUS_CONFIG
+ struct smc911x_platdata *pd = pdev->dev.platform_data;
+#endif
struct net_device *ndev;
struct resource *res;
struct smc911x_local *lp;
@@ -2126,7 +2124,7 @@ out:
return ret;
}
-static int smc911x_drv_remove(struct platform_device *pdev)
+static int __devexit smc911x_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc911x_local *lp = netdev_priv(ndev);
@@ -2186,9 +2184,9 @@ static int smc911x_drv_resume(struct platform_device *dev)
if (netif_running(ndev)) {
smc911x_reset(ndev);
- smc911x_enable(ndev);
if (lp->phy_type != 0)
smc911x_phy_configure(&lp->phy_configure);
+ smc911x_enable(ndev);
netif_device_attach(ndev);
}
}
@@ -2197,7 +2195,7 @@ static int smc911x_drv_resume(struct platform_device *dev)
static struct platform_driver smc911x_driver = {
.probe = smc911x_drv_probe,
- .remove = smc911x_drv_remove,
+ .remove = __devexit_p(smc911x_drv_remove),
.suspend = smc911x_drv_suspend,
.resume = smc911x_drv_resume,
.driver = {
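The smc911x changes push locking outward: SMC_ENABLE_INT()/SMC_DISABLE_INT() lose their private irqsave pair because every caller now holds lp->lock for the whole operation, and the transmit path takes the lock once at the top of hard_start_xmit — which in turn forces dev_kfree_skb() to become dev_kfree_skb_irq() inside the locked region. The shape of that refactor, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int int_en;

/* Caller must hold `lock` -- no locking inside, as in the new macros. */
static void enable_int_locked(unsigned int bits) { int_en |= bits; }

static void hard_start_xmit(void)
{
	pthread_mutex_lock(&lock);      /* one lock for the whole path */
	/* ...FIFO checks and data push would sit here... */
	enable_int_locked(0x3);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	hard_start_xmit();
	printf("int_en=0x%x\n", int_en);
	return 0;
}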
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index bf6240f23f5d..cc7d85bdfb3e 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -50,6 +50,10 @@
#define SMC_DYNAMIC_BUS_CONFIG
#endif
+#ifdef SMC_USE_PXA_DMA
+#define SMC_USE_DMA
+#endif
+
/* store this information for the driver.. */
struct smc911x_local {
/*
@@ -196,8 +200,6 @@ static inline void SMC_outsl(struct smc911x_local *lp, int reg,
#ifdef SMC_USE_PXA_DMA
-#define SMC_USE_DMA
-
/*
* Define the request and free functions
* These are unfortunately architecture specific as no generic allocation
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index c70870e0fd61..35c56abf4113 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1696,7 +1696,7 @@ static const struct ethtool_ops smc_ethtool_ops = {
* I just deleted auto_irq.c, since it was never built...
* --jgarzik
*/
-static int __init smc_findirq(struct smc_local *lp)
+static int __devinit smc_findirq(struct smc_local *lp)
{
void __iomem *ioaddr = lp->base;
int timeout = 20;
@@ -1770,7 +1770,7 @@ static int __init smc_findirq(struct smc_local *lp)
* o actually GRAB the irq.
* o GRAB the region
*/
-static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr,
+static int __devinit smc_probe(struct net_device *dev, void __iomem *ioaddr,
unsigned long irq_flags)
{
struct smc_local *lp = netdev_priv(dev);
@@ -2060,7 +2060,7 @@ static int smc_request_attrib(struct platform_device *pdev,
struct net_device *ndev)
{
struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
- struct smc_local *lp = netdev_priv(ndev);
+ struct smc_local *lp __maybe_unused = netdev_priv(ndev);
if (!res)
return 0;
@@ -2075,7 +2075,7 @@ static void smc_release_attrib(struct platform_device *pdev,
struct net_device *ndev)
{
struct resource * res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smc91x-attrib");
- struct smc_local *lp = netdev_priv(ndev);
+ struct smc_local *lp __maybe_unused = netdev_priv(ndev);
if (res)
release_mem_region(res->start, ATTRIB_SIZE);
@@ -2126,7 +2126,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
* 0 --> there is a device
* anything else, error
*/
-static int smc_drv_probe(struct platform_device *pdev)
+static int __devinit smc_drv_probe(struct platform_device *pdev)
{
struct smc91x_platdata *pd = pdev->dev.platform_data;
struct smc_local *lp;
@@ -2240,7 +2240,7 @@ static int smc_drv_probe(struct platform_device *pdev)
return ret;
}
-static int smc_drv_remove(struct platform_device *pdev)
+static int __devexit smc_drv_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct smc_local *lp = netdev_priv(ndev);
@@ -2305,7 +2305,7 @@ static int smc_drv_resume(struct platform_device *dev)
static struct platform_driver smc_driver = {
.probe = smc_drv_probe,
- .remove = smc_drv_remove,
+ .remove = __devexit_p(smc_drv_remove),
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
.driver = {
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index b6435d0d71f9..07599b492359 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -672,7 +672,6 @@ write_hash:
/**
* spider_net_prepare_tx_descr - fill tx descriptor with skb data
* @card: card structure
- * @descr: descriptor structure to fill out
* @skb: packet to use
*
* returns 0 on success, <0 on failure.
@@ -867,7 +866,6 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
/**
* spider_net_kick_tx_dma - enables TX DMA processing
* @card: card structure
- * @descr: descriptor address to enable TX processing at
*
* This routine will start the transmit DMA running if
* it is not already running. This routine need only be
@@ -1637,7 +1635,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
* spider_net_interrupt - interrupt handler for spider_net
* @irq: interrupt number
* @ptr: pointer to net_device
- * @regs: PU registers
*
* returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
* interrupt found raised by card.
@@ -2419,7 +2416,6 @@ spider_net_undo_pci_setup(struct spider_net_card *card)
/**
* spider_net_setup_pci_dev - sets up the device in terms of PCI operations
- * @card: card structure
* @pdev: PCI device
*
* Returns the card structure or NULL if any errors occur
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 1d2ef8f47780..5a40f2d78beb 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1509,6 +1509,11 @@ static int __netdev_rx(struct net_device *dev, int *quota)
desc->status = 0;
np->rx_done = (np->rx_done + 1) % DONE_Q_SIZE;
}
+
+ if (*quota == 0) { /* out of rx quota */
+ retcode = 1;
+ goto out;
+ }
writew(np->rx_done, np->base + CompletionQConsumerIdx);
out:
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4291458955ef..fed7eba65ead 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1142,6 +1142,70 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+static void gem_pcs_reset(struct gem *gp)
+{
+ int limit;
+ u32 val;
+
+ /* Reset PCS unit. */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= PCS_MIICTRL_RST;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ limit = 32;
+ while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
+ udelay(100);
+ if (limit-- <= 0)
+ break;
+ }
+ if (limit <= 0)
+ printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
+ gp->dev->name);
+}
+
+static void gem_pcs_reinit_adv(struct gem *gp)
+{
+ u32 val;
+
+ /* Make sure PCS is disabled while changing advertisement
+ * configuration.
+ */
+ val = readl(gp->regs + PCS_CFG);
+ val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Advertise all capabilities except asymmetric
+ * pause.
+ */
+ val = readl(gp->regs + PCS_MIIADV);
+ val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
+ PCS_MIIADV_SP | PCS_MIIADV_AP);
+ writel(val, gp->regs + PCS_MIIADV);
+
+ /* Enable and restart auto-negotiation, disable wrapback/loopback,
+ * and re-enable PCS.
+ */
+ val = readl(gp->regs + PCS_MIICTRL);
+ val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
+ val &= ~PCS_MIICTRL_WB;
+ writel(val, gp->regs + PCS_MIICTRL);
+
+ val = readl(gp->regs + PCS_CFG);
+ val |= PCS_CFG_ENABLE;
+ writel(val, gp->regs + PCS_CFG);
+
+ /* Make sure serialink loopback is off. The meaning
+ * of this bit is logically inverted based upon whether
+ * you are in Serialink or SERDES mode.
+ */
+ val = readl(gp->regs + PCS_SCTRL);
+ if (gp->phy_type == phy_serialink)
+ val &= ~PCS_SCTRL_LOOP;
+ else
+ val |= PCS_SCTRL_LOOP;
+ writel(val, gp->regs + PCS_SCTRL);
+}
+
#define STOP_TRIES 32
/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1168,6 +1232,9 @@ static void gem_reset(struct gem *gp)
if (limit <= 0)
printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+
+ if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
+ gem_pcs_reinit_adv(gp);
}
/* Must be invoked under gp->lock and gp->tx_lock. */
@@ -1324,7 +1391,7 @@ static int gem_set_link_modes(struct gem *gp)
gp->phy_type == phy_serdes) {
u32 pcs_lpa = readl(gp->regs + PCS_MIILP);
- if (pcs_lpa & PCS_MIIADV_FD)
+ if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes)
full_duplex = 1;
speed = SPEED_1000;
}
@@ -1488,6 +1555,9 @@ static void gem_link_timer(unsigned long data)
val = readl(gp->regs + PCS_MIISTAT);
if ((val & PCS_MIISTAT_LS) != 0) {
+ if (gp->lstate == link_up)
+ goto restart;
+
gp->lstate = link_up;
netif_carrier_on(gp->dev);
(void)gem_set_link_modes(gp);
@@ -1708,61 +1778,8 @@ static void gem_init_phy(struct gem *gp)
if (gp->phy_mii.def && gp->phy_mii.def->ops->init)
gp->phy_mii.def->ops->init(&gp->phy_mii);
} else {
- u32 val;
- int limit;
-
- /* Reset PCS unit. */
- val = readl(gp->regs + PCS_MIICTRL);
- val |= PCS_MIICTRL_RST;
- writeb(val, gp->regs + PCS_MIICTRL);
-
- limit = 32;
- while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) {
- udelay(100);
- if (limit-- <= 0)
- break;
- }
- if (limit <= 0)
- printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
- gp->dev->name);
-
- /* Make sure PCS is disabled while changing advertisement
- * configuration.
- */
- val = readl(gp->regs + PCS_CFG);
- val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
- writel(val, gp->regs + PCS_CFG);
-
- /* Advertise all capabilities except assymetric
- * pause.
- */
- val = readl(gp->regs + PCS_MIIADV);
- val |= (PCS_MIIADV_FD | PCS_MIIADV_HD |
- PCS_MIIADV_SP | PCS_MIIADV_AP);
- writel(val, gp->regs + PCS_MIIADV);
-
- /* Enable and restart auto-negotiation, disable wrapback/loopback,
- * and re-enable PCS.
- */
- val = readl(gp->regs + PCS_MIICTRL);
- val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE);
- val &= ~PCS_MIICTRL_WB;
- writel(val, gp->regs + PCS_MIICTRL);
-
- val = readl(gp->regs + PCS_CFG);
- val |= PCS_CFG_ENABLE;
- writel(val, gp->regs + PCS_CFG);
-
- /* Make sure serialink loopback is off. The meaning
- * of this bit is logically inverted based upon whether
- * you are in Serialink or SERDES mode.
- */
- val = readl(gp->regs + PCS_SCTRL);
- if (gp->phy_type == phy_serialink)
- val &= ~PCS_SCTRL_LOOP;
- else
- val |= PCS_SCTRL_LOOP;
- writel(val, gp->regs + PCS_SCTRL);
+ gem_pcs_reset(gp);
+ gem_pcs_reinit_adv(gp);
}
/* Default aneg parameters */
@@ -2680,6 +2697,21 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->speed = 0;
cmd->duplex = cmd->port = cmd->phy_address =
cmd->transceiver = cmd->autoneg = 0;
+
+ /* serdes usually means a fibre connector, with mostly fixed settings */
+ if (gp->phy_type == phy_serdes) {
+ cmd->port = PORT_FIBRE;
+ cmd->supported = (SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_FIBRE | SUPPORTED_Autoneg |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ cmd->advertising = cmd->supported;
+ cmd->transceiver = XCVR_INTERNAL;
+ if (gp->lstate == link_up)
+ cmd->speed = SPEED_1000;
+ cmd->duplex = DUPLEX_FULL;
+ cmd->autoneg = 1;
+ }
}
cmd->maxtxpkt = cmd->maxrxpkt = 0;
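gem_pcs_reset() and gem_pcs_reinit_adv() above factor a long run of read-modify-write cycles on the PCS registers out of gem_init_phy(), so gem_reset() can replay them whenever a serialink/SERDES port is reset. The read-modify-write idiom by itself — the register and bit values below are invented, only the shape matches the driver:

#include <stdint.h>
#include <stdio.h>

#define PCS_CFG_ENABLE 0x01u   /* illustrative values, not the real bits */
#define PCS_CFG_TO     0x02u

static uint32_t pcs_cfg = PCS_CFG_ENABLE | PCS_CFG_TO;

int main(void)
{
	uint32_t val;

	/* Disable PCS while reprogramming advertisement. */
	val = pcs_cfg;                        /* readl() */
	val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO);
	pcs_cfg = val;                        /* writel() */

	/* ...advertisement registers would be programmed here... */

	val = pcs_cfg;
	val |= PCS_CFG_ENABLE;                /* re-enable */
	pcs_cfg = val;

	printf("pcs_cfg=0x%02x\n", pcs_cfg);  /* 0x01 */
	return 0;
}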
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index c41d68761364..e60498232b94 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1098,6 +1098,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
dma_addr_t tail_list_phys;
u8 *tail_buffer;
unsigned long flags;
+ unsigned int txlen;
if ( ! priv->phyOnline ) {
TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
@@ -1108,6 +1109,7 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
return 0;
+ txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
tail_list = priv->txList + priv->txTail;
tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail;
@@ -1125,16 +1127,16 @@ static int TLan_StartTx( struct sk_buff *skb, struct net_device *dev )
if ( bbuf ) {
tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE );
- skb_copy_from_linear_data(skb, tail_buffer, skb->len);
+ skb_copy_from_linear_data(skb, tail_buffer, txlen);
} else {
tail_list->buffer[0].address = pci_map_single(priv->pciDev,
- skb->data, skb->len,
+ skb->data, txlen,
PCI_DMA_TODEVICE);
TLan_StoreSKB(tail_list, skb);
}
- tail_list->frameSize = (u16) skb->len;
- tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) skb->len;
+ tail_list->frameSize = (u16) txlen;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
tail_list->buffer[1].count = 0;
tail_list->buffer[1].address = 0;
@@ -1431,7 +1433,9 @@ static u32 TLan_HandleTxEOF( struct net_device *dev, u16 host_int )
if ( ! bbuf ) {
struct sk_buff *skb = TLan_GetSKB(head_list);
pci_unmap_single(priv->pciDev, head_list->buffer[0].address,
- skb->len, PCI_DMA_TODEVICE);
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
head_list->buffer[8].address = 0;
head_list->buffer[9].address = 0;
@@ -2055,9 +2059,12 @@ static void TLan_FreeLists( struct net_device *dev )
list = priv->txList + i;
skb = TLan_GetSKB(list);
if ( skb ) {
- pci_unmap_single(priv->pciDev,
- list->buffer[0].address, skb->len,
- PCI_DMA_TODEVICE);
+ pci_unmap_single(
+ priv->pciDev,
+ list->buffer[0].address,
+ max(skb->len,
+ (unsigned int)TLAN_MIN_FRAME_SIZE),
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any( skb );
list->buffer[8].address = 0;
list->buffer[9].address = 0;
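The TLAN change enforces a streaming-DMA rule: the length passed to pci_unmap_single() must equal the length passed to pci_map_single(), so once the transmit path pads short frames to TLAN_MIN_FRAME_SIZE, the EOF and free paths must recompute the same max(len, MIN) rather than using skb->len. The length rule in isolation — 64 is assumed here for the minimum frame size:

#include <stdio.h>

#define MIN_FRAME 64u /* stand-in for TLAN_MIN_FRAME_SIZE */

/* Same expression on the map and unmap sides, per the patch. */
static unsigned int tx_dma_len(unsigned int skb_len)
{
	return skb_len > MIN_FRAME ? skb_len : MIN_FRAME;
}

int main(void)
{
	printf("map=%u unmap=%u\n", tx_dma_len(42), tx_dma_len(42));     /* 64 64 */
	printf("map=%u unmap=%u\n", tx_dma_len(1500), tx_dma_len(1500)); /* 1500 */
	return 0;
}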
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 8e46a513a252..c91852f49a48 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -420,9 +420,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
/* Allocate Tx/Rx descriptor memory */
db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) *
DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+ if (!db->desc_pool_ptr)
+ goto err_out_res;
db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC *
TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+ if (!db->buf_pool_ptr)
+ goto err_out_free_desc;
db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
db->first_tx_desc_dma = db->desc_pool_dma_ptr;
@@ -469,7 +473,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
err = register_netdev (dev);
if (err)
- goto err_out_res;
+ goto err_out_free_buf;
printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, "
"%s, irq %d.\n",
@@ -483,6 +487,12 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
return 0;
+err_out_free_buf:
+ pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+ db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_free_desc:
+ pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+ db->desc_pool_ptr, db->desc_pool_dma_ptr);
err_out_res:
pci_release_regions(pdev);
err_out_disable:
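dmfe_init_one() above grows the classic goto ladder: each successful allocation adds a rung, and any later failure jumps to the label that frees exactly what was allocated so far, in reverse order. A skeleton of the ladder with malloc standing in for pci_alloc_consistent():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *desc, *buf;

	desc = malloc(128);              /* descriptor pool */
	if (!desc)
		goto err_out;

	buf = malloc(256);               /* buffer pool */
	if (!buf)
		goto err_free_desc;

	if (0 /* register_netdev() failing */)
		goto err_free_buf;

	puts("probe ok");
	free(buf);
	free(desc);
	return 0;

err_free_buf:
	free(buf);
err_free_desc:
	free(desc);
err_out:
	return 1;
}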
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 6daea0c91862..33b6d1b122fb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1070,8 +1070,6 @@ static int tun_chr_close(struct inode *inode, struct file *file)
DBG(KERN_INFO "%s: tun_chr_close\n", tun->dev->name);
- tun_chr_fasync(-1, file, 0);
-
rtnl_lock();
/* Detach from net device */
diff --git a/drivers/net/ucc_geth_ethtool.c b/drivers/net/ucc_geth_ethtool.c
index cfbbfee55836..68a7f5414133 100644
--- a/drivers/net/ucc_geth_ethtool.c
+++ b/drivers/net/ucc_geth_ethtool.c
@@ -37,7 +37,6 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/types.h>
-#include <asm/uaccess.h>
#include "ucc_geth.h"
#include "ucc_geth_mii.h"
@@ -324,17 +323,17 @@ static void uec_get_ethtool_stats(struct net_device *netdev,
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_HARDWARE) {
base = (u32 __iomem *)&ugeth->ug_regs->tx64;
for (i = 0; i < UEC_HW_STATS_LEN; i++)
- data[j++] = (u64)in_be32(&base[i]);
+ data[j++] = in_be32(&base[i]);
}
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_TX) {
base = (u32 __iomem *)ugeth->p_tx_fw_statistics_pram;
for (i = 0; i < UEC_TX_FW_STATS_LEN; i++)
- data[j++] = (u64)in_be32(&base[i]);
+ data[j++] = base ? in_be32(&base[i]) : 0;
}
if (stats_mode & UCC_GETH_STATISTICS_GATHERING_MODE_FIRMWARE_RX) {
base = (u32 __iomem *)ugeth->p_rx_fw_statistics_pram;
for (i = 0; i < UEC_RX_FW_STATS_LEN; i++)
- data[j++] = (u64)in_be32(&base[i]);
+ data[j++] = base ? in_be32(&base[i]) : 0;
}
}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 37ecf845edfe..de57490103fc 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1102,12 +1102,14 @@ static int ax88178_link_reset(struct usbnet *dev)
mode = AX88178_MEDIUM_DEFAULT;
if (ecmd.speed == SPEED_1000)
- mode |= AX_MEDIUM_GM | AX_MEDIUM_ENCK;
+ mode |= AX_MEDIUM_GM;
else if (ecmd.speed == SPEED_100)
mode |= AX_MEDIUM_PS;
else
mode &= ~(AX_MEDIUM_PS | AX_MEDIUM_GM);
+ mode |= AX_MEDIUM_ENCK;
+
if (ecmd.duplex == DUPLEX_FULL)
mode |= AX_MEDIUM_FD;
else
@@ -1444,6 +1446,10 @@ static const struct usb_device_id products [] = {
// Apple USB Ethernet Adapter
USB_DEVICE(0x05ac, 0x1402),
.driver_info = (unsigned long) &ax88772_info,
+}, {
+ // Cables-to-Go USB Ethernet Adapter
+ USB_DEVICE(0x0b95, 0x772a),
+ .driver_info = (unsigned long) &ax88772_info,
},
{ }, // END
};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 78df2be8a728..db3377dae9d5 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -396,6 +396,20 @@ static void dm9601_set_multicast(struct net_device *net)
dm_write_reg_async(dev, DM_RX_CTRL, rx_ctl);
}
+static int dm9601_set_mac_address(struct net_device *net, void *p)
+{
+ struct sockaddr *addr = p;
+ struct usbnet *dev = netdev_priv(net);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(net->dev_addr, addr->sa_data, net->addr_len);
+ dm_write_async(dev, DM_PHY_ADDR, net->addr_len, net->dev_addr);
+
+ return 0;
+}
+
static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
{
int ret;
@@ -406,6 +420,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->do_ioctl = dm9601_ioctl;
dev->net->set_multicast_list = dm9601_set_multicast;
+ dev->net->set_mac_address = dm9601_set_mac_address;
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
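
dm9601_set_mac_address above follows the standard shape for this hook: validate the requested address, copy it into dev_addr, then push it to the chip (here asynchronously via DM_PHY_ADDR). For reference, a minimal stand-in capturing the spirit of the is_valid_ether_addr() check it relies on (not multicast, not all-zero):

#include <string.h>

#define ETH_ALEN 6

/* Reject multicast (LSB of the first octet set) and all-zero
 * addresses, the two classes is_valid_ether_addr() filters out. */
static int ether_addr_ok(const unsigned char *a)
{
        static const unsigned char zero[ETH_ALEN];

        return !(a[0] & 1) && memcmp(a, zero, ETH_ALEN) != 0;
}
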
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1164c52e2c0a..8e90891f0e42 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2184,19 +2184,20 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
struct usb_interface *interface)
{
struct hso_net *hso_net = dev2net(hso_dev);
- struct device *dev = hso_dev->dev;
+ struct device *dev = &hso_net->net->dev;
char *rfkn;
hso_net->rfkill = rfkill_allocate(&interface_to_usbdev(interface)->dev,
- RFKILL_TYPE_WLAN);
+ RFKILL_TYPE_WWAN);
if (!hso_net->rfkill) {
- dev_err(dev, "%s - Out of memory", __func__);
+ dev_err(dev, "%s - Out of memory\n", __func__);
return;
}
rfkn = kzalloc(20, GFP_KERNEL);
if (!rfkn) {
rfkill_free(hso_net->rfkill);
- dev_err(dev, "%s - Out of memory", __func__);
+ hso_net->rfkill = NULL;
+ dev_err(dev, "%s - Out of memory\n", __func__);
return;
}
snprintf(rfkn, 20, "hso-%d",
@@ -2209,7 +2210,8 @@ static void hso_create_rfkill(struct hso_device *hso_dev,
kfree(rfkn);
hso_net->rfkill->name = NULL;
rfkill_free(hso_net->rfkill);
- dev_err(dev, "%s - Failed to register rfkill", __func__);
+ hso_net->rfkill = NULL;
+ dev_err(dev, "%s - Failed to register rfkill\n", __func__);
return;
}
}
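
Both hso error paths above now clear hso_net->rfkill right after rfkill_free(), which is what stops the later destroy path from freeing or dereferencing the stale pointer. The guard in miniature, with free() standing in for rfkill_free():

#include <stdlib.h>

/* Release an object through its owner's slot and clear the slot, so a
 * second teardown pass over the same slot is a harmless no-op. */
static void release_slot(void **slot)
{
        free(*slot);
        *slot = NULL;
}
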
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 2dced383bcfb..11cb3e504e1c 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -521,7 +521,7 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
* we don't duplicate code for each option.
*/
-static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, char *devname)
+static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max, int def, char *name, const char *devname)
{
if (val == -1)
*opt = def;
@@ -550,7 +550,7 @@ static void __devinit velocity_set_int_opt(int *opt, int val, int min, int max,
* we don't duplicate code for each option.
*/
-static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, char *devname)
+static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag, char *name, const char *devname)
{
(*opt) &= (~flag);
if (val == -1)
@@ -576,7 +576,7 @@ static void __devinit velocity_set_bool_opt(u32 * opt, int val, int def, u32 fla
* for the current device
*/
-static void __devinit velocity_get_options(struct velocity_opt *opts, int index, char *devname)
+static void __devinit velocity_get_options(struct velocity_opt *opts, int index, const char *devname)
{
velocity_set_int_opt(&opts->rx_thresh, rx_thresh[index], RX_THRESH_MIN, RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh", devname);
@@ -863,6 +863,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
static int first = 1;
struct net_device *dev;
int i;
+ const char *drv_string;
const struct velocity_info_tbl *info = &chip_info_table[ent->driver_data];
struct velocity_info *vptr;
struct mac_regs __iomem * regs;
@@ -935,7 +936,9 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
dev->dev_addr[i] = readb(&regs->PAR[i]);
- velocity_get_options(&vptr->options, velocity_nics, dev->name);
+ drv_string = dev_driver_string(&pdev->dev);
+
+ velocity_get_options(&vptr->options, velocity_nics, drv_string);
/*
* Mask out the options cannot be set to the chip
@@ -2293,7 +2296,7 @@ static void velocity_set_multi(struct net_device *dev)
}
mac_set_cam_mask(regs, vptr->mCAMmask);
- rx_mode = (RCR_AM | RCR_AB);
+ rx_mode = RCR_AM | RCR_AB | RCR_AP;
}
if (dev->mtu > 1500)
rx_mode |= RCR_AL;
diff --git a/drivers/net/wan/syncppp.c b/drivers/net/wan/syncppp.c
index 327d58589e12..6e92f7b44b1a 100644
--- a/drivers/net/wan/syncppp.c
+++ b/drivers/net/wan/syncppp.c
@@ -756,10 +756,11 @@ static void sppp_cisco_input (struct sppp *sp, struct sk_buff *skb)
case CISCO_ADDR_REQ:
/* Stolen from net/ipv4/devinet.c -- SIOCGIFADDR ioctl */
{
- struct in_device *in_dev;
- struct in_ifaddr *ifa;
__be32 addr = 0, mask = htonl(~0U); /* FIXME: is the mask correct? */
#ifdef CONFIG_INET
+ struct in_device *in_dev;
+ struct in_ifaddr *ifa;
+
rcu_read_lock();
if ((in_dev = __in_dev_get_rcu(dev)) != NULL)
{
diff --git a/drivers/net/wan/z85230.c b/drivers/net/wan/z85230.c
index ccd9cd35ecbe..5bf7e01ef0e9 100644
--- a/drivers/net/wan/z85230.c
+++ b/drivers/net/wan/z85230.c
@@ -695,7 +695,6 @@ EXPORT_SYMBOL(z8530_nop);
* z8530_interrupt - Handle an interrupt from a Z8530
* @irq: Interrupt number
* @dev_id: The Z8530 device that is interrupting.
- * @regs: unused
*
* A Z85[2]30 device has stuck its hand in the air for attention.
* We scan both the channels on the chip for events and then call
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 9b95c4049b31..2d14255eb103 100644
--- a/drivers/net/wireless/ath5k/base.c
+++ b/drivers/net/wireless/ath5k/base.c
@@ -240,6 +240,10 @@ static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
static int ath5k_beacon_update(struct ieee80211_hw *hw,
struct sk_buff *skb);
+static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes);
static struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
@@ -256,6 +260,7 @@ static struct ieee80211_ops ath5k_hw_ops = {
.get_tx_stats = ath5k_get_tx_stats,
.get_tsf = ath5k_get_tsf,
.reset_tsf = ath5k_reset_tsf,
+ .bss_info_changed = ath5k_bss_info_changed,
};
/*
@@ -340,9 +345,9 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
}
/* Interrupt handling */
-static int ath5k_init(struct ath5k_softc *sc);
+static int ath5k_init(struct ath5k_softc *sc, bool is_resume);
static int ath5k_stop_locked(struct ath5k_softc *sc);
-static int ath5k_stop_hw(struct ath5k_softc *sc);
+static int ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend);
static irqreturn_t ath5k_intr(int irq, void *dev_id);
static void ath5k_tasklet_reset(unsigned long data);
@@ -646,7 +651,7 @@ ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
ath5k_led_off(sc);
- ath5k_stop_hw(sc);
+ ath5k_stop_hw(sc, true);
free_irq(pdev->irq, sc);
pci_save_state(pdev);
@@ -661,8 +666,7 @@ ath5k_pci_resume(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_softc *sc = hw->priv;
- struct ath5k_hw *ah = sc->ah;
- int i, err;
+ int err;
pci_restore_state(pdev);
@@ -683,21 +687,11 @@ ath5k_pci_resume(struct pci_dev *pdev)
goto err_no_irq;
}
- err = ath5k_init(sc);
+ err = ath5k_init(sc, true);
if (err)
goto err_irq;
ath5k_led_enable(sc);
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up or resume.
- *
- * FIXME: This may need to be revisited when mac80211 becomes
- * aware of suspend/resume.
- */
- for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
- ath5k_hw_reset_key(ah, i);
-
return 0;
err_irq:
free_irq(pdev->irq, sc);
@@ -718,7 +712,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
struct ath5k_softc *sc = hw->priv;
struct ath5k_hw *ah = sc->ah;
u8 mac[ETH_ALEN];
- unsigned int i;
int ret;
ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
@@ -737,13 +730,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
__set_bit(ATH_STAT_MRRETRY, sc->status);
/*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
- ath5k_hw_reset_key(ah, i);
-
- /*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
* on settings like the phy mode and regulatory
@@ -2200,12 +2186,18 @@ ath5k_beacon_config(struct ath5k_softc *sc)
\********************/
static int
-ath5k_init(struct ath5k_softc *sc)
+ath5k_init(struct ath5k_softc *sc, bool is_resume)
{
- int ret;
+ struct ath5k_hw *ah = sc->ah;
+ int ret, i;
mutex_lock(&sc->lock);
+ if (is_resume && !test_bit(ATH_STAT_STARTED, sc->status))
+ goto out_ok;
+
+ __clear_bit(ATH_STAT_STARTED, sc->status);
+
ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "mode %d\n", sc->opmode);
/*
@@ -2230,12 +2222,22 @@ ath5k_init(struct ath5k_softc *sc)
if (ret)
goto done;
+ /*
+ * Reset the key cache since some parts do not reset the
+ * contents on initial power up or resume from suspend.
+ */
+ for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
+ ath5k_hw_reset_key(ah, i);
+
+ __set_bit(ATH_STAT_STARTED, sc->status);
+
/* Set ack to be sent at low bit-rates */
- ath5k_hw_set_ack_bitrate_high(sc->ah, false);
+ ath5k_hw_set_ack_bitrate_high(ah, false);
mod_timer(&sc->calib_tim, round_jiffies(jiffies +
msecs_to_jiffies(ath5k_calinterval * 1000)));
+out_ok:
ret = 0;
done:
mmiowb();
@@ -2290,7 +2292,7 @@ ath5k_stop_locked(struct ath5k_softc *sc)
* stop is preempted).
*/
static int
-ath5k_stop_hw(struct ath5k_softc *sc)
+ath5k_stop_hw(struct ath5k_softc *sc, bool is_suspend)
{
int ret;
@@ -2321,6 +2323,9 @@ ath5k_stop_hw(struct ath5k_softc *sc)
}
}
ath5k_txbuf_free(sc, sc->bbuf);
+ if (!is_suspend)
+ __clear_bit(ATH_STAT_STARTED, sc->status);
+
mmiowb();
mutex_unlock(&sc->lock);
@@ -2718,12 +2723,12 @@ ath5k_reset_wake(struct ath5k_softc *sc)
static int ath5k_start(struct ieee80211_hw *hw)
{
- return ath5k_init(hw->priv);
+ return ath5k_init(hw->priv, false);
}
static void ath5k_stop(struct ieee80211_hw *hw)
{
- ath5k_stop_hw(hw->priv);
+ ath5k_stop_hw(hw->priv, false);
}
static int ath5k_add_interface(struct ieee80211_hw *hw,
@@ -2942,7 +2947,7 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
sc->opmode != NL80211_IFTYPE_MESH_POINT &&
test_bit(ATH_STAT_PROMISC, sc->status))
rfilt |= AR5K_RX_FILTER_PROM;
- if (sc->opmode == NL80211_IFTYPE_STATION ||
+ if ((sc->opmode == NL80211_IFTYPE_STATION && sc->assoc) ||
sc->opmode == NL80211_IFTYPE_ADHOC) {
rfilt |= AR5K_RX_FILTER_BEACON;
}
@@ -3083,4 +3088,32 @@ ath5k_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
end:
return ret;
}
+static void
+set_beacon_filter(struct ieee80211_hw *hw, bool enable)
+{
+ struct ath5k_softc *sc = hw->priv;
+ struct ath5k_hw *ah = sc->ah;
+ u32 rfilt;
+ rfilt = ath5k_hw_get_rx_filter(ah);
+ if (enable)
+ rfilt |= AR5K_RX_FILTER_BEACON;
+ else
+ rfilt &= ~AR5K_RX_FILTER_BEACON;
+ ath5k_hw_set_rx_filter(ah, rfilt);
+ sc->filter_flags = rfilt;
+}
+static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf,
+ u32 changes)
+{
+ struct ath5k_softc *sc = hw->priv;
+ if (changes & BSS_CHANGED_ASSOC) {
+ mutex_lock(&sc->lock);
+ sc->assoc = bss_conf->assoc;
+ if (sc->opmode == NL80211_IFTYPE_STATION)
+ set_beacon_filter(hw, sc->assoc);
+ mutex_unlock(&sc->lock);
+ }
+}
diff --git a/drivers/net/wireless/ath5k/base.h b/drivers/net/wireless/ath5k/base.h
index 9d0b728928e3..facc60ddada2 100644
--- a/drivers/net/wireless/ath5k/base.h
+++ b/drivers/net/wireless/ath5k/base.h
@@ -128,11 +128,12 @@ struct ath5k_softc {
size_t desc_len; /* size of TX/RX descriptors */
u16 cachelsz; /* cache line size */
- DECLARE_BITMAP(status, 4);
+ DECLARE_BITMAP(status, 5);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
#define ATH_STAT_PROMISC 2
#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
+#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
unsigned int curmode; /* current phy mode */
@@ -178,6 +179,7 @@ struct ath5k_softc {
struct timer_list calib_tim; /* calibration timer */
int power_level; /* Requested tx power in dbm */
+ bool assoc; /* association state */
};
#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath5k/debug.c b/drivers/net/wireless/ath5k/debug.c
index 8f92d670f614..ccaeb5c219d2 100644
--- a/drivers/net/wireless/ath5k/debug.c
+++ b/drivers/net/wireless/ath5k/debug.c
@@ -339,7 +339,7 @@ static struct {
{ ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
- { ATH5K_DEBUG_LED, "led", "LED mamagement" },
+ { ATH5K_DEBUG_LED, "led", "LED management" },
{ ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
{ ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
@@ -417,19 +417,19 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
sc->debug.debugfs_phydir = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
ath5k_global_debugfs);
- sc->debug.debugfs_debug = debugfs_create_file("debug", 0666,
+ sc->debug.debugfs_debug = debugfs_create_file("debug", S_IWUSR | S_IRUGO,
sc->debug.debugfs_phydir, sc, &fops_debug);
- sc->debug.debugfs_registers = debugfs_create_file("registers", 0444,
+ sc->debug.debugfs_registers = debugfs_create_file("registers", S_IRUGO,
sc->debug.debugfs_phydir, sc, &fops_registers);
- sc->debug.debugfs_tsf = debugfs_create_file("tsf", 0666,
+ sc->debug.debugfs_tsf = debugfs_create_file("tsf", S_IWUSR | S_IRUGO,
sc->debug.debugfs_phydir, sc, &fops_tsf);
- sc->debug.debugfs_beacon = debugfs_create_file("beacon", 0666,
+ sc->debug.debugfs_beacon = debugfs_create_file("beacon", S_IWUSR | S_IRUGO,
sc->debug.debugfs_phydir, sc, &fops_beacon);
- sc->debug.debugfs_reset = debugfs_create_file("reset", 0222,
+ sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
sc->debug.debugfs_phydir, sc, &fops_reset);
}
diff --git a/drivers/net/wireless/ath5k/desc.c b/drivers/net/wireless/ath5k/desc.c
index dd1374052ba9..5e362a7a3620 100644
--- a/drivers/net/wireless/ath5k/desc.c
+++ b/drivers/net/wireless/ath5k/desc.c
@@ -531,10 +531,10 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
- rs->rs_antenna = rx_status->rx_status_0 &
- AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA;
- rs->rs_more = rx_status->rx_status_0 &
- AR5K_5210_RX_DESC_STATUS0_MORE;
+ rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANTENNA);
+ rs->rs_more = !!(rx_status->rx_status_0 &
+ AR5K_5210_RX_DESC_STATUS0_MORE);
/* TODO: this timestamp is 13 bit, later on we assume 15 bit */
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
@@ -607,10 +607,10 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
- rs->rs_antenna = rx_status->rx_status_0 &
- AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA;
- rs->rs_more = rx_status->rx_status_0 &
- AR5K_5212_RX_DESC_STATUS0_MORE;
+ rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
+ AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
+ rs->rs_more = !!(rx_status->rx_status_0 &
+ AR5K_5212_RX_DESC_STATUS0_MORE);
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
rs->rs_status = 0;
diff --git a/drivers/net/wireless/ath5k/initvals.c b/drivers/net/wireless/ath5k/initvals.c
index ea2e1a20b499..ceaa6c475c06 100644
--- a/drivers/net/wireless/ath5k/initvals.c
+++ b/drivers/net/wireless/ath5k/initvals.c
@@ -806,6 +806,8 @@ static const struct ath5k_ini_mode ar5212_rf5111_ini_mode_end[] = {
{ 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY(642),
{ 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } },
+ { 0xa228,
+ { 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5, 0x000001b5 } },
{ 0xa23c,
{ 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af, 0x13c889af } },
};
diff --git a/drivers/net/wireless/ath5k/reset.c b/drivers/net/wireless/ath5k/reset.c
index 8f1886834e61..1b6d45b6772d 100644
--- a/drivers/net/wireless/ath5k/reset.c
+++ b/drivers/net/wireless/ath5k/reset.c
@@ -537,9 +537,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
mdelay(1);
/*
- * Write some more initial register settings
+ * Write some more initial register settings for revised chips
*/
- if (ah->ah_version == AR5K_AR5212) {
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_phy_revision > 0x41) {
ath5k_hw_reg_write(ah, 0x0002a002, 0x982c);
if (channel->hw_value == CHANNEL_G)
@@ -558,19 +559,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
else
ath5k_hw_reg_write(ah, 0x00000000, 0x994c);
- /* Some bits are disabled here, we know nothing about
- * register 0xa228 yet, most of the times this ends up
- * with a value 0x9b5 -haven't seen any dump with
- * a different value- */
- /* Got this from decompiling binary HAL */
- data = ath5k_hw_reg_read(ah, 0xa228);
- data &= 0xfffffdff;
- ath5k_hw_reg_write(ah, data, 0xa228);
-
- data = ath5k_hw_reg_read(ah, 0xa228);
- data &= 0xfffe03ff;
- ath5k_hw_reg_write(ah, data, 0xa228);
- data = 0;
+ /* Got this from legacy-hal */
+ AR5K_REG_DISABLE_BITS(ah, 0xa228, 0x200);
+
+ AR5K_REG_MASKED_BITS(ah, 0xa228, 0x800, 0xfffe03ff);
/* Just write 0x9b5 ? */
/* ath5k_hw_reg_write(ah, 0x000009b5, 0xa228); */
diff --git a/drivers/net/wireless/ath9k/beacon.c b/drivers/net/wireless/ath9k/beacon.c
index 9e15c30bbc06..4dd1c1bda0fb 100644
--- a/drivers/net/wireless/ath9k/beacon.c
+++ b/drivers/net/wireless/ath9k/beacon.c
@@ -170,7 +170,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
skb = (struct sk_buff *)bf->bf_mpdu;
if (skb) {
pci_unmap_single(sc->pdev, bf->bf_dmacontext,
- skb_end_pointer(skb) - skb->head,
+ skb->len,
PCI_DMA_TODEVICE);
}
@@ -193,7 +193,7 @@ static struct ath_buf *ath_beacon_generate(struct ath_softc *sc, int if_id)
bf->bf_buf_addr = bf->bf_dmacontext =
pci_map_single(sc->pdev, skb->data,
- skb_end_pointer(skb) - skb->head,
+ skb->len,
PCI_DMA_TODEVICE);
skb = ieee80211_get_buffered_bc(sc->hw, avp->av_if_data);
@@ -352,7 +352,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
if (bf->bf_mpdu != NULL) {
skb = (struct sk_buff *)bf->bf_mpdu;
pci_unmap_single(sc->pdev, bf->bf_dmacontext,
- skb_end_pointer(skb) - skb->head,
+ skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
@@ -412,7 +412,7 @@ int ath_beacon_alloc(struct ath_softc *sc, int if_id)
bf->bf_buf_addr = bf->bf_dmacontext =
pci_map_single(sc->pdev, skb->data,
- skb_end_pointer(skb) - skb->head,
+ skb->len,
PCI_DMA_TODEVICE);
bf->bf_mpdu = skb;
@@ -439,7 +439,7 @@ void ath_beacon_return(struct ath_softc *sc, struct ath_vap *avp)
if (bf->bf_mpdu != NULL) {
struct sk_buff *skb = (struct sk_buff *)bf->bf_mpdu;
pci_unmap_single(sc->pdev, bf->bf_dmacontext,
- skb_end_pointer(skb) - skb->head,
+ skb->len,
PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
bf->bf_mpdu = NULL;
diff --git a/drivers/net/wireless/ath9k/recv.c b/drivers/net/wireless/ath9k/recv.c
index 4983402af559..504a0444d89f 100644
--- a/drivers/net/wireless/ath9k/recv.c
+++ b/drivers/net/wireless/ath9k/recv.c
@@ -49,10 +49,12 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
ASSERT(skb != NULL);
ds->ds_vdata = skb->data;
- /* setup rx descriptors */
+ /* setup rx descriptors. The sc_rxbufsize here tells the hardware
+ * how much data it can DMA to us and that we are prepared
+ * to process */
ath9k_hw_setuprxdesc(ah,
ds,
- skb_tailroom(skb), /* buffer size */
+ sc->sc_rxbufsize,
0);
if (sc->sc_rxlink == NULL)
@@ -398,6 +400,13 @@ static struct sk_buff *ath_rxbuf_alloc(struct ath_softc *sc,
* in rx'd frames.
*/
+ /* Note: the kernel can allocate a buffer larger than
+ * what we ask it to give us. We really only need 4 KB, as that
+ * is what this hardware supports, and in fact we need at least
+ * 3849 bytes, as that is the MAX AMSDU size this hardware
+ * supports. Unfortunately this means we may get 8 KB here from
+ * the kernel... and that is actually what is observed on some
+ * systems :( */
skb = dev_alloc_skb(len + sc->sc_cachelsz - 1);
if (skb != NULL) {
off = ((unsigned long) skb->data) % sc->sc_cachelsz;
@@ -456,7 +465,7 @@ static int ath_rx_indicate(struct ath_softc *sc,
if (nskb != NULL) {
bf->bf_mpdu = nskb;
bf->bf_buf_addr = pci_map_single(sc->pdev, nskb->data,
- skb_end_pointer(nskb) - nskb->head,
+ sc->sc_rxbufsize,
PCI_DMA_FROMDEVICE);
bf->bf_dmacontext = bf->bf_buf_addr;
ATH_RX_CONTEXT(nskb)->ctx_rxbuf = bf;
@@ -542,7 +551,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
bf->bf_mpdu = skb;
bf->bf_buf_addr = pci_map_single(sc->pdev, skb->data,
- skb_end_pointer(skb) - skb->head,
+ sc->sc_rxbufsize,
PCI_DMA_FROMDEVICE);
bf->bf_dmacontext = bf->bf_buf_addr;
ATH_RX_CONTEXT(skb)->ctx_rxbuf = bf;
@@ -1007,7 +1016,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
pci_dma_sync_single_for_cpu(sc->pdev,
bf->bf_buf_addr,
- skb_tailroom(skb),
+ sc->sc_rxbufsize,
PCI_DMA_FROMDEVICE);
pci_unmap_single(sc->pdev,
bf->bf_buf_addr,
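
The recv.c hunks above all replace per-skb derived lengths (skb_tailroom(), skb_end_pointer() - skb->head) with the fixed sc_rxbufsize, so the descriptor setup, the mapping, the sync, and the unmap use one agreed-on size. The companion allocation path (around the dev_alloc_skb() call) pads the allocation by sc_cachelsz - 1 bytes and reserves the remainder so the payload starts cache-aligned; the arithmetic in isolation, as a sketch:

#include <stdint.h>

/* Given the buffer start and the cache line size (a power of two),
 * return how many bytes to skip so the payload is cache-aligned --
 * the same computation as the skb_reserve() after dev_alloc_skb(). */
static unsigned int cache_align_skip(uintptr_t data, unsigned int cachelsz)
{
        unsigned int off = data % cachelsz;

        return off ? cachelsz - off : 0;
}
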
diff --git a/drivers/net/wireless/hostap/hostap_wlan.h b/drivers/net/wireless/hostap/hostap_wlan.h
index ffdf4876121b..a68f97c39359 100644
--- a/drivers/net/wireless/hostap/hostap_wlan.h
+++ b/drivers/net/wireless/hostap/hostap_wlan.h
@@ -918,9 +918,12 @@ struct hostap_interface {
/*
* TX meta data - stored in skb->cb buffer, so this must not be increased over
- * the 40-byte limit
+ * the 48-byte limit.
+ * THE PADDING THIS STARTS WITH IS A HORRIBLE HACK THAT SHOULD NOT LIVE
+ * TO SEE THE DAY.
*/
struct hostap_skb_tx_data {
+ unsigned int __padding_for_default_qdiscs;
u32 magic; /* HOSTAP_SKB_TX_DATA_MAGIC */
u8 rate; /* transmit rate */
#define HOSTAP_TX_FLAGS_WDS BIT(0)
diff --git a/drivers/net/wireless/ipw2200.c b/drivers/net/wireless/ipw2200.c
index dcce3542d5a7..7a9f901d4ff6 100644
--- a/drivers/net/wireless/ipw2200.c
+++ b/drivers/net/wireless/ipw2200.c
@@ -3897,6 +3897,7 @@ static int ipw_disassociate(void *data)
if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING)))
return 0;
ipw_send_disassociate(data, 0);
+ netif_carrier_off(priv->net_dev);
return 1;
}
@@ -10190,6 +10191,9 @@ static int ipw_tx_skb(struct ipw_priv *priv, struct ieee80211_txb *txb,
u16 remaining_bytes;
int fc;
+ if (!(priv->status & STATUS_ASSOCIATED))
+ goto drop;
+
hdr_len = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
switch (priv->ieee->iw_mode) {
case IW_MODE_ADHOC:
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 24a1aeb6448f..c4c0371c763b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1384,9 +1384,11 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxq->queue[i] = NULL;
- pci_dma_sync_single_for_cpu(priv->pci_dev, rxb->dma_addr,
- priv->hw_params.rx_buf_size,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_range_for_cpu(
+ &priv->pci_dev->dev, rxb->real_dma_addr,
+ rxb->aligned_dma_addr - rxb->real_dma_addr,
+ priv->hw_params.rx_buf_size,
+ PCI_DMA_FROMDEVICE);
pkt = (struct iwl_rx_packet *)rxb->skb->data;
/* Reclaim a command buffer only if this packet is a response
@@ -1436,8 +1438,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
rxb->skb = NULL;
}
- pci_unmap_single(priv->pci_dev, rxb->dma_addr,
- priv->hw_params.rx_buf_size,
+ pci_unmap_single(priv->pci_dev, rxb->real_dma_addr,
+ priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
spin_lock_irqsave(&rxq->lock, flags);
list_add_tail(&rxb->list, &priv->rxq.rx_used);
@@ -2090,7 +2092,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
iwl4965_error_recovery(priv);
iwl_power_update_mode(priv, 1);
- ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
iwl4965_set_mode(priv, priv->iw_mode);
@@ -3252,7 +3253,11 @@ static void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
return;
}
- iwl_scan_cancel_timeout(priv, 100);
+ if (iwl_scan_cancel(priv)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 4c312c55f90c..01a845851338 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -290,6 +290,9 @@ void iwl_clear_stations_table(struct iwl_priv *priv)
priv->num_stations = 0;
memset(priv->stations, 0, sizeof(priv->stations));
+ /* clean ucode key table bit map */
+ priv->ucode_key_table = 0;
+
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
EXPORT_SYMBOL(iwl_clear_stations_table);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index c018121085e9..9966d4e384ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -89,7 +89,8 @@ extern struct iwl_cfg iwl5100_abg_cfg;
#define DEFAULT_LONG_RETRY_LIMIT 4U
struct iwl_rx_mem_buffer {
- dma_addr_t dma_addr;
+ dma_addr_t real_dma_addr;
+ dma_addr_t aligned_dma_addr;
struct sk_buff *skb;
struct list_head list;
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 7cde9d76ff5d..0509c16dbe75 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -204,7 +204,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
list_del(element);
/* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->dma_addr);
+ rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
rxq->queue[rxq->write] = rxb;
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
@@ -251,7 +251,7 @@ void iwl_rx_allocate(struct iwl_priv *priv)
rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
/* Alloc a new receive buffer */
- rxb->skb = alloc_skb(priv->hw_params.rx_buf_size,
+ rxb->skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
__GFP_NOWARN | GFP_ATOMIC);
if (!rxb->skb) {
if (net_ratelimit())
@@ -266,9 +266,17 @@ void iwl_rx_allocate(struct iwl_priv *priv)
list_del(element);
/* Get physical address of RB/SKB */
- rxb->dma_addr =
- pci_map_single(priv->pci_dev, rxb->skb->data,
- priv->hw_params.rx_buf_size, PCI_DMA_FROMDEVICE);
+ rxb->real_dma_addr = pci_map_single(
+ priv->pci_dev,
+ rxb->skb->data,
+ priv->hw_params.rx_buf_size + 256,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
+ skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
}
@@ -300,8 +308,8 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
- rxq->pool[i].dma_addr,
- priv->hw_params.rx_buf_size,
+ rxq->pool[i].real_dma_addr,
+ priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
dev_kfree_skb(rxq->pool[i].skb);
}
@@ -354,8 +362,8 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
* to an SKB, so we need to unmap and free potential storage */
if (rxq->pool[i].skb != NULL) {
pci_unmap_single(priv->pci_dev,
- rxq->pool[i].dma_addr,
- priv->hw_params.rx_buf_size,
+ rxq->pool[i].real_dma_addr,
+ priv->hw_params.rx_buf_size + 256,
PCI_DMA_FROMDEVICE);
priv->alloc_rxb_skb--;
dev_kfree_skb(rxq->pool[i].skb);
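
The iwl-rx changes implement 256-byte alignment by over-allocation: every RX skb is allocated and mapped with 256 spare bytes, the real (unaligned) DMA address is kept for unmapping, and the device is handed the next 256-byte-aligned address inside the mapping; skb_reserve() then shifts skb->data by the same delta so the CPU and device views of the buffer coincide. The core arithmetic, as a sketch:

#include <stdint.h>

#define RX_DMA_ALIGN 256

/* Round real_addr up to the next RX_DMA_ALIGN boundary (a power of
 * two).  With size + RX_DMA_ALIGN bytes mapped from real_addr, the
 * aligned address still leaves size usable bytes inside the mapping. */
static uintptr_t dma_align_up(uintptr_t real_addr)
{
        return (real_addr + RX_DMA_ALIGN - 1) & ~(uintptr_t)(RX_DMA_ALIGN - 1);
}
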
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 3b0bee331a33..c89365e2ca58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -896,6 +896,13 @@ static void iwl_bg_request_scan(struct work_struct *data)
return;
done:
+ /* Cannot perform scan. Make sure we clear the scanning
+ * bits from status so the next scan request can be performed.
+ * If we don't clear the scanning status bits here, all
+ * subsequent scans will fail.
+ */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
/* inform mac80211 scan aborted */
queue_work(priv->workqueue, &priv->scan_completed);
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 61797f3f8d5c..26f7084d3011 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -475,7 +475,7 @@ static int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
if (!test_and_set_bit(i, &priv->ucode_key_table))
return i;
- return -1;
+ return WEP_INVALID_OFFSET;
}
int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
@@ -620,6 +620,9 @@ static int iwl_set_wep_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for new kew");
+
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
@@ -637,6 +640,7 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
{
unsigned long flags;
__le16 key_flags = 0;
+ int ret;
key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
@@ -664,14 +668,18 @@ static int iwl_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for new kew");
+
priv->stations[sta_id].sta.key.key_flags = key_flags;
priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ ret = iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+
spin_unlock_irqrestore(&priv->sta_lock, flags);
- IWL_DEBUG_INFO("hwcrypto: modify ucode station key info\n");
- return iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ return ret;
}
static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
@@ -696,6 +704,9 @@ static int iwl_set_tkip_dynamic_key_info(struct iwl_priv *priv,
/* else, we are overriding an existing key => no need to allocated room
* in uCode. */
+ WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for new kew");
+
/* This copy is actually not needed: we get the key with each TX */
memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
@@ -734,6 +745,13 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
return 0;
}
+ if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IWL_WARNING("Removing wrong key %d 0x%x\n",
+ keyconf->keyidx, key_flags);
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+ return 0;
+ }
+
if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
&priv->ucode_key_table))
IWL_ERROR("index %d not used in uCode key table.\n",
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index d15a2c997954..45a6b0c35695 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -5768,7 +5768,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
if (priv->error_recovering)
iwl3945_error_recovery(priv);
- ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
return;
restart:
@@ -6256,6 +6255,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
n_probes,
(void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
+ if (scan->channel_count == 0) {
+ IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
+ goto done;
+ }
+
cmd.len += le16_to_cpu(scan->tx_cmd.len) +
scan->channel_count * sizeof(struct iwl3945_scan_channel);
cmd.data = scan;
@@ -6273,6 +6277,14 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
return;
done:
+ /* Cannot perform scan. Make sure we clear the scanning
+ * bits from status so the next scan request can be performed.
+ * If we don't clear the scanning status bits here, all
+ * subsequent scans will fail.
+ */
+ clear_bit(STATUS_SCAN_HW, &priv->status);
+ clear_bit(STATUS_SCANNING, &priv->status);
+
/* inform mac80211 scan aborted */
queue_work(priv->workqueue, &priv->scan_completed);
mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 297696de2da0..8265c7d25edc 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -605,9 +605,9 @@ int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
if (ret == 0) {
*curlevel = le16_to_cpu(cmd.curlevel);
if (minlevel)
- *minlevel = le16_to_cpu(cmd.minlevel);
+ *minlevel = cmd.minlevel;
if (maxlevel)
- *maxlevel = le16_to_cpu(cmd.maxlevel);
+ *maxlevel = cmd.maxlevel;
}
lbs_deb_leave(LBS_DEB_CMD);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 5749f22b296f..079e6aa874dc 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -328,7 +328,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
lbs_deb_rx("rx err: frame received with bad length\n");
priv->stats.rx_length_errors++;
ret = -EINVAL;
- kfree(skb);
+ kfree_skb(skb);
goto done;
}
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 8f66903641b9..22c4c6110521 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -598,8 +598,8 @@ static int lbs_process_bss(struct bss_descriptor *bss,
switch (elem->id) {
case MFIE_TYPE_SSID:
- bss->ssid_len = elem->len;
- memcpy(bss->ssid, elem->data, elem->len);
+ bss->ssid_len = min_t(int, 32, elem->len);
+ memcpy(bss->ssid, elem->data, bss->ssid_len);
lbs_deb_scan("got SSID IE: '%s', len %u\n",
escape_essid(bss->ssid, bss->ssid_len),
bss->ssid_len);
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 1cc03a8dd67a..59634c33b1f9 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -331,7 +331,7 @@ static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
/* Fill the receive configuration URB and initialise the Rx call back */
usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
- (void *) (skb->tail),
+ skb_tail_pointer(skb),
MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);
cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index 50904771f291..e0512e49d6d3 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -433,7 +433,7 @@ struct fw_info {
const static struct fw_info orinoco_fw[] = {
{ "", "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 },
{ "", "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 },
- { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 0x100 }
+ { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", "", 0x00003100, 512 }
};
/* Structure used to access fields in FW
@@ -458,7 +458,7 @@ orinoco_dl_firmware(struct orinoco_private *priv,
int ap)
{
/* Plug Data Area (PDA) */
- __le16 pda[512] = { 0 };
+ __le16 *pda;
hermes_t *hw = &priv->hw;
const struct firmware *fw_entry;
@@ -467,7 +467,11 @@ orinoco_dl_firmware(struct orinoco_private *priv,
const unsigned char *end;
const char *firmware;
struct net_device *dev = priv->ndev;
- int err;
+ int err = 0;
+
+ pda = kzalloc(fw->pda_size, GFP_KERNEL);
+ if (!pda)
+ return -ENOMEM;
if (ap)
firmware = fw->ap_fw;
@@ -478,17 +482,17 @@ orinoco_dl_firmware(struct orinoco_private *priv,
dev->name, firmware);
/* Read current plug data */
- err = hermes_read_pda(hw, pda, fw->pda_addr,
- min_t(u16, fw->pda_size, sizeof(pda)), 0);
+ err = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 0);
printk(KERN_DEBUG "%s: Read PDA returned %d\n", dev->name, err);
if (err)
- return err;
+ goto free;
err = request_firmware(&fw_entry, firmware, priv->dev);
if (err) {
printk(KERN_ERR "%s: Cannot find firmware %s\n",
dev->name, firmware);
- return -ENOENT;
+ err = -ENOENT;
+ goto free;
}
hdr = (const struct orinoco_fw_header *) fw_entry->data;
@@ -532,6 +536,9 @@ orinoco_dl_firmware(struct orinoco_private *priv,
abort:
release_firmware(fw_entry);
+
+free:
+ kfree(pda);
return err;
}
@@ -549,12 +556,12 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
int secondary)
{
hermes_t *hw = &priv->hw;
- int ret;
+ int ret = 0;
const unsigned char *ptr;
const unsigned char *first_block;
/* Plug Data Area (PDA) */
- __le16 pda[256];
+ __le16 *pda = NULL;
/* Binary block begins after the 0x1A marker */
ptr = image;
@@ -563,28 +570,33 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
/* Read the PDA from EEPROM */
if (secondary) {
- ret = hermes_read_pda(hw, pda, fw->pda_addr, sizeof(pda), 1);
+ pda = kzalloc(fw->pda_size, GFP_KERNEL);
+ if (!pda)
+ return -ENOMEM;
+
+ ret = hermes_read_pda(hw, pda, fw->pda_addr, fw->pda_size, 1);
if (ret)
- return ret;
+ goto free;
}
/* Stop the firmware, so that it can be safely rewritten */
if (priv->stop_fw) {
ret = priv->stop_fw(priv, 1);
if (ret)
- return ret;
+ goto free;
}
/* Program the adapter with new firmware */
ret = hermes_program(hw, first_block, end);
if (ret)
- return ret;
+ goto free;
/* Write the PDA to the adapter */
if (secondary) {
size_t len = hermes_blocks_length(first_block);
ptr = first_block + len;
ret = hermes_apply_pda(hw, ptr, pda);
+ kfree(pda);
if (ret)
return ret;
}
@@ -608,6 +620,10 @@ symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw,
return -ENODEV;
return 0;
+
+free:
+ kfree(pda);
+ return ret;
}
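
The orinoco change above swaps large on-stack PDA arrays for heap buffers sized from fw->pda_size, trimming the kernel stack frame; every early return then funnels through a single free label so the buffer cannot leak. The outline, with step1()/step2() as placeholders for the hermes_read_pda()/request_firmware() stages:

#include <stdlib.h>

/* Heap-backed scratch buffer with one exit label: every failure path
 * reaches out_free, so the buffer is always released. */
static int with_scratch(size_t pda_size,
                        int (*step1)(void *), int (*step2)(void *))
{
        int err;
        void *pda = calloc(1, pda_size);

        if (!pda)
                return -1;

        err = step1(pda);
        if (err)
                goto out_free;

        err = step2(pda);

out_free:
        free(pda);
        return err;
}
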
diff --git a/drivers/net/wireless/p54/p54common.c b/drivers/net/wireless/p54/p54common.c
index 117c7d3a52b0..827ca0384a4c 100644
--- a/drivers/net/wireless/p54/p54common.c
+++ b/drivers/net/wireless/p54/p54common.c
@@ -306,8 +306,8 @@ static int p54_convert_rev1(struct ieee80211_hw *dev,
return 0;
}
-static const char *p54_rf_chips[] = { "NULL", "Indigo?", "Duette",
- "Frisbee", "Xbow", "Longbow" };
+static const char *p54_rf_chips[] = { "NULL", "Duette3", "Duette2",
+ "Frisbee", "Xbow", "Longbow", "NULL", "NULL" };
static int p54_init_xbow_synth(struct ieee80211_hw *dev);
static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
@@ -319,6 +319,7 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
void *tmp;
int err;
u8 *end = (u8 *)eeprom + len;
+ u16 synth = 0;
DECLARE_MAC_BUF(mac);
wrap = (struct eeprom_pda_wrap *) eeprom;
@@ -400,8 +401,8 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
tmp = entry->data;
while ((u8 *)tmp < entry->data + data_len) {
struct bootrec_exp_if *exp_if = tmp;
- if (le16_to_cpu(exp_if->if_id) == 0xF)
- priv->rxhw = le16_to_cpu(exp_if->variant) & 0x07;
+ if (le16_to_cpu(exp_if->if_id) == 0xf)
+ synth = le16_to_cpu(exp_if->variant);
tmp += sizeof(struct bootrec_exp_if);
}
break;
@@ -421,28 +422,20 @@ static int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len)
entry = (void *)entry + (entry_len + 1)*2;
}
- if (!priv->iq_autocal || !priv->output_limit || !priv->curve_data) {
+ if (!synth || !priv->iq_autocal || !priv->output_limit ||
+ !priv->curve_data) {
printk(KERN_ERR "p54: not all required entries found in eeprom!\n");
err = -EINVAL;
goto err;
}
- switch (priv->rxhw) {
- case 4: /* XBow */
+ priv->rxhw = synth & 0x07;
+ if (priv->rxhw == 4)
p54_init_xbow_synth(dev);
- case 1: /* Indigo? */
- case 2: /* Duette */
- dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
- case 3: /* Frisbee */
- case 5: /* Longbow */
+ if (!(synth & 0x40))
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &band_2GHz;
- break;
- default:
- printk(KERN_ERR "%s: unsupported RF-Chip\n",
- wiphy_name(dev->wiphy));
- err = -EINVAL;
- goto err;
- }
+ if (!(synth & 0x80))
+ dev->wiphy->bands[IEEE80211_BAND_5GHZ] = &band_5GHz;
if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
u8 perm_addr[ETH_ALEN];
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1c2a02a741af..88b3cad8b65e 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -346,68 +346,6 @@ static void p54p_tx(struct ieee80211_hw *dev, struct p54_control_hdr *data,
printk(KERN_INFO "%s: tx overflow.\n", wiphy_name(dev->wiphy));
}
-static int p54p_open(struct ieee80211_hw *dev)
-{
- struct p54p_priv *priv = dev->priv;
- int err;
-
- init_completion(&priv->boot_comp);
- err = request_irq(priv->pdev->irq, &p54p_interrupt,
- IRQF_SHARED, "p54pci", dev);
- if (err) {
- printk(KERN_ERR "%s: failed to register IRQ handler\n",
- wiphy_name(dev->wiphy));
- return err;
- }
-
- memset(priv->ring_control, 0, sizeof(*priv->ring_control));
- err = p54p_upload_firmware(dev);
- if (err) {
- free_irq(priv->pdev->irq, dev);
- return err;
- }
- priv->rx_idx_data = priv->tx_idx_data = 0;
- priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
-
- p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
- ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
-
- p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
- ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
-
- P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
- P54P_READ(ring_control_base);
- wmb();
- udelay(10);
-
- P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
- P54P_READ(int_enable);
- wmb();
- udelay(10);
-
- P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
- P54P_READ(dev_int);
-
- if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
- printk(KERN_ERR "%s: Cannot boot firmware!\n",
- wiphy_name(dev->wiphy));
- free_irq(priv->pdev->irq, dev);
- return -ETIMEDOUT;
- }
-
- P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
- P54P_READ(int_enable);
- wmb();
- udelay(10);
-
- P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
- P54P_READ(dev_int);
- wmb();
- udelay(10);
-
- return 0;
-}
-
static void p54p_stop(struct ieee80211_hw *dev)
{
struct p54p_priv *priv = dev->priv;
@@ -474,6 +412,68 @@ static void p54p_stop(struct ieee80211_hw *dev)
memset(ring_control, 0, sizeof(*ring_control));
}
+static int p54p_open(struct ieee80211_hw *dev)
+{
+ struct p54p_priv *priv = dev->priv;
+ int err;
+
+ init_completion(&priv->boot_comp);
+ err = request_irq(priv->pdev->irq, &p54p_interrupt,
+ IRQF_SHARED, "p54pci", dev);
+ if (err) {
+ printk(KERN_ERR "%s: failed to register IRQ handler\n",
+ wiphy_name(dev->wiphy));
+ return err;
+ }
+
+ memset(priv->ring_control, 0, sizeof(*priv->ring_control));
+ err = p54p_upload_firmware(dev);
+ if (err) {
+ free_irq(priv->pdev->irq, dev);
+ return err;
+ }
+ priv->rx_idx_data = priv->tx_idx_data = 0;
+ priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
+
+ p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
+ ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
+
+ p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
+ ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
+
+ P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
+ P54P_READ(ring_control_base);
+ wmb();
+ udelay(10);
+
+ P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
+ P54P_READ(int_enable);
+ wmb();
+ udelay(10);
+
+ P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
+ P54P_READ(dev_int);
+
+ if (!wait_for_completion_interruptible_timeout(&priv->boot_comp, HZ)) {
+ printk(KERN_ERR "%s: Cannot boot firmware!\n",
+ wiphy_name(dev->wiphy));
+ p54p_stop(dev);
+ return -ETIMEDOUT;
+ }
+
+ P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
+ P54P_READ(int_enable);
+ wmb();
+ udelay(10);
+
+ P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
+ P54P_READ(dev_int);
+ wmb();
+ udelay(10);
+
+ return 0;
+}
+
static int __devinit p54p_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -556,11 +556,13 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
spin_lock_init(&priv->lock);
tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
- p54p_open(dev);
+ err = p54p_open(dev);
+ if (err)
+ goto err_free_common;
err = p54_read_eeprom(dev);
p54p_stop(dev);
if (err)
- goto err_free_desc;
+ goto err_free_common;
err = ieee80211_register_hw(dev);
if (err) {
@@ -573,8 +575,6 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
err_free_common:
p54_free_common(dev);
-
- err_free_desc:
pci_free_consistent(pdev, sizeof(*priv->ring_control),
priv->ring_control, priv->ring_control_dma);
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index f839ce044afd..95511ac22470 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,5 +1,5 @@
menuconfig RT2X00
- bool "Ralink driver support"
+ tristate "Ralink driver support"
depends on MAC80211 && WLAN_80211 && EXPERIMENTAL
---help---
This will enable the experimental support for the Ralink drivers,
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
index 431e3c78bf27..69eb0132593b 100644
--- a/drivers/net/wireless/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -48,6 +48,9 @@ static struct usb_device_id rtl8187_table[] __devinitdata = {
{USB_DEVICE(0x03f0, 0xca02), .driver_info = DEVICE_RTL8187},
/* Sitecom */
{USB_DEVICE(0x0df6, 0x000d), .driver_info = DEVICE_RTL8187},
+ {USB_DEVICE(0x0df6, 0x0028), .driver_info = DEVICE_RTL8187B},
+ /* Abocom */
+ {USB_DEVICE(0x13d1, 0xabe6), .driver_info = DEVICE_RTL8187},
{}
};
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index fe1867b25ff7..cac732f4047f 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -615,7 +615,7 @@ static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
struct ieee80211_hdr *tx_hdr;
tx_hdr = (struct ieee80211_hdr *)skb->data;
- if (likely(!compare_ether_addr(tx_hdr->addr2, rx_hdr->addr1)))
+ if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
{
__skb_unlink(skb, q);
tx_status(hw, skb, IEEE80211_TX_STAT_ACK, stats->signal, 1);
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index a60ae86bd5c9..a3ccd8c1c716 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
/* ZD1211B */
{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
@@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
+ { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
/* "Driverless" devices that need ejecting */
{ USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
{ USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index c6948d8f53f6..6d017adc914a 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1785,7 +1785,7 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
return 0;
}
-static struct xenbus_driver netfront = {
+static struct xenbus_driver netfront_driver = {
.name = "vif",
.owner = THIS_MODULE,
.ids = netfront_ids,
@@ -1805,7 +1805,7 @@ static int __init netif_init(void)
printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");
- return xenbus_register_frontend(&netfront);
+ return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
@@ -1815,7 +1815,7 @@ static void __exit netif_exit(void)
if (xen_initial_domain())
return;
- xenbus_unregister_driver(&netfront);
+ xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
diff --git a/drivers/net/xtsonic.c b/drivers/net/xtsonic.c
new file mode 100644
index 000000000000..da42aa06a3ba
--- /dev/null
+++ b/drivers/net/xtsonic.c
@@ -0,0 +1,319 @@
+/*
+ * xtsonic.c
+ *
+ * (C) 2001 - 2007 Tensilica Inc.
+ * Kevin Chea <kchea@yahoo.com>
+ * Marc Gauthier <marc@linux-xtensa.org>
+ * Chris Zankel <chris@zankel.net>
+ *
+ * (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
+ *
+ * This driver is based on work from Andreas Busse, but most of
+ * the code is rewritten.
+ *
+ * (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
+ *
+ * A driver for the onboard Sonic ethernet controller on the XT2000.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/dma.h>
+
+static char xtsonic_string[] = "xtsonic";
+
+extern unsigned xtboard_nvram_valid(void);
+extern void xtboard_get_ether_addr(unsigned char *buf);
+
+#include "sonic.h"
+
+/*
+ * According to the documentation for the Sonic ethernet controller,
+ * EOBC should be 760 words (1520 bytes) for 32-bit applications, and,
+ * as such, 2 words less than the buffer size. The value for RBSIZE
+ * defined in sonic.h, however, is only 1520.
+ *
+ * (Note that in 16-bit configurations, EOBC is 759 words (1518 bytes) and
+ * RBSIZE 1520 bytes)
+ */
+#undef SONIC_RBSIZE
+#define SONIC_RBSIZE 1524
+
+/*
+ * The chip provides 256 byte register space.
+ */
+#define SONIC_MEM_SIZE 0x100
+
+/*
+ * Macros to access SONIC registers
+ */
+#define SONIC_READ(reg) \
+ (0xffff & *((volatile unsigned int *)dev->base_addr+reg))
+
+#define SONIC_WRITE(reg,val) \
+ *((volatile unsigned int *)dev->base_addr+reg) = val
+
+
+/* Use 0 for production, 1 for verification, and >1 for debug */
+#ifdef SONIC_DEBUG
+static unsigned int sonic_debug = SONIC_DEBUG;
+#else
+static unsigned int sonic_debug = 1;
+#endif
+
+/*
+ * We cannot use station (ethernet) address prefixes to detect the
+ * sonic controller since these are board manufacturer dependent.
+ * So we check for known Silicon Revision IDs instead.
+ */
+static unsigned short known_revisions[] =
+{
+ 0x101, /* SONIC 83934 */
+ 0xffff /* end of list */
+};
+
+static int xtsonic_open(struct net_device *dev)
+{
+ if (request_irq(dev->irq,&sonic_interrupt,IRQF_DISABLED,"sonic",dev)) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ return sonic_open(dev);
+}
+
+static int xtsonic_close(struct net_device *dev)
+{
+ int err;
+ err = sonic_close(dev);
+ free_irq(dev->irq, dev);
+ return err;
+}
+
+static int __init sonic_probe1(struct net_device *dev)
+{
+ static unsigned version_printed = 0;
+ unsigned int silicon_revision;
+ struct sonic_local *lp = netdev_priv(dev);
+ unsigned int base_addr = dev->base_addr;
+ int i;
+ int err = 0;
+
+ if (!request_mem_region(base_addr, 0x100, xtsonic_string))
+ return -EBUSY;
+
+ /*
+ * get the Silicon Revision ID. If this is one of the known
+ * ones, assume that we found a SONIC ethernet controller at
+ * the expected location.
+ */
+ silicon_revision = SONIC_READ(SONIC_SR);
+ if (sonic_debug > 1)
+ printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
+
+ i = 0;
+ while ((known_revisions[i] != 0xffff) &&
+ (known_revisions[i] != silicon_revision))
+ i++;
+
+ if (known_revisions[i] == 0xffff) {
+ printk("SONIC ethernet controller not found (0x%4x)\n",
+ silicon_revision);
+ return -ENODEV;
+ }
+
+ if (sonic_debug && version_printed++ == 0)
+ printk(version);
+
+ /*
+ * Put the sonic into software reset, then retrieve ethernet address.
+ * Note: we are assuming that the boot-loader has initialized the CAM.
+ */
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_DCR,
+ SONIC_DCR_WC0|SONIC_DCR_DW|SONIC_DCR_LBR|SONIC_DCR_SBUS);
+ SONIC_WRITE(SONIC_CEP,0);
+ SONIC_WRITE(SONIC_IMR,0);
+
+ SONIC_WRITE(SONIC_CMD,SONIC_CR_RST);
+ SONIC_WRITE(SONIC_CEP,0);
+
+ for (i=0; i<3; i++) {
+ unsigned int val = SONIC_READ(SONIC_CAP0-i);
+ dev->dev_addr[i*2] = val;
+ dev->dev_addr[i*2+1] = val >> 8;
+ }
+
+ /* Initialize the device structure. */
+
+ lp->dma_bitmode = SONIC_BITMODE32;
+
+ /*
+ * Allocate local private descriptor areas in uncached space.
+ * The entire structure must be located within the same 64kb segment.
+ * A simple way to ensure this is to allocate twice the
+ * size of the structure -- given that the structure is
+ * much less than 64 kB, at least one of the halves of
+ * the allocated area will be contained entirely in 64 kB.
+ * We also allocate extra space for a pointer to allow freeing
+ * this structure later on (in xtsonic_cleanup_module()).
+ */
+ lp->descriptors =
+ dma_alloc_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ &lp->descriptors_laddr, GFP_KERNEL);
+
+ if (lp->descriptors == NULL) {
+ printk(KERN_ERR "%s: couldn't alloc DMA memory for "
+ " descriptors.\n", lp->device->bus_id);
+ goto out;
+ }
+
+ lp->cda = lp->descriptors;
+ lp->tda = lp->cda + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda = lp->tda + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra = lp->rda + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ /* get the virtual dma address */
+
+ lp->cda_laddr = lp->descriptors_laddr;
+ lp->tda_laddr = lp->cda_laddr + (SIZEOF_SONIC_CDA
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rda_laddr = lp->tda_laddr + (SIZEOF_SONIC_TD * SONIC_NUM_TDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+ lp->rra_laddr = lp->rda_laddr + (SIZEOF_SONIC_RD * SONIC_NUM_RDS
+ * SONIC_BUS_SCALE(lp->dma_bitmode));
+
+ dev->open = xtsonic_open;
+ dev->stop = xtsonic_close;
+ dev->hard_start_xmit = sonic_send_packet;
+ dev->get_stats = sonic_get_stats;
+ dev->set_multicast_list = &sonic_multicast_list;
+ dev->tx_timeout = sonic_tx_timeout;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ /*
+ * clear tally counter
+ */
+ SONIC_WRITE(SONIC_CRCT,0xffff);
+ SONIC_WRITE(SONIC_FAET,0xffff);
+ SONIC_WRITE(SONIC_MPT,0xffff);
+
+ return 0;
+out:
+ release_region(dev->base_addr, SONIC_MEM_SIZE);
+ return err;
+}
+
+
+/*
+ * Probe for a SONIC ethernet controller on an XT2000 board.
+ * Actually probing is superfluous but we're paranoid.
+ */
+
+int __init xtsonic_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct sonic_local *lp;
+ struct resource *resmem, *resirq;
+ int err = 0;
+
+ DECLARE_MAC_BUF(mac);
+
+ if ((resmem = platform_get_resource(pdev, IORESOURCE_MEM, 0)) == NULL)
+ return -ENODEV;
+
+ if ((resirq = platform_get_resource(pdev, IORESOURCE_IRQ, 0)) == NULL)
+ return -ENODEV;
+
+ if ((dev = alloc_etherdev(sizeof(struct sonic_local))) == NULL)
+ return -ENOMEM;
+
+ lp = netdev_priv(dev);
+ lp->device = &pdev->dev;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ netdev_boot_setup_check(dev);
+
+ dev->base_addr = resmem->start;
+ dev->irq = resirq->start;
+
+ if ((err = sonic_probe1(dev)))
+ goto out;
+ if ((err = register_netdev(dev)))
+ goto out1;
+
+ printk("%s: SONIC ethernet @%08lx, MAC %s, IRQ %d\n", dev->name,
+ dev->base_addr, print_mac(mac, dev->dev_addr), dev->irq);
+
+ return 0;
+
+out1:
+ release_region(dev->base_addr, SONIC_MEM_SIZE);
+out:
+ free_netdev(dev);
+
+ return err;
+}
+
+MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
+
+#include "sonic.c"
+
+static int __devexit xtsonic_device_remove (struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct sonic_local *lp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ release_region (dev->base_addr, SONIC_MEM_SIZE);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver xtsonic_driver = {
+ .probe = xtsonic_probe,
+ .remove = __devexit_p(xtsonic_device_remove),
+ .driver = {
+ .name = xtsonic_string,
+ },
+};
+
+static int __init xtsonic_init(void)
+{
+ return platform_driver_register(&xtsonic_driver);
+}
+
+static void __exit xtsonic_cleanup(void)
+{
+ platform_driver_unregister(&xtsonic_driver);
+}
+
+module_init(xtsonic_init);
+module_exit(xtsonic_cleanup);