path: root/drivers/net/ethernet/broadcom/bnx2x
author     Linus Torvalds   2012-03-21 05:04:47 +0100
committer  Linus Torvalds   2012-03-21 05:04:47 +0100
commit     3b59bf081622b6446db77ad06c93fe23677bc533 (patch)
tree       3f4bb5a27c90cc86994a1f6d3c53fbf9208003cb  /drivers/net/ethernet/broadcom/bnx2x
parent     Merge branch 'for-3.4' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq (diff)
parent     Fix pppol2tp getsockname() (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking merge from David Miller:
 "1) Move ixgbe driver over to purely page based buffering on receive. From Alexander Duyck.
  2) Add receive packet steering support to e1000e, from Bruce Allan.
  3) Convert TCP MD5 support over to RCU, from Eric Dumazet.
  4) Reduce cpu usage in handling out-of-order TCP packets on modern systems, also from Eric Dumazet.
  5) Support the IP{,V6}_UNICAST_IF socket options, making the wine folks happy, from Erich Hoover.
  6) Support VLAN trunking from guests in hyperv driver, from Haiyang Zhang.
  7) Support byte-queue-limits in r8169, from Igor Maravic.
  8) Outline code intended for IP_RECVTOS in IP_PKTOPTIONS existed but was never properly implemented; Jiri Benc fixed that.
  9) 64-bit statistics support in r8169 and 8139too, from Junchang Wang.
 10) Support kernel side dump filtering by ctmark in netfilter ctnetlink, from Pablo Neira Ayuso.
 11) Support byte-queue-limits in gianfar driver, from Paul Gortmaker.
 12) Add new peek socket options to assist with socket migration, from Pavel Emelyanov.
 13) Add sch_plug packet scheduler whose queue is controlled by userland daemons using explicit freeze and release commands. From Shriram Rajagopalan.
 14) Fix FCOE checksum offload handling on transmit, from Yi Zou."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1846 commits)
  Fix pppol2tp getsockname()
  Remove printk from rds_sendmsg
  ipv6: fix incorrent ipv6 ipsec packet fragment
  cpsw: Hook up default ndo_change_mtu.
  net: qmi_wwan: fix build error due to cdc-wdm dependecy
  netdev: driver: ethernet: Add TI CPSW driver
  netdev: driver: ethernet: add cpsw address lookup engine support
  phy: add am79c874 PHY support
  mlx4_core: fix race on comm channel
  bonding: send igmp report for its master
  fs_enet: Add MPC5125 FEC support and PHY interface selection
  net: bpf_jit: fix BPF_S_LDX_B_MSH compilation
  net: update the usage of CHECKSUM_UNNECESSARY
  fcoe: use CHECKSUM_UNNECESSARY instead of CHECKSUM_PARTIAL on tx
  net: do not do gso for CHECKSUM_UNNECESSARY in netif_needs_gso
  ixgbe: Fix issues with SR-IOV loopback when flow control is disabled
  net/hyperv: Fix the code handling tx busy
  ixgbe: fix namespace issues when FCoE/DCB is not enabled
  rtlwifi: Remove unused ETH_ADDR_LEN defines
  igbvf: Use ETH_ALEN
  ...

Fix up fairly trivial conflicts in drivers/isdn/gigaset/interface.c and drivers/net/usb/{Kconfig,qmi_wwan.c} as per David.
Diffstat (limited to 'drivers/net/ethernet/broadcom/bnx2x')
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h              76
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c         486
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h         184
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c         319
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h           2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h          2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c     310
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h      14
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h   2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h          62
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h          2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h     55
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c        324
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h          6
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c        974
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h          21
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c          180
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h           26
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c       432
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h       147
20 files changed, 2088 insertions, 1536 deletions
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 8c73d34b2ff1..e37161f19250 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.35-0"
-#define DRV_MODULE_RELDATE "2011/11/10"
+#define DRV_MODULE_VERSION "1.72.10-0"
+#define DRV_MODULE_RELDATE "2012/02/20"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -58,18 +58,22 @@
#define DRV_MODULE_NAME "bnx2x"
/* for messages that are currently off */
-#define BNX2X_MSG_OFF 0
-#define BNX2X_MSG_MCP 0x010000 /* was: NETIF_MSG_HW */
-#define BNX2X_MSG_STATS 0x020000 /* was: NETIF_MSG_TIMER */
-#define BNX2X_MSG_NVM 0x040000 /* was: NETIF_MSG_HW */
-#define BNX2X_MSG_DMAE 0x080000 /* was: NETIF_MSG_HW */
-#define BNX2X_MSG_SP 0x100000 /* was: NETIF_MSG_INTR */
-#define BNX2X_MSG_FP 0x200000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_OFF 0x0
+#define BNX2X_MSG_MCP 0x0010000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_STATS 0x0020000 /* was: NETIF_MSG_TIMER */
+#define BNX2X_MSG_NVM 0x0040000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_DMAE 0x0080000 /* was: NETIF_MSG_HW */
+#define BNX2X_MSG_SP 0x0100000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_FP 0x0200000 /* was: NETIF_MSG_INTR */
+#define BNX2X_MSG_IOV 0x0800000
+#define BNX2X_MSG_IDLE 0x2000000 /* used for idle check*/
+#define BNX2X_MSG_ETHTOOL 0x4000000
+#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
#define DP(__mask, fmt, ...) \
do { \
- if (bp->msg_enable & (__mask)) \
+ if (unlikely(bp->msg_enable & (__mask))) \
pr_notice("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
@@ -78,14 +82,14 @@ do { \
#define DP_CONT(__mask, fmt, ...) \
do { \
- if (bp->msg_enable & (__mask)) \
+ if (unlikely(bp->msg_enable & (__mask))) \
pr_cont(fmt, ##__VA_ARGS__); \
} while (0)
/* errors debug print */
#define BNX2X_DBG_ERR(fmt, ...) \
do { \
- if (netif_msg_probe(bp)) \
+ if (unlikely(netif_msg_probe(bp))) \
pr_err("[%s:%d(%s)]" fmt, \
__func__, __LINE__, \
bp->dev ? (bp->dev->name) : "?", \
@@ -108,7 +112,7 @@ do { \
/* before we have a dev->name use dev_info() */
#define BNX2X_DEV_INFO(fmt, ...) \
do { \
- if (netif_msg_probe(bp)) \
+ if (unlikely(netif_msg_probe(bp))) \
dev_info(&bp->pdev->dev, fmt, ##__VA_ARGS__); \
} while (0)
@@ -341,6 +345,7 @@ union db_prod {
#define SGE_PAGE_SIZE PAGE_SIZE
#define SGE_PAGE_SHIFT PAGE_SHIFT
#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
/* SGE ring related macros */
#define NUM_RX_SGE_PAGES 2
@@ -445,6 +450,8 @@ struct bnx2x_agg_info {
u16 vlan_tag;
u16 len_on_bd;
u32 rxhash;
+ u16 gro_size;
+ u16 full_page;
};
#define Q_STATS_OFFSET32(stat_name) \
@@ -473,6 +480,11 @@ struct bnx2x_fp_txdata {
int txq_index;
};
+enum bnx2x_tpa_mode_t {
+ TPA_MODE_LRO,
+ TPA_MODE_GRO
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -489,6 +501,8 @@ struct bnx2x_fastpath {
dma_addr_t status_blk_mapping;
+ enum bnx2x_tpa_mode_t mode;
+
u8 max_cos; /* actual number of active tx coses */
struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS];
@@ -540,6 +554,7 @@ struct bnx2x_fastpath {
struct ustorm_per_queue_stats old_uclient;
struct xstorm_per_queue_stats old_xclient;
struct bnx2x_eth_q_stats eth_q_stats;
+ struct bnx2x_eth_q_stats_old eth_q_stats_old;
/* The size is calculated using the following:
sizeof name field from netdev structure +
@@ -1046,7 +1061,6 @@ struct bnx2x_slowpath {
struct nig_stats nig_stats;
struct host_port_stats port_stats;
struct host_func_stats func_stats;
- struct host_func_stats func_stats_base;
u32 wb_comp;
u32 wb_data[4];
@@ -1088,7 +1102,8 @@ enum bnx2x_recovery_state {
BNX2X_RECOVERY_DONE,
BNX2X_RECOVERY_INIT,
BNX2X_RECOVERY_WAIT,
- BNX2X_RECOVERY_FAILED
+ BNX2X_RECOVERY_FAILED,
+ BNX2X_RECOVERY_NIC_LOADING
};
/*
@@ -1198,6 +1213,9 @@ struct bnx2x {
#define ETH_MIN_PACKET_SIZE 60
#define ETH_MAX_PACKET_SIZE 1500
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
+/* TCP with Timestamp Option (32) + IPv6 (40) */
+#define ETH_MAX_TPA_HEADER_SIZE 72
+#define ETH_MIN_TPA_HEADER_SIZE 40
/* Max supported alignment is 256 (8 shift) */
#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
@@ -1268,6 +1286,7 @@ struct bnx2x {
#define NO_MCP_FLAG (1 << 9)
#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
+#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
#define NO_ISCSI_OOO_FLAG (1 << 13)
@@ -1316,6 +1335,8 @@ struct bnx2x {
u8 wol;
+ bool gro_check;
+
int rx_ring_size;
u16 tx_quick_cons_trip_int;
@@ -1461,6 +1482,11 @@ struct bnx2x {
u16 stats_counter;
struct bnx2x_eth_stats eth_stats;
+ struct host_func_stats func_stats;
+ struct bnx2x_eth_stats_old eth_stats_old;
+ struct bnx2x_net_stats_old net_stats_old;
+ struct bnx2x_fw_port_stats_old fw_stats_old;
+ bool stats_init;
struct z_stream_s *strm;
void *gunzip_buf;
@@ -2073,8 +2099,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
-int bnx2x_close(struct net_device *dev);
-
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2094,14 +2118,22 @@ void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
-#define BNX2X_MF_PROTOCOL(bp) \
+#define BNX2X_MF_SD_PROTOCOL(bp) \
((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
#ifdef BCM_CNIC
-#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
- (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+#define BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp) \
+ (BNX2X_MF_SD_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_FCOE)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp))
+#define IS_MF_FCOE_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp))
-#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#define IS_MF_STORAGE_SD(bp) (IS_MF_SD(bp) && \
+ (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp) || \
+ BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)))
#endif
#endif /* bnx2x.h */
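
The reworked debug macros above gate every DP()/DP_CONT() print on a per-driver bitmask in bp->msg_enable, with the test now marked unlikely(). A minimal stand-alone sketch of that mask test follows, using two of the new driver-private bits from this hunk; the ethtool msglvl plumbing that normally sets msg_enable is assumed and not shown.

#include <stdio.h>

/* values copied from the hunk above; everything else here is illustrative */
#define BNX2X_MSG_ETHTOOL 0x4000000
#define BNX2X_MSG_DCB     0x8000000

static unsigned int msg_enable = BNX2X_MSG_DCB;   /* pretend only DCB prints are on */

/* mirrors: if (unlikely(bp->msg_enable & (__mask))) pr_notice(...) */
static void dp(unsigned int mask, const char *msg)
{
	if (msg_enable & mask)
		printf("%s", msg);
}

int main(void)
{
	dp(BNX2X_MSG_DCB, "DCB bit set - this line is printed\n");
	dp(BNX2X_MSG_ETHTOOL, "ethtool bit clear - this line is suppressed\n");
	return 0;
}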
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 99389c8dda21..f1f3ca65667a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1,6 +1,6 @@
/* bnx2x_cmn.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -32,46 +32,6 @@
/**
- * bnx2x_bz_fp - zero content of the fastpath structure.
- *
- * @bp: driver handle
- * @index: fastpath index to be zeroed
- *
- * Makes sure the contents of the bp->fp[index].napi is kept
- * intact.
- */
-static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
-{
- struct bnx2x_fastpath *fp = &bp->fp[index];
- struct napi_struct orig_napi = fp->napi;
- /* bzero bnx2x_fastpath contents */
- memset(fp, 0, sizeof(*fp));
-
- /* Restore the NAPI object as it has been already initialized */
- fp->napi = orig_napi;
-
- fp->bp = bp;
- fp->index = index;
- if (IS_ETH_FP(fp))
- fp->max_cos = bp->max_cos;
- else
- /* Special queues support only one CoS */
- fp->max_cos = 1;
-
- /*
- * set the tpa flag for each queue. The tpa flag determines the queue
- * minimal size so it must be set prior to queue memory allocation
- */
- fp->disable_tpa = ((bp->flags & TPA_ENABLE_FLAG) == 0);
-
-#ifdef BCM_CNIC
- /* We don't want TPA on an FCoE L2 ring */
- if (IS_FCOE_FP(fp))
- fp->disable_tpa = 1;
-#endif
-}
-
-/**
* bnx2x_move_fp - move content of the fastpath structure.
*
* @bp: driver handle
@@ -115,11 +75,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* prefetch skb end pointer to speedup dev_kfree_skb() */
prefetch(&skb->end);
- DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
+ DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
txdata->txq_index, idx, tx_buf, skb);
/* unmap first bd */
- DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
@@ -150,7 +109,6 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* now free frags */
while (nbd > 0) {
- DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
@@ -160,10 +118,11 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
- if (skb) {
+ if (likely(skb)) {
(*pkts_compl)++;
(*bytes_compl) += skb->len;
}
+
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -191,8 +150,8 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
pkt_cons = TX_BD(sw_cons);
- DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u sw_cons %u "
- " pkt_cons %u\n",
+ DP(NETIF_MSG_TX_DONE,
+ "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
@@ -249,13 +208,11 @@ static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
fp->last_max_sge = idx;
}
-static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
- struct eth_fast_path_rx_cqe *fp_cqe)
+static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
+ u16 sge_len,
+ struct eth_end_agg_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
- u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
- le16_to_cpu(fp_cqe->len_on_bd)) >>
- SGE_PAGE_SHIFT;
u16 last_max, last_elem, first_elem;
u16 delta = 0;
u16 i;
@@ -266,15 +223,15 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
/* First mark all used pages */
for (i = 0; i < sge_len; i++)
BIT_VEC64_CLEAR_BIT(fp->sge_mask,
- RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
+ RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
- sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
/* Here we assume that the last SGE index is the biggest */
prefetch((void *)(fp->sge_mask));
bnx2x_update_last_max_sge(fp,
- le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
+ le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
last_max = RX_SGE(fp->last_max_sge);
last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
@@ -368,6 +325,22 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
+ if (fp->mode == TPA_MODE_GRO) {
+ u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
+ tpa_info->full_page =
+ SGE_PAGE_SIZE * PAGES_PER_SGE / gro_size * gro_size;
+ /*
+ * FW 7.2.16 BUG workaround:
+ * if SGE size is (exactly) multiple gro_size
+ * fw will place one less frag on SGE.
+ * the calculation is done only for potentially
+ * dangerous MTUs.
+ */
+ if (unlikely(bp->gro_check))
+ if (!(SGE_PAGE_SIZE * PAGES_PER_SGE % gro_size))
+ tpa_info->full_page -= gro_size;
+ tpa_info->gro_size = gro_size;
+ }
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
@@ -424,25 +397,40 @@ static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct sk_buff *skb,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct sk_buff *skb,
struct eth_end_agg_rx_cqe *cqe,
u16 cqe_idx)
{
struct sw_rx_page *rx_pg, old_rx_pg;
- u32 i, frag_len, frag_size, pages;
- int err;
- int j;
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
+ u32 i, frag_len, frag_size;
+ int err, j, frag_id = 0;
u16 len_on_bd = tpa_info->len_on_bd;
+ u16 full_page = 0, gro_size = 0;
frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
- pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
+
+ if (fp->mode == TPA_MODE_GRO) {
+ gro_size = tpa_info->gro_size;
+ full_page = tpa_info->full_page;
+ }
/* This is needed in order to enable forwarding support */
- if (frag_size)
+ if (frag_size) {
skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp,
tpa_info->parsing_flags, len_on_bd);
+ /* set for GRO */
+ if (fp->mode == TPA_MODE_GRO)
+ skb_shinfo(skb)->gso_type =
+ (GET_FLAG(tpa_info->parsing_flags,
+ PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
+ PRS_FLAG_OVERETH_IPV6) ?
+ SKB_GSO_TCPV6 : SKB_GSO_TCPV4;
+ }
+
+
#ifdef BNX2X_STOP_ON_ERROR
if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
@@ -459,7 +447,12 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* FW gives the indices of the SGE as if the ring is an array
(meaning that "next" element will consume 2 indices) */
- frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
+ if (fp->mode == TPA_MODE_GRO)
+ frag_len = min_t(u32, frag_size, (u32)full_page);
+ else /* LRO */
+ frag_len = min_t(u32, frag_size,
+ (u32)(SGE_PAGE_SIZE * PAGES_PER_SGE));
+
rx_pg = &fp->rx_page_ring[sge_idx];
old_rx_pg = *rx_pg;
@@ -475,9 +468,21 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(&old_rx_pg, mapping),
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
-
/* Add one frag and update the appropriate fields in the skb */
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ if (fp->mode == TPA_MODE_LRO)
+ skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ else { /* GRO */
+ int rem;
+ int offset = 0;
+ for (rem = frag_len; rem > 0; rem -= gro_size) {
+ int len = rem > gro_size ? gro_size : rem;
+ skb_fill_page_desc(skb, frag_id++,
+ old_rx_pg.page, offset, len);
+ if (offset)
+ get_page(old_rx_pg.page);
+ offset += len;
+ }
+ }
skb->data_len += frag_len;
skb->truesize += SGE_PAGE_SIZE * PAGES_PER_SGE;
@@ -489,18 +494,17 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return 0;
}
-static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
- u16 queue, struct eth_end_agg_rx_cqe *cqe,
- u16 cqe_idx)
+static inline void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
+ struct bnx2x_agg_info *tpa_info,
+ u16 pages,
+ struct eth_end_agg_rx_cqe *cqe,
+ u16 cqe_idx)
{
- struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u32 pad = tpa_info->placement_offset;
+ u8 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
struct sk_buff *skb = NULL;
- u8 *data = rx_buf->data;
- /* alloc new skb */
- u8 *new_data;
+ u8 *new_data, *data = rx_buf->data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -525,8 +529,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
- BNX2X_ERR("skb_put is about to fail... "
- "pad %d len %d rx_buf_size %d\n",
+ BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
return;
@@ -540,13 +543,14 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (!bnx2x_fill_frag_skb(bp, fp, queue, skb, cqe, cqe_idx)) {
+ if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
+ skb, cqe, cqe_idx)) {
if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
__vlan_hwaccel_put_tag(skb, tpa_info->vlan_tag);
napi_gro_receive(&fp->napi, skb);
} else {
- DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
- " - dropping packet!\n");
+ DP(NETIF_MSG_RX_STATUS,
+ "Failed to allocate new pages - dropping packet!\n");
dev_kfree_skb_any(skb);
}
@@ -605,7 +609,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
struct eth_fast_path_rx_cqe *cqe_fp;
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
- u16 len, pad;
+ u16 len, pad, queue;
u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
@@ -622,28 +626,32 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
- DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
- " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
+ DP(NETIF_MSG_RX_STATUS,
+ "CQE type %x err %x status %x queue %x vlan %x len %u\n",
+ CQE_TYPE(cqe_fp_flags),
cqe_fp_flags, cqe_fp->status_flags,
le32_to_cpu(cqe_fp->rss_hash_result),
- le16_to_cpu(cqe_fp->vlan_tag), le16_to_cpu(cqe_fp->pkt_len));
+ le16_to_cpu(cqe_fp->vlan_tag),
+ le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
/* is this a slowpath msg? */
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
}
+
rx_buf = &fp->rx_buf_ring[bd_cons];
data = rx_buf->data;
if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ struct bnx2x_agg_info *tpa_info;
+ u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
if (fp->disable_tpa &&
(CQE_TYPE_START(cqe_fp_type) ||
CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
+ BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
CQE_TYPE(cqe_fp_type));
#endif
@@ -656,28 +664,38 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bnx2x_tpa_start(fp, queue,
bd_cons, bd_prod,
cqe_fp);
+
goto next_rx;
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ }
+ queue = cqe->end_agg_cqe.queue_index;
+ tpa_info = &fp->tpa_info[queue];
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
+ tpa_info->len_on_bd;
+
+ if (fp->mode == TPA_MODE_GRO)
+ pages = (frag_size + tpa_info->full_page - 1) /
+ tpa_info->full_page;
+ else
+ pages = SGE_PAGE_ALIGN(frag_size) >>
+ SGE_PAGE_SHIFT;
+
+ bnx2x_tpa_stop(bp, fp, tpa_info, pages,
+ &cqe->end_agg_cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
+ goto next_cqe;
}
/* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
+ len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
pad = cqe_fp->placement_offset;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
@@ -687,7 +705,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
prefetch(data + pad); /* speedup eth_type_trans() */
/* is this an error packet? */
if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
- DP(NETIF_MSG_RX_ERR,
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR flags %x rx packet %u\n",
cqe_fp_flags, sw_comp_cons);
fp->eth_q_stats.rx_err_discard_pkt++;
@@ -701,7 +719,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
(len <= RX_COPY_THRESH)) {
skb = netdev_alloc_skb_ip_align(bp->dev, len);
if (skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
"ERROR packet dropped because of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
@@ -722,9 +740,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
}
skb_reserve(skb, pad);
} else {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped because "
- "of alloc failure\n");
+ DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
+ "ERROR packet dropped because of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
@@ -793,8 +810,8 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
struct bnx2x *bp = fp->bp;
u8 cos;
- DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
- "[fp %d fw_sd %d igusb %d]\n",
+ DP(NETIF_MSG_INTR,
+ "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
fp->index, fp->fw_sb_id, fp->igu_sb_id);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
@@ -1007,10 +1024,8 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
GFP_ATOMIC);
if (!first_buf->data) {
- BNX2X_ERR("Failed to allocate TPA "
- "skb pool for queue[%d] - "
- "disabling TPA on this "
- "queue!\n", j);
+ BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
+ j);
bnx2x_free_tpa_pool(bp, fp, i);
fp->disable_tpa = 1;
break;
@@ -1030,10 +1045,10 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
- BNX2X_ERR("was only able to allocate "
- "%d rx sges\n", i);
- BNX2X_ERR("disabling TPA for "
- "queue[%d]\n", j);
+ BNX2X_ERR("was only able to allocate %d rx sges\n",
+ i);
+ BNX2X_ERR("disabling TPA for queue[%d]\n",
+ j);
/* Cleanup already allocated elements */
bnx2x_free_rx_sge_range(bp, fp,
ring_prod);
@@ -1188,8 +1203,8 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
for_each_eth_queue(bp, i) {
if (nvecs == offset)
return;
- DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
- "irq\n", i, bp->msix_table[offset].vector);
+ DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
+ i, bp->msix_table[offset].vector);
free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
}
@@ -1211,21 +1226,21 @@ int bnx2x_enable_msix(struct bnx2x *bp)
int msix_vec = 0, i, rc, req_cnt;
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
+ BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
bp->msix_table[0].entry);
msix_vec++;
#ifdef BCM_CNIC
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
msix_vec++;
#endif
/* We need separate vectors for ETH queues only (not FCoE) */
for_each_eth_queue(bp, i) {
bp->msix_table[msix_vec].entry = msix_vec;
- DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
- "(fastpath #%u)\n", msix_vec, msix_vec, i);
+ BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
+ msix_vec, msix_vec, i);
msix_vec++;
}
@@ -1241,14 +1256,12 @@ int bnx2x_enable_msix(struct bnx2x *bp)
/* how less vectors we will have? */
int diff = req_cnt - rc;
- DP(NETIF_MSG_IFUP,
- "Trying to use less MSI-X vectors: %d\n", rc);
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
if (rc) {
- DP(NETIF_MSG_IFUP,
- "MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
return rc;
}
/*
@@ -1256,13 +1269,13 @@ int bnx2x_enable_msix(struct bnx2x *bp)
*/
bp->num_queues -= diff;
- DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
bp->num_queues);
} else if (rc) {
/* fall to INTx if not enough memory */
if (rc == -ENOMEM)
bp->flags |= DISABLE_MSI_FLAG;
- DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
return rc;
}
@@ -1305,8 +1318,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
i = BNX2X_NUM_ETH_QUEUES(bp);
offset = 1 + CNIC_PRESENT;
- netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
- " ... fp[%d] %d\n",
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
bp->msix_table[0].vector,
0, bp->msix_table[offset].vector,
i - 1, bp->msix_table[offset + i - 1].vector);
@@ -1320,7 +1332,7 @@ int bnx2x_enable_msi(struct bnx2x *bp)
rc = pci_enable_msi(bp->pdev);
if (rc) {
- DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
+ BNX2X_DEV_INFO("MSI is not attainable\n");
return -1;
}
bp->flags |= USING_MSI_FLAG;
@@ -1441,8 +1453,8 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
}
#ifdef BCM_CNIC
- /* override in ISCSI SD mod */
- if (IS_MF_ISCSI_SD(bp))
+ /* override in STORAGE SD mode */
+ if (IS_MF_STORAGE_SD(bp))
bp->num_queues = 1;
#endif
/* Add special queues */
@@ -1497,7 +1509,7 @@ static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
return rc;
}
- DP(NETIF_MSG_DRV, "Setting real num queues to (tx, rx) (%d, %d)\n",
+ DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
tx, rx);
return rc;
@@ -1562,7 +1574,7 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
{
- struct bnx2x_config_rss_params params = {0};
+ struct bnx2x_config_rss_params params = {NULL};
int i;
/* Although RSS is meaningless when there is a single HW queue we
@@ -1625,7 +1637,7 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, u8 *ind_table, bool config_hash)
static inline int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
/* Prepare parameters for function state transitions */
__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -1646,7 +1658,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
{
int rc;
unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj;
/***************** Cleanup MACs' object first *************************/
@@ -1678,8 +1690,8 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp)
/* Add a DEL command... */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0)
- BNX2X_ERR("Failed to add a new DEL command to a multi-cast "
- "object: %d\n", rc);
+ BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
+ rc);
/* ...and wait until all pending commands are cleared */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
@@ -1717,8 +1729,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
int i, rc;
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't load NIC when there is panic\n");
return -EPERM;
+ }
#endif
bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
@@ -1738,6 +1752,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
* allocated only once, fp index, max_cos, bp pointer.
* Also set fp->disable_tpa.
*/
+ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
bnx2x_bz_fp(bp, i);
@@ -1766,12 +1781,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bnx2x_napi_enable(bp);
+ /* set pf load just before approaching the MCP */
+ bnx2x_set_pf_load(bp);
+
/* Send LOAD_REQUEST command to MCP
* Returns the type of LOAD command:
* if it is the first port to be initialized
* common blocks should be initialized, otherwise - not
*/
if (!BP_NOMCP(bp)) {
+ /* init fw_seq */
+ bp->fw_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
+ DRV_MSG_SEQ_NUMBER_MASK);
+ BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
+
+ /* Get current FW pulse sequence */
+ bp->fw_drv_pulse_wr_seq =
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
+ DRV_PULSE_SEQ_MASK);
+ BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
+
load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
if (!load_code) {
BNX2X_ERR("MCP response failure, aborting\n");
@@ -1779,9 +1809,33 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
LOAD_ERROR_EXIT(bp, load_error1);
}
if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
+ BNX2X_ERR("Driver load refused\n");
rc = -EBUSY; /* other port in diagnostic mode */
LOAD_ERROR_EXIT(bp, load_error1);
}
+ if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
+ load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
+ /* build FW version dword */
+ u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
+ (BCM_5710_FW_MINOR_VERSION << 8) +
+ (BCM_5710_FW_REVISION_VERSION << 16) +
+ (BCM_5710_FW_ENGINEERING_VERSION << 24);
+
+ /* read loaded FW from chip */
+ u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
+
+ DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x",
+ loaded_fw, my_fw);
+
+ /* abort nic load if version mismatch */
+ if (my_fw != loaded_fw) {
+ BNX2X_ERR("bnx2x with FW %x already loaded, "
+ "which mismatches my %x FW. aborting",
+ loaded_fw, my_fw);
+ rc = -EBUSY;
+ LOAD_ERROR_EXIT(bp, load_error2);
+ }
+ }
} else {
int path = BP_PATH(bp);
@@ -1816,7 +1870,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
} else
bp->port.pmf = 0;
- DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ DP(NETIF_MSG_IFUP, "pmf %d\n", bp->port.pmf);
/* Init Function state controlling object */
bnx2x__init_func_obj(bp);
@@ -1832,6 +1886,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/* Connect to IRQs */
rc = bnx2x_setup_irqs(bp);
if (rc) {
+ BNX2X_ERR("IRQs setup failed\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
LOAD_ERROR_EXIT(bp, load_error2);
}
@@ -1882,21 +1937,27 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
for_each_nondefault_queue(bp, i) {
rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("Queue setup failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
}
rc = bnx2x_init_rss_pf(bp);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("PF RSS init failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
/* Now when Clients are configured we are ready to work */
bp->state = BNX2X_STATE_OPEN;
/* Configure a ucast MAC */
rc = bnx2x_set_eth_mac(bp, true);
- if (rc)
+ if (rc) {
+ BNX2X_ERR("Setting Ethernet MAC failed\n");
LOAD_ERROR_EXIT(bp, load_error4);
+ }
if (bp->pending_max) {
bnx2x_update_max_mf_config(bp, bp->pending_max);
@@ -1948,7 +2009,15 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
- bnx2x_inc_load_cnt(bp);
+
+ /* mark driver is loaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
+ DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
/* Wait for all pending SP commands to complete */
if (!bnx2x_wait_sp_comp(bp, ~0x0UL)) {
@@ -1988,6 +2057,8 @@ load_error2:
bp->port.pmf = 0;
load_error1:
bnx2x_napi_disable(bp);
+ /* clear pf_load status, as it was already set */
+ bnx2x_clear_pf_load(bp);
load_error0:
bnx2x_free_mem(bp);
@@ -2001,6 +2072,14 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
int i;
bool global = false;
+ /* mark driver is unloaded in shmem2 */
+ if (SHMEM2_HAS(bp, drv_capabilities_flag)) {
+ u32 val;
+ val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
+ SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
+ val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
+ }
+
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR)) {
/* We can get here if the driver has been unloaded
@@ -2015,8 +2094,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_release_leader_lock(bp);
smp_mb();
- DP(NETIF_MSG_HW, "Releasing a leadership...\n");
-
+ DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
+ BNX2X_ERR("Can't unload in closed or error state\n");
return -EINVAL;
}
@@ -2045,6 +2124,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_drv_pulse(bp);
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
+ bnx2x_save_statistics(bp);
/* Cleanup the chip if needed */
if (unload_mode != UNLOAD_RECOVERY)
@@ -2108,7 +2188,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
/* The last driver must disable a "close the gate" if there is no
* parity attention or "process kill" pending.
*/
- if (!bnx2x_dec_load_cnt(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
+ if (!bnx2x_clear_pf_load(bp) && bnx2x_reset_is_done(bp, BP_PATH(bp)))
bnx2x_disable_close_the_gate(bp);
return 0;
@@ -2120,7 +2200,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
/* If there is no power capability, silently succeed */
if (!bp->pm_cap) {
- DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
+ BNX2X_DEV_INFO("No power capability. Breaking.\n");
return 0;
}
@@ -2161,6 +2241,7 @@ int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
break;
default:
+ dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
return -EINVAL;
}
return 0;
@@ -2230,7 +2311,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
napi_complete(napi);
/* Re-enable interrupts */
- DP(NETIF_MSG_HW,
+ DP(NETIF_MSG_RX_STATUS,
"Update index to %d\n", fp->fp_hc_idx);
bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
le16_to_cpu(fp->fp_hc_idx),
@@ -2264,9 +2345,8 @@ static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
h_tx_bd->nbd = cpu_to_le16(nbd);
h_tx_bd->nbytes = cpu_to_le16(hlen);
- DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
- "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
- h_tx_bd->addr_lo, h_tx_bd->nbd);
+ DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x) nbd %d\n",
+ h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo, h_tx_bd->nbd);
/* now get a new data BD
* (after the pbd) and fill it */
@@ -2406,8 +2486,7 @@ static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
exit_lbl:
if (unlikely(to_copy))
DP(NETIF_MSG_TX_QUEUED,
- "Linearization IS REQUIRED for %s packet. "
- "num_frags %d hlen %d first_bd_sz %d\n",
+ "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
(xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
@@ -2615,7 +2694,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
#endif
/* enable this debug print to view the transmission queue being used
- DP(BNX2X_MSG_FP, "indices: txq %d, fp %d, txdata %d\n",
+ DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
txq_index, fp_index, txdata_index); */
/* locate the fastpath and the txdata */
@@ -2623,8 +2702,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
txdata = &fp->txdata[txdata_index];
/* enable this debug print to view the tranmission details
- DP(BNX2X_MSG_FP,"transmitting packet cid %d fp index %d txdata_index %d"
- " tx_data ptr %p fp pointer %p\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
txdata->cid, fp_index, txdata_index, txdata, fp); */
if (unlikely(bnx2x_tx_avail(bp, txdata) <
@@ -2635,8 +2714,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x protocol %x "
- "protocol(%x,%x) gso type %x xmit_type %x\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x\n",
txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
@@ -2658,8 +2737,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Statistics of linearization */
bp->lin_cnt++;
if (skb_linearize(skb) != 0) {
- DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
- "silently dropping this SKB\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB linearization failed - silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2669,8 +2748,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = dma_map_single(&bp->pdev->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- DP(NETIF_MSG_TX_QUEUED, "SKB mapping failed - "
- "silently dropping this SKB\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "SKB mapping failed - silently dropping this SKB\n");
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2765,8 +2844,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
pkt_size = tx_start_bd->nbytes;
- DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
- " nbytes %d flags %x vlan %x\n",
+ DP(NETIF_MSG_TX_QUEUED,
+ "first bd @%p addr (%x:%x) nbd %d nbytes %d flags %x vlan %x\n",
tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
tx_start_bd->bd_flags.as_bitfield,
@@ -2809,8 +2888,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
unsigned int pkts_compl = 0, bytes_compl = 0;
- DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
- "dropping packet...\n");
+ DP(NETIF_MSG_TX_QUEUED,
+ "Unable to map page - dropping packet...\n");
/* we need unmap all buffers already mapped
* for this SKB;
@@ -2866,8 +2945,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (pbd_e1x)
DP(NETIF_MSG_TX_QUEUED,
- "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
- " tcp_flags %x xsum %x seq %u hlen %u\n",
+ "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
@@ -2943,23 +3021,22 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
/* requested to support too many traffic classes */
if (num_tc > bp->max_cos) {
- DP(NETIF_MSG_TX_ERR, "support for too many traffic classes"
- " requested: %d. max supported is %d\n",
- num_tc, bp->max_cos);
+ BNX2X_ERR("support for too many traffic classes requested: %d. max supported is %d\n",
+ num_tc, bp->max_cos);
return -EINVAL;
}
/* declare amount of supported traffic classes */
if (netdev_set_num_tc(dev, num_tc)) {
- DP(NETIF_MSG_TX_ERR, "failed to declare %d traffic classes\n",
- num_tc);
+ BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
return -EINVAL;
}
/* configure priority to traffic class mapping */
for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
- DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n",
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping priority %d to tc %d\n",
prio, bp->prio_to_cos[prio]);
}
@@ -2979,7 +3056,8 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
count = BNX2X_NUM_ETH_QUEUES(bp);
offset = cos * MAX_TXQS_PER_COS;
netdev_set_tc_queue(dev, cos, count, offset);
- DP(BNX2X_MSG_SP, "mapping tc %d to offset %d count %d\n",
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "mapping tc %d to offset %d count %d\n",
cos, offset, count);
}
@@ -2993,12 +3071,16 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
+ BNX2X_ERR("Requested MAC address is not valid\n");
return -EINVAL;
+ }
#ifdef BCM_CNIC
- if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ if (IS_MF_STORAGE_SD(bp) && !is_zero_ether_addr(addr->sa_data)) {
+ BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
return -EINVAL;
+ }
#endif
if (netif_running(dev)) {
@@ -3007,6 +3089,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
if (netif_running(dev))
@@ -3071,7 +3154,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- DP(BNX2X_MSG_SP,
+ DP(NETIF_MSG_IFDOWN,
"freeing tx memory of fp %d cos %d cid %d\n",
fp_index, cos, txdata->cid);
@@ -3116,15 +3199,22 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
int rx_ring_size = 0;
#ifdef BCM_CNIC
- if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
+ if (!bp->rx_ring_size && IS_MF_STORAGE_SD(bp)) {
rx_ring_size = MIN_RX_SIZE_NONTPA;
bp->rx_ring_size = rx_ring_size;
} else
#endif
if (!bp->rx_ring_size) {
+ u32 cfg = SHMEM_RD(bp,
+ dev_info.port_hw_config[BP_PORT(bp)].default_cfg);
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
+ /* Decrease ring size for 1G functions */
+ if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
+ PORT_HW_CFG_NET_SERDES_IF_SGMII)
+ rx_ring_size /= 10;
+
/* allocate at least number of buffers required by FW */
rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA, rx_ring_size);
@@ -3163,8 +3253,8 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
- DP(BNX2X_MSG_SP, "allocating tx memory of "
- "fp %d cos %d\n",
+ DP(NETIF_MSG_IFUP,
+ "allocating tx memory of fp %d cos %d\n",
index, cos);
BNX2X_ALLOC(txdata->tx_buf_ring,
@@ -3401,6 +3491,7 @@ int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
cp->fcoe_wwn_port_name_lo);
break;
default:
+ BNX2X_ERR("Wrong WWN type requested - %d\n", type);
return -EINVAL;
}
@@ -3414,13 +3505,15 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ BNX2X_ERR("Can't perform change MTU during parity recovery\n");
return -EAGAIN;
}
if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
- ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
+ ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
+ BNX2X_ERR("Can't support requested MTU size\n");
return -EINVAL;
+ }
/* This does not race with packet allocation
* because the actual alloc size is
@@ -3428,17 +3521,21 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
*/
dev->mtu = new_mtu;
+ bp->gro_check = bnx2x_need_gro_check(new_mtu);
+
return bnx2x_reload_if_running(dev);
}
netdev_features_t bnx2x_fix_features(struct net_device *dev,
- netdev_features_t features)
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
/* TPA requires Rx CSUM offloading */
- if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
+ if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
features &= ~NETIF_F_LRO;
+ features &= ~NETIF_F_GRO;
+ }
return features;
}
@@ -3454,6 +3551,11 @@ int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
else
flags &= ~TPA_ENABLE_FLAG;
+ if (features & NETIF_F_GRO)
+ flags |= GRO_ENABLE_FLAG;
+ else
+ flags &= ~GRO_ENABLE_FLAG;
+
if (features & NETIF_F_LOOPBACK) {
if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
bp->link_params.loopback_mode = LOOPBACK_BMAC;
@@ -3541,7 +3643,7 @@ int bnx2x_resume(struct pci_dev *pdev)
bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -3557,8 +3659,6 @@ int bnx2x_resume(struct pci_dev *pdev)
bnx2x_set_power_state(bp, PCI_D0);
netif_device_attach(dev);
- /* Since the chip was reset, clear the FW sequence number */
- bp->fw_seq = 0;
rc = bnx2x_nic_load(bp, LOAD_OPEN);
rtnl_unlock();
@@ -3588,8 +3688,9 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
REG_WR8(bp, addr, ticks);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
- port, fw_sb_id, sb_index, ticks);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d ticks %d\n",
+ port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
@@ -3604,8 +3705,9 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
REG_WR16(bp, addr, flags);
- DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
- port, fw_sb_id, sb_index, disable);
+ DP(NETIF_MSG_IFUP,
+ "port %x fw_sb_id %d sb_index %d disable %d\n",
+ port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
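
The GRO aggregation path added above sizes each skb frag from tpa_info->full_page, computed in bnx2x_tpa_start() as the largest multiple of the MSS (gro_size) that fits in one SGE, minus one MSS when the FW 7.2.16 workaround applies. A minimal sketch of that arithmetic, assuming a 4096-byte SGE (the real size is SGE_PAGE_SIZE * PAGES_PER_SGE and is platform dependent):

#include <stdio.h>

/* only the SGE size (4096 bytes) is assumed here; the rest mirrors the hunks above */
static unsigned int full_page_for(unsigned int sge_bytes, unsigned int gro_size,
                                  int gro_check)
{
	/* largest multiple of the MSS (gro_size) that fits in one SGE */
	unsigned int full_page = sge_bytes / gro_size * gro_size;

	/* FW 7.2.16 workaround: when the SGE is an exact multiple of the MSS,
	 * firmware places one frag less, so budget one MSS less per SGE */
	if (gro_check && !(sge_bytes % gro_size))
		full_page -= gro_size;
	return full_page;
}

int main(void)
{
	unsigned int fp1 = full_page_for(4096, 1448, 1); /* 2896: 4096 is not a multiple of 1448 */
	unsigned int fp2 = full_page_for(4096, 1024, 1); /* 3072: exact multiple, one MSS dropped */
	unsigned int frag_size = 9000;                   /* bytes left after the first BD */

	/* same round-up as the pages calculation added to bnx2x_rx_int() */
	printf("full_page=%u -> pages=%u\n", fp1, (frag_size + fp1 - 1) / fp1); /* 4 */
	printf("full_page=%u -> pages=%u\n", fp2, (frag_size + fp2 - 1) / fp2); /* 3 */
	return 0;
}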
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 4f40f7d7d8c6..8b163388659a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -1,6 +1,6 @@
/* bnx2x_cmn.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -379,8 +379,8 @@ void bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
unsigned long ramrod_flags);
/* Parity errors related */
-void bnx2x_inc_load_cnt(struct bnx2x *bp);
-u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
+void bnx2x_set_pf_load(struct bnx2x *bp);
+bool bnx2x_clear_pf_load(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print);
bool bnx2x_reset_is_done(struct bnx2x *bp, int engine);
void bnx2x_set_reset_in_progress(struct bnx2x *bp);
@@ -534,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
+
netdev_features_t bnx2x_fix_features(struct net_device *dev,
- netdev_features_t features);
+ netdev_features_t features);
int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
/**
@@ -597,7 +598,7 @@ static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
(update << IGU_REGULAR_BUPDATE_SHIFT) |
(op << IGU_REGULAR_ENABLE_INT_SHIFT));
- DP(NETIF_MSG_HW, "write 0x%08x to IGU addr 0x%x\n",
+ DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
cmd_data.sb_id_and_flags, igu_addr);
REG_WR(bp, igu_addr, cmd_data.sb_id_and_flags);
@@ -614,8 +615,7 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
u32 sb_bit = 1 << (idu_sb_id%32);
- u32 func_encode = func |
- ((is_Pf == true ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT);
+ u32 func_encode = func | (is_Pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
/* Not supported in BC mode */
@@ -648,8 +648,8 @@ static inline void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func,
if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
- DP(NETIF_MSG_HW, "Unable to finish IGU cleanup: "
- "idu_sb_id %d offset %d bit %d (cnt %d)\n",
+ DP(NETIF_MSG_HW,
+ "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
}
}
@@ -668,8 +668,6 @@ static inline void bnx2x_hc_ack_sb(struct bnx2x *bp, u8 sb_id,
(update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
(op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
- DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
- (*(u32 *)&igu_ack), hc_addr);
REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
/* Make sure that ACK is written */
@@ -703,9 +701,6 @@ static inline u16 bnx2x_hc_ack_int(struct bnx2x *bp)
COMMAND_REG_SIMD_MASK);
u32 result = REG_RD(bp, hc_addr);
- DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
- result, hc_addr);
-
barrier();
return result;
}
@@ -715,7 +710,7 @@ static inline u16 bnx2x_igu_ack_int(struct bnx2x *bp)
u32 igu_addr = (BAR_IGU_INTMEM + IGU_REG_SISR_MDPC_WMASK_LSB_UPPER*8);
u32 result = REG_RD(bp, igu_addr);
- DP(NETIF_MSG_HW, "read 0x%08x from IGU addr 0x%x\n",
+ DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
result, igu_addr);
barrier();
@@ -893,13 +888,16 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
dma_addr_t mapping;
- if (unlikely(page == NULL))
+ if (unlikely(page == NULL)) {
+ BNX2X_ERR("Can't alloc sge\n");
return -ENOMEM;
+ }
mapping = dma_map_page(&bp->pdev->dev, page, 0,
SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
__free_pages(page, PAGES_PER_SGE_SHIFT);
+ BNX2X_ERR("Can't map sge\n");
return -ENOMEM;
}
@@ -929,6 +927,7 @@ static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
kfree(data);
+ BNX2X_ERR("Can't map rx data\n");
return -ENOMEM;
}
@@ -971,7 +970,7 @@ static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
*/
static inline int bnx2x_func_start(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_start_params *start_params =
&func_params.params.start;
@@ -984,10 +983,11 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
/* Function parameters */
start_params->mf_mode = bp->mf_mode;
start_params->sd_vlan_tag = bp->mf_ov;
- if (CHIP_IS_E1x(bp))
- start_params->network_cos_mode = OVERRIDE_COS;
- else
+
+ if (CHIP_IS_E2(bp) || CHIP_IS_E3(bp))
start_params->network_cos_mode = STATIC_COS;
+ else /* CHIP_IS_E1X */
+ start_params->network_cos_mode = FW_WRR;
return bnx2x_func_state_change(bp, &func_params);
}
@@ -1142,7 +1142,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
{
struct bnx2x *bp = fp->bp;
u16 ring_prod, cqe_ring_prod;
- int i;
+ int i, failure_cnt = 0;
fp->rx_comp_cons = 0;
cqe_ring_prod = ring_prod = 0;
@@ -1152,18 +1152,17 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
*/
for (i = 0; i < rx_ring_size; i++) {
if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
- fp->eth_q_stats.rx_skb_alloc_failed++;
+ failure_cnt++;
continue;
}
ring_prod = NEXT_RX_IDX(ring_prod);
cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
- WARN_ON(ring_prod <= (i - fp->eth_q_stats.rx_skb_alloc_failed));
+ WARN_ON(ring_prod <= (i - failure_cnt));
}
- if (fp->eth_q_stats.rx_skb_alloc_failed)
- BNX2X_ERR("was only able to allocate "
- "%d rx skbs on queue[%d]\n",
- (i - fp->eth_q_stats.rx_skb_alloc_failed), fp->index);
+ if (failure_cnt)
+ BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
+ i - failure_cnt, fp->index);
fp->rx_bd_prod = ring_prod;
/* Limit the CQE producer by the CQE ring size */
@@ -1171,7 +1170,9 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
cqe_ring_prod);
fp->rx_pkt = fp->rx_calls = 0;
- return i - fp->eth_q_stats.rx_skb_alloc_failed;
+ fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
+
+ return i - failure_cnt;
}
/* Statistics ID are global per chip/path, while Client IDs for E1x are per
@@ -1297,7 +1298,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp,
txdata->txq_index = txq_index;
txdata->tx_cons_sb = tx_cons_sb;
- DP(BNX2X_MSG_SP, "created tx data cid %d, txq %d\n",
+ DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n",
txdata->cid, txdata->txq_index);
}
@@ -1342,7 +1343,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]),
fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX);
- DP(BNX2X_MSG_SP, "created fcoe tx data (fp index %d)\n", fp->index);
+ DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index);
/* qZone id equals to FW (per path) client id */
bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fp_qzone_id(fp);
@@ -1361,8 +1362,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
bnx2x_sp_mapping(bp, q_rdata), q_type);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d "
- "igu_sb %d\n",
+ DP(NETIF_MSG_IFUP,
+ "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
fp->index, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
}
@@ -1375,8 +1376,7 @@ static inline int bnx2x_clean_tx_queue(struct bnx2x *bp,
while (bnx2x_has_tx_work_unload(txdata)) {
if (!cnt) {
- BNX2X_ERR("timeout waiting for queue[%d]: "
- "txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
+ BNX2X_ERR("timeout waiting for queue[%d]: txdata->tx_pkt_prod(%d) != txdata->tx_pkt_cons(%d)\n",
txdata->txq_index, txdata->tx_pkt_prod,
txdata->tx_pkt_cons);
#ifdef BNX2X_STOP_ON_ERROR
@@ -1453,8 +1453,8 @@ static inline bool bnx2x_wait_sp_comp(struct bnx2x *bp, unsigned long mask)
netif_addr_lock_bh(bp->dev);
if (bp->sp_state & mask) {
- BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, "
- "mask 0x%lx\n", bp->sp_state, mask);
+ BNX2X_ERR("Filtering completion timed out. sp_state 0x%lx, mask 0x%lx\n",
+ bp->sp_state, mask);
netif_addr_unlock_bh(bp->dev);
return false;
}
@@ -1490,13 +1490,113 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
FUNC_MF_CFG_MAX_BW_SHIFT;
if (!max_cfg) {
- DP(NETIF_MSG_LINK,
+ DP(NETIF_MSG_IFUP | BNX2X_MSG_ETHTOOL,
"Max BW configured to 0 - using 100 instead\n");
max_cfg = 100;
}
return max_cfg;
}
+/* checks if HW supports GRO for given MTU */
+static inline bool bnx2x_mtu_allows_gro(int mtu)
+{
+ /* gro frags per page */
+ int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE);
+
+ /*
+ * 1. number of frags should not grow above MAX_SKB_FRAGS
+ * 2. frag must fit the page
+ */
+ return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
+}
+
+static inline bool bnx2x_need_gro_check(int mtu)
+{
+ return (SGE_PAGES / (mtu - ETH_MAX_TPA_HEADER_SIZE - 1)) !=
+ (SGE_PAGES / (mtu - ETH_MIN_TPA_HEADER_SIZE + 1));
+}
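bnx2x_mtu_allows_gro() above enables hardware GRO only when the MTU fits an SGE page and the resulting number of page fragments, multiplied by U_ETH_SGL_SIZE, still fits in MAX_SKB_FRAGS. A worked example with assumed constants (4096-byte SGE page, 8 SGL entries, 17 skb frags, and a made-up 390-byte TPA header reservation; the real values come from the bnx2x headers):

#include <stdbool.h>
#include <stdio.h>

/* Assumed values for illustration only. */
#define SGE_PAGE_SIZE           4096
#define ETH_MAX_TPA_HEADER_SIZE  390
#define U_ETH_SGL_SIZE             8
#define MAX_SKB_FRAGS             17

static bool mtu_allows_gro(int mtu)
{
        int fpp = SGE_PAGE_SIZE / (mtu - ETH_MAX_TPA_HEADER_SIZE); /* frags per page */

        return mtu <= SGE_PAGE_SIZE && (U_ETH_SGL_SIZE * fpp) <= MAX_SKB_FRAGS;
}

int main(void)
{
        /* mtu 1500: 4096 / 1110 = 3 frags per page, 8 * 3 = 24 > 17, GRO not allowed */
        /* mtu 2500: 4096 / 2110 = 1 frag per page,  8 * 1 =  8 <= 17, GRO allowed    */
        printf("mtu 1500 -> %d, mtu 2500 -> %d\n",
               mtu_allows_gro(1500), mtu_allows_gro(2500));
        return 0;
}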
+
+/**
+ * bnx2x_bz_fp - zero content of the fastpath structure.
+ *
+ * @bp: driver handle
+ * @index: fastpath index to be zeroed
+ *
+ * Makes sure the contents of bp->fp[index].napi are kept
+ * intact.
+ */
+static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
+{
+ struct bnx2x_fastpath *fp = &bp->fp[index];
+ struct napi_struct orig_napi = fp->napi;
+ /* bzero bnx2x_fastpath contents */
+ if (bp->stats_init)
+ memset(fp, 0, sizeof(*fp));
+ else {
+ /* Keep Queue statistics */
+ struct bnx2x_eth_q_stats *tmp_eth_q_stats;
+ struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old;
+
+ tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats),
+ GFP_KERNEL);
+ if (tmp_eth_q_stats)
+ memcpy(tmp_eth_q_stats, &fp->eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
+
+ tmp_eth_q_stats_old =
+ kzalloc(sizeof(struct bnx2x_eth_q_stats_old),
+ GFP_KERNEL);
+ if (tmp_eth_q_stats_old)
+ memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old,
+ sizeof(struct bnx2x_eth_q_stats_old));
+
+ memset(fp, 0, sizeof(*fp));
+
+ if (tmp_eth_q_stats) {
+ memcpy(&fp->eth_q_stats, tmp_eth_q_stats,
+ sizeof(struct bnx2x_eth_q_stats));
+ kfree(tmp_eth_q_stats);
+ }
+
+ if (tmp_eth_q_stats_old) {
+ memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old,
+ sizeof(struct bnx2x_eth_q_stats_old));
+ kfree(tmp_eth_q_stats_old);
+ }
+
+ }
+
+ /* Restore the NAPI object as it has been already initialized */
+ fp->napi = orig_napi;
+
+ fp->bp = bp;
+ fp->index = index;
+ if (IS_ETH_FP(fp))
+ fp->max_cos = bp->max_cos;
+ else
+ /* Special queues support only one CoS */
+ fp->max_cos = 1;
+
+ /*
+ * set the tpa flag for each queue. The tpa flag determines the queue
+ * minimal size so it must be set prior to queue memory allocation
+ */
+ fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
+ (bp->flags & GRO_ENABLE_FLAG &&
+ bnx2x_mtu_allows_gro(bp->dev->mtu)));
+ if (bp->flags & TPA_ENABLE_FLAG)
+ fp->mode = TPA_MODE_LRO;
+ else if (bp->flags & GRO_ENABLE_FLAG)
+ fp->mode = TPA_MODE_GRO;
+
+#ifdef BCM_CNIC
+ /* We don't want TPA on an FCoE L2 ring */
+ if (IS_FCOE_FP(fp))
+ fp->disable_tpa = 1;
+#endif
+}
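bnx2x_bz_fp() above clears the whole fastpath structure while keeping two pieces of state alive: the already-initialized NAPI context, saved in a stack copy and restored afterwards, and, when the statistics are not being re-initialized, the per-queue stats, which take a round trip through temporary heap buffers. A compact sketch of the same zero-everything-except idiom, with hypothetical field names standing in for napi and eth_q_stats:

#include <stdio.h>
#include <string.h>

/* Hypothetical miniature of a fastpath-like structure. */
struct fake_fp {
        int napi_state;                 /* must survive the wipe */
        unsigned long rx_drops;         /* optionally preserved statistics */
        int scratch[8];                 /* everything else may be cleared */
};

static void bz_fp(struct fake_fp *fp, int keep_stats)
{
        int saved_napi = fp->napi_state;        /* like orig_napi on the stack */
        unsigned long saved_stats = fp->rx_drops;

        memset(fp, 0, sizeof(*fp));

        fp->napi_state = saved_napi;            /* always restored */
        if (keep_stats)
                fp->rx_drops = saved_stats;     /* restored only across reloads */
}

int main(void)
{
        struct fake_fp fp = { .napi_state = 3, .rx_drops = 42, .scratch = { 7 } };

        bz_fp(&fp, 1);
        printf("napi %d, drops %lu, scratch %d\n",
               fp.napi_state, fp.rx_drops, fp.scratch[0]);
        return 0;
}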
+
+#ifdef BCM_CNIC
/**
* bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
*
@@ -1504,7 +1604,7 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
*
*/
void bnx2x_get_iscsi_info(struct bnx2x *bp);
-
+#endif
/* returns func by VN for current port */
static inline int func_by_vn(struct bnx2x *bp, int vn)
{
@@ -1545,7 +1645,7 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
{
if (SHMEM2_HAS(bp, drv_flags)) {
u32 drv_flags;
- bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
drv_flags = SHMEM2_RD(bp, drv_flags);
if (set)
@@ -1554,8 +1654,8 @@ static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
RESET_FLAGS(drv_flags, flags);
SHMEM2_WR(bp, drv_flags, drv_flags);
- DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
- bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ DP(NETIF_MSG_IFUP, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_DRV_FLAGS);
}
}
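The hunk above only renames the lock identifier and the debug level, but bnx2x_update_drv_flags() itself is a plain read-modify-write of a shared shmem word under a hardware lock: acquire, read, set or clear the requested bits, write back, release. A toy userspace sketch of that shape, with a pthread mutex standing in for the chip's HW lock (an assumption for illustration only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t drv_flags_lock = PTHREAD_MUTEX_INITIALIZER; /* "HW lock" */
static unsigned int shmem_drv_flags;                               /* "SHMEM word" */

static void update_drv_flags(unsigned int flags, int set)
{
        pthread_mutex_lock(&drv_flags_lock);    /* bnx2x_acquire_hw_lock() */

        if (set)
                shmem_drv_flags |= flags;       /* SET_FLAGS */
        else
                shmem_drv_flags &= ~flags;      /* RESET_FLAGS */

        pthread_mutex_unlock(&drv_flags_lock);  /* bnx2x_release_hw_lock() */
}

int main(void)
{
        update_drv_flags(1u << 2, 1);
        printf("drv_flags 0x%08x\n", shmem_drv_flags);
        update_drv_flags(1u << 2, 0);
        printf("drv_flags 0x%08x\n", shmem_drv_flags);
        return 0;
}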
@@ -1564,7 +1664,7 @@ static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
if (is_valid_ether_addr(addr))
return true;
#ifdef BCM_CNIC
- if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+ if (is_zero_ether_addr(addr) && IS_MF_STORAGE_SD(bp))
return true;
#endif
return false;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 6d82ade4c31c..4f9244bd7530 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -1,6 +1,6 @@
/* bnx2x_dcb.c: Broadcom Everest network driver.
*
- * Copyright 2009-2011 Broadcom Corporation
+ * Copyright 2009-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -121,26 +121,6 @@ static void bnx2x_pfc_clear(struct bnx2x *bp)
{
struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
nig_params.pause_enable = 1;
-#ifdef BNX2X_SAFC
- if (bp->flags & SAFC_TX_FLAG) {
- u32 high = 0, low = 0;
- int i;
-
- for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
- if (bp->pri_map[i] == 1)
- high |= (1 << i);
- if (bp->pri_map[i] == 0)
- low |= (1 << i);
- }
-
- nig_params.llfc_low_priority_classes = high;
- nig_params.llfc_low_priority_classes = low;
-
- nig_params.pause_enable = 0;
- nig_params.llfc_enable = 1;
- nig_params.llfc_out_en = 1;
- }
-#endif /* BNX2X_SAFC */
bnx2x_acquire_phy_lock(bp);
bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
@@ -167,27 +147,27 @@ static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
/* pfc */
- DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pri_en_bitmap %x\n",
features->pfc.pri_en_bitmap);
- DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.pfc_caps %x\n",
features->pfc.pfc_caps);
- DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.pfc.enabled %x\n",
features->pfc.enabled);
- DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.default_pri %x\n",
features->app.default_pri);
- DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.tc_supported %x\n",
features->app.tc_supported);
- DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_features.app.enabled %x\n",
features->app.enabled);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
i, features->app.app_pri_tbl[i].app_id);
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
i, features->app.app_pri_tbl[i].pri_bitmap);
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
i, features->app.app_pri_tbl[i].appBitfield);
}
@@ -221,13 +201,16 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_ERROR\n");
if (GET_FLAGS(error, DCBX_LOCAL_APP_MISMATCH))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_MISMATCH\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_MISMATCH\n");
+ if (GET_FLAGS(error, DCBX_REMOTE_APP_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_APP_TLV_NOT_FOUND\n");
if (app->enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH)) {
+ !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR | DCBX_LOCAL_APP_MISMATCH |
+ DCBX_REMOTE_APP_TLV_NOT_FOUND)) {
bp->dcbx_port_params.app.enabled = true;
@@ -256,7 +239,7 @@ static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
LLFC_TRAFFIC_TYPE_ISCSI);
}
} else {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_APP_DISABLED\n");
bp->dcbx_port_params.app.enabled = false;
for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
@@ -276,8 +259,10 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ERROR\n");
+ if (GET_FLAGS(error, DCBX_REMOTE_ETS_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_ETS_TLV_NOT_FOUND\n");
/* Clean up old settings of ets on COS */
for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params) ; i++) {
@@ -287,10 +272,10 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
cos_params[i].pri_bitmask = 0;
}
- if (bp->dcbx_port_params.app.enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
- ets->enabled) {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
+ if (bp->dcbx_port_params.app.enabled && ets->enabled &&
+ !GET_FLAGS(error,
+ DCBX_LOCAL_ETS_ERROR | DCBX_REMOTE_ETS_TLV_NOT_FOUND)) {
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_ENABLE\n");
bp->dcbx_port_params.ets.enabled = true;
bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
@@ -305,7 +290,7 @@ static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
ets, pg_pri_orginal_spread);
} else {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_ETS_DISABLED\n");
bp->dcbx_port_params.ets.enabled = false;
ets->pri_pg_tbl[0] = 0;
@@ -319,16 +304,18 @@ static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
{
if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_ERROR\n");
- if (bp->dcbx_port_params.app.enabled &&
- !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH) &&
- pfc->enabled) {
+ if (GET_FLAGS(error, DCBX_REMOTE_PFC_TLV_NOT_FOUND))
+ DP(BNX2X_MSG_DCB, "DCBX_REMOTE_PFC_TLV_NOT_FOUND\n");
+ if (bp->dcbx_port_params.app.enabled && pfc->enabled &&
+ !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR | DCBX_LOCAL_PFC_MISMATCH |
+ DCBX_REMOTE_PFC_TLV_NOT_FOUND)) {
bp->dcbx_port_params.pfc.enabled = true;
bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
~(pfc->pri_en_bitmap);
} else {
- DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
+ DP(BNX2X_MSG_DCB, "DCBX_LOCAL_PFC_DISABLED\n");
bp->dcbx_port_params.pfc.enabled = false;
bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
}
@@ -352,7 +339,7 @@ static void bnx2x_dcbx_map_nw(struct bnx2x *bp)
for (i = 0; i < ARRAY_SIZE(bp->dcbx_port_params.ets.cos_params); i++) {
if (cos_params[i].pri_bitmask & nw_prio) {
/* extend the bitmask with unmapped */
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"cos %d extended with 0x%08x\n", i, unmapped);
cos_params[i].pri_bitmask |= unmapped;
break;
@@ -443,18 +430,18 @@ static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
static int bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
func_params.f_obj = &bp->func_obj;
func_params.cmd = BNX2X_F_CMD_TX_STOP;
- DP(NETIF_MSG_LINK, "STOP TRAFFIC\n");
+ DP(BNX2X_MSG_DCB, "STOP TRAFFIC\n");
return bnx2x_func_state_change(bp, &func_params);
}
static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
struct bnx2x_func_tx_start_params *tx_params =
&func_params.params.tx_start;
@@ -463,7 +450,7 @@ static int bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
bnx2x_dcbx_fw_struct(bp, tx_params);
- DP(NETIF_MSG_LINK, "START TRAFFIC\n");
+ DP(BNX2X_MSG_DCB, "START TRAFFIC\n");
return bnx2x_func_state_change(bp, &func_params);
}
@@ -529,7 +516,7 @@ static void bnx2x_dcbx_2cos_limit_update_ets_config(struct bnx2x *bp)
/*
* In E3B0 the configuration may have more than 2 COS.
*/
-void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
+static void bnx2x_dcbx_update_ets_config(struct bnx2x *bp)
{
struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
struct bnx2x_ets_params ets_params = { 0 };
@@ -588,7 +575,7 @@ static int bnx2x_dcbx_read_shmem_remote_mib(struct bnx2x *bp)
u32 dcbx_remote_mib_offset = SHMEM2_RD(bp, dcbx_remote_mib_offset);
int rc;
- DP(NETIF_MSG_LINK, "dcbx_remote_mib_offset 0x%x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_remote_mib_offset 0x%x\n",
dcbx_remote_mib_offset);
if (SHMEM_DCBX_REMOTE_MIB_NONE == dcbx_remote_mib_offset) {
@@ -617,7 +604,7 @@ static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
int rc;
- DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
+ DP(BNX2X_MSG_DCB, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
@@ -693,7 +680,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
if (bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask
& (1 << prio)) {
bp->prio_to_cos[prio] = cos;
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"tx_mapping %d --> %d\n", prio, cos);
}
}
@@ -712,7 +699,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
switch (state) {
case BNX2X_DCBX_STATE_NEG_RECEIVED:
{
- DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
#ifdef BCM_DCBNL
/**
* Delete app tlvs from dcbnl before reading new
@@ -762,7 +749,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
- DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_PAUSED\n");
bnx2x_pfc_set_pfc(bp);
bnx2x_dcbx_update_ets_params(bp);
@@ -770,7 +757,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
- DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
+ DP(BNX2X_MSG_DCB, "BNX2X_DCBX_STATE_TX_RELEASED\n");
bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
#ifdef BCM_DCBNL
/*
@@ -861,7 +848,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
(u8)dp->admin_configuration_bw_precentage[i]);
- DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
+ DP(BNX2X_MSG_DCB, "pg_bw_tbl[%d] = %02x\n",
i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
}
@@ -869,7 +856,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
(u8)dp->admin_configuration_ets_pg[i]);
- DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
+ DP(BNX2X_MSG_DCB, "pri_pg_tbl[%d] = %02x\n",
i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
}
@@ -923,7 +910,7 @@ void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
bp->dcb_state = false;
bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
}
- DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
+ DP(BNX2X_MSG_DCB, "DCB state [%s:%s]\n",
dcb_on ? "ON" : "OFF",
dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
@@ -945,30 +932,30 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
bp->dcbx_config_params.admin_ets_reco_valid = 1;
bp->dcbx_config_params.admin_app_priority_willing = 1;
- bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
- bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
- bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 100;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 0;
+ bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
- bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
+ bp->dcbx_config_params.admin_configuration_ets_pg[0] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
- bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
+ bp->dcbx_config_params.admin_configuration_ets_pg[3] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 100;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 0;
bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
- bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 0;
+ bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 0;
bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
@@ -977,25 +964,12 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp)
bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
- bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
- bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
- bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
+ bp->dcbx_config_params.admin_pfc_bitmap = 0x0;
+ bp->dcbx_config_params.admin_priority_app_table[0].valid = 0;
+ bp->dcbx_config_params.admin_priority_app_table[1].valid = 0;
bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
- bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
- bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
- bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
- bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
- bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
- bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
- bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
- bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
- bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
- bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
- bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
- bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
- bp->dcbx_config_params.admin_default_priority =
- bp->dcbx_config_params.admin_priority_app_table[1].priority;
+ bp->dcbx_config_params.admin_default_priority = 0;
}
void bnx2x_dcbx_init(struct bnx2x *bp)
@@ -1011,7 +985,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
* the function is pmf
* shmem2 contains DCBX support fields
*/
- DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
+ DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n",
bp->dcb_state, bp->port.pmf);
if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
@@ -1019,7 +993,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
dcbx_lldp_params_offset =
SHMEM2_RD(bp, dcbx_lldp_params_offset);
- DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
+ DP(BNX2X_MSG_DCB, "dcbx_lldp_params_offset 0x%x\n",
dcbx_lldp_params_offset);
bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
@@ -1041,38 +1015,36 @@ bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
u8 pri = 0;
u8 cos = 0;
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
- DP(NETIF_MSG_LINK,
- "pdev->params.dcbx_port_params.pfc."
- "priority_non_pauseable_mask %x\n",
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.pfc.priority_non_pauseable_mask %x\n",
bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].pri_bitmask %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].pri_bitmask %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].bw_tbl %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].bw_tbl %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].strict %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].strict);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].strict %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].strict);
- DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
- "cos_params[%d].pauseable %x\n", cos,
- bp->dcbx_port_params.ets.cos_params[cos].pauseable);
+ DP(BNX2X_MSG_DCB,
+ "pdev->params.dcbx_port_params.ets.cos_params[%d].pauseable %x\n",
+ cos, bp->dcbx_port_params.ets.cos_params[cos].pauseable);
}
for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
- DP(NETIF_MSG_LINK,
- "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
- "priority %x\n", pri,
- pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
+ DP(BNX2X_MSG_DCB,
+ "pfc_fw_cfg->traffic_type_to_priority_cos[%d].priority %x\n",
+ pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
}
@@ -1119,7 +1091,7 @@ static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
help_data->num_of_pg++;
}
}
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_DCB,
"add_traf_type %d pg_found %s num_of_pg %d\n",
add_traf_type, (false == pg_found) ? "NO" : "YES",
help_data->num_of_pg);
@@ -1312,8 +1284,7 @@ static void bnx2x_dcbx_2cos_limit_cee_single_pg_to_cos_params(struct bnx2x *bp,
}
if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
- BNX2X_ERR("Invalid value for pri_join_mask -"
- " could not find a priority\n");
+ BNX2X_ERR("Invalid value for pri_join_mask - could not find a priority\n");
cos_data->data[0].pri_join_mask = pri_mask_without_pri;
cos_data->data[1].pri_join_mask = pri_tested;
@@ -1626,8 +1597,10 @@ static int bnx2x_dcbx_spread_strict_pri(struct bnx2x *bp,
num_of_app_pri--;
}
- if (num_spread_of_entries)
+ if (num_spread_of_entries) {
+		BNX2X_ERR("Failed to spread strict priorities\n");
return -EINVAL;
+ }
return 0;
}
@@ -1675,8 +1648,7 @@ static void bnx2x_dcbx_cee_fill_cos_params(struct bnx2x *bp,
if (help_data->num_of_pg > DCBX_COS_MAX_NUM_E3B0) {
if (bnx2x_dcbx_join_pgs(bp, ets, help_data,
DCBX_COS_MAX_NUM_E3B0)) {
- BNX2X_ERR("Unable to reduce the number of PGs -"
- "we will disables ETS\n");
+			BNX2X_ERR("Unable to reduce the number of PGs - we will disable ETS\n");
bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data,
pri_join_mask);
return;
@@ -1776,24 +1748,24 @@ static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
if (p->pauseable &&
DCBX_PFC_PRI_GET_NON_PAUSE(bp,
p->pri_bitmask) != 0)
- BNX2X_ERR("Inconsistent config for "
- "pausable COS %d\n", i);
+ BNX2X_ERR("Inconsistent config for pausable COS %d\n",
+ i);
if (!p->pauseable &&
DCBX_PFC_PRI_GET_PAUSE(bp,
p->pri_bitmask) != 0)
- BNX2X_ERR("Inconsistent config for "
- "nonpausable COS %d\n", i);
+ BNX2X_ERR("Inconsistent config for nonpausable COS %d\n",
+ i);
}
}
if (p->pauseable)
- DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
+ DP(BNX2X_MSG_DCB, "COS %d PAUSABLE prijoinmask 0x%x\n",
i, cos_data.data[i].pri_join_mask);
else
- DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
- "0x%x\n",
- i, cos_data.data[i].pri_join_mask);
+ DP(BNX2X_MSG_DCB,
+ "COS %d NONPAUSABLE prijoinmask 0x%x\n",
+ i, cos_data.data[i].pri_join_mask);
}
bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos ;
@@ -1808,7 +1780,7 @@ static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
- DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
+ DP(BNX2X_MSG_DCB, "set_configuration_ets_pg[%d] = 0x%x\n",
i, set_configuration_ets_pg[i]);
}
}
@@ -1904,14 +1876,14 @@ static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
+ DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcb_state);
return bp->dcb_state;
}
static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+ DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
return 0;
@@ -1921,7 +1893,7 @@ static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
u8 *perm_addr)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
+ DP(BNX2X_MSG_DCB, "GET-PERM-ADDR\n");
/* first the HW mac address */
memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
@@ -1938,7 +1910,7 @@ static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
+ DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, pgid);
if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
return;
@@ -1963,7 +1935,7 @@ static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
int pgid, u8 bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
+ DP(BNX2X_MSG_DCB, "pgid[%d] = %d\n", pgid, bw_pct);
if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
return;
@@ -1977,14 +1949,14 @@ static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
u8 up_map)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
}
static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
int pgid, u8 bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to set; No RX support\n");
}
static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
@@ -1992,7 +1964,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
u8 *up_map)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+ DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
/**
* bw_pct ingnored - band-width percentage devision between user
@@ -2018,7 +1990,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
+ DP(BNX2X_MSG_DCB, "pgid = %d\n", pgid);
*bw_pct = 0;
@@ -2033,7 +2005,7 @@ static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
u8 *up_map)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
*prio_type = *pgid = *bw_pct = *up_map = 0;
}
@@ -2042,7 +2014,7 @@ static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
int pgid, u8 *bw_pct)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
+ DP(BNX2X_MSG_DCB, "Nothing to get; No RX support\n");
*bw_pct = 0;
}
@@ -2051,7 +2023,7 @@ static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
u8 setting)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
+ DP(BNX2X_MSG_DCB, "prio[%d] = %d\n", prio, setting);
if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
return;
@@ -2066,7 +2038,7 @@ static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
u8 *setting)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "prio = %d\n", prio);
+ DP(BNX2X_MSG_DCB, "prio = %d\n", prio);
*setting = 0;
@@ -2081,21 +2053,21 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
struct bnx2x *bp = netdev_priv(netdev);
int rc = 0;
- DP(NETIF_MSG_LINK, "SET-ALL\n");
+ DP(BNX2X_MSG_DCB, "SET-ALL\n");
if (!bnx2x_dcbnl_set_valid(bp))
return 1;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- netdev_err(bp->dev, "Handling parity error recovery. "
- "Try again later\n");
+ netdev_err(bp->dev,
+ "Handling parity error recovery. Try again later\n");
return 1;
}
if (netif_running(bp->dev)) {
bnx2x_nic_unload(bp, UNLOAD_NORMAL);
rc = bnx2x_nic_load(bp, LOAD_NORMAL);
}
- DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
+ DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc);
if (rc)
return 1;
@@ -2134,22 +2106,25 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
*cap = BNX2X_DCBX_CAPS;
break;
default:
+			BNX2X_ERR("Invalid capability ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
rval = -EINVAL;
+ }
- DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
+ DP(BNX2X_MSG_DCB, "capid %d:%x\n", capid, *cap);
return rval;
}
-static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
+static int bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
{
struct bnx2x *bp = netdev_priv(netdev);
u8 rval = 0;
- DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
+ DP(BNX2X_MSG_DCB, "tcid %d\n", tcid);
if (bp->dcb_state) {
switch (tcid) {
@@ -2162,26 +2137,29 @@ static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
DCBX_COS_MAX_NUM_E2;
break;
default:
+			BNX2X_ERR("Invalid TC-ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
rval = -EINVAL;
+ }
return rval;
}
-static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
+static int bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
+ DP(BNX2X_MSG_DCB, "num tcs = %d; Not supported\n", num);
return -EINVAL;
}
static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
+ DP(BNX2X_MSG_DCB, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
if (!bp->dcb_state)
return 0;
@@ -2192,7 +2170,7 @@ static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
+ DP(BNX2X_MSG_DCB, "state = %s\n", state ? "on" : "off");
if (!bnx2x_dcbnl_set_valid(bp))
return;
@@ -2269,9 +2247,11 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
bnx2x_admin_app_set_ent(
&bp->dcbx_config_params.admin_priority_app_table[ff],
idtype, idval, up);
- else
+ else {
/* app table is full */
+ BNX2X_ERR("Application table is too large\n");
return -EBUSY;
+ }
/* up configured, if not 0 make sure feature is enabled */
if (up)
@@ -2285,11 +2265,13 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
+ DP(BNX2X_MSG_DCB, "app_type %d, app_id %x, prio bitmap %d\n",
idtype, idval, up);
- if (!bnx2x_dcbnl_set_valid(bp))
+ if (!bnx2x_dcbnl_set_valid(bp)) {
+ DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
return -EINVAL;
+ }
/* verify idtype */
switch (idtype) {
@@ -2297,6 +2279,7 @@ static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
case DCB_APP_IDTYPE_PORTNUM:
break;
default:
+ DP(BNX2X_MSG_DCB, "Wrong ID type\n");
return -EINVAL;
}
return bnx2x_set_admin_app_up(bp, idtype, idval, up);
@@ -2318,13 +2301,13 @@ static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
{
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "state = %02x\n", state);
+ DP(BNX2X_MSG_DCB, "state = %02x\n", state);
/* set dcbx mode */
if ((state & BNX2X_DCBX_CAPS) != state) {
- BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
- "capabilities\n", state);
+ BNX2X_ERR("Requested DCBX mode %x is beyond advertised capabilities\n",
+ state);
return 1;
}
@@ -2348,7 +2331,7 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
struct bnx2x *bp = netdev_priv(netdev);
u8 rval = 0;
- DP(NETIF_MSG_LINK, "featid %d\n", featid);
+ DP(BNX2X_MSG_DCB, "featid %d\n", featid);
if (bp->dcb_state) {
*flags = 0;
@@ -2374,11 +2357,14 @@ static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
*flags |= DCB_FEATCFG_ERROR;
break;
default:
+			BNX2X_ERR("Invalid feature-ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "DCB disabled\n");
rval = -EINVAL;
+ }
return rval;
}
@@ -2389,7 +2375,7 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
struct bnx2x *bp = netdev_priv(netdev);
u8 rval = 0;
- DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
+ DP(BNX2X_MSG_DCB, "featid = %d flags = %02x\n", featid, flags);
/* ignore the 'advertise' flag */
if (bnx2x_dcbnl_set_valid(bp)) {
@@ -2412,11 +2398,14 @@ static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
flags & DCB_FEATCFG_WILLING ? 1 : 0;
break;
default:
+			BNX2X_ERR("Invalid feature-ID\n");
rval = -EINVAL;
break;
}
- } else
+ } else {
+ DP(BNX2X_MSG_DCB, "dcbnl call not valid\n");
rval = -EINVAL;
+ }
return rval;
}
@@ -2427,7 +2416,7 @@ static int bnx2x_peer_appinfo(struct net_device *netdev,
int i;
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "APP-INFO\n");
+ DP(BNX2X_MSG_DCB, "APP-INFO\n");
info->willing = (bp->dcbx_remote_flags & DCBX_APP_REM_WILLING) ?: 0;
info->error = (bp->dcbx_remote_flags & DCBX_APP_RX_ERROR) ?: 0;
@@ -2446,7 +2435,7 @@ static int bnx2x_peer_apptable(struct net_device *netdev,
int i, j;
struct bnx2x *bp = netdev_priv(netdev);
- DP(NETIF_MSG_LINK, "APP-TABLE\n");
+ DP(BNX2X_MSG_DCB, "APP-TABLE\n");
for (i = 0, j = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
struct dcbx_app_priority_entry *ent =
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2ab9254e2d5e..06c7a0435948 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -1,6 +1,6 @@
/* bnx2x_dcb.h: Broadcom Everest network driver.
*
- * Copyright 2009-2011 Broadcom Corporation
+ * Copyright 2009-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
index b983825d0ee9..3e4cff9b1ebe 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h
@@ -1,6 +1,6 @@
/* bnx2x_dump.h: Broadcom Everest network driver.
*
- * Copyright (c) 2011 Broadcom Corporation
+ * Copyright (c) 2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 31a8b38ab15e..2cc0a1703970 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1,6 +1,6 @@
/* bnx2x_ethtool.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -175,7 +175,11 @@ static const struct {
{ STATS_OFFSET32(total_tpa_aggregated_frames_hi),
8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"},
{ STATS_OFFSET32(total_tpa_bytes_hi),
- 8, STATS_FLAGS_FUNC, "tpa_bytes"}
+ 8, STATS_FLAGS_FUNC, "tpa_bytes"},
+ { STATS_OFFSET32(recoverable_error),
+ 4, STATS_FLAGS_FUNC, "recoverable_errors" },
+ { STATS_OFFSET32(unrecoverable_error),
+ 4, STATS_FLAGS_FUNC, "unrecoverable_errors" },
};
#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
@@ -218,20 +222,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
(SUPPORTED_TP | SUPPORTED_FIBRE));
cmd->advertising = bp->port.advertising[cfg_idx];
- if ((bp->state == BNX2X_STATE_OPEN) &&
- !(bp->flags & MF_FUNC_DIS) &&
- (bp->link_vars.link_up)) {
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
- cmd->duplex = bp->link_vars.duplex;
+ if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) {
+ if (!(bp->flags & MF_FUNC_DIS)) {
+ ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->duplex = bp->link_vars.duplex;
+ } else {
+ ethtool_cmd_speed_set(
+ cmd, bp->link_params.req_line_speed[cfg_idx]);
+ cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ }
+
+ if (IS_MF(bp) && !BP_NOMCP(bp))
+ ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
} else {
- ethtool_cmd_speed_set(
- cmd, bp->link_params.req_line_speed[cfg_idx]);
- cmd->duplex = bp->link_params.req_duplex[cfg_idx];
+ cmd->duplex = DUPLEX_UNKNOWN;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
}
- if (IS_MF(bp))
- ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
-
cmd->port = bnx2x_get_port_type(bp);
cmd->phy_address = bp->mdio.prtad;
@@ -242,10 +249,38 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else
cmd->autoneg = AUTONEG_DISABLE;
+ /* Publish LP advertised speeds and FC */
+ if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ u32 status = bp->link_vars.link_status;
+
+ cmd->lp_advertising |= ADVERTISED_Autoneg;
+ if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Pause;
+ if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
+ cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+ if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_1000baseT_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+ if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE)
+ cmd->lp_advertising |= ADVERTISED_10000baseT_Full;
+ }
+
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
- DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
@@ -266,7 +301,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (IS_MF_SD(bp))
return 0;
- DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
" duplex %d port %d phy_address %d transceiver %d\n"
" autoneg %d maxtxpkt %d maxrxpkt %d\n",
@@ -277,6 +312,10 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
speed = ethtool_cmd_speed(cmd);
+	/* If received a request for an unknown duplex, assume full */
+ if (cmd->duplex == DUPLEX_UNKNOWN)
+ cmd->duplex = DUPLEX_FULL;
+
if (IS_MF_SI(bp)) {
u32 part;
u32 line_speed = bp->link_vars.line_speed;
@@ -286,18 +325,17 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
line_speed = 10000;
if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
- BNX2X_DEV_INFO("To set speed BC %X or higher "
- "is required, please upgrade BC\n",
- REQ_BC_VER_4_SET_MF_BW);
+ DP(BNX2X_MSG_ETHTOOL,
+ "To set speed BC %X or higher is required, please upgrade BC\n",
+ REQ_BC_VER_4_SET_MF_BW);
return -EINVAL;
}
part = (speed * 100) / line_speed;
if (line_speed < speed || !part) {
- BNX2X_DEV_INFO("Speed setting should be in a range "
- "from 1%% to 100%% "
- "of actual line speed\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "Speed setting should be in a range from 1%% to 100%% of actual line speed\n");
return -EINVAL;
}
@@ -319,7 +357,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (!(bp->port.supported[0] & SUPPORTED_TP ||
bp->port.supported[1] & SUPPORTED_TP)) {
- DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
bp->link_params.multi_phy_config &=
@@ -339,7 +377,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
bp->port.supported[1] & SUPPORTED_FIBRE)) {
- DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
bp->link_params.multi_phy_config &=
@@ -353,7 +391,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
PORT_HW_CFG_PHY_SELECTION_SECOND_PHY;
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported port type\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported port type\n");
return -EINVAL;
}
/* Save new config in case command complete successully */
@@ -362,7 +400,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
/* Restore old config in case command failed */
bp->link_params.multi_phy_config = old_multi_phy_config;
- DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
+ DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
if (cmd->autoneg == AUTONEG_ENABLE) {
u32 an_supported_speed = bp->port.supported[cfg_idx];
@@ -371,14 +409,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
an_supported_speed |= (SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full);
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
- DP(NETIF_MSG_LINK, "Autoneg not supported\n");
+ DP(BNX2X_MSG_ETHTOOL, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
if (cmd->advertising & ~an_supported_speed) {
- DP(NETIF_MSG_LINK, "Advertisement parameters "
- "are not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "Advertisement parameters are not supported\n");
return -EINVAL;
}
@@ -427,7 +465,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"10M full not supported\n");
return -EINVAL;
}
@@ -437,7 +475,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Half)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"10M half not supported\n");
return -EINVAL;
}
@@ -451,7 +489,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"100M full not supported\n");
return -EINVAL;
}
@@ -461,7 +499,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
} else {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Half)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"100M half not supported\n");
return -EINVAL;
}
@@ -473,13 +511,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_1000:
if (cmd->duplex != DUPLEX_FULL) {
- DP(NETIF_MSG_LINK, "1G half not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "1G half not supported\n");
return -EINVAL;
}
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_1000baseT_Full)) {
- DP(NETIF_MSG_LINK, "1G full not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "1G full not supported\n");
return -EINVAL;
}
@@ -489,14 +529,14 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_2500:
if (cmd->duplex != DUPLEX_FULL) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"2.5G half not supported\n");
return -EINVAL;
}
if (!(bp->port.supported[cfg_idx]
& SUPPORTED_2500baseX_Full)) {
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"2.5G full not supported\n");
return -EINVAL;
}
@@ -507,13 +547,15 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
case SPEED_10000:
if (cmd->duplex != DUPLEX_FULL) {
- DP(NETIF_MSG_LINK, "10G half not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "10G half not supported\n");
return -EINVAL;
}
if (!(bp->port.supported[cfg_idx]
& SUPPORTED_10000baseT_Full)) {
- DP(NETIF_MSG_LINK, "10G full not supported\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "10G full not supported\n");
return -EINVAL;
}
@@ -522,7 +564,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
default:
- DP(NETIF_MSG_LINK, "Unsupported speed %u\n", speed);
+ DP(BNX2X_MSG_ETHTOOL, "Unsupported speed %u\n", speed);
return -EINVAL;
}
@@ -531,7 +573,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->port.advertising[cfg_idx] = advertising;
}
- DP(NETIF_MSG_LINK, "req_line_speed %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "req_line_speed %d\n"
" req_duplex %d advertising 0x%x\n",
bp->link_params.req_line_speed[cfg_idx],
bp->link_params.req_duplex[cfg_idx],
@@ -774,14 +816,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
- if (bp->port.pmf) {
- bnx2x_acquire_phy_lock(bp);
- bnx2x_get_ext_phy_fw_version(&bp->link_params,
- (bp->state != BNX2X_STATE_CLOSED),
- phy_fw_ver, PHY_FW_VER_LEN);
- bnx2x_release_phy_lock(bp);
- }
-
+ bnx2x_get_ext_phy_fw_version(&bp->link_params,
+ phy_fw_ver, PHY_FW_VER_LEN);
strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
@@ -817,13 +853,16 @@ static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct bnx2x *bp = netdev_priv(dev);
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (wol->wolopts & ~WAKE_MAGIC) {
+		DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
+ }
if (wol->wolopts & WAKE_MAGIC) {
- if (bp->flags & NO_WOL_FLAG)
+ if (bp->flags & NO_WOL_FLAG) {
+			DP(BNX2X_MSG_ETHTOOL, "WOL not supported\n");
return -EINVAL;
-
+ }
bp->wol = 1;
} else
bp->wol = 0;
@@ -882,11 +921,27 @@ static int bnx2x_get_eeprom_len(struct net_device *dev)
return bp->common.flash_size;
}
+/* Per pf misc lock must be acquired before the per port mcp lock. Otherwise, had
+ * we done things the other way around, if two pfs from the same port would
+ * attempt to access nvram at the same time, we could run into a scenario such
+ * as:
+ * pf A takes the port lock.
+ * pf B succeeds in taking the same lock since they are from the same port.
+ * pf A takes the per pf misc lock. Performs eeprom access.
+ * pf A finishes. Unlocks the per pf misc lock.
+ * Pf B takes the lock and proceeds to perform its own access.
+ * pf A unlocks the per port lock, while pf B is still working (!).
+ * mcp takes the per port lock and corrupts pf B's access (and/or has its own
+ * access corrupted by pf B).
+ */
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int count, i;
- u32 val = 0;
+ u32 val;
+
+ /* acquire HW lock: protect against other PFs in PF Direct Assignment */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
/* adjust timeout for emulation/FPGA */
count = BNX2X_NVRAM_TIMEOUT_COUNT;
@@ -906,7 +961,8 @@ static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
}
if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
- DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot get access to nvram interface\n");
return -EBUSY;
}
@@ -917,7 +973,7 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
int port = BP_PORT(bp);
int count, i;
- u32 val = 0;
+ u32 val;
/* adjust timeout for emulation/FPGA */
count = BNX2X_NVRAM_TIMEOUT_COUNT;
@@ -937,10 +993,13 @@ static int bnx2x_release_nvram_lock(struct bnx2x *bp)
}
if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
- DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot free access to nvram interface\n");
return -EBUSY;
}
+ /* release HW lock: protect against other PFs in PF Direct Assignment */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM);
return 0;
}
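The comment added before bnx2x_acquire_nvram_lock() explains the required nesting: the per-PF misc/NVRAM hardware lock is taken before the per-port software arbitration and released after it, so a second PF on the same port can never interleave with an access in flight. A toy sketch of that ordering with two pthread mutexes standing in for the two locks (userspace stand-ins, not the driver's primitives):

#include <pthread.h>
#include <stdio.h>

/* Outer lock = per-PF misc/NVRAM HW lock, inner lock = per-port SW arbitration. */
static pthread_mutex_t hw_lock_nvram = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t port_arb      = PTHREAD_MUTEX_INITIALIZER;

static void nvram_access(const char *who)
{
        pthread_mutex_lock(&hw_lock_nvram);     /* acquire_nvram_lock: HW lock first */
        pthread_mutex_lock(&port_arb);          /* then the per-port arbitration */

        printf("%s: eeprom access\n", who);

        pthread_mutex_unlock(&port_arb);        /* release in reverse order */
        pthread_mutex_unlock(&hw_lock_nvram);   /* release_nvram_lock */
}

int main(void)
{
        nvram_access("pf A");
        nvram_access("pf B");
        return 0;
}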
@@ -1009,7 +1068,9 @@ static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
break;
}
}
-
+ if (rc == -EBUSY)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram read timeout expired\n");
return rc;
}
@@ -1021,15 +1082,15 @@ static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
__be32 val;
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
- DP(BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"Invalid parameter: offset 0x%x buf_size 0x%x\n",
offset, buf_size);
return -EINVAL;
}
if (offset + buf_size > bp->common.flash_size) {
- DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
- " buf_size (0x%x) > flash_size (0x%x)\n",
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
offset, buf_size, bp->common.flash_size);
return -EINVAL;
}
@@ -1074,10 +1135,13 @@ static int bnx2x_get_eeprom(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
int rc;
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -EAGAIN;
+ }
- DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
eeprom->len, eeprom->len);
@@ -1126,6 +1190,9 @@ static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
}
}
+ if (rc == -EBUSY)
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram write timeout expired\n");
return rc;
}
@@ -1140,8 +1207,8 @@ static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
__be32 val;
if (offset + buf_size > bp->common.flash_size) {
- DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
- " buf_size (0x%x) > flash_size (0x%x)\n",
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
offset, buf_size, bp->common.flash_size);
return -EINVAL;
}
@@ -1189,15 +1256,15 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
- DP(BNX2X_MSG_NVM,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"Invalid parameter: offset 0x%x buf_size 0x%x\n",
offset, buf_size);
return -EINVAL;
}
if (offset + buf_size > bp->common.flash_size) {
- DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
- " buf_size (0x%x) > flash_size (0x%x)\n",
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "Invalid parameter: offset (0x%x) + buf_size (0x%x) > flash_size (0x%x)\n",
offset, buf_size, bp->common.flash_size);
return -EINVAL;
}
@@ -1245,10 +1312,13 @@ static int bnx2x_set_eeprom(struct net_device *dev,
int port = BP_PORT(bp);
int rc = 0;
u32 ext_phy_config;
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -EAGAIN;
+ }
- DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
" magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
eeprom->len, eeprom->len);
@@ -1257,8 +1327,11 @@ static int bnx2x_set_eeprom(struct net_device *dev,
/* PHY eeprom can be accessed only by the PMF */
if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
- !bp->port.pmf)
+ !bp->port.pmf) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "wrong magic or interface is not pmf\n");
return -EINVAL;
+ }
ext_phy_config =
SHMEM_RD(bp,
@@ -1370,7 +1443,8 @@ static int bnx2x_set_ringparam(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ DP(BNX2X_MSG_ETHTOOL,
+ "Handling parity error recovery. Try again later\n");
return -EAGAIN;
}
@@ -1378,8 +1452,10 @@ static int bnx2x_set_ringparam(struct net_device *dev,
(ering->rx_pending < (bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
MIN_RX_SIZE_TPA)) ||
(ering->tx_pending > MAX_TX_AVAIL) ||
- (ering->tx_pending <= MAX_SKB_FRAGS + 4))
+ (ering->tx_pending <= MAX_SKB_FRAGS + 4)) {
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
+ }
bp->rx_ring_size = ering->rx_pending;
bp->tx_ring_size = ering->tx_pending;
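The validation above (only the debug print is new) rejects RX rings below the TPA or non-TPA minimum and TX rings that are not strictly larger than MAX_SKB_FRAGS + 4, roughly a maximally fragmented skb plus a few extra BDs. A small sketch of the check with assumed limits (the driver derives the real ones from its ring macros):

#include <stdbool.h>
#include <stdio.h>

/* Assumed limits for illustration only. */
#define MAX_RX_AVAIL   4080
#define MAX_TX_AVAIL   4080
#define MIN_RX_SIZE     128
#define MAX_SKB_FRAGS    17

static bool ring_params_ok(unsigned int rx, unsigned int tx)
{
        return rx <= MAX_RX_AVAIL && rx >= MIN_RX_SIZE &&
               tx <= MAX_TX_AVAIL && tx > MAX_SKB_FRAGS + 4;
}

int main(void)
{
        printf("rx=1024 tx=20  -> %d\n", ring_params_ok(1024, 20));  /* 20 <= 21, rejected */
        printf("rx=1024 tx=256 -> %d\n", ring_params_ok(1024, 256)); /* accepted */
        return 0;
}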
@@ -1392,15 +1468,22 @@ static void bnx2x_get_pauseparam(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ int cfg_reg;
+
epause->autoneg = (bp->link_params.req_flow_ctrl[cfg_idx] ==
BNX2X_FLOW_CTRL_AUTO);
- epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
+ if (!epause->autoneg)
+ cfg_reg = bp->link_params.req_flow_ctrl[cfg_idx];
+ else
+ cfg_reg = bp->link_params.req_fc_auto_adv;
+
+ epause->rx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_RX) ==
BNX2X_FLOW_CTRL_RX);
- epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
+ epause->tx_pause = ((cfg_reg & BNX2X_FLOW_CTRL_TX) ==
BNX2X_FLOW_CTRL_TX);
- DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
" autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}
@@ -1413,7 +1496,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (IS_MF(bp))
return 0;
- DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
+ DP(BNX2X_MSG_ETHTOOL, "ethtool_pauseparam: cmd %d\n"
" autoneg %d rx_pause %d tx_pause %d\n",
epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
@@ -1430,7 +1513,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
if (epause->autoneg) {
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
- DP(NETIF_MSG_LINK, "autoneg not supported\n");
+ DP(BNX2X_MSG_ETHTOOL, "autoneg not supported\n");
return -EINVAL;
}
@@ -1440,7 +1523,7 @@ static int bnx2x_set_pauseparam(struct net_device *dev,
}
}
- DP(NETIF_MSG_LINK,
+ DP(BNX2X_MSG_ETHTOOL,
"req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl[cfg_idx]);
if (netif_running(dev)) {
@@ -1572,8 +1655,11 @@ static int bnx2x_test_registers(struct bnx2x *bp)
{ BNX2X_CHIP_MASK_ALL, 0xffffffff, 0, 0x00000000 }
};
- if (!netif_running(bp->dev))
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return rc;
+ }
if (CHIP_IS_E1(bp))
hw = BNX2X_CHIP_MASK_E1;
@@ -1618,7 +1704,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
/* verify value is as expected */
if ((val & mask) != (wr_val & mask)) {
- DP(NETIF_MSG_HW,
+ DP(BNX2X_MSG_ETHTOOL,
"offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
offset, val, wr_val, mask);
goto test_reg_exit;
@@ -1672,8 +1758,11 @@ static int bnx2x_test_memory(struct bnx2x *bp)
{ NULL, 0xffffffff, {0, 0, 0, 0} }
};
- if (!netif_running(bp->dev))
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return rc;
+ }
if (CHIP_IS_E1(bp))
index = BNX2X_CHIP_E1_OFST;
@@ -1688,7 +1777,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if (val & ~(prty_tbl[i].hw_mask[index])) {
- DP(NETIF_MSG_HW,
+ DP(BNX2X_MSG_ETHTOOL,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
}
@@ -1703,7 +1792,7 @@ static int bnx2x_test_memory(struct bnx2x *bp)
for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
val = REG_RD(bp, prty_tbl[i].offset);
if (val & ~(prty_tbl[i].hw_mask[index])) {
- DP(NETIF_MSG_HW,
+ DP(BNX2X_MSG_ETHTOOL,
"%s is 0x%x\n", prty_tbl[i].name, val);
goto test_mem_exit;
}
@@ -1724,7 +1813,7 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes)
msleep(20);
if (cnt <= 0 && bnx2x_link_test(bp, is_serdes))
- DP(NETIF_MSG_LINK, "Timeout waiting for link up\n");
+ DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n");
}
}
@@ -1774,6 +1863,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EINVAL;
}
@@ -1782,6 +1872,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
skb = netdev_alloc_skb(bp->dev, fp_rx->rx_buf_size);
if (!skb) {
+ DP(BNX2X_MSG_ETHTOOL, "Can't allocate skb\n");
rc = -ENOMEM;
goto test_loopback_exit;
}
@@ -1796,7 +1887,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
rc = -ENOMEM;
dev_kfree_skb(skb);
- BNX2X_ERR("Unable to map SKB\n");
+ DP(BNX2X_MSG_ETHTOOL, "Unable to map SKB\n");
goto test_loopback_exit;
}
@@ -1879,7 +1970,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
goto test_loopback_rx_exit;
- len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
+ len = le16_to_cpu(cqe->fast_path_cqe.pkt_len_or_gro_seg_len);
if (len != pkt_size)
goto test_loopback_rx_exit;
@@ -1926,13 +2017,13 @@ static int bnx2x_test_loopback(struct bnx2x *bp)
res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK);
if (res) {
- DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
+ DP(BNX2X_MSG_ETHTOOL, " PHY loopback failed (res %d)\n", res);
rc |= BNX2X_PHY_LOOPBACK_FAILED;
}
res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK);
if (res) {
- DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
+ DP(BNX2X_MSG_ETHTOOL, " MAC loopback failed (res %d)\n", res);
rc |= BNX2X_MAC_LOOPBACK_FAILED;
}
@@ -1958,23 +2049,33 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
{ 0x708, 0x70 }, /* manuf_key_info */
{ 0, 0 }
};
- __be32 buf[0x350 / 4];
- u8 *data = (u8 *)buf;
+ __be32 *buf;
+ u8 *data;
int i, rc;
u32 magic, crc;
if (BP_NOMCP(bp))
return 0;
+ buf = kmalloc(0x350, GFP_KERNEL);
+ if (!buf) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, "kmalloc failed\n");
+ rc = -ENOMEM;
+ goto test_nvram_exit;
+ }
+ data = (u8 *)buf;
+
rc = bnx2x_nvram_read(bp, 0, data, 4);
if (rc) {
- DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "magic value read (rc %d)\n", rc);
goto test_nvram_exit;
}
magic = be32_to_cpu(buf[0]);
if (magic != 0x669955aa) {
- DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "wrong magic value (0x%08x)\n", magic);
rc = -ENODEV;
goto test_nvram_exit;
}
@@ -1984,31 +2085,35 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
nvram_tbl[i].size);
if (rc) {
- DP(NETIF_MSG_PROBE,
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
"nvram_tbl[%d] read data (rc %d)\n", i, rc);
goto test_nvram_exit;
}
crc = ether_crc_le(nvram_tbl[i].size, data);
if (crc != CRC32_RESIDUAL) {
- DP(NETIF_MSG_PROBE,
- "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "nvram_tbl[%d] wrong crc value (0x%08x)\n", i, crc);
rc = -ENODEV;
goto test_nvram_exit;
}
}
test_nvram_exit:
+ kfree(buf);
return rc;
}
/* Send an EMPTY ramrod on the first queue */
static int bnx2x_test_intr(struct bnx2x *bp)
{
- struct bnx2x_queue_state_params params = {0};
+ struct bnx2x_queue_state_params params = {NULL};
- if (!netif_running(bp->dev))
+ if (!netif_running(bp->dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -ENODEV;
+ }
params.q_obj = &bp->fp->q_obj;
params.cmd = BNX2X_Q_CMD_EMPTY;
@@ -2024,7 +2129,8 @@ static void bnx2x_self_test(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 is_serdes;
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- pr_err("Handling parity error recovery. Try again later\n");
+ netdev_err(bp->dev,
+ "Handling parity error recovery. Try again later\n");
etest->flags |= ETH_TEST_FL_FAILED;
return;
}
@@ -2237,11 +2343,16 @@ static int bnx2x_set_phys_id(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (!netif_running(dev)) {
+ DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
+ "cannot access eeprom when the interface is down\n");
return -EAGAIN;
+ }
- if (!bp->port.pmf)
+ if (!bp->port.pmf) {
+ DP(BNX2X_MSG_ETHTOOL, "Interface is not pmf\n");
return -EOPNOTSUPP;
+ }
switch (state) {
case ETHTOOL_ID_ACTIVE:
@@ -2278,6 +2389,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return 0;
default:
+ DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n");
return -EOPNOTSUPP;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 998652a1b858..cd6dfa9eaa3a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -243,18 +243,6 @@
(IRO[48].base + ((funcId) * IRO[48].m1))
#define COMMON_ASM_INVALID_ASSERT_OPCODE 0x0
-/**
-* This file defines HSI constants for the ETH flow
-*/
-#ifdef _EVEREST_MICROCODE
-#include "Microcode\Generated\DataTypes\eth_rx_bd.h"
-#include "Microcode\Generated\DataTypes\eth_tx_bd.h"
-#include "Microcode\Generated\DataTypes\eth_rx_cqe.h"
-#include "Microcode\Generated\DataTypes\eth_rx_sge.h"
-#include "Microcode\Generated\DataTypes\eth_rx_cqe_next_page.h"
-#endif
-
-
/* Ethernet Ring parameters */
#define X_ETH_LOCAL_RING_SIZE 13
#define FIRST_BD_IN_PKT 0
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f4a07fbaed05..4bed52ba300d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_file_hdr.h: FW binary file header structure.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index 3e30c8642c26..5d71b7d43237 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -34,9 +34,10 @@ struct license_key {
};
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_MAX 2
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_MAX 2
+#define NVM_PATH_MAX 2
/****************************************************************************
* Shared HW configuration *
@@ -618,12 +619,6 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
#define PORT_HW_CFG_ENABLE_CMS_DISABLED 0x00000000
#define PORT_HW_CFG_ENABLE_CMS_ENABLED 0x00200000
- /* Enable RJ45 magjack pair swapping on 10GBase-T PHY, 84833 only */
- #define PORT_HW_CFG_RJ45_PR_SWP_MASK 0x00400000
- #define PORT_HW_CFG_RJ45_PR_SWP_SHIFT 22
- #define PORT_HW_CFG_RJ45_PR_SWP_DISABLED 0x00000000
- #define PORT_HW_CFG_RJ45_PR_SWP_ENABLED 0x00400000
-
/* Determine the Serdes electrical interface */
#define PORT_HW_CFG_NET_SERDES_IF_MASK 0x0F000000
#define PORT_HW_CFG_NET_SERDES_IF_SHIFT 24
@@ -898,11 +893,6 @@ struct port_feat_cfg { /* port 0: 0x454 port 1: 0x4c8 */
#define PORT_FEAT_CFG_DCBX_DISABLED 0x00000000
#define PORT_FEAT_CFG_DCBX_ENABLED 0x00000100
- #define PORT_FEAT_CFG_AUTOGREEN_MASK 0x00000200
- #define PORT_FEAT_CFG_AUTOGREEN_SHIFT 9
- #define PORT_FEAT_CFG_AUTOGREEN_DISABLED 0x00000000
- #define PORT_FEAT_CFG_AUTOGREEN_ENABLED 0x00000200
-
#define PORT_FEATURE_EN_SIZE_MASK 0x0f000000
#define PORT_FEATURE_EN_SIZE_SHIFT 24
#define PORT_FEATURE_WOL_ENABLED 0x01000000
@@ -1139,8 +1129,7 @@ struct shm_dev_info { /* size */
#define FW_ACK_NUM_OF_POLL (FW_ACK_TIME_OUT_MS/FW_ACK_POLL_TIME_MS)
-/* LED Blink rate that will achieve ~15.9Hz */
-#define LED_BLINK_RATE_VAL 480
+#define MFW_TRACE_SIGNATURE 0x54524342
/****************************************************************************
* Driver <-> FW Mailbox *
@@ -1407,7 +1396,7 @@ struct port_mf_cfg {
#define PORT_MF_CFG_E1HOV_TAG_SHIFT 0
#define PORT_MF_CFG_E1HOV_TAG_DEFAULT PORT_MF_CFG_E1HOV_TAG_MASK
- u32 reserved[3];
+ u32 reserved[1];
};
@@ -1493,7 +1482,8 @@ struct func_ext_cfg {
struct mf_cfg {
struct shared_mf_cfg shared_mf_config; /* 0x4 */
- struct port_mf_cfg port_mf_config[PORT_MAX]; /* 0x10 * 2 = 0x20 */
+ /* 0x8*2*2=0x20 */
+ struct port_mf_cfg port_mf_config[NVM_PATH_MAX][PORT_MAX];
/* for all chips, there are 8 mf functions */
struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; /* 0x18 * 8 = 0xc0 */
/*
@@ -1845,6 +1835,9 @@ struct lldp_local_mib {
#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
#define DCBX_LOCAL_APP_MISMATCH 0x00000020
#define DCBX_REMOTE_MIB_ERROR 0x00000040
+ #define DCBX_REMOTE_ETS_TLV_NOT_FOUND 0x00000080
+ #define DCBX_REMOTE_PFC_TLV_NOT_FOUND 0x00000100
+ #define DCBX_REMOTE_APP_TLV_NOT_FOUND 0x00000200
struct dcbx_features features;
u32 suffix_seq_num;
};
@@ -2002,6 +1995,7 @@ struct shmem2_region {
#define DRV_INFO_CONTROL_VER_SHIFT 0
#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
+ u32 ibft_host_addr; /* initialized by option ROM */
};
@@ -2700,8 +2694,8 @@ union drv_info_to_mcp {
struct iscsi_stats_info iscsi_stat;
};
#define BCM_5710_FW_MAJOR_VERSION 7
-#define BCM_5710_FW_MINOR_VERSION 0
-#define BCM_5710_FW_REVISION_VERSION 29
+#define BCM_5710_FW_MINOR_VERSION 2
+#define BCM_5710_FW_REVISION_VERSION 16
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -3308,8 +3302,10 @@ struct client_init_rx_data {
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV4_SHIFT 0
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6 (0x1<<1)
#define CLIENT_INIT_RX_DATA_TPA_EN_IPV6_SHIFT 1
-#define CLIENT_INIT_RX_DATA_RESERVED5 (0x3F<<2)
-#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 2
+#define CLIENT_INIT_RX_DATA_TPA_MODE (0x1<<2)
+#define CLIENT_INIT_RX_DATA_TPA_MODE_SHIFT 2
+#define CLIENT_INIT_RX_DATA_RESERVED5 (0x1F<<3)
+#define CLIENT_INIT_RX_DATA_RESERVED5_SHIFT 3
u8 vmqueue_mode_en_flg;
u8 extra_data_over_sgl_en_flg;
u8 cache_line_alignment_log_size;
@@ -3324,7 +3320,7 @@ struct client_init_rx_data {
u8 outer_vlan_removal_enable_flg;
u8 status_block_id;
u8 rx_sb_index_number;
- u8 reserved0;
+ u8 dont_verify_rings_pause_thr_flg;
u8 max_tpa_queues;
u8 silent_vlan_removal_flg;
__le16 max_bytes_on_bd;
@@ -3657,7 +3653,7 @@ struct eth_fast_path_rx_cqe {
u8 placement_offset;
__le32 rss_hash_result;
__le16 vlan_tag;
- __le16 pkt_len;
+ __le16 pkt_len_or_gro_seg_len;
__le16 len_on_bd;
struct parsing_flags pars_flags;
union eth_sgl_or_raw_data sgl_or_raw_data;
@@ -4215,6 +4211,15 @@ enum set_mac_action_type {
/*
+ * Ethernet TPA Modes
+ */
+enum tpa_mode {
+ TPA_LRO,
+ TPA_GRO,
+ MAX_TPA_MODE};
+
+
+/*
* tpa update ramrod data
*/
struct tpa_update_ramrod_data {
@@ -4224,7 +4229,8 @@ struct tpa_update_ramrod_data {
u8 max_tpa_queues;
u8 max_sges_for_packet;
u8 complete_on_both_clients;
- __le16 reserved1;
+ u8 dont_verify_rings_pause_thr_flg;
+ u8 tpa_mode;
__le16 sge_buff_size;
__le16 max_agg_size;
__le32 sge_page_base_lo;
@@ -4447,13 +4453,13 @@ enum common_spqe_cmd_id {
RAMROD_CMD_ID_COMMON_UNUSED,
RAMROD_CMD_ID_COMMON_FUNCTION_START,
RAMROD_CMD_ID_COMMON_FUNCTION_STOP,
+ RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE,
RAMROD_CMD_ID_COMMON_CFC_DEL,
RAMROD_CMD_ID_COMMON_CFC_DEL_WB,
RAMROD_CMD_ID_COMMON_STAT_QUERY,
RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
RAMROD_CMD_ID_COMMON_START_TRAFFIC,
RAMROD_CMD_ID_COMMON_RESERVED1,
- RAMROD_CMD_ID_COMMON_RESERVED2,
MAX_COMMON_SPQE_CMD_ID
};
@@ -4733,8 +4739,8 @@ enum event_ring_opcode {
EVENT_RING_OPCODE_MALICIOUS_VF,
EVENT_RING_OPCODE_FORWARD_SETUP,
EVENT_RING_OPCODE_RSS_UPDATE_RULES,
+ EVENT_RING_OPCODE_FUNCTION_UPDATE,
EVENT_RING_OPCODE_RESERVED1,
- EVENT_RING_OPCODE_RESERVED2,
EVENT_RING_OPCODE_SET_MAC,
EVENT_RING_OPCODE_CLASSIFICATION_RULES,
EVENT_RING_OPCODE_FILTERS_RULES,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index 4d748e77d1ac..29f5c3cca31a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -1,7 +1,7 @@
/* bnx2x_init.h: Broadcom Everest network driver.
 * Structures and macros needed during the initialization.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 7ec1724753ad..fe66d902dc62 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -69,12 +69,12 @@ static void bnx2x_write_big_buf(struct bnx2x *bp, u32 addr, u32 len,
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
- else if (wb)
- /*
- * Wide bus registers with no dmae need to be written
- * using indirect write.
- */
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (wb && CHIP_IS_E1(bp))
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
else
bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
@@ -99,8 +99,14 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
{
if (bp->dmae_ready)
bnx2x_write_dmae_phys_len(bp, GUNZIP_PHYS(bp), addr, len);
- else
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (CHIP_IS_E1(bp))
bnx2x_init_ind_wr(bp, addr, GUNZIP_BUF(bp), len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, GUNZIP_BUF(bp), len);
}
static void bnx2x_init_wr_64(struct bnx2x *bp, u32 addr,
@@ -177,8 +183,14 @@ static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr,
{
if (bp->dmae_ready)
VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
- else
+
+ /* in E1 chips BIOS initiated ZLR may interrupt widebus writes */
+ else if (CHIP_IS_E1(bp))
bnx2x_init_ind_wr(bp, addr, data, len);
+
+ /* in later chips PXP root complex handles BIOS ZLR w/o interrupting */
+ else
+ bnx2x_init_str_wr(bp, addr, data, len);
}
static void bnx2x_wr_64(struct bnx2x *bp, u32 reg, u32 val_lo,
@@ -840,25 +852,15 @@ static void bnx2x_qm_init_cid_count(struct bnx2x *bp, int qm_cid_count,
}
}
-static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count)
+static void bnx2x_qm_set_ptr_table(struct bnx2x *bp, int qm_cid_count,
+ u32 base_reg, u32 reg)
{
int i;
- u32 wb_data[2];
-
- wb_data[0] = wb_data[1] = 0;
-
+ u32 wb_data[2] = {0, 0};
for (i = 0; i < 4 * QM_QUEUES_PER_FUNC; i++) {
- REG_WR(bp, QM_REG_BASEADDR + i*4,
+ REG_WR(bp, base_reg + i*4,
qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
- bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8,
- wb_data, 2);
-
- if (CHIP_IS_E1H(bp)) {
- REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4,
- qm_cid_count * 4 * (i % QM_QUEUES_PER_FUNC));
- bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
- wb_data, 2);
- }
+ bnx2x_init_wr_wb(bp, reg + i*8, wb_data, 2);
}
}
@@ -873,7 +875,12 @@ static void bnx2x_qm_init_ptr_table(struct bnx2x *bp, int qm_cid_count,
case INITOP_INIT:
/* set in the init-value array */
case INITOP_SET:
- bnx2x_qm_set_ptr_table(bp, qm_cid_count);
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+ QM_REG_BASEADDR, QM_REG_PTRTBL);
+ if (CHIP_IS_E1H(bp))
+ bnx2x_qm_set_ptr_table(bp, qm_cid_count,
+ QM_REG_BASEADDR_EXT_A,
+ QM_REG_PTRTBL_EXT_A);
break;
case INITOP_CLEAR:
break;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 2091e5dbbcdd..beb4cdbdb6e1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -1,4 +1,4 @@
-/* Copyright 2008-2011 Broadcom Corporation
+/* Copyright 2008-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -1612,6 +1612,9 @@ static void bnx2x_umac_enable(struct link_params *params,
if (!(vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
val |= UMAC_COMMAND_CONFIG_REG_PAUSE_IGNORE;
+ if (vars->duplex == DUPLEX_HALF)
+ val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;
+
REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);
udelay(50);
@@ -3635,45 +3638,50 @@ static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result)
vars->link_status |= LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE;
}
-static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
- struct link_params *params,
- struct link_vars *vars)
+static void bnx2x_ext_phy_update_adv_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
{
- struct bnx2x *bp = params->bp;
u16 ld_pause; /* local */
u16 lp_pause; /* link partner */
u16 pause_result;
- u8 ret = 0;
- /* read twice */
+ struct bnx2x *bp = params->bp;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
+ bnx2x_cl22_read(bp, phy, 0x4, &ld_pause);
+ bnx2x_cl22_read(bp, phy, 0x5, &lp_pause);
+ } else {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_ADV_PAUSE, &ld_pause);
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
+ }
+ pause_result = (ld_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result);
+ bnx2x_pause_resolve(vars, pause_result);
+}
+static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u8 ret = 0;
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
-
- if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+ /* Update the advertised flow-control of LD/LP in AN */
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_ext_phy_update_adv_fc(phy, params, vars);
+ /* But set the flow-control result as the requested one */
vars->flow_ctrl = phy->req_flow_ctrl;
- else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ } else if (phy->req_line_speed != SPEED_AUTO_NEG)
vars->flow_ctrl = params->req_fc_auto_adv;
else if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
ret = 1;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM54618SE) {
- bnx2x_cl22_read(bp, phy,
- 0x4, &ld_pause);
- bnx2x_cl22_read(bp, phy,
- 0x5, &lp_pause);
- } else {
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_ADV_PAUSE, &ld_pause);
- bnx2x_cl45_read(bp, phy,
- MDIO_AN_DEVAD,
- MDIO_AN_REG_LP_AUTO_NEG, &lp_pause);
- }
- pause_result = (ld_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
- pause_result |= (lp_pause &
- MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
- DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
- pause_result);
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_ext_phy_update_adv_fc(phy, params, vars);
}
return ret;
}
@@ -3785,7 +3793,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Enable Autoneg */
bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
- MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1000);
+ MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200);
}
@@ -5216,22 +5224,69 @@ static int bnx2x_direct_parallel_detect_used(struct bnx2x_phy *phy,
return 0;
}
+static void bnx2x_update_adv_fc(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars,
+ u32 gp_status)
+{
+ u16 ld_pause; /* local driver */
+ u16 lp_pause; /* link partner */
+ u16 pause_result;
+ struct bnx2x *bp = params->bp;
+ if ((gp_status &
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
+ (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
+ MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
+
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_ADV1,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK) >> 8;
+ pause_result |= (lp_pause &
+ MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK) >> 10;
+ DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n", pause_result);
+ } else {
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
+ &ld_pause);
+ CL22_RD_OVER_CL45(bp, phy,
+ MDIO_REG_BANK_COMBO_IEEE0,
+ MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
+ &lp_pause);
+ pause_result = (ld_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
+ pause_result |= (lp_pause &
+ MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
+ DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result);
+ }
+ bnx2x_pause_resolve(vars, pause_result);
+
+}
+
static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars,
u32 gp_status)
{
struct bnx2x *bp = params->bp;
- u16 ld_pause; /* local driver */
- u16 lp_pause; /* link partner */
- u16 pause_result;
-
vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE;
/* resolve from gp_status in case of AN complete and not sgmii */
- if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO)
+ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) {
+ /* Update the advertised flow-control of LD/LP in AN */
+ if (phy->req_line_speed == SPEED_AUTO_NEG)
+ bnx2x_update_adv_fc(phy, params, vars, gp_status);
+ /* But set the flow-control result as the requested one */
vars->flow_ctrl = phy->req_flow_ctrl;
- else if (phy->req_line_speed != SPEED_AUTO_NEG)
+ } else if (phy->req_line_speed != SPEED_AUTO_NEG)
vars->flow_ctrl = params->req_fc_auto_adv;
else if ((gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
(!(vars->phy_flags & PHY_SGMII_FLAG))) {
@@ -5239,45 +5294,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy,
vars->flow_ctrl = params->req_fc_auto_adv;
return;
}
- if ((gp_status &
- (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
- MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
- (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
- MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
-
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_ADV1,
- &ld_pause);
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_CL73_IEEEB1,
- MDIO_CL73_IEEEB1_AN_LP_ADV1,
- &lp_pause);
- pause_result = (ld_pause &
- MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
- >> 8;
- pause_result |= (lp_pause &
- MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
- >> 10;
- DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
- pause_result);
- } else {
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
- &ld_pause);
- CL22_RD_OVER_CL45(bp, phy,
- MDIO_REG_BANK_COMBO_IEEE0,
- MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
- &lp_pause);
- pause_result = (ld_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
- pause_result |= (lp_pause &
- MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
- DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
- pause_result);
- }
- bnx2x_pause_resolve(vars, pause_result);
+ bnx2x_update_adv_fc(phy, params, vars, gp_status);
}
DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", vars->flow_ctrl);
}
@@ -5496,6 +5513,33 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
}
}
+ /* Read LP advertised speeds */
+ if (SINGLE_MEDIA_DIRECT(params) &&
+ (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE)) {
+ u16 val;
+
+ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_CL73_IEEEB1,
+ MDIO_CL73_IEEEB1_AN_LP_ADV2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G,
+ MDIO_OVER_1G_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x link_status 0x%x\n",
vars->duplex, vars->flow_ctrl, vars->link_status);
return rc;
@@ -5553,6 +5597,34 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
}
}
+ if ((vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) &&
+ SINGLE_MEDIA_DIRECT(params)) {
+ u16 val;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val);
+
+ if (val & MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val & (MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 |
+ MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_DIGITAL3_LP_UP1, &val);
+
+ if (val & MDIO_OVER_1G_UP1_2_5G)
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE;
+ if (val & (MDIO_OVER_1G_UP1_10G | MDIO_OVER_1G_UP1_10GH))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+
+ }
+
+
if (lane < 2) {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_GP2_STATUS_GP_2_2, &gp_speed);
@@ -5970,8 +6042,8 @@ static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
return 0;
}
-int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len)
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ u16 len)
{
struct bnx2x *bp;
u32 spirom_ver = 0;
@@ -6418,7 +6490,9 @@ static int bnx2x_update_link_down(struct link_params *params,
LINK_STATUS_AUTO_NEGOTIATE_COMPLETE |
LINK_STATUS_RX_FLOW_CONTROL_FLAG_MASK |
LINK_STATUS_TX_FLOW_CONTROL_FLAG_MASK |
- LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK);
+ LINK_STATUS_PARALLEL_DETECTION_FLAG_MASK |
+ LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE |
+ LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE);
vars->line_speed = 0;
bnx2x_update_mng(params, vars->link_status);
@@ -7367,6 +7441,19 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy,
bnx2x_8073_resolve_fc(phy, params, vars);
vars->duplex = DUPLEX_FULL;
}
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_LP_AUTO_NEG2, &val1);
+
+ if (val1 & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ if (val1 & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
+ }
+
return link_up;
}
@@ -9405,13 +9492,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
{
struct bnx2x *bp = params->bp;
u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
- u16 tmp_req_line_speed;
- tmp_req_line_speed = phy->req_line_speed;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
- if (phy->req_line_speed == SPEED_10000)
- phy->req_line_speed = SPEED_AUTO_NEG;
- } else {
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -9555,8 +9637,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- phy->req_line_speed = tmp_req_line_speed;
-
return 0;
}
@@ -9948,6 +10028,42 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n",
vars->line_speed);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ /* Read LP advertised speeds */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_CL37_FC_LP, &val);
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1<<6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1<<8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1<<9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_1000T_STATUS, &val);
+
+ if (val & (1<<10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_MASTER_STATUS, &val);
+
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
return link_up;
@@ -10571,6 +10687,35 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy,
}
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ if (vars->link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
+ /* report LP advertised speeds */
+ bnx2x_cl22_read(bp, phy, 0x5, &val);
+
+ if (val & (1<<5))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10THD_CAPABLE;
+ if (val & (1<<6))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE;
+ if (val & (1<<7))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE;
+ if (val & (1<<8))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE;
+ if (val & (1<<9))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_100T4_CAPABLE;
+
+ bnx2x_cl22_read(bp, phy, 0xa, &val);
+ if (val & (1<<10))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE;
+ if (val & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE;
+ }
}
return link_up;
}
@@ -10699,6 +10844,11 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy,
val2, (val2 & (1<<14)));
bnx2x_ext_phy_10G_an_resolve(bp, phy, vars);
bnx2x_ext_phy_resolve_fc(phy, params, vars);
+
+ /* read LP advertised speeds */
+ if (val2 & (1<<11))
+ vars->link_status |=
+ LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE;
}
return link_up;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index e02a68a7fb85..7ba557a610da 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -1,4 +1,4 @@
-/* Copyright 2008-2011 Broadcom Corporation
+/* Copyright 2008-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -337,8 +337,8 @@ int bnx2x_phy_write(struct link_params *params, u8 phy_addr,
void bnx2x_link_status_update(struct link_params *input,
struct link_vars *output);
/* returns string representing the fw_version of the external phy */
-int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
- u8 *version, u16 len);
+int bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 *version,
+ u16 len);
/* Set/Unset the led
Basically, the CLC takes care of the led for the link, but in case one needs
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b69f8762b339..f7f9aa807264 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -375,9 +375,6 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
-
- DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
- idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
}
REG_WR(bp, dmae_reg_go_c[idx], 1);
}
@@ -442,10 +439,6 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
int rc = 0;
- DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
/*
* Lock the dmae channel. Disable BHs to prevent a dead-lock
* as long as this code is called both from syscall context and
@@ -462,9 +455,10 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
/* wait for completion */
udelay(5);
while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
- DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
- if (!cnt) {
+ if (!cnt ||
+ (bp->recovery_state != BNX2X_RECOVERY_DONE &&
+ bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
BNX2X_ERR("DMAE timeout!\n");
rc = DMAE_TIMEOUT;
goto unlock;
@@ -477,10 +471,6 @@ static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
rc = DMAE_PCI_ERROR;
}
- DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
-
unlock:
spin_unlock_bh(&bp->dmae_lock);
return rc;
@@ -494,9 +484,10 @@ void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
if (!bp->dmae_ready) {
u32 *data = bnx2x_sp(bp, wb_data[0]);
- DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
- " using indirect\n", dst_addr, len32);
- bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ if (CHIP_IS_E1(bp))
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
+ else
+ bnx2x_init_str_wr(bp, dst_addr, data, len32);
return;
}
@@ -524,10 +515,13 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
u32 *data = bnx2x_sp(bp, wb_data[0]);
int i;
- DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
- " using indirect\n", src_addr, len32);
- for (i = 0; i < len32; i++)
- data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ if (CHIP_IS_E1(bp))
+ for (i = 0; i < len32; i++)
+ data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
+ else
+ for (i = 0; i < len32; i++)
+ data[i] = REG_RD(bp, src_addr + i*4);
+
return;
}
@@ -609,8 +603,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
XSTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -637,8 +630,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
TSTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -665,8 +657,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
CSTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -693,8 +684,7 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
USTORM_ASSERT_LIST_OFFSET(i) + 12);
if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
- BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
- " 0x%08x 0x%08x 0x%08x\n",
+ BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
i, row3, row2, row1, row0);
rc++;
} else {
@@ -723,13 +713,23 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
- printk("%s" "MCP PC at 0x%x\n", lvl, val);
+ BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
if (BP_PATH(bp) == 0)
trace_shmem_base = bp->common.shmem_base;
else
trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
- addr = trace_shmem_base - 0x0800 + 4;
+ addr = trace_shmem_base - 0x800;
+
+ /* validate TRCB signature */
+ mark = REG_RD(bp, addr);
+ if (mark != MFW_TRACE_SIGNATURE) {
+ BNX2X_ERR("Trace buffer signature is missing.");
+ return;
+ }
+
+ /* read cyclic buffer pointer */
+ addr += 4;
mark = REG_RD(bp, addr);
mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+ ((mark + 0x3) & ~0x3) - 0x08000000;
@@ -768,14 +768,14 @@ void bnx2x_panic_dump(struct bnx2x *bp)
#endif
bp->stats_state = STATS_STATE_DISABLED;
+ bp->eth_stats.unrecoverable_error++;
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
BNX2X_ERR("begin crash dump -----------------\n");
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
- " spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
bp->def_idx, bp->def_att_idx, bp->attn_state,
bp->spq_prod_idx, bp->stats_counter);
BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
@@ -822,14 +822,11 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct bnx2x_fp_txdata txdata;
/* Rx */
- BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
- " rx_comp_prod(0x%x)"
- " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
+ BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
i, fp->rx_bd_prod, fp->rx_bd_cons,
fp->rx_comp_prod,
fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
- BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
- " fp_hc_idx(0x%x)\n",
+ BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
fp->rx_sge_prod, fp->last_max_sge,
le16_to_cpu(fp->fp_hc_idx));
@@ -837,9 +834,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for_each_cos_in_tx_queue(fp, cos)
{
txdata = fp->txdata[cos];
- BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
- " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
- " *tx_cons_sb(0x%x)\n",
+ BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
i, txdata.tx_pkt_prod,
txdata.tx_pkt_cons, txdata.tx_bd_prod,
txdata.tx_bd_cons,
@@ -881,9 +876,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
j * sizeof(u32));
if (!CHIP_IS_E1x(bp)) {
- pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
- "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
- "state(0x%x)\n",
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
sb_data_e2.common.p_func.pf_id,
sb_data_e2.common.p_func.vf_id,
sb_data_e2.common.p_func.vf_valid,
@@ -891,9 +884,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
sb_data_e2.common.same_igu_sb_1b,
sb_data_e2.common.state);
} else {
- pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) "
- "vnic_id(0x%x) same_igu_sb_1b(0x%x) "
- "state(0x%x)\n",
+ pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
sb_data_e1x.common.p_func.pf_id,
sb_data_e1x.common.p_func.vf_id,
sb_data_e1x.common.p_func.vf_valid,
@@ -904,21 +895,17 @@ void bnx2x_panic_dump(struct bnx2x *bp)
/* SB_SMs data */
for (j = 0; j < HC_SB_MAX_SM; j++) {
- pr_cont("SM[%d] __flags (0x%x) "
- "igu_sb_id (0x%x) igu_seg_id(0x%x) "
- "time_to_expire (0x%x) "
- "timer_value(0x%x)\n", j,
- hc_sm_p[j].__flags,
- hc_sm_p[j].igu_sb_id,
- hc_sm_p[j].igu_seg_id,
- hc_sm_p[j].time_to_expire,
- hc_sm_p[j].timer_value);
+ pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
+ j, hc_sm_p[j].__flags,
+ hc_sm_p[j].igu_sb_id,
+ hc_sm_p[j].igu_seg_id,
+ hc_sm_p[j].time_to_expire,
+ hc_sm_p[j].timer_value);
}
 /* Indices data */
for (j = 0; j < loop; j++) {
- pr_cont("INDEX[%d] flags (0x%x) "
- "timeout (0x%x)\n", j,
+ pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
hc_index_p[j].flags,
hc_index_p[j].timeout);
}
@@ -972,8 +959,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct sw_tx_bd *sw_bd =
&txdata->tx_buf_ring[j];
- BNX2X_ERR("fp%d: txdata %d, "
- "packet[%x]=[%p,%x]\n",
+ BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
i, cos, j, sw_bd->skb,
sw_bd->first_bd);
}
@@ -983,8 +969,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
for (j = start; j != end; j = TX_BD(j + 1)) {
u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
- BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]="
- "[%x:%x:%x:%x]\n",
+ BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
i, cos, j, tx_bd[0], tx_bd[1],
tx_bd[2], tx_bd[3]);
}
@@ -1003,8 +988,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
* initialization.
*/
 #define FLR_WAIT_USEC 10000 /* 10 milliseconds */
-#define FLR_WAIT_INTERAVAL 50 /* usec */
-#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERAVAL) /* 200 */
+#define FLR_WAIT_INTERVAL 50 /* usec */
+#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
struct pbf_pN_buf_regs {
int pN;
@@ -1037,7 +1022,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
(init_crd - crd_start))) {
if (cur_cnt--) {
- udelay(FLR_WAIT_INTERAVAL);
+ udelay(FLR_WAIT_INTERVAL);
crd = REG_RD(bp, regs->crd);
crd_freed = REG_RD(bp, regs->crd_freed);
} else {
@@ -1051,7 +1036,7 @@ static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
}
}
DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
- poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+ poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
@@ -1069,7 +1054,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
if (cur_cnt--) {
- udelay(FLR_WAIT_INTERAVAL);
+ udelay(FLR_WAIT_INTERVAL);
occup = REG_RD(bp, regs->lines_occup);
freed = REG_RD(bp, regs->lines_freed);
} else {
@@ -1083,7 +1068,7 @@ static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
}
}
DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
- poll_count-cur_cnt, FLR_WAIT_INTERAVAL, regs->pN);
+ poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
}
static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
@@ -1093,7 +1078,7 @@ static inline u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
u32 val;
while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
- udelay(FLR_WAIT_INTERAVAL);
+ udelay(FLR_WAIT_INTERVAL);
return val;
}
@@ -1206,7 +1191,7 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
int ret = 0;
if (REG_RD(bp, comp_addr)) {
- BNX2X_ERR("Cleanup complete is not 0\n");
+ BNX2X_ERR("Cleanup complete was not 0 before sending\n");
return 1;
}
@@ -1215,11 +1200,13 @@ static inline int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func,
op_gen.command |= OP_GEN_AGG_VECT(clnup_func);
op_gen.command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
- DP(BNX2X_MSG_SP, "FW Final cleanup\n");
+ DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen.command);
if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
BNX2X_ERR("FW final cleanup did not succeed\n");
+ DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
+ (REG_RD(bp, comp_addr)));
ret = 1;
}
/* Zero completion for nxt FLR */
@@ -1330,6 +1317,7 @@ static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Poll HW usage counters */
+ DP(BNX2X_MSG_SP, "Polling usage counters\n");
if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
return -EBUSY;
@@ -1388,8 +1376,8 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
if (!CHIP_IS_E1(bp)) {
- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
- val, port, addr);
+ DP(NETIF_MSG_IFUP,
+ "write %x to HC %d (addr 0x%x)\n", val, port, addr);
REG_WR(bp, addr, val);
@@ -1400,8 +1388,9 @@ static void bnx2x_hc_int_enable(struct bnx2x *bp)
if (CHIP_IS_E1(bp))
REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
- val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
+ DP(NETIF_MSG_IFUP,
+ "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
+ (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
REG_WR(bp, addr, val);
/*
@@ -1456,7 +1445,7 @@ static void bnx2x_igu_int_enable(struct bnx2x *bp)
IGU_PF_CONF_SINGLE_ISR_EN);
}
- DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
+ DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
@@ -1514,7 +1503,8 @@ static void bnx2x_hc_int_disable(struct bnx2x *bp)
HC_CONFIG_0_REG_INT_LINE_EN_0 |
HC_CONFIG_0_REG_ATTN_BIT_EN_0);
- DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
+ DP(NETIF_MSG_IFDOWN,
+ "write %x to HC %d (addr 0x%x)\n",
val, port, addr);
/* flush all outstanding writes */
@@ -1533,7 +1523,7 @@ static void bnx2x_igu_int_disable(struct bnx2x *bp)
IGU_PF_CONF_INT_LINE_EN |
IGU_PF_CONF_ATTN_BIT_EN);
- DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
+ DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
/* flush all outstanding writes */
mmiowb();
@@ -1592,11 +1582,12 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
- DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "Trying to take a lock on resource %d\n", resource);
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DP(NETIF_MSG_HW,
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
"resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return false;
@@ -1614,7 +1605,8 @@ static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
if (lock_status & resource_bit)
return true;
- DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
+ "Failed to get a lock on resource %d\n", resource);
return false;
}
@@ -1675,7 +1667,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
break;
case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
- DP(NETIF_MSG_IFUP, "got MULTI[%d] tx-only setup ramrod\n", cid);
+ DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
break;
@@ -1817,8 +1809,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DP(NETIF_MSG_HW,
- "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return -EINVAL;
}
@@ -1833,7 +1824,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is not already taken */
lock_status = REG_RD(bp, hw_lock_control_reg);
if (lock_status & resource_bit) {
- DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
lock_status, resource_bit);
return -EEXIST;
}
@@ -1848,7 +1839,7 @@ int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
msleep(5);
}
- DP(NETIF_MSG_HW, "Timeout\n");
+ BNX2X_ERR("Timeout\n");
return -EAGAIN;
}
@@ -1864,12 +1855,9 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
int func = BP_FUNC(bp);
u32 hw_lock_control_reg;
- DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
-
/* Validating that the resource is within range */
if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
- DP(NETIF_MSG_HW,
- "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+ BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
resource, HW_LOCK_MAX_RESOURCE_VALUE);
return -EINVAL;
}
@@ -1884,7 +1872,7 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
/* Validating that the resource is currently taken */
lock_status = REG_RD(bp, hw_lock_control_reg);
if (!(lock_status & resource_bit)) {
- DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
+ BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
lock_status, resource_bit);
return -EFAULT;
}
@@ -1945,7 +1933,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
switch (mode) {
case MISC_REGISTERS_GPIO_OUTPUT_LOW:
- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> output low\n",
gpio_num, gpio_shift);
/* clear FLOAT and set CLR */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -1953,7 +1942,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
break;
case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> output high\n",
gpio_num, gpio_shift);
/* clear FLOAT and set SET */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -1961,7 +1951,8 @@ int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
break;
case MISC_REGISTERS_GPIO_INPUT_HI_Z:
- DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
+ DP(NETIF_MSG_LINK,
+ "Set GPIO %d (shift %d) -> input\n",
gpio_num, gpio_shift);
/* set FLOAT */
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
@@ -2045,16 +2036,18 @@ int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
switch (mode) {
case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
- DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
- "output low\n", gpio_num, gpio_shift);
+ DP(NETIF_MSG_LINK,
+ "Clear GPIO INT %d (shift %d) -> output low\n",
+ gpio_num, gpio_shift);
/* clear SET and set CLR */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
break;
case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
- DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
- "output high\n", gpio_num, gpio_shift);
+ DP(NETIF_MSG_LINK,
+ "Set GPIO INT %d (shift %d) -> output high\n",
+ gpio_num, gpio_shift);
/* clear CLR and set SET */
gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
@@ -2087,21 +2080,21 @@ static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
switch (mode) {
case MISC_REGISTERS_SPIO_OUTPUT_LOW:
- DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
+ DP(NETIF_MSG_HW, "Set SPIO %d -> output low\n", spio_num);
/* clear FLOAT and set CLR */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
break;
case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
- DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
+ DP(NETIF_MSG_HW, "Set SPIO %d -> output high\n", spio_num);
/* clear FLOAT and set SET */
spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
break;
case MISC_REGISTERS_SPIO_INPUT_HI_Z:
- DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
+ DP(NETIF_MSG_HW, "Set SPIO %d -> input\n", spio_num);
/* set FLOAT */
spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
break;
@@ -2543,7 +2536,7 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
u32 val;
bp->port.pmf = 1;
- DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
+ DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
/*
* We need the mb() to ensure the ordering between the writing to
@@ -2688,6 +2681,8 @@ static inline unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
if (!fp->disable_tpa) {
__set_bit(BNX2X_Q_FLG_TPA, &flags);
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
+ if (fp->mode == TPA_MODE_GRO)
+ __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
}
if (leading) {
@@ -2784,6 +2779,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
rxq_init->sge_buf_sz = sge_sz;
rxq_init->max_sges_pkt = max_sge;
rxq_init->rss_engine_id = BP_FUNC(bp);
+ rxq_init->mcast_engine_id = BP_FUNC(bp);
 /* Maximum number of simultaneous TPA aggregations for this Queue.
*
@@ -3121,12 +3117,12 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
* locks
*/
if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
- DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
+ DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
bp->flags |= MF_FUNC_DIS;
bnx2x_e1h_disable(bp);
} else {
- DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
+ DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
bp->flags &= ~MF_FUNC_DIS;
bnx2x_e1h_enable(bp);
@@ -3153,7 +3149,7 @@ static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
if (bp->spq_prod_bd == bp->spq_last_bd) {
bp->spq_prod_bd = bp->spq;
bp->spq_prod_idx = 0;
- DP(NETIF_MSG_TIMER, "end of spq\n");
+ DP(BNX2X_MSG_SP, "end of spq\n");
} else {
bp->spq_prod_bd++;
bp->spq_prod_idx++;
@@ -3222,8 +3218,10 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't post SP when there is panic\n");
return -EIO;
+ }
#endif
spin_lock_bh(&bp->spq_lock);
@@ -3270,9 +3268,8 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
atomic_dec(&bp->cq_spq_left);
- DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
- "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) "
- "type(0x%x) left (CQ, EQ) (%x,%x)\n",
+ DP(BNX2X_MSG_SP,
+ "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
(u32)(U64_LO(bp->spq_mapping) +
(void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
@@ -3464,9 +3461,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
ext_phy_config);
/* log the failure */
- netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
- " the driver to shutdown the card to prevent permanent"
- " damage. Please contact OEM Support for assistance\n");
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+ "Please contact OEM Support for assistance\n");
/*
 * Schedule device reset (unload)
@@ -3709,11 +3705,11 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
*/
void bnx2x_set_reset_global(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+ u32 val;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3723,11 +3719,11 @@ void bnx2x_set_reset_global(struct bnx2x *bp)
*/
static inline void bnx2x_clear_reset_global(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
-
+ u32 val;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3750,15 +3746,17 @@ static inline bool bnx2x_reset_is_global(struct bnx2x *bp)
*/
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val;
u32 bit = BP_PATH(bp) ?
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
/* Clear the bit */
val &= ~bit;
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
+
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3768,15 +3766,16 @@ static inline void bnx2x_set_reset_done(struct bnx2x *bp)
*/
void bnx2x_set_reset_in_progress(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val;
u32 bit = BP_PATH(bp) ?
BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
/* Set the bit */
val |= bit;
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/*
@@ -3794,25 +3793,28 @@ bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
}
/*
- * Increment the load counter for the current engine.
+ * set pf load for the current pf.
*
* should be run under rtnl lock
*/
-void bnx2x_inc_load_cnt(struct bnx2x *bp)
+void bnx2x_set_pf_load(struct bnx2x *bp)
{
- u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val1, val;
u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK;
u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
BNX2X_PATH0_LOAD_CNT_SHIFT;
- DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+
+ DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
/* get the current counter value */
val1 = (val & mask) >> shift;
- /* increment... */
- val1++;
+ /* set bit of that PF */
+ val1 |= (1 << bp->pf_num);
/* clear the old value */
val &= ~mask;
@@ -3821,34 +3823,35 @@ void bnx2x_inc_load_cnt(struct bnx2x *bp)
val |= ((val1 << shift) & mask);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
/**
- * bnx2x_dec_load_cnt - decrement the load counter
+ * bnx2x_clear_pf_load - clear pf load mark
*
* @bp: driver handle
*
* Should be run under rtnl lock.
* Decrements the load counter for the current engine. Returns
- * the new counter value.
+ * whether other functions are still loaded
*/
-u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+bool bnx2x_clear_pf_load(struct bnx2x *bp)
{
- u32 val1, val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val1, val;
u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK;
u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
BNX2X_PATH0_LOAD_CNT_SHIFT;
- DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
/* get the current counter value */
val1 = (val & mask) >> shift;
- /* decrement... */
- val1--;
+ /* clear bit of that PF */
+ val1 &= ~(1 << bp->pf_num);
/* clear the old value */
val &= ~mask;
@@ -3857,18 +3860,16 @@ u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
val |= ((val1 << shift) & mask);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
- barrier();
- mmiowb();
-
- return val1;
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ return val1 != 0;
}
/*
- * Read the load counter for the current engine.
+ * Read the load status for the current engine.
*
* should be run under rtnl lock
*/
-static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
+static inline bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
{
u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
BNX2X_PATH0_LOAD_CNT_MASK);
@@ -3876,27 +3877,28 @@ static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp, int engine)
BNX2X_PATH0_LOAD_CNT_SHIFT);
u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
- DP(NETIF_MSG_HW, "GLOB_REG=0x%08x\n", val);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
val = (val & mask) >> shift;
- DP(NETIF_MSG_HW, "load_cnt for engine %d = %d\n", engine, val);
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
+ engine, val);
- return val;
+ return val != 0;
}
/*
- * Reset the load counter for the current engine.
- *
- * should be run under rtnl lock
+ * Reset the load status for the current engine.
*/
-static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+static inline void bnx2x_clear_load_status(struct bnx2x *bp)
{
- u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
+ u32 val;
u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
- BNX2X_PATH0_LOAD_CNT_MASK);
-
+ BNX2X_PATH0_LOAD_CNT_MASK);
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
+ val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask));
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
}
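(Not part of the patch — a consolidated view of the per-PF load mark introduced by bnx2x_set_pf_load()/bnx2x_clear_pf_load() above: each PF on a path owns one bit of the former LOAD_CNT field, so clearing this PF's bit and testing what remains tells unload whether any other function still holds the device. Lock acquisition and the register read are done as in the real functions; the helper name below is hypothetical.)

	static bool example_clear_pf_load_bits(struct bnx2x *bp, u32 val)
	{
		u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
					 BNX2X_PATH0_LOAD_CNT_MASK;
		u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
					  BNX2X_PATH0_LOAD_CNT_SHIFT;
		u32 bits = (val & mask) >> shift;	/* one bit per PF */

		bits &= ~(1 << bp->pf_num);		/* drop this PF's mark */
		val = (val & ~mask) | ((bits << shift) & mask);
		REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);

		return bits != 0;			/* others still loaded? */
	}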
static inline void _print_next_block(int idx, const char *blk)
@@ -4168,9 +4170,8 @@ static inline bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
(sig[3] & HW_PRTY_ASSERT_SET_3) ||
(sig[4] & HW_PRTY_ASSERT_SET_4)) {
int par_num = 0;
- DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
- "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x "
- "[4]:0x%08x\n",
+ DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
+ "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
sig[0] & HW_PRTY_ASSERT_SET_0,
sig[1] & HW_PRTY_ASSERT_SET_1,
sig[2] & HW_PRTY_ASSERT_SET_2,
@@ -4240,34 +4241,25 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "ADDRESS_ERROR\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "INCORRECT_RCV_BEHAVIOR\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "WAS_ERROR_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "VF_LENGTH_VIOLATION_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "VF_GRC_SPACE_VIOLATION_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
if (val &
PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "VF_MSIX_BAR_VIOLATION_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "TCPL_ERROR_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "TCPL_IN_TWO_RCBS_ATTN\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
- BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
- "CSSNOOP_FIFO_OVERFLOW\n");
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
}
if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
@@ -4275,19 +4267,15 @@ static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
- BNX2X_ERR("ATC_ATC_INT_STS_REG"
- "_ATC_TCPL_TO_NOT_PEND\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
- BNX2X_ERR("ATC_ATC_INT_STS_REG_"
- "ATC_GPA_MULTIPLE_HITS\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
- BNX2X_ERR("ATC_ATC_INT_STS_REG_"
- "ATC_RCPL_TO_EMPTY_CNT\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
- BNX2X_ERR("ATC_ATC_INT_STS_REG_"
- "ATC_IREQ_LESS_THAN_STU\n");
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
}
if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
@@ -4346,8 +4334,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
if (deasserted & (1 << index)) {
group_mask = &bp->attn_group[index];
- DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
- "%08x %08x %08x\n",
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
index,
group_mask->sig[0], group_mask->sig[1],
group_mask->sig[2], group_mask->sig[3],
@@ -4507,6 +4494,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
switch (elem->message.data.eth_event.echo >> BNX2X_SWCID_SHIFT) {
case BNX2X_FILTER_MAC_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
#ifdef BCM_CNIC
if (cid == BNX2X_ISCSI_ETH_CID)
vlan_mac_obj = &bp->iscsi_l2_mac_obj;
@@ -4516,6 +4504,7 @@ static inline void bnx2x_handle_classification_eqe(struct bnx2x *bp,
break;
case BNX2X_FILTER_MCAST_PENDING:
+ DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
/* This is only relevant for 57710 where multicast MACs are
* configured as unicast MACs using the same ramrod.
*/
@@ -4617,7 +4606,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
/* handle eq element */
switch (opcode) {
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(NETIF_MSG_TIMER, "got statistics comp event %d\n",
+ DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
+ "got statistics comp event %d\n",
bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -4644,7 +4634,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
goto next_spqe;
case EVENT_RING_OPCODE_STOP_TRAFFIC:
- DP(BNX2X_MSG_SP, "got STOP TRAFFIC\n");
+ DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_STOP))
break;
@@ -4652,21 +4642,23 @@ static void bnx2x_eq_int(struct bnx2x *bp)
goto next_spqe;
case EVENT_RING_OPCODE_START_TRAFFIC:
- DP(BNX2X_MSG_SP, "got START TRAFFIC\n");
+ DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
if (f_obj->complete_cmd(bp, f_obj,
BNX2X_F_CMD_TX_START))
break;
bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_START:
- DP(BNX2X_MSG_SP, "got FUNC_START ramrod\n");
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_START ramrod\n");
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
break;
goto next_spqe;
case EVENT_RING_OPCODE_FUNCTION_STOP:
- DP(BNX2X_MSG_SP, "got FUNC_STOP ramrod\n");
+ DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
+ "got FUNC_STOP ramrod\n");
if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
break;
@@ -4748,7 +4740,7 @@ static void bnx2x_sp_task(struct work_struct *work)
/* if (status == 0) */
/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
- DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
+ DP(BNX2X_MSG_SP, "got a slowpath interrupt (status 0x%x)\n", status);
/* HW attentions */
if (status & BNX2X_DEF_SB_ATT_IDX) {
@@ -4782,7 +4774,7 @@ static void bnx2x_sp_task(struct work_struct *work)
}
if (unlikely(status))
- DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+ DP(BNX2X_MSG_SP, "got an unknown interrupt! (status 0x%x)\n",
status);
bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
@@ -5060,7 +5052,7 @@ static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
igu_sb_id, igu_seg_id);
- DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
+ DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
/* write indices to HW */
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
@@ -5410,6 +5402,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
/* init shortcut */
fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
+
/* Setup SB indices */
fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
@@ -5437,8 +5430,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
*/
bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
- DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
- "cl_id %d fw_sb %d igu_sb %d\n",
+ DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
fp->igu_sb_id);
bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
@@ -5525,8 +5517,7 @@ gunzip_nomem2:
bp->gunzip_buf = NULL;
gunzip_nomem1:
- netdev_err(bp->dev, "Cannot allocate firmware buffer for"
- " un-compression\n");
+ BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
return -ENOMEM;
}
@@ -5578,8 +5569,8 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- netdev_err(bp->dev, "Firmware decompression error:"
- " gunzip_outlen (%d) not aligned\n",
+ netdev_err(bp->dev,
+ "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
@@ -5998,7 +5989,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
{
u32 val;
- DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
+ DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
/*
* take the UNDI lock to protect undi_unload flow from accessing
@@ -6322,9 +6313,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- dev_alert(&bp->pdev->dev, "please adjust the size "
- "of cdu_context(%ld)\n",
- (long)sizeof(union cdu_context));
+ dev_alert(&bp->pdev->dev,
+ "please adjust the size of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
val = (4 << 24) + (0 << 12) + 1024;
@@ -6453,7 +6444,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
bnx2x__link_reset(bp);
- DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
+ DP(NETIF_MSG_HW, "starting port init port %d\n", port);
REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
@@ -6674,13 +6665,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
u16 cdu_ilt_start;
u32 addr, val;
u32 main_mem_base, main_mem_size, main_mem_prty_clr;
- int i, main_mem_width;
+ int i, main_mem_width, rc;
- DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
+ DP(NETIF_MSG_HW, "starting func init func %d\n", func);
/* FLR cleanup - hmmm */
- if (!CHIP_IS_E1x(bp))
- bnx2x_pf_flr_clnup(bp);
+ if (!CHIP_IS_E1x(bp)) {
+ rc = bnx2x_pf_flr_clnup(bp);
+ if (rc)
+ return rc;
+ }
/* set MSI reconfigure capability */
if (bp->common.int_block == INT_BLOCK_HC) {
@@ -6933,9 +6927,9 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
val = REG_RD(bp, main_mem_prty_clr);
if (val)
- DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
- "block during "
- "function init (0x%x)!\n", val);
+ DP(NETIF_MSG_HW,
+ "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
+ val);
/* Clear "false" parity errors in MSI-X table */
for (i = main_mem_base;
@@ -7063,6 +7057,7 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
alloc_mem_err:
BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ BNX2X_ERR("Can't allocate memory\n");
return -ENOMEM;
}
@@ -7089,6 +7084,11 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
sizeof(struct bnx2x_slowpath));
+#ifdef BCM_CNIC
+ /* write address to which L5 should insert its values */
+ bp->cnic_eth_dev.addr_drv_info_to_mcp = &bp->slowpath->drv_info_to_mcp;
+#endif
+
/* Allocated memory for FW statistics */
if (bnx2x_alloc_fw_stats_mem(bp))
goto alloc_mem_err;
@@ -7121,6 +7121,7 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
alloc_mem_err:
bnx2x_free_mem(bp);
+ BNX2X_ERR("Can't allocate memory\n");
return -ENOMEM;
}
@@ -7186,8 +7187,9 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
unsigned long ramrod_flags = 0;
#ifdef BCM_CNIC
- if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
- DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+ if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_STORAGE_SD(bp)) {
+ DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
+ "Ignoring Zero MAC for STORAGE SD mode\n");
return 0;
}
#endif
@@ -7220,14 +7222,13 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
/* falling through... */
case INT_MODE_INTx:
bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
- DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
+ BNX2X_DEV_INFO("set number of queues to 1\n");
break;
default:
/* Set number of queues according to bp->multi_mode value */
bnx2x_set_num_queues(bp);
- DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
- bp->num_queues);
+ BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
/* if we can't use MSI-X we only need one fp,
* so try to enable MSI-X with the requested number of fp's
@@ -7235,13 +7236,9 @@ static void __devinit bnx2x_set_int_mode(struct bnx2x *bp)
*/
if (bnx2x_enable_msix(bp)) {
/* failed to enable MSI-X */
- if (bp->multi_mode)
- DP(NETIF_MSG_IFUP,
- "Multi requested but failed to "
- "enable MSI-X (%d), "
- "set number of queues to %d\n",
- bp->num_queues,
- 1 + NON_ETH_CONTEXT_USE);
+ BNX2X_DEV_INFO("Failed to enable MSI-X (%d), set number of queues to %d\n",
+ bp->num_queues, 1 + NON_ETH_CONTEXT_USE);
+
bp->num_queues = 1 + NON_ETH_CONTEXT_USE;
/* Try to enable MSI */
@@ -7279,8 +7276,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
#endif
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7301,8 +7297,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP,
+ "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7320,8 +7316,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
line += SRC_ILT_LINES;
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP,
+ "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7342,8 +7338,8 @@ void bnx2x_ilt_set_info(struct bnx2x *bp)
line += TM_ILT_LINES;
ilt_client->end = line - 1;
- DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
- "flags 0x%x, hw psz %d\n",
+ DP(NETIF_MSG_IFUP,
+ "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
ilt_client->start,
ilt_client->end,
ilt_client->page_size,
@@ -7404,7 +7400,7 @@ static inline void bnx2x_pf_q_prep_init(struct bnx2x *bp,
/* set maximum number of COSs supported by this queue */
init_params->max_cos = fp->max_cos;
- DP(BNX2X_MSG_SP, "fp: %d setting queue params max cos to: %d\n",
+ DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
fp->index, init_params->max_cos);
/* set the context pointers queue object */
@@ -7435,9 +7431,8 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
/* Set Tx TX_ONLY_SETUP parameters */
bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
- DP(BNX2X_MSG_SP, "preparing to send tx-only ramrod for connection:"
- "cos %d, primary cid %d, cid %d, "
- "client id %d, sp-client id %d, flags %lx\n",
+ DP(NETIF_MSG_IFUP,
+ "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
tx_only_params->gen_params.spcl_id, tx_only_params->flags);
@@ -7461,7 +7456,7 @@ int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool leading)
{
- struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_state_params q_params = {NULL};
struct bnx2x_queue_setup_params *setup_params =
&q_params.params.setup;
struct bnx2x_queue_setup_tx_only_params *tx_only_params =
@@ -7469,7 +7464,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
int rc;
u8 tx_index;
- DP(BNX2X_MSG_SP, "setting up queue %d\n", fp->index);
+ DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
/* reset IGU state skip FCoE L2 queue */
if (!IS_FCOE_FP(fp))
@@ -7493,7 +7488,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return rc;
}
- DP(BNX2X_MSG_SP, "init complete\n");
+ DP(NETIF_MSG_IFUP, "init complete\n");
/* Now move the Queue to the SETUP state... */
@@ -7544,10 +7539,10 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
{
struct bnx2x_fastpath *fp = &bp->fp[index];
struct bnx2x_fp_txdata *txdata;
- struct bnx2x_queue_state_params q_params = {0};
+ struct bnx2x_queue_state_params q_params = {NULL};
int rc, tx_index;
- DP(BNX2X_MSG_SP, "stopping queue %d cid %d\n", index, fp->cid);
+ DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
q_params.q_obj = &fp->q_obj;
/* We want to wait for completion in this context */
@@ -7562,7 +7557,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index)
/* ascertain this is a normal queue*/
txdata = &fp->txdata[tx_index];
- DP(BNX2X_MSG_SP, "stopping tx-only queue %d\n",
+ DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
txdata->txq_index);
/* send halt terminate on tx-only connection */
@@ -7720,7 +7715,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
/* Prepare parameters for function state transitions */
__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
@@ -7735,7 +7730,7 @@ static inline int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
static inline int bnx2x_func_stop(struct bnx2x *bp)
{
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
int rc;
/* Prepare parameters for function state transitions */
@@ -7754,8 +7749,7 @@ static inline int bnx2x_func_stop(struct bnx2x *bp)
#ifdef BNX2X_STOP_ON_ERROR
return rc;
#else
- BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry "
- "transaction\n");
+ BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
__set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
return bnx2x_func_state_change(bp, &func_params);
#endif
@@ -7818,14 +7812,12 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else {
int path = BP_PATH(bp);
- DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
- "%d, %d, %d\n",
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
load_count[path][0]--;
load_count[path][1 + port]--;
- DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
- "%d, %d, %d\n",
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
path, load_count[path][0], load_count[path][1],
load_count[path][2]);
if (load_count[path][0] == 0)
@@ -7888,16 +7880,17 @@ static inline int bnx2x_func_wait_started(struct bnx2x *bp)
if (bnx2x_func_get_state(bp, &bp->func_obj) !=
BNX2X_F_STATE_STARTED) {
#ifdef BNX2X_STOP_ON_ERROR
+ BNX2X_ERR("Wrong function state\n");
return -EBUSY;
#else
/*
* Failed to complete the transaction in a "good way"
* Force both transactions with CLR bit
*/
- struct bnx2x_func_state_params func_params = {0};
+ struct bnx2x_func_state_params func_params = {NULL};
- DP(BNX2X_MSG_SP, "Hmmm... unexpected function state! "
- "Forcing STARTED-->TX_ST0PPED-->STARTED\n");
+ DP(NETIF_MSG_IFDOWN,
+ "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
func_params.f_obj = &bp->func_obj;
__set_bit(RAMROD_DRV_CLR_ONLY,
@@ -7921,7 +7914,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
int port = BP_PORT(bp);
int i, rc = 0;
u8 cos;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
u32 reset_code;
/* Wait until tx fastpath tasks complete */
@@ -7948,8 +7941,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC,
true);
if (rc < 0)
- BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: "
- "%d\n", rc);
+ BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
+ rc);
/* Disable LLH */
if (!CHIP_IS_E1(bp))
@@ -8042,7 +8035,7 @@ void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
u32 val;
- DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
+ DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
if (CHIP_IS_E1(bp)) {
int port = BP_PORT(bp);
@@ -8095,7 +8088,7 @@ static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
(val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
}
- DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
close ? "closing" : "opening");
mmiowb();
}
@@ -8137,7 +8130,7 @@ static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
u32 shmem;
u32 validity_offset;
- DP(NETIF_MSG_HW, "Starting\n");
+ DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
/* Set `magic' bit in order to save MF config */
if (!CHIP_IS_E1(bp))
@@ -8374,12 +8367,8 @@ static int bnx2x_process_kill(struct bnx2x *bp, bool global)
} while (cnt-- > 0);
if (cnt <= 0) {
- DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
- " are still"
- " outstanding read requests after 1s!\n");
- DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
- " port_is_idle_0=0x%08x,"
- " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
+ BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
+ BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
pgl_exp_rom2);
return -EAGAIN;
@@ -8445,13 +8434,38 @@ int bnx2x_leader_reset(struct bnx2x *bp)
{
int rc = 0;
bool global = bnx2x_reset_is_global(bp);
+ u32 load_code;
+
+ /* if not going to reset MCP - load "fake" driver to reset HW while
+ * driver is owner of the HW
+ */
+ if (!global && !BP_NOMCP(bp)) {
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset;
+ }
+ if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
+ (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
+ BNX2X_ERR("MCP unexpected resp, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+ load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
+ if (!load_code) {
+ BNX2X_ERR("MCP response failure, aborting\n");
+ rc = -EAGAIN;
+ goto exit_leader_reset2;
+ }
+ }
/* Try to recover after the failure */
if (bnx2x_process_kill(bp, global)) {
- netdev_err(bp->dev, "Something bad had happen on engine %d! "
- "Aii!\n", BP_PATH(bp));
+ BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
+ BP_PATH(bp));
rc = -EAGAIN;
- goto exit_leader_reset;
+ goto exit_leader_reset2;
}
/*
@@ -8462,6 +8476,12 @@ int bnx2x_leader_reset(struct bnx2x *bp)
if (global)
bnx2x_clear_reset_global(bp);
+exit_leader_reset2:
+ /* unload "fake driver" if it was loaded */
+ if (!global && !BP_NOMCP(bp)) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
+ }
exit_leader_reset:
bp->is_leader = 0;
bnx2x_release_leader_lock(bp);
@@ -8498,13 +8518,16 @@ static inline void bnx2x_recovery_failed(struct bnx2x *bp)
static void bnx2x_parity_recover(struct bnx2x *bp)
{
bool global = false;
+ u32 error_recovered, error_unrecovered;
+ bool is_parity;
DP(NETIF_MSG_HW, "Handling parity\n");
while (1) {
switch (bp->recovery_state) {
case BNX2X_RECOVERY_INIT:
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
- bnx2x_chk_parity_attn(bp, &global, false);
+ is_parity = bnx2x_chk_parity_attn(bp, &global, false);
+ WARN_ON(!is_parity);
/* Try to get a LEADER_LOCK HW lock */
if (bnx2x_trylock_leader_lock(bp)) {
@@ -8528,15 +8551,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
bp->recovery_state = BNX2X_RECOVERY_WAIT;
- /*
- * Reset MCP command sequence number and MCP mail box
- * sequence as we are going to reset the MCP.
- */
- if (global) {
- bp->fw_seq = 0;
- bp->fw_drv_pulse_wr_seq = 0;
- }
-
/* Ensure "is_leader", MCP command sequence and
* "recovery_state" update values are seen on other
* CPUs.
@@ -8548,10 +8562,10 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
if (bp->is_leader) {
int other_engine = BP_PATH(bp) ? 0 : 1;
- u32 other_load_counter =
- bnx2x_get_load_cnt(bp, other_engine);
- u32 load_counter =
- bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ bool other_load_status =
+ bnx2x_get_load_status(bp, other_engine);
+ bool load_status =
+ bnx2x_get_load_status(bp, BP_PATH(bp));
global = bnx2x_reset_is_global(bp);
/*
@@ -8562,8 +8576,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
* the gates will remain closed for that
* engine.
*/
- if (load_counter ||
- (global && other_load_counter)) {
+ if (load_status ||
+ (global && other_load_status)) {
/* Wait until all other functions get
* down.
*/
@@ -8620,13 +8634,32 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
return;
}
- if (bnx2x_nic_load(bp, LOAD_NORMAL))
- bnx2x_recovery_failed(bp);
- else {
+ error_recovered =
+ bp->eth_stats.recoverable_error;
+ error_unrecovered =
+ bp->eth_stats.unrecoverable_error;
+ bp->recovery_state =
+ BNX2X_RECOVERY_NIC_LOADING;
+ if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
+ error_unrecovered++;
+ netdev_err(bp->dev,
+ "Recovery failed. Power cycle needed\n");
+ /* Disconnect this device */
+ netif_device_detach(bp->dev);
+ /* Shut down the power */
+ bnx2x_set_power_state(
+ bp, PCI_D3hot);
+ smp_mb();
+ } else {
bp->recovery_state =
BNX2X_RECOVERY_DONE;
+ error_recovered++;
smp_mb();
}
+ bp->eth_stats.recoverable_error =
+ error_recovered;
+ bp->eth_stats.unrecoverable_error =
+ error_unrecovered;
return;
}
@@ -8637,6 +8670,8 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
+static int bnx2x_close(struct net_device *dev);
+
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
* scheduled on a general queue in order to prevent a dead lock.
*/
@@ -8651,8 +8686,7 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
/* if stop on error is defined no recovery flows should be executed */
#ifdef BNX2X_STOP_ON_ERROR
- BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined "
- "so reset not done to allow debug dump,\n"
+ BNX2X_ERR("recovery flow called but STOP_ON_ERROR defined so reset not done to allow debug dump,\n"
"you will need to reboot when done\n");
goto sp_rtnl_not_reset;
#endif
@@ -8695,7 +8729,7 @@ sp_rtnl_not_reset:
* damage
*/
if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
- DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+ DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
netif_device_detach(bp->dev);
bnx2x_close(bp->dev);
}
@@ -8782,11 +8816,13 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
u32 val;
- /* Check if there is any driver already loaded */
- val = REG_RD(bp, MISC_REG_UNPREPARED);
- if (val == 0x1) {
+ /* possibly another driver is trying to reset the chip */
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
+
+ /* check if doorbell queue is reset */
+ if (REG_RD(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET)
+ & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
/*
* Check if it is the UNDI driver
* UNDI driver initializes CID offset for normal bell to 0x7
@@ -8874,14 +8910,11 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
/* restore our func and fw_seq */
bp->pf_num = orig_pf_num;
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
}
-
- /* now it's safe to release the lock */
- bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
+
+ /* now it's safe to release the lock */
+ bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
@@ -8924,6 +8957,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->pfid = bp->pf_num; /* 0..7 */
}
+ BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
+
bp->link_params.chip_id = bp->common.chip_id;
BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
@@ -8981,8 +9016,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
if (val < BNX2X_BC_VER) {
/* for now only warn
* later we might need to enforce this */
- BNX2X_ERR("This driver needs bc_ver %X but found %X, "
- "please upgrade BC\n", BNX2X_BC_VER, val);
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
+ BNX2X_BC_VER, val);
}
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
@@ -9123,8 +9158,7 @@ static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
}
if (!(bp->port.supported[0] || bp->port.supported[1])) {
- BNX2X_ERR("NVRAM config error. BAD phy config."
- "PHY1 config 0x%x, PHY2 config 0x%x\n",
+ BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
SHMEM_RD(bp,
dev_info.port_hw_config[port].external_phy_config),
SHMEM_RD(bp,
@@ -9212,6 +9246,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
SPEED_AUTO_NEG;
bp->port.advertising[idx] |=
bp->port.supported[idx];
+ if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ bp->port.advertising[idx] |=
+ (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
} else {
/* force 10G, no AN */
bp->link_params.req_line_speed[idx] =
@@ -9231,9 +9270,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9250,9 +9287,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9268,9 +9303,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_100baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9288,9 +9321,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_100baseT_Half |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9306,9 +9337,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_1000baseT_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9324,9 +9353,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_2500baseX_Full |
ADVERTISED_TP);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9342,9 +9369,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
(ADVERTISED_10000baseT_Full |
ADVERTISED_FIBRE);
} else {
- BNX2X_ERR("NVRAM config error. "
- "Invalid link_config 0x%x"
- " speed_cap_mask 0x%x\n",
+ BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
link_config,
bp->link_params.speed_cap_mask[idx]);
return;
@@ -9355,8 +9380,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
break;
default:
- BNX2X_ERR("NVRAM config error. "
- "BAD link speed link_config 0x%x\n",
+ BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
link_config);
bp->link_params.req_line_speed[idx] =
SPEED_AUTO_NEG;
@@ -9374,8 +9398,7 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
BNX2X_FLOW_CTRL_NONE;
}
- BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
- " 0x%x advertising 0x%x\n",
+ BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
bp->link_params.req_line_speed[idx],
bp->link_params.req_duplex[idx],
bp->link_params.req_flow_ctrl[idx],
@@ -9424,8 +9447,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
(config & PORT_FEATURE_WOL_ENABLED));
- BNX2X_DEV_INFO("lane_config 0x%08x "
- "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
+ BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
bp->link_params.lane_config,
bp->link_params.speed_cap_mask[0],
bp->port.link_config[0]);
@@ -9467,6 +9489,7 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
+ u32 no_flags = NO_ISCSI_FLAG;
#ifdef BCM_CNIC
int port = BP_PORT(bp);
@@ -9486,12 +9509,28 @@ void bnx2x_get_iscsi_info(struct bnx2x *bp)
* disable the feature.
*/
if (!bp->cnic_eth_dev.max_iscsi_conn)
- bp->flags |= NO_ISCSI_FLAG;
+ bp->flags |= no_flags;
#else
- bp->flags |= NO_ISCSI_FLAG;
+ bp->flags |= no_flags;
#endif
}
+#ifdef BCM_CNIC
+static void __devinit bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
+{
+ /* Port info */
+ bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
+
+ /* Node info */
+ bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
+ bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
+ MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
+}
+#endif
static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
{
#ifdef BCM_CNIC
@@ -9534,24 +9573,11 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
* Read the WWN info only if the FCoE feature is enabled for
* this function.
*/
- if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
- /* Port info */
- bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_port_name_upper);
- bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_port_name_lower);
-
- /* Node info */
- bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_node_name_upper);
- bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
- MF_CFG_RD(bp, func_ext_config[func].
- fcoe_wwn_node_name_lower);
- }
- }
+ if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD)
+ bnx2x_get_ext_wwn_info(bp, func);
+
+ } else if (IS_MF_FCOE_SD(bp))
+ bnx2x_get_ext_wwn_info(bp, func);
BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
@@ -9592,7 +9618,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
- random_ether_addr(bp->dev->dev_addr);
+ eth_hw_addr_random(bp->dev);
} else if (IS_MF(bp)) {
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
@@ -9604,8 +9630,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/*
* iSCSI and FCoE NPAR MACs: if there is no iSCSI or
* FCoE MAC then the appropriate feature should be disabled.
+ *
+ * In non SD mode features configuration comes from
+ * struct func_ext_config.
*/
- if (IS_MF_SI(bp)) {
+ if (!IS_MF_SD(bp)) {
u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
val2 = MF_CFG_RD(bp, func_ext_config[func].
@@ -9629,16 +9658,25 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
} else
bp->flags |= NO_FCOE_FLAG;
- } else { /* SD mode */
- if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
- /* use primary mac as iscsi mac */
- memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+ } else { /* SD MODE */
+ if (IS_MF_STORAGE_SD(bp)) {
+ if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr,
+ ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ } else { /* FCoE */
+ memcpy(fip_mac, bp->dev->dev_addr,
+ ETH_ALEN);
+ BNX2X_DEV_INFO("SD FCoE MODE\n");
+ BNX2X_DEV_INFO("Read FIP MAC: %pM\n",
+ fip_mac);
+ }
/* Zero primary MAC configuration */
memset(bp->dev->dev_addr, 0, ETH_ALEN);
-
- BNX2X_DEV_INFO("SD ISCSI MODE\n");
- BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
- iscsi_mac);
}
}
#endif
@@ -9667,10 +9705,6 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
#ifdef BCM_CNIC
- /* Set the FCoE MAC in MF_SD mode */
- if (!CHIP_IS_E1x(bp) && IS_MF_SD(bp))
- memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
-
/* Disable iSCSI if MAC configuration is
* invalid.
*/
@@ -9690,10 +9724,11 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
- "bad Ethernet MAC address configuration: "
- "%pM, change it manually before bringing up "
- "the appropriate network interface\n",
+ "bad Ethernet MAC address configuration: %pM\n"
+ "change it manually before bringing up the appropriate network interface\n",
bp->dev->dev_addr);
+
+
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -9814,8 +9849,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_config[vn] = MF_CFG_RD(bp,
func_mf_config[func].config);
} else
- BNX2X_DEV_INFO("illegal MAC address "
- "for SI\n");
+ BNX2X_DEV_INFO("illegal MAC address for SI\n");
break;
case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
/* get OV configuration */
@@ -9833,7 +9867,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
default:
/* Unknown configuration: reset mf_config */
bp->mf_config[vn] = 0;
- BNX2X_DEV_INFO("unkown MF mode 0x%x\n", val);
+ BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
}
}
@@ -9848,25 +9882,24 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bp->mf_ov = val;
bp->path_has_ovlan = true;
- BNX2X_DEV_INFO("MF OV for func %d is %d "
- "(0x%04x)\n", func, bp->mf_ov,
- bp->mf_ov);
+ BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
+ func, bp->mf_ov, bp->mf_ov);
} else {
dev_err(&bp->pdev->dev,
- "No valid MF OV for func %d, "
- "aborting\n", func);
+ "No valid MF OV for func %d, aborting\n",
+ func);
return -EPERM;
}
break;
case MULTI_FUNCTION_SI:
- BNX2X_DEV_INFO("func %d is in MF "
- "switch-independent mode\n", func);
+ BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
+ func);
break;
default:
if (vn) {
dev_err(&bp->pdev->dev,
- "VN %d is in a single function mode, "
- "aborting\n", vn);
+ "VN %d is in a single function mode, aborting\n",
+ vn);
return -EPERM;
}
break;
@@ -9902,16 +9935,6 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
bnx2x_get_cnic_info(bp);
- /* Get current FW pulse sequence */
- if (!BP_NOMCP(bp)) {
- int mb_idx = BP_FW_MB_IDX(bp);
-
- bp->fw_drv_pulse_wr_seq =
- (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
- DRV_PULSE_SEQ_MASK);
- BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
- }
-
return rc;
}
@@ -10080,35 +10103,26 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (!BP_NOMCP(bp))
bnx2x_undi_unload(bp);
- /* init fw_seq after undi_unload! */
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
-
if (CHIP_REV_IS_FPGA(bp))
dev_err(&bp->pdev->dev, "FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- dev_err(&bp->pdev->dev, "MCP disabled, "
- "must load devices in order!\n");
+ dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
bp->multi_mode = multi_mode;
bp->disable_tpa = disable_tpa;
#ifdef BCM_CNIC
- bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+ bp->disable_tpa |= IS_MF_STORAGE_SD(bp);
#endif
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->flags &= ~TPA_ENABLE_FLAG;
+ bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
bp->dev->features &= ~NETIF_F_LRO;
} else {
- bp->flags |= TPA_ENABLE_FLAG;
+ bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
bp->dev->features |= NETIF_F_LRO;
}
@@ -10150,6 +10164,8 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
if (CHIP_IS_E3B0(bp))
bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
+ bp->gro_check = bnx2x_need_gro_check(bp->dev->mtu);
+
return rc;
}
@@ -10168,14 +10184,16 @@ static int bnx2x_open(struct net_device *dev)
struct bnx2x *bp = netdev_priv(dev);
bool global = false;
int other_engine = BP_PATH(bp) ? 0 : 1;
- u32 other_load_counter, load_counter;
+ bool other_load_status, load_status;
+
+ bp->stats_init = true;
netif_carrier_off(dev);
bnx2x_set_power_state(bp, PCI_D0);
- other_load_counter = bnx2x_get_load_cnt(bp, other_engine);
- load_counter = bnx2x_get_load_cnt(bp, BP_PATH(bp));
+ other_load_status = bnx2x_get_load_status(bp, other_engine);
+ load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
/*
* If parity had happen during the unload, then attentions
@@ -10201,8 +10219,8 @@ static int bnx2x_open(struct net_device *dev)
* global blocks only the first in the chip should try
* to recover.
*/
- if ((!load_counter &&
- (!global || !other_load_counter)) &&
+ if ((!load_status &&
+ (!global || !other_load_status)) &&
bnx2x_trylock_leader_lock(bp) &&
!bnx2x_leader_reset(bp)) {
netdev_info(bp->dev, "Recovered in open\n");
@@ -10213,10 +10231,8 @@ static int bnx2x_open(struct net_device *dev)
bnx2x_set_power_state(bp, PCI_D3hot);
bp->recovery_state = BNX2X_RECOVERY_FAILED;
- netdev_err(bp->dev, "Recovery flow hasn't been properly"
- " completed yet. Try again later. If u still see this"
- " message after a few retries then power cycle is"
- " required.\n");
+ BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
+ "If you still see this message after a few retries then power cycle is required.\n");
return -EAGAIN;
} while (0);
@@ -10226,7 +10242,7 @@ static int bnx2x_open(struct net_device *dev)
}
/* called with rtnl_lock */
-int bnx2x_close(struct net_device *dev)
+static int bnx2x_close(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10315,7 +10331,7 @@ static inline int bnx2x_set_uc_list(struct bnx2x *bp)
static inline int bnx2x_set_mc_list(struct bnx2x *bp)
{
struct net_device *dev = bp->dev;
- struct bnx2x_mcast_ramrod_params rparam = {0};
+ struct bnx2x_mcast_ramrod_params rparam = {NULL};
int rc = 0;
rparam.mcast_obj = &bp->mcast_obj;
@@ -10323,8 +10339,7 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
/* first, clear all configured multicast MACs */
rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
if (rc < 0) {
- BNX2X_ERR("Failed to clear multicast "
- "configuration: %d\n", rc);
+ BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
return rc;
}
@@ -10332,8 +10347,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
if (netdev_mc_count(dev)) {
rc = bnx2x_init_mcast_macs_list(bp, &rparam);
if (rc) {
- BNX2X_ERR("Failed to create multicast MACs "
- "list: %d\n", rc);
+ BNX2X_ERR("Failed to create multicast MACs list: %d\n",
+ rc);
return rc;
}
@@ -10341,8 +10356,8 @@ static inline int bnx2x_set_mc_list(struct bnx2x *bp)
rc = bnx2x_config_mcast(bp, &rparam,
BNX2X_MCAST_CMD_ADD);
if (rc < 0)
- BNX2X_ERR("Failed to set a new multicast "
- "configuration: %d\n", rc);
+ BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
+ rc);
bnx2x_free_mcast_macs_list(&rparam);
}
@@ -10426,8 +10441,9 @@ static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
struct bnx2x *bp = netdev_priv(netdev);
int rc;
- DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
- " value 0x%x\n", prtad, devad, addr, value);
+ DP(NETIF_MSG_LINK,
+ "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
+ prtad, devad, addr, value);
/* The HW expects different devad if CL22 is used */
devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
@@ -10468,8 +10484,10 @@ static int bnx2x_validate_addr(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+ if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
+ BNX2X_ERR("Non-valid Ethernet address\n");
return -EADDRNOTAVAIL;
+ }
return 0;
}
@@ -10503,8 +10521,7 @@ static inline int bnx2x_set_coherency_mask(struct bnx2x *bp)
if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
- dev_err(dev, "dma_set_coherent_mask failed, "
- "aborting\n");
+ dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
return -EIO;
}
} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
@@ -10521,6 +10538,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
{
struct bnx2x *bp;
int rc;
+ u32 pci_cfg_dword;
bool chip_is_e1x = (board_type == BCM57710 ||
board_type == BCM57711 ||
board_type == BCM57711E);
@@ -10531,7 +10549,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->dev = dev;
bp->pdev = pdev;
bp->flags = 0;
- bp->pf_num = PCI_FUNC(pdev->devfn);
rc = pci_enable_device(pdev);
if (rc) {
@@ -10575,7 +10592,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
}
if (!pci_is_pcie(pdev)) {
- dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
+ dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -10598,6 +10615,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
goto err_out_release;
}
+ /* In E1/E1H use pci device function given by kernel.
+ * In E2/E3 read physical function from ME register since these chips
+ * support Physical Device Assignment where the kernel BDF may be arbitrary
+ * (depending on hypervisor).
+ */
+ if (chip_is_e1x)
+ bp->pf_num = PCI_FUNC(pdev->devfn);
+ else {/* chip is E2/3*/
+ pci_read_config_dword(bp->pdev,
+ PCICFG_ME_REGISTER, &pci_cfg_dword);
+ bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
+ ME_REG_ABS_PF_NUM_SHIFT);
+ }
+ BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
+
bnx2x_set_power_state(bp, PCI_D0);
/* clean indirect addresses */
@@ -10627,7 +10659,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */
- bnx2x_clear_load_cnt(bp);
+ bnx2x_clear_load_status(bp);
dev->watchdog_timeo = TX_TIMEOUT;
@@ -10637,8 +10669,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
dev->priv_flags |= IFF_UNICAST_FLT;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_LRO |
- NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
+ NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
+ NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
+ NETIF_F_RXHASH | NETIF_F_HW_VLAN_TX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
@@ -10697,8 +10730,10 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
int i;
const u8 *fw_ver;
- if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
+ if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
+ BNX2X_ERR("Wrong FW size\n");
return -EINVAL;
+ }
fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
sections = (struct bnx2x_fw_file_section *)fw_hdr;
@@ -10709,8 +10744,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- dev_err(&bp->pdev->dev,
- "Section %d length is out of bounds\n", i);
+ BNX2X_ERR("Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -10722,8 +10756,7 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- dev_err(&bp->pdev->dev,
- "Section offset %d is out of bounds\n", i);
+ BNX2X_ERR("Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -10735,10 +10768,9 @@ static int bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- dev_err(&bp->pdev->dev,
- "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
- fw_ver[0], fw_ver[1], fw_ver[2],
- fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
+ BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
+ fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
+ BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
BCM_5710_FW_REVISION_VERSION,
BCM_5710_FW_ENGINEERING_VERSION);
@@ -10814,15 +10846,13 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
do { \
u32 len = be32_to_cpu(fw_hdr->arr.len); \
bp->arr = kmalloc(len, GFP_KERNEL); \
- if (!bp->arr) { \
- pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ if (!bp->arr) \
goto lbl; \
- } \
func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
(u8 *)bp->arr, len); \
} while (0)
-int bnx2x_init_firmware(struct bnx2x *bp)
+static int bnx2x_init_firmware(struct bnx2x *bp)
{
const char *fw_file_name;
struct bnx2x_fw_file_hdr *fw_hdr;
@@ -11054,14 +11084,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
- if (!dev) {
- dev_err(&pdev->dev, "Cannot allocate net device\n");
+ if (!dev)
return -ENOMEM;
- }
bp = netdev_priv(dev);
- DP(NETIF_MSG_DRV, "Allocated netdev with %d tx and %d rx queues\n",
+ BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
tx_count, rx_count);
bp->igu_sb_cnt = max_non_def_sbs;
@@ -11074,7 +11102,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
return rc;
}
- DP(NETIF_MSG_DRV, "max_non_def_sbs %d\n", max_non_def_sbs);
+ BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs);
rc = bnx2x_init_bp(bp);
if (rc)
@@ -11129,7 +11157,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ BNX2X_DEV_INFO(
+ "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
board_info[ent->driver_data].name,
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
pcie_width,
@@ -11263,29 +11292,11 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
- bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
- bp->link_params.shmem_base = bp->common.shmem_base;
- BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
-
- if (!bp->common.shmem_base ||
- (bp->common.shmem_base < 0xA0000) ||
- (bp->common.shmem_base >= 0xC0000)) {
- BNX2X_DEV_INFO("MCP not active\n");
- bp->flags |= NO_MCP_FLAG;
- return;
- }
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
BNX2X_ERR("BAD MCP validity signature\n");
-
- if (!BP_NOMCP(bp)) {
- bp->fw_seq =
- (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
- DRV_MSG_SEQ_NUMBER_MASK);
- BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
- }
}
/**
@@ -11366,8 +11377,7 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
struct bnx2x *bp = netdev_priv(dev);
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
- netdev_err(bp->dev, "Handling parity error recovery. "
- "Try again later\n");
+ netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
return;
}
@@ -11518,7 +11528,7 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
spe = bnx2x_sp_get_next(bp);
*spe = *bp->cnic_kwq_cons;
- DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
+ DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
@@ -11537,10 +11547,18 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
int i;
#ifdef BNX2X_STOP_ON_ERROR
- if (unlikely(bp->panic))
+ if (unlikely(bp->panic)) {
+ BNX2X_ERR("Can't post to SP queue while panic\n");
return -EIO;
+ }
#endif
+ if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
+ (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
+ BNX2X_ERR("Handling parity error recovery. Try again later\n");
+ return -EAGAIN;
+ }
+
spin_lock_bh(&bp->spq_lock);
for (i = 0; i < count; i++) {
@@ -11553,7 +11571,7 @@ static int bnx2x_cnic_sp_queue(struct net_device *dev,
bp->cnic_kwq_pending++;
- DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
+ DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
spe->hdr.conn_and_cmd_data, spe->hdr.type,
spe->data.update_data_addr.hi,
spe->data.update_data_addr.lo,
@@ -11834,8 +11852,10 @@ static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
struct bnx2x *bp = netdev_priv(dev);
struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
- if (ops == NULL)
+ if (ops == NULL) {
+ BNX2X_ERR("NULL ops received\n");
return -EINVAL;
+ }
bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
if (!bp->cnic_kwq)
@@ -11918,8 +11938,8 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
if (NO_FCOE(bp))
cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
- DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
- "starting cid %d\n",
+ BNX2X_DEV_INFO(
+ "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
cp->ctx_blk_size,
cp->ctx_tbl_offset,
cp->ctx_tbl_len,
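The bnx2x_main.c hunks above largely convert ad-hoc dev_err()/netdev_info()/DP(NETIF_MSG_*) prints to the driver's own BNX2X_ERR()/BNX2X_DEV_INFO()/DP(BNX2X_MSG_*) helpers, so verbosity is gated by bnx2x-specific message flags. A minimal sketch of that gating idea only; the macro body below is an assumption for illustration, not the driver's actual definition in bnx2x.h:

	/* sketch only -- assumed body, not the real DP() helper */
	#define SKETCH_DP(bp, mask, fmt, ...)				\
	do {								\
		if ((bp)->msg_enable & (mask))				\
			netdev_dbg((bp)->dev, fmt, ##__VA_ARGS__);	\
	} while (0)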
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index dddbcf6e154e..fd7fb4581849 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -1,6 +1,6 @@
/* bnx2x_reg.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -4812,6 +4812,7 @@
 The fields are: [4:0] - tail pointer; [10:5] - Link List size; [15:11] -
header pointer. */
#define UCM_REG_XX_TABLE 0xe0300
+#define UMAC_COMMAND_CONFIG_REG_HD_ENA (0x1<<10)
#define UMAC_COMMAND_CONFIG_REG_IGNORE_TX_PAUSE (0x1<<28)
#define UMAC_COMMAND_CONFIG_REG_LOOP_ENA (0x1<<15)
#define UMAC_COMMAND_CONFIG_REG_NO_LGTH_CHECK (0x1<<24)
@@ -5731,6 +5732,7 @@
#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
#define MISC_REGISTERS_GPIO_SET_POS 8
#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
+#define MISC_REGISTERS_RESET_REG_1_RST_DORQ (0x1<<19)
#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
@@ -5783,15 +5785,17 @@
#define MISC_REGISTERS_SPIO_OUTPUT_HIGH 1
#define MISC_REGISTERS_SPIO_OUTPUT_LOW 0
#define MISC_REGISTERS_SPIO_SET_POS 8
-#define HW_LOCK_DRV_FLAGS 10
#define HW_LOCK_MAX_RESOURCE_VALUE 31
+#define HW_LOCK_RESOURCE_DRV_FLAGS 10
#define HW_LOCK_RESOURCE_GPIO 1
#define HW_LOCK_RESOURCE_MDIO 0
+#define HW_LOCK_RESOURCE_NVRAM 12
#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_0 8
#define HW_LOCK_RESOURCE_RECOVERY_LEADER_1 9
-#define HW_LOCK_RESOURCE_SPIO 2
+#define HW_LOCK_RESOURCE_RECOVERY_REG 11
#define HW_LOCK_RESOURCE_RESET 5
+#define HW_LOCK_RESOURCE_SPIO 2
#define AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT (0x1<<4)
#define AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR (0x1<<5)
#define AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR (0x1<<18)
@@ -6023,7 +6027,8 @@
#define PCICFG_MSI_CONTROL_64_BIT_ADDR_CAP (0x1<<23)
#define PCICFG_MSI_CONTROL_MSI_PVMASK_CAPABLE (0x1<<24)
#define PCICFG_GRC_ADDRESS 0x78
-#define PCICFG_GRC_DATA 0x80
+#define PCICFG_GRC_DATA 0x80
+#define PCICFG_ME_REGISTER 0x98
#define PCICFG_MSIX_CAP_ID_OFFSET 0xa0
#define PCICFG_MSIX_CONTROL_TABLE_SIZE (0x7ff<<16)
#define PCICFG_MSIX_CONTROL_RESERVED (0x7<<27)
@@ -6401,6 +6406,7 @@
#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
+#define MDIO_CL73_IEEEB1_AN_LP_ADV2 0x04
#define MDIO_REG_BANK_RX0 0x80b0
#define MDIO_RX0_RX_STATUS 0x10
@@ -6794,14 +6800,16 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_AN_REG_ADV_PAUSE_MASK 0x0C00
#define MDIO_AN_REG_ADV 0x0011
#define MDIO_AN_REG_ADV2 0x0012
-#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG 0x0013
+#define MDIO_AN_REG_LP_AUTO_NEG2 0x0014
#define MDIO_AN_REG_MASTER_STATUS 0x0021
/*bcm*/
#define MDIO_AN_REG_LINK_STATUS 0x8304
#define MDIO_AN_REG_CL37_CL73 0x8370
#define MDIO_AN_REG_CL37_AN 0xffe0
#define MDIO_AN_REG_CL37_FC_LD 0xffe4
-#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_CL37_FC_LP 0xffe5
+#define MDIO_AN_REG_1000T_STATUS 0xffea
#define MDIO_AN_REG_8073_2_5G 0x8329
#define MDIO_AN_REG_8073_BAM 0x8350
@@ -6966,6 +6974,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_SERDESDIGITAL_MISC1 0x8308
#define MDIO_WC_REG_SERDESDIGITAL_MISC2 0x8309
#define MDIO_WC_REG_DIGITAL3_UP1 0x8329
+#define MDIO_WC_REG_DIGITAL3_LP_UP1 0x832c
#define MDIO_WC_REG_DIGITAL4_MISC3 0x833c
#define MDIO_WC_REG_DIGITAL5_MISC6 0x8345
#define MDIO_WC_REG_DIGITAL5_MISC7 0x8349
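The bnx2x_reg.h changes only add register offsets and bit masks (and reorder the HW_LOCK_RESOURCE_* defines). Purely as an illustration of how such a bit define is consumed, here is a read-modify-write sketch using the driver's REG_RD()/REG_WR() accessors; the umac_base variable and the UMAC_REG_COMMAND_CONFIG offset name are assumptions, not taken from this patch:

	u32 val;

	val = REG_RD(bp, umac_base + UMAC_REG_COMMAND_CONFIG);
	val |= UMAC_COMMAND_CONFIG_REG_HD_ENA;	/* new half-duplex enable bit */
	REG_WR(bp, umac_base + UMAC_REG_COMMAND_CONFIG, val);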
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 94110e9ce51d..3f52fadee3ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -1,6 +1,6 @@
/* bnx2x_sp.c: Broadcom Everest network driver.
*
- * Copyright 2011 Broadcom Corporation
+ * Copyright (c) 2011-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -72,8 +72,8 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
o->execute = exec;
o->get = get;
- DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
- "length of %d\n", exe_len);
+ DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk length of %d\n",
+ exe_len);
}
static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
@@ -203,8 +203,7 @@ static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
*/
if (!list_empty(&o->pending_comp)) {
if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
- "resetting pending_comp\n");
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: resetting a pending_comp list\n");
__bnx2x_exe_queue_reset_pending(bp, o);
} else {
spin_unlock_bh(&o->lock);
@@ -476,11 +475,14 @@ static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
}
/* check_add() callbacks */
-static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
+static int bnx2x_check_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking MAC %pM for ADD command\n", data->mac.mac);
+
if (!is_valid_ether_addr(data->mac.mac))
return -EINVAL;
@@ -492,11 +494,14 @@ static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
return 0;
}
-static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
+static int bnx2x_check_vlan_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN %d for ADD command\n", data->vlan.vlan);
+
list_for_each_entry(pos, &o->head, link)
if (data->vlan.vlan == pos->u.vlan.vlan)
return -EEXIST;
@@ -504,11 +509,15 @@ static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
return 0;
}
-static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
+static int bnx2x_check_vlan_mac_add(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for ADD command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
@@ -521,11 +530,14 @@ static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
/* check_del() callbacks */
static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
+ bnx2x_check_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking MAC %pM for DEL command\n", data->mac.mac);
+
list_for_each_entry(pos, &o->head, link)
if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
return pos;
@@ -534,11 +546,14 @@ static struct bnx2x_vlan_mac_registry_elem *
}
static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
+ bnx2x_check_vlan_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN %d for DEL command\n", data->vlan.vlan);
+
list_for_each_entry(pos, &o->head, link)
if (data->vlan.vlan == pos->u.vlan.vlan)
return pos;
@@ -547,11 +562,15 @@ static struct bnx2x_vlan_mac_registry_elem *
}
static struct bnx2x_vlan_mac_registry_elem *
- bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
+ bnx2x_check_vlan_mac_del(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
{
struct bnx2x_vlan_mac_registry_elem *pos;
+ DP(BNX2X_MSG_SP, "Checking VLAN_MAC (%pM, %d) for DEL command\n",
+ data->vlan_mac.mac, data->vlan_mac.vlan);
+
list_for_each_entry(pos, &o->head, link)
if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
(!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
@@ -562,7 +581,8 @@ static struct bnx2x_vlan_mac_registry_elem *
}
/* check_move() callback */
-static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
+static bool bnx2x_check_move(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
struct bnx2x_vlan_mac_obj *dst_o,
union bnx2x_classification_ramrod_data *data)
{
@@ -572,10 +592,10 @@ static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
/* Check if we can delete the requested configuration from the first
* object.
*/
- pos = src_o->check_del(src_o, data);
+ pos = src_o->check_del(bp, src_o, data);
/* check if configuration can be added */
- rc = dst_o->check_add(dst_o, data);
+ rc = dst_o->check_add(bp, dst_o, data);
/* If this classification can not be added (is already set)
* or can't be deleted - return an error.
@@ -587,6 +607,7 @@ static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
}
static bool bnx2x_check_move_always_err(
+ struct bnx2x *bp,
struct bnx2x_vlan_mac_obj *src_o,
struct bnx2x_vlan_mac_obj *dst_o,
union bnx2x_classification_ramrod_data *data)
@@ -611,12 +632,6 @@ static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
return rx_tx_flag;
}
-/* LLH CAM line allocations */
-enum {
- LLH_CAM_ISCSI_ETH_LINE = 0,
- LLH_CAM_ETH_LINE,
- LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
-};
static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
bool add, unsigned char *dev_addr, int index)
@@ -625,7 +640,7 @@ static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
NIG_REG_LLH0_FUNC_MEM;
- if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+ if (!IS_MF_SI(bp) || index > BNX2X_LLH_CAM_MAX_PF_LINE)
return;
DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
@@ -731,9 +746,10 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
if (cmd != BNX2X_VLAN_MAC_MOVE) {
if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
bnx2x_set_mac_in_nig(bp, add, mac,
- LLH_CAM_ISCSI_ETH_LINE);
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE);
else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
- bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
+ bnx2x_set_mac_in_nig(bp, add, mac,
+ BNX2X_LLH_CAM_ETH_LINE);
}
/* Reset the ramrod data buffer for the first rule */
@@ -745,7 +761,7 @@ static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
&rule_entry->mac.header);
DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
- add ? "add" : "delete", mac, raw->cl_id);
+ (add ? "add" : "delete"), mac, raw->cl_id);
/* Set a MAC itself */
bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
@@ -838,7 +854,7 @@ static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
cfg_entry);
DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
- add ? "setting" : "clearing",
+ (add ? "setting" : "clearing"),
mac, raw->cl_id, cam_offset);
}
@@ -869,7 +885,7 @@ static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
/* Reset the ramrod data buffer */
memset(config, 0, sizeof(*config));
- bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
+ bnx2x_vlan_mac_set_rdata_e1x(bp, o, raw->state,
cam_offset, add,
elem->cmd_data.vlan_mac.u.mac.mac, 0,
ETH_VLAN_FILTER_ANY_VLAN, config);
@@ -1157,10 +1173,9 @@ static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
int rc;
/* Check the registry */
- rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
+ rc = o->check_add(bp, o, &elem->cmd_data.vlan_mac.u);
if (rc) {
- DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
- "current registry state\n");
+ DP(BNX2X_MSG_SP, "ADD command is not allowed considering current registry state.\n");
return rc;
}
@@ -1211,10 +1226,9 @@ static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
/* If this classification can not be deleted (doesn't exist)
* - return a BNX2X_EXIST.
*/
- pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ pos = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
if (!pos) {
- DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
- "current registry state\n");
+ DP(BNX2X_MSG_SP, "DEL command is not allowed considering current registry state\n");
return -EEXIST;
}
@@ -1274,9 +1288,9 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
* Check if we can perform this operation based on the current registry
* state.
*/
- if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
- DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
- "current registry state\n");
+ if (!src_o->check_move(bp, src_o, dest_o,
+ &elem->cmd_data.vlan_mac.u)) {
+ DP(BNX2X_MSG_SP, "MOVE command is not allowed considering current registry state\n");
return -EINVAL;
}
@@ -1290,8 +1304,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
/* Check DEL on source */
query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
if (src_exeq->get(src_exeq, &query_elem)) {
- BNX2X_ERR("There is a pending DEL command on the source "
- "queue already\n");
+ BNX2X_ERR("There is a pending DEL command on the source queue already\n");
return -EINVAL;
}
@@ -1304,8 +1317,7 @@ static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
/* Check ADD on destination */
query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
if (dest_exeq->get(dest_exeq, &query_elem)) {
- BNX2X_ERR("There is a pending ADD command on the "
- "destination queue already\n");
+ BNX2X_ERR("There is a pending ADD command on the destination queue already\n");
return -EINVAL;
}
@@ -1480,12 +1492,10 @@ static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
&pos->cmd_data.vlan_mac.vlan_mac_flags)) {
if ((query.cmd_data.vlan_mac.cmd ==
BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
- BNX2X_ERR("Failed to return the credit for the "
- "optimized ADD command\n");
+ BNX2X_ERR("Failed to return the credit for the optimized ADD command\n");
return -EINVAL;
} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
- BNX2X_ERR("Failed to recover the credit from "
- "the optimized DEL command\n");
+ BNX2X_ERR("Failed to recover the credit from the optimized DEL command\n");
return -EINVAL;
}
}
@@ -1551,7 +1561,7 @@ static inline int bnx2x_vlan_mac_get_registry_elem(
reg_elem->vlan_mac_flags =
elem->cmd_data.vlan_mac.vlan_mac_flags;
} else /* DEL, RESTORE */
- reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ reg_elem = o->check_del(bp, o, &elem->cmd_data.vlan_mac.u);
*re = reg_elem;
return 0;
@@ -1649,7 +1659,8 @@ static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
cmd = elem->cmd_data.vlan_mac.cmd;
if ((cmd == BNX2X_VLAN_MAC_DEL) ||
(cmd == BNX2X_VLAN_MAC_MOVE)) {
- reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
+ reg_elem = o->check_del(bp, o,
+ &elem->cmd_data.vlan_mac.u);
WARN_ON(!reg_elem);
@@ -1680,7 +1691,7 @@ error_exit:
if (!restore &&
((cmd == BNX2X_VLAN_MAC_ADD) ||
(cmd == BNX2X_VLAN_MAC_MOVE))) {
- reg_elem = o->check_del(cam_obj,
+ reg_elem = o->check_del(bp, cam_obj,
&elem->cmd_data.vlan_mac.u);
if (reg_elem) {
list_del(&reg_elem->link);
@@ -1755,8 +1766,7 @@ int bnx2x_config_vlan_mac(
rc = 1;
if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
- DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
- "clearing a pending bit.\n");
+ DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: clearing a pending bit.\n");
raw->clear_pending(raw);
}
@@ -1836,6 +1846,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
+ spin_unlock_bh(&exeq->lock);
return rc;
}
list_del(&exeq_pos->link);
@@ -2153,12 +2164,10 @@ static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
mac_filters->unmatched_unicast & ~mask;
DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
- "accp_mcast 0x%x\naccp_bcast 0x%x\n",
- mac_filters->ucast_drop_all,
- mac_filters->mcast_drop_all,
- mac_filters->ucast_accept_all,
- mac_filters->mcast_accept_all,
- mac_filters->bcast_accept_all);
+ "accp_mcast 0x%x\naccp_bcast 0x%x\n",
+ mac_filters->ucast_drop_all, mac_filters->mcast_drop_all,
+ mac_filters->ucast_accept_all, mac_filters->mcast_accept_all,
+ mac_filters->bcast_accept_all);
/* write the MAC filter structure*/
__storm_memset_mac_filters(bp, mac_filters, p->func_id);
@@ -2307,8 +2316,7 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
*/
bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
- DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
- "tx_accept_flags 0x%lx\n",
+ DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, tx_accept_flags 0x%lx\n",
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
@@ -2441,8 +2449,8 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
if (!new_cmd)
return -ENOMEM;
- DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
- "macs_list_len=%d\n", cmd, macs_list_len);
+ DP(BNX2X_MSG_SP, "About to enqueue a new %d command. macs_list_len=%d\n",
+ cmd, macs_list_len);
INIT_LIST_HEAD(&new_cmd->data.macs_head);
@@ -2657,7 +2665,7 @@ static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
cnt++;
DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
- pmac_pos->mac);
+ pmac_pos->mac);
list_del(&pmac_pos->link);
@@ -3181,8 +3189,8 @@ static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
* matter.
*/
if (p->mcast_list_len > o->max_cmd_len) {
- BNX2X_ERR("Can't configure more than %d multicast MACs"
- "on 57710\n", o->max_cmd_len);
+ BNX2X_ERR("Can't configure more than %d multicast MACs on 57710\n",
+ o->max_cmd_len);
return -EINVAL;
}
/* Every configured MAC should be cleared if DEL command is
@@ -3430,7 +3438,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
&data->config_table[i].lsb_mac_addr,
elem->mac);
DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
- elem->mac);
+ elem->mac);
list_add_tail(&elem->link,
&o->registry.exact_match.macs);
}
@@ -3571,9 +3579,8 @@ int bnx2x_config_mcast(struct bnx2x *bp,
if ((!p->mcast_list_len) && (!o->check_sched(o)))
return 0;
- DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
- "o->max_cmd_len=%d\n", o->total_pending_num,
- p->mcast_list_len, o->max_cmd_len);
+ DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d o->max_cmd_len=%d\n",
+ o->total_pending_num, p->mcast_list_len, o->max_cmd_len);
/* Enqueue the current command to the pending list if we can't complete
* it in the current iteration
@@ -4298,9 +4305,8 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
unsigned long cur_pending = o->pending;
if (!test_and_clear_bit(cmd, &cur_pending)) {
- BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
- "pending 0x%lx, next_state %d\n", cmd,
- o->cids[BNX2X_PRIMARY_CID_INDEX],
+ BNX2X_ERR("Bad MC reply %d for queue %d in state %d pending 0x%lx, next_state %d\n",
+ cmd, o->cids[BNX2X_PRIMARY_CID_INDEX],
o->state, cur_pending, o->next_state);
return -EINVAL;
}
@@ -4312,13 +4318,13 @@ static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
o->next_tx_only, o->max_cos);
- DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
- "setting state to %d\n", cmd,
- o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
+ DP(BNX2X_MSG_SP,
+ "Completing command %d for queue %d, setting state to %d\n",
+ cmd, o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
if (o->next_tx_only) /* print num tx-only if any exist */
DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
- o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
+ o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
o->state = o->next_state;
o->num_tx_only = o->next_tx_only;
@@ -4430,9 +4436,10 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
struct client_init_rx_data *rx_data,
unsigned long *flags)
{
- /* Rx data */
rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
+ rx_data->tpa_en |= test_bit(BNX2X_Q_FLG_TPA_GRO, flags) *
+ CLIENT_INIT_RX_DATA_TPA_MODE;
rx_data->vmqueue_mode_en_flg = 0;
rx_data->cache_line_alignment_log_size =
@@ -4476,7 +4483,7 @@ static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
- rx_data->approx_mcast_engine_id = o->func_id;
+ rx_data->approx_mcast_engine_id = params->mcast_engine_id;
rx_data->is_approx_mcast = 1;
}
@@ -4532,8 +4539,10 @@ static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
&data->tx,
&cmd_params->params.tx_only.flags);
- DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n",cmd_params->q_obj->cids[0],
- data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
+ DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x",
+ cmd_params->q_obj->cids[0],
+ data->tx.tx_bd_page_base.lo,
+ data->tx.tx_bd_page_base.hi);
}
/**
@@ -4680,10 +4689,8 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_tx_only(bp, params, rdata);
- DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d,"
- "sp-client id %d, cos %d\n",
- o->cids[cid_index],
- rdata->general.client_id,
+ DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, sp-client id %d, cos %d\n",
+ o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
/*
@@ -5184,13 +5191,6 @@ void bnx2x_init_queue_obj(struct bnx2x *bp,
obj->set_pending = bnx2x_queue_set_pending;
}
-void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
- struct bnx2x_queue_sp_obj *obj,
- u32 cid, u8 index)
-{
- obj->cids[index] = cid;
-}
-
/********************** Function state object *********************************/
enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
struct bnx2x_func_sp_obj *o)
@@ -5232,9 +5232,9 @@ static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
unsigned long cur_pending = o->pending;
if (!test_and_clear_bit(cmd, &cur_pending)) {
- BNX2X_ERR("Bad MC reply %d for func %d in state %d "
- "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
- o->state, cur_pending, o->next_state);
+ BNX2X_ERR("Bad MC reply %d for func %d in state %d pending 0x%lx, next_state %d\n",
+ cmd, BP_FUNC(bp), o->state,
+ cur_pending, o->next_state);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 66da39f0c84a..61a7670adfcd 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -1,6 +1,6 @@
/* bnx2x_sp.h: Broadcom Everest network driver.
*
- * Copyright 2011 Broadcom Corporation
+ * Copyright (c) 2011-2012 Broadcom Corporation
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
@@ -315,7 +315,8 @@ struct bnx2x_vlan_mac_obj {
* @return zero if the element may be added
*/
- int (*check_add)(struct bnx2x_vlan_mac_obj *o,
+ int (*check_add)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data);
/**
@@ -324,7 +325,8 @@ struct bnx2x_vlan_mac_obj {
* @return true if the element may be deleted
*/
struct bnx2x_vlan_mac_registry_elem *
- (*check_del)(struct bnx2x_vlan_mac_obj *o,
+ (*check_del)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data);
/**
@@ -332,7 +334,8 @@ struct bnx2x_vlan_mac_obj {
*
* @return true if the element may be deleted
*/
- bool (*check_move)(struct bnx2x_vlan_mac_obj *src_o,
+ bool (*check_move)(struct bnx2x *bp,
+ struct bnx2x_vlan_mac_obj *src_o,
struct bnx2x_vlan_mac_obj *dst_o,
union bnx2x_classification_ramrod_data *data);
@@ -423,6 +426,13 @@ struct bnx2x_vlan_mac_obj {
int (*wait)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o);
};
+enum {
+ BNX2X_LLH_CAM_ISCSI_ETH_LINE = 0,
+ BNX2X_LLH_CAM_ETH_LINE,
+ BNX2X_LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
+};
+
+
/** RX_MODE verbs:DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* RX_MODE ramrod special flags: set in rx_mode_flags field in
@@ -774,6 +784,7 @@ enum bnx2x_queue_cmd {
enum {
BNX2X_Q_FLG_TPA,
BNX2X_Q_FLG_TPA_IPV6,
+ BNX2X_Q_FLG_TPA_GRO,
BNX2X_Q_FLG_STATS,
BNX2X_Q_FLG_ZERO_STATS,
BNX2X_Q_FLG_ACTIVE,
@@ -803,10 +814,10 @@ enum bnx2x_q_type {
};
#define BNX2X_PRIMARY_CID_INDEX 0
-#define BNX2X_MULTI_TX_COS_E1X 1
+#define BNX2X_MULTI_TX_COS_E1X 3 /* QM only */
#define BNX2X_MULTI_TX_COS_E2_E3A0 2
#define BNX2X_MULTI_TX_COS_E3B0 3
-#define BNX2X_MULTI_TX_COS BNX2X_MULTI_TX_COS_E3B0
+#define BNX2X_MULTI_TX_COS 3 /* Maximum possible */
struct bnx2x_queue_init_params {
@@ -889,6 +900,9 @@ struct bnx2x_rxq_setup_params {
u8 max_tpa_queues;
u8 rss_engine_id;
+ /* valid iff BNX2X_Q_FLG_MCAST */
+ u8 mcast_engine_id;
+
u8 cache_line_log;
u8 sb_cq_index;
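Among the bnx2x_sp.h additions, BNX2X_Q_FLG_TPA_GRO is the queue flag that bnx2x_q_fill_init_rx_data() (see the sp.c hunk above) folds into rx_data->tpa_en. A minimal caller-side sketch, where the local q_flags word is hypothetical:

	unsigned long q_flags = 0;

	__set_bit(BNX2X_Q_FLG_TPA, &q_flags);
	__set_bit(BNX2X_Q_FLG_TPA_GRO, &q_flags);	/* request GRO-style TPA */
	/* bnx2x_q_fill_init_rx_data() then sets both
	 * CLIENT_INIT_RX_DATA_TPA_EN_IPV4 and CLIENT_INIT_RX_DATA_TPA_MODE
	 * in client_init_rx_data.tpa_en for this queue. */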
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index a766b25eec5f..e1c9310fb07c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1,6 +1,6 @@
/* bnx2x_stats.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,7 +75,7 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
bp->fw_stats_req->hdr.drv_stats_counter =
cpu_to_le16(bp->stats_counter++);
- DP(NETIF_MSG_TIMER, "Sending statistics ramrod %d\n",
+ DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n",
bp->fw_stats_req->hdr.drv_stats_counter);
@@ -128,6 +128,8 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp)
} else if (bp->func_stx) {
*stats_comp = 0;
+ memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats,
+ sizeof(bp->func_stats));
bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
}
}
@@ -161,7 +163,7 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
u32 *stats_comp = bnx2x_sp(bp, stats_comp);
/* sanity */
- if (!IS_MF(bp) || !bp->port.pmf || !bp->port.port_stx) {
+ if (!bp->port.pmf || !bp->port.port_stx) {
BNX2X_ERR("BUG!\n");
return;
}
@@ -626,31 +628,30 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
tx_stat_dot3statsinternalmactransmiterrors);
ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl);
- ADD_64(estats->etherstatspkts1024octetsto1522octets_hi,
- new->stats_tx.tx_gt1518_hi,
- estats->etherstatspkts1024octetsto1522octets_lo,
- new->stats_tx.tx_gt1518_lo);
+ estats->etherstatspkts1024octetsto1522octets_hi =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi;
+ estats->etherstatspkts1024octetsto1522octets_lo =
+ pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo;
- ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt2047_hi,
- estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt2047_lo);
+ estats->etherstatspktsover1522octets_hi =
+ pstats->mac_stx[1].tx_stat_mac_2047_hi;
+ estats->etherstatspktsover1522octets_lo =
+ pstats->mac_stx[1].tx_stat_mac_2047_lo;
ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt4095_hi,
+ pstats->mac_stx[1].tx_stat_mac_4095_hi,
estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt4095_lo);
+ pstats->mac_stx[1].tx_stat_mac_4095_lo);
ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt9216_hi,
+ pstats->mac_stx[1].tx_stat_mac_9216_hi,
estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt9216_lo);
-
+ pstats->mac_stx[1].tx_stat_mac_9216_lo);
ADD_64(estats->etherstatspktsover1522octets_hi,
- new->stats_tx.tx_gt16383_hi,
+ pstats->mac_stx[1].tx_stat_mac_16383_hi,
estats->etherstatspktsover1522octets_lo,
- new->stats_tx.tx_gt16383_lo);
+ pstats->mac_stx[1].tx_stat_mac_16383_lo);
estats->pause_frames_received_hi =
pstats->mac_stx[1].rx_stat_mac_xpf_hi;
@@ -803,8 +804,9 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
&bp->fw_stats_data->port.tstorm_port_statistics;
struct tstorm_per_pf_stats *tfunc =
&bp->fw_stats_data->pf.tstorm_pf_statistics;
- struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
+ struct host_func_stats *fstats = &bp->func_stats;
struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old;
struct stats_counter *counters = &bp->fw_stats_data->storm_counters;
int i;
u16 cur_stats_counter;
@@ -818,48 +820,35 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
/* are storm stats valid? */
if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
- " xstorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->xstats_counter), bp->stats_counter);
return -EAGAIN;
}
if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by ustorm"
- " ustorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->ustats_counter), bp->stats_counter);
return -EAGAIN;
}
if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by cstorm"
- " cstorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->cstats_counter), bp->stats_counter);
return -EAGAIN;
}
if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) {
- DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
- " tstorm counter (0x%x) != stats_counter (0x%x)\n",
+ DP(BNX2X_MSG_STATS,
+ "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n",
le16_to_cpu(counters->tstats_counter), bp->stats_counter);
return -EAGAIN;
}
- memcpy(&(fstats->total_bytes_received_hi),
- &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
- sizeof(struct host_func_stats) - 2*sizeof(u32));
estats->error_bytes_received_hi = 0;
estats->error_bytes_received_lo = 0;
- estats->etherstatsoverrsizepkts_hi = 0;
- estats->etherstatsoverrsizepkts_lo = 0;
- estats->no_buff_discard_hi = 0;
- estats->no_buff_discard_lo = 0;
- estats->total_tpa_aggregations_hi = 0;
- estats->total_tpa_aggregations_lo = 0;
- estats->total_tpa_aggregated_frames_hi = 0;
- estats->total_tpa_aggregated_frames_lo = 0;
- estats->total_tpa_bytes_hi = 0;
- estats->total_tpa_bytes_lo = 0;
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -876,29 +865,22 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
xstorm_queue_statistics;
struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient;
struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+
u32 diff;
- DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, "
- "bcast_sent 0x%x mcast_sent 0x%x\n",
+ DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n",
i, xclient->ucast_pkts_sent,
xclient->bcast_pkts_sent, xclient->mcast_pkts_sent);
DP(BNX2X_MSG_STATS, "---------------\n");
- qstats->total_broadcast_bytes_received_hi =
- le32_to_cpu(tclient->rcv_bcast_bytes.hi);
- qstats->total_broadcast_bytes_received_lo =
- le32_to_cpu(tclient->rcv_bcast_bytes.lo);
-
- qstats->total_multicast_bytes_received_hi =
- le32_to_cpu(tclient->rcv_mcast_bytes.hi);
- qstats->total_multicast_bytes_received_lo =
- le32_to_cpu(tclient->rcv_mcast_bytes.lo);
-
- qstats->total_unicast_bytes_received_hi =
- le32_to_cpu(tclient->rcv_ucast_bytes.hi);
- qstats->total_unicast_bytes_received_lo =
- le32_to_cpu(tclient->rcv_ucast_bytes.lo);
+ UPDATE_QSTAT(tclient->rcv_bcast_bytes,
+ total_broadcast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_mcast_bytes,
+ total_multicast_bytes_received);
+ UPDATE_QSTAT(tclient->rcv_ucast_bytes,
+ total_unicast_bytes_received);
/*
* sum to total_bytes_received all
@@ -931,9 +913,9 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
total_multicast_packets_received);
UPDATE_EXTEND_TSTAT(rcv_bcast_pkts,
total_broadcast_packets_received);
- UPDATE_EXTEND_TSTAT(pkts_too_big_discard,
- etherstatsoverrsizepkts);
- UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
+ UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard,
+ etherstatsoverrsizepkts);
+ UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard);
SUB_EXTEND_USTAT(ucast_no_buff_pkts,
total_unicast_packets_received);
@@ -941,24 +923,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
total_multicast_packets_received);
SUB_EXTEND_USTAT(bcast_no_buff_pkts,
total_broadcast_packets_received);
- UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
- UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
- UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
-
- qstats->total_broadcast_bytes_transmitted_hi =
- le32_to_cpu(xclient->bcast_bytes_sent.hi);
- qstats->total_broadcast_bytes_transmitted_lo =
- le32_to_cpu(xclient->bcast_bytes_sent.lo);
-
- qstats->total_multicast_bytes_transmitted_hi =
- le32_to_cpu(xclient->mcast_bytes_sent.hi);
- qstats->total_multicast_bytes_transmitted_lo =
- le32_to_cpu(xclient->mcast_bytes_sent.lo);
-
- qstats->total_unicast_bytes_transmitted_hi =
- le32_to_cpu(xclient->ucast_bytes_sent.hi);
- qstats->total_unicast_bytes_transmitted_lo =
- le32_to_cpu(xclient->ucast_bytes_sent.lo);
+ UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard);
+ UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard);
+
+ UPDATE_QSTAT(xclient->bcast_bytes_sent,
+ total_broadcast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->mcast_bytes_sent,
+ total_multicast_bytes_transmitted);
+ UPDATE_QSTAT(xclient->ucast_bytes_sent,
+ total_unicast_bytes_transmitted);
+
/*
* sum to total_bytes_transmitted all
* unicast/multicast/broadcast
@@ -994,110 +969,54 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
total_transmitted_dropped_packets_error);
/* TPA aggregations completed */
- UPDATE_EXTEND_USTAT(coalesced_events, total_tpa_aggregations);
+ UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations);
/* Number of network frames aggregated by TPA */
- UPDATE_EXTEND_USTAT(coalesced_pkts,
- total_tpa_aggregated_frames);
+ UPDATE_EXTEND_E_USTAT(coalesced_pkts,
+ total_tpa_aggregated_frames);
/* Total number of bytes in completed TPA aggregations */
- qstats->total_tpa_bytes_lo =
- le32_to_cpu(uclient->coalesced_bytes.lo);
- qstats->total_tpa_bytes_hi =
- le32_to_cpu(uclient->coalesced_bytes.hi);
-
- /* TPA stats per-function */
- ADD_64(estats->total_tpa_aggregations_hi,
- qstats->total_tpa_aggregations_hi,
- estats->total_tpa_aggregations_lo,
- qstats->total_tpa_aggregations_lo);
- ADD_64(estats->total_tpa_aggregated_frames_hi,
- qstats->total_tpa_aggregated_frames_hi,
- estats->total_tpa_aggregated_frames_lo,
- qstats->total_tpa_aggregated_frames_lo);
- ADD_64(estats->total_tpa_bytes_hi,
- qstats->total_tpa_bytes_hi,
- estats->total_tpa_bytes_lo,
- qstats->total_tpa_bytes_lo);
-
- ADD_64(fstats->total_bytes_received_hi,
- qstats->total_bytes_received_hi,
- fstats->total_bytes_received_lo,
- qstats->total_bytes_received_lo);
- ADD_64(fstats->total_bytes_transmitted_hi,
- qstats->total_bytes_transmitted_hi,
- fstats->total_bytes_transmitted_lo,
- qstats->total_bytes_transmitted_lo);
- ADD_64(fstats->total_unicast_packets_received_hi,
- qstats->total_unicast_packets_received_hi,
- fstats->total_unicast_packets_received_lo,
- qstats->total_unicast_packets_received_lo);
- ADD_64(fstats->total_multicast_packets_received_hi,
- qstats->total_multicast_packets_received_hi,
- fstats->total_multicast_packets_received_lo,
- qstats->total_multicast_packets_received_lo);
- ADD_64(fstats->total_broadcast_packets_received_hi,
- qstats->total_broadcast_packets_received_hi,
- fstats->total_broadcast_packets_received_lo,
- qstats->total_broadcast_packets_received_lo);
- ADD_64(fstats->total_unicast_packets_transmitted_hi,
- qstats->total_unicast_packets_transmitted_hi,
- fstats->total_unicast_packets_transmitted_lo,
- qstats->total_unicast_packets_transmitted_lo);
- ADD_64(fstats->total_multicast_packets_transmitted_hi,
- qstats->total_multicast_packets_transmitted_hi,
- fstats->total_multicast_packets_transmitted_lo,
- qstats->total_multicast_packets_transmitted_lo);
- ADD_64(fstats->total_broadcast_packets_transmitted_hi,
- qstats->total_broadcast_packets_transmitted_hi,
- fstats->total_broadcast_packets_transmitted_lo,
- qstats->total_broadcast_packets_transmitted_lo);
- ADD_64(fstats->valid_bytes_received_hi,
- qstats->valid_bytes_received_hi,
- fstats->valid_bytes_received_lo,
- qstats->valid_bytes_received_lo);
-
- ADD_64(estats->etherstatsoverrsizepkts_hi,
- qstats->etherstatsoverrsizepkts_hi,
- estats->etherstatsoverrsizepkts_lo,
- qstats->etherstatsoverrsizepkts_lo);
- ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
- estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
+ UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes);
+
+ UPDATE_ESTAT_QSTAT_64(total_tpa_bytes);
+
+ UPDATE_FSTAT_QSTAT(total_bytes_received);
+ UPDATE_FSTAT_QSTAT(total_bytes_transmitted);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_received);
+ UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted);
+ UPDATE_FSTAT_QSTAT(valid_bytes_received);
}
- ADD_64(fstats->total_bytes_received_hi,
+ ADD_64(estats->total_bytes_received_hi,
estats->rx_stat_ifhcinbadoctets_hi,
- fstats->total_bytes_received_lo,
+ estats->total_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
- ADD_64(fstats->total_bytes_received_hi,
+ ADD_64(estats->total_bytes_received_hi,
le32_to_cpu(tfunc->rcv_error_bytes.hi),
- fstats->total_bytes_received_lo,
+ estats->total_bytes_received_lo,
le32_to_cpu(tfunc->rcv_error_bytes.lo));
- memcpy(estats, &(fstats->total_bytes_received_hi),
- sizeof(struct host_func_stats) - 2*sizeof(u32));
-
ADD_64(estats->error_bytes_received_hi,
le32_to_cpu(tfunc->rcv_error_bytes.hi),
estats->error_bytes_received_lo,
le32_to_cpu(tfunc->rcv_error_bytes.lo));
- ADD_64(estats->etherstatsoverrsizepkts_hi,
- estats->rx_stat_dot3statsframestoolong_hi,
- estats->etherstatsoverrsizepkts_lo,
- estats->rx_stat_dot3statsframestoolong_lo);
+ UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong);
+
ADD_64(estats->error_bytes_received_hi,
estats->rx_stat_ifhcinbadoctets_hi,
estats->error_bytes_received_lo,
estats->rx_stat_ifhcinbadoctets_lo);
if (bp->port.pmf) {
- estats->mac_filter_discard =
- le32_to_cpu(tport->mac_filter_discard);
- estats->mf_tag_discard =
- le32_to_cpu(tport->mf_tag_discard);
- estats->brb_truncate_discard =
- le32_to_cpu(tport->brb_truncate_discard);
- estats->mac_discard = le32_to_cpu(tport->mac_discard);
+ struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+ UPDATE_FW_STAT(mac_filter_discard);
+ UPDATE_FW_STAT(mf_tag_discard);
+ UPDATE_FW_STAT(brb_truncate_discard);
+ UPDATE_FW_STAT(mac_discard);
}
fstats->host_func_stats_start = ++fstats->host_func_stats_end;
@@ -1131,7 +1050,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
tmp = estats->mac_discard;
for_each_rx_queue(bp, i)
tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
- nstats->rx_dropped = tmp;
+ nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped;
nstats->tx_dropped = 0;
@@ -1179,17 +1098,15 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp)
struct bnx2x_eth_stats *estats = &bp->eth_stats;
int i;
- estats->driver_xoff = 0;
- estats->rx_err_discard_pkt = 0;
- estats->rx_skb_alloc_failed = 0;
- estats->hw_csum_err = 0;
for_each_queue(bp, i) {
struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old =
+ &bp->fp[i].eth_q_stats_old;
- estats->driver_xoff += qstats->driver_xoff;
- estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
- estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
- estats->hw_csum_err += qstats->hw_csum_err;
+ UPDATE_ESTAT_QSTAT(driver_xoff);
+ UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
+ UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
+ UPDATE_ESTAT_QSTAT(hw_csum_err);
}
}
@@ -1231,51 +1148,9 @@ static void bnx2x_stats_update(struct bnx2x *bp)
if (netif_msg_timer(bp)) {
struct bnx2x_eth_stats *estats = &bp->eth_stats;
- int i, cos;
netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n",
estats->brb_drop_lo, estats->brb_truncate_lo);
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
-
- pr_debug("%s: rx usage(%4u) *rx_cons_sb(%u) rx pkt(%lu) rx calls(%lu %lu)\n",
- fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
- fp->rx_comp_cons),
- le16_to_cpu(*fp->rx_cons_sb),
- bnx2x_hilo(&qstats->
- total_unicast_packets_received_hi),
- fp->rx_calls, fp->rx_pkt);
- }
-
- for_each_eth_queue(bp, i) {
- struct bnx2x_fastpath *fp = &bp->fp[i];
- struct bnx2x_fp_txdata *txdata;
- struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
- struct netdev_queue *txq;
-
- pr_debug("%s: tx pkt(%lu) (Xoff events %u)",
- fp->name,
- bnx2x_hilo(
- &qstats->total_unicast_packets_transmitted_hi),
- qstats->driver_xoff);
-
- for_each_cos_in_tx_queue(fp, cos) {
- txdata = &fp->txdata[cos];
- txq = netdev_get_tx_queue(bp->dev,
- FP_COS_TO_TXQ(fp, cos));
-
- pr_debug("%d: tx avail(%4u) *tx_cons_sb(%u) tx calls (%lu) %s\n",
- cos,
- bnx2x_tx_avail(bp, txdata),
- le16_to_cpu(*txdata->tx_cons_sb),
- txdata->tx_pkt,
- (netif_tx_queue_stopped(txq) ?
- "Xoff" : "Xon")
- );
- }
- }
}
bnx2x_hw_stats_post(bp);
@@ -1434,63 +1309,6 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
bnx2x_stats_comp(bp);
}
-static void bnx2x_func_stats_base_init(struct bnx2x *bp)
-{
- int vn, vn_max = IS_MF(bp) ? BP_MAX_VN_NUM(bp) : E1VN_MAX;
- u32 func_stx;
-
- /* sanity */
- if (!bp->port.pmf || !bp->func_stx) {
- BNX2X_ERR("BUG!\n");
- return;
- }
-
- /* save our func_stx */
- func_stx = bp->func_stx;
-
- for (vn = VN_0; vn < vn_max; vn++) {
- int mb_idx = BP_FW_MB_IDX_VN(bp, vn);
-
- bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param);
- bnx2x_func_stats_init(bp);
- bnx2x_hw_stats_post(bp);
- bnx2x_stats_comp(bp);
- }
-
- /* restore our func_stx */
- bp->func_stx = func_stx;
-}
-
-static void bnx2x_func_stats_base_update(struct bnx2x *bp)
-{
- struct dmae_command *dmae = &bp->stats_dmae;
- u32 *stats_comp = bnx2x_sp(bp, stats_comp);
-
- /* sanity */
- if (!bp->func_stx) {
- BNX2X_ERR("BUG!\n");
- return;
- }
-
- bp->executer_idx = 0;
- memset(dmae, 0, sizeof(struct dmae_command));
-
- dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI,
- true, DMAE_COMP_PCI);
- dmae->src_addr_lo = bp->func_stx >> 2;
- dmae->src_addr_hi = 0;
- dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
- dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
- dmae->len = sizeof(struct host_func_stats) >> 2;
- dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
- dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
- dmae->comp_val = DMAE_COMP_VAL;
-
- *stats_comp = 0;
- bnx2x_hw_stats_post(bp);
- bnx2x_stats_comp(bp);
-}
-
/**
* This function will prepare the statistics ramrod data the way
* we will only have to increment the statistics counter and
@@ -1641,6 +1459,10 @@ void bnx2x_stats_init(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
bp->port.port_stx, bp->func_stx);
+	/* pmf should retrieve port statistics from SP on a non-init */
+ if (!bp->stats_init && bp->port.pmf && bp->port.port_stx)
+ bnx2x_stats_handle(bp, STATS_EVENT_PMF);
+
port = BP_PORT(bp);
/* port stats */
memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
@@ -1662,24 +1484,80 @@ void bnx2x_stats_init(struct bnx2x *bp)
memset(&fp->old_tclient, 0, sizeof(fp->old_tclient));
memset(&fp->old_uclient, 0, sizeof(fp->old_uclient));
memset(&fp->old_xclient, 0, sizeof(fp->old_xclient));
- memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+ if (bp->stats_init) {
+ memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats));
+ memset(&fp->eth_q_stats_old, 0,
+ sizeof(fp->eth_q_stats_old));
+ }
}
/* Prepare statistics ramrod data */
bnx2x_prep_fw_stats_req(bp);
memset(&bp->dev->stats, 0, sizeof(bp->dev->stats));
- memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ if (bp->stats_init) {
+ memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old));
+ memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old));
+ memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old));
+ memset(&bp->eth_stats, 0, sizeof(bp->eth_stats));
+ memset(&bp->func_stats, 0, sizeof(bp->func_stats));
+
+ /* Clean SP from previous statistics */
+ if (bp->func_stx) {
+ memset(bnx2x_sp(bp, func_stats), 0,
+ sizeof(struct host_func_stats));
+ bnx2x_func_stats_init(bp);
+ bnx2x_hw_stats_post(bp);
+ bnx2x_stats_comp(bp);
+ }
+ }
bp->stats_state = STATS_STATE_DISABLED;
- if (bp->port.pmf) {
- if (bp->port.port_stx)
- bnx2x_port_stats_base_init(bp);
+ if (bp->port.pmf && bp->port.port_stx)
+ bnx2x_port_stats_base_init(bp);
- if (bp->func_stx)
- bnx2x_func_stats_base_init(bp);
+	/* mark the end of statistics initialization */
+ bp->stats_init = false;
+}
+
+void bnx2x_save_statistics(struct bnx2x *bp)
+{
+ int i;
+ struct net_device_stats *nstats = &bp->dev->stats;
+
+ /* save queue statistics */
+ for_each_eth_queue(bp, i) {
+ struct bnx2x_fastpath *fp = &bp->fp[i];
+ struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+ struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old;
+
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi);
+ UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo);
+ UPDATE_QSTAT_OLD(total_tpa_bytes_hi);
+ UPDATE_QSTAT_OLD(total_tpa_bytes_lo);
+ }
+
+ /* save net_device_stats statistics */
+ bp->net_stats_old.rx_dropped = nstats->rx_dropped;
- } else if (bp->func_stx)
- bnx2x_func_stats_base_update(bp);
+ /* store port firmware statistics */
+ if (bp->port.pmf && IS_MF(bp)) {
+ struct bnx2x_eth_stats *estats = &bp->eth_stats;
+ struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old;
+ UPDATE_FW_STAT_OLD(mac_filter_discard);
+ UPDATE_FW_STAT_OLD(mf_tag_discard);
+ UPDATE_FW_STAT_OLD(brb_truncate_discard);
+ UPDATE_FW_STAT_OLD(mac_discard);
+ }
}
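The new bnx2x_save_statistics() snapshots the per-queue byte counters (and a few port firmware counters) into the *_old structures so they survive a function reset. A minimal sketch of the intended flow, assuming a call site in the unload path that is not part of this hunk:

	/* on unload: preserve the running totals */
	bnx2x_save_statistics(bp);

	/* on the next bnx2x_storm_stats_update(), each counter is rebuilt as
	 * "preserved base + fresh firmware value", e.g. via UPDATE_QSTAT():
	 *	qstats->total_unicast_bytes_received_hi =
	 *		qstats_old->total_unicast_bytes_received_hi +
	 *		le32_to_cpu(tclient->rcv_ucast_bytes.hi);
	 */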
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 683deb053109..2b46e1eb7fd1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -1,6 +1,6 @@
/* bnx2x_stats.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2011 Broadcom Corporation
+ * Copyright (c) 2007-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -199,6 +199,10 @@ struct bnx2x_eth_stats {
u32 pfc_frames_received_lo;
u32 pfc_frames_sent_hi;
u32 pfc_frames_sent_lo;
+
+ /* Recovery */
+ u32 recoverable_error;
+ u32 unrecoverable_error;
};
@@ -260,6 +264,69 @@ struct bnx2x_eth_q_stats {
u32 total_tpa_bytes_lo;
};
+struct bnx2x_eth_stats_old {
+ u32 rx_stat_dot3statsframestoolong_hi;
+ u32 rx_stat_dot3statsframestoolong_lo;
+};
+
+struct bnx2x_eth_q_stats_old {
+	/* Fields to preserve over fw reset */
+ u32 total_unicast_bytes_received_hi;
+ u32 total_unicast_bytes_received_lo;
+ u32 total_broadcast_bytes_received_hi;
+ u32 total_broadcast_bytes_received_lo;
+ u32 total_multicast_bytes_received_hi;
+ u32 total_multicast_bytes_received_lo;
+ u32 total_unicast_bytes_transmitted_hi;
+ u32 total_unicast_bytes_transmitted_lo;
+ u32 total_broadcast_bytes_transmitted_hi;
+ u32 total_broadcast_bytes_transmitted_lo;
+ u32 total_multicast_bytes_transmitted_hi;
+ u32 total_multicast_bytes_transmitted_lo;
+ u32 total_tpa_bytes_hi;
+ u32 total_tpa_bytes_lo;
+
+	/* Fields to preserve the last value of */
+ u32 total_bytes_received_hi;
+ u32 total_bytes_received_lo;
+ u32 total_bytes_transmitted_hi;
+ u32 total_bytes_transmitted_lo;
+ u32 total_unicast_packets_received_hi;
+ u32 total_unicast_packets_received_lo;
+ u32 total_multicast_packets_received_hi;
+ u32 total_multicast_packets_received_lo;
+ u32 total_broadcast_packets_received_hi;
+ u32 total_broadcast_packets_received_lo;
+ u32 total_unicast_packets_transmitted_hi;
+ u32 total_unicast_packets_transmitted_lo;
+ u32 total_multicast_packets_transmitted_hi;
+ u32 total_multicast_packets_transmitted_lo;
+ u32 total_broadcast_packets_transmitted_hi;
+ u32 total_broadcast_packets_transmitted_lo;
+ u32 valid_bytes_received_hi;
+ u32 valid_bytes_received_lo;
+
+ u32 total_tpa_bytes_hi_old;
+ u32 total_tpa_bytes_lo_old;
+
+ u32 driver_xoff_old;
+ u32 rx_err_discard_pkt_old;
+ u32 rx_skb_alloc_failed_old;
+ u32 hw_csum_err_old;
+};
+
+struct bnx2x_net_stats_old {
+ u32 rx_dropped;
+};
+
+struct bnx2x_fw_port_stats_old {
+ u32 mac_filter_discard;
+ u32 mf_tag_discard;
+ u32 brb_truncate_discard;
+ u32 mac_discard;
+};
+
+
/****************************************************************************
* Macros
****************************************************************************/
@@ -344,6 +411,12 @@ struct bnx2x_eth_q_stats {
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
+#define UPDATE_EXTEND_E_TSTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_TSTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
#define UPDATE_EXTEND_USTAT(s, t) \
do { \
diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
@@ -351,6 +424,12 @@ struct bnx2x_eth_q_stats {
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
+#define UPDATE_EXTEND_E_USTAT(s, t) \
+ do { \
+ UPDATE_EXTEND_USTAT(s, t); \
+ ADD_EXTEND_64(estats->t##_hi, estats->t##_lo, diff); \
+ } while (0)
+
#define UPDATE_EXTEND_XSTAT(s, t) \
do { \
diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
@@ -358,6 +437,66 @@ struct bnx2x_eth_q_stats {
ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
} while (0)
+#define UPDATE_QSTAT(s, t) \
+ do { \
+ qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi); \
+ qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
+ } while (0)
+
+#define UPDATE_QSTAT_OLD(f) \
+ do { \
+ qstats_old->f = qstats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT_64(s) \
+ do { \
+ ADD_64(estats->s##_hi, qstats->s##_hi, \
+ estats->s##_lo, qstats->s##_lo); \
+ SUB_64(estats->s##_hi, qstats_old->s##_hi_old, \
+ estats->s##_lo, qstats_old->s##_lo_old); \
+ qstats_old->s##_hi_old = qstats->s##_hi; \
+ qstats_old->s##_lo_old = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_ESTAT_QSTAT(s) \
+ do { \
+ estats->s += qstats->s; \
+ estats->s -= qstats_old->s##_old; \
+ qstats_old->s##_old = qstats->s; \
+ } while (0)
+
+#define UPDATE_FSTAT_QSTAT(s) \
+ do { \
+ ADD_64(fstats->s##_hi, qstats->s##_hi, \
+ fstats->s##_lo, qstats->s##_lo); \
+ SUB_64(fstats->s##_hi, qstats_old->s##_hi, \
+ fstats->s##_lo, qstats_old->s##_lo); \
+ estats->s##_hi = fstats->s##_hi; \
+ estats->s##_lo = fstats->s##_lo; \
+ qstats_old->s##_hi = qstats->s##_hi; \
+ qstats_old->s##_lo = qstats->s##_lo; \
+ } while (0)
+
+#define UPDATE_FW_STAT(s) \
+ do { \
+ estats->s = le32_to_cpu(tport->s) + fwstats->s; \
+ } while (0)
+
+#define UPDATE_FW_STAT_OLD(f) \
+ do { \
+ fwstats->f = estats->f; \
+ } while (0)
+
+#define UPDATE_ESTAT(s, t) \
+ do { \
+ SUB_64(estats->s##_hi, estats_old->t##_hi, \
+ estats->s##_lo, estats_old->t##_lo); \
+ ADD_64(estats->s##_hi, estats->t##_hi, \
+ estats->s##_lo, estats->t##_lo); \
+ estats_old->t##_hi = estats->t##_hi; \
+ estats_old->t##_lo = estats->t##_lo; \
+ } while (0)
+
/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
do { \
@@ -384,4 +523,10 @@ void bnx2x_stats_init(struct bnx2x *bp);
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
+/**
+ * bnx2x_save_statistics - save statistics when unloading.
+ *
+ * @bp: driver handle
+ */
+void bnx2x_save_statistics(struct bnx2x *bp);
#endif /* BNX2X_STATS_H */
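For reference, expanding one of the new accumulation helpers by hand shows the same "carry the old value" pattern in scalar form; UPDATE_ESTAT_QSTAT(driver_xoff), as used in bnx2x_drv_stats_update() above, expands to:

	estats->driver_xoff += qstats->driver_xoff;
	estats->driver_xoff -= qstats_old->driver_xoff_old;
	qstats_old->driver_xoff_old = qstats->driver_xoff;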