author:     Linus Torvalds  2018-01-31 23:31:10 +0100
committer:  Linus Torvalds  2018-01-31 23:31:10 +0100
commit:     b2fe5fa68642860e7de76167c3111623aa0d5de1 (patch)
tree:       b7f9b89b7039ecefbc35fe3c8e73a6ff972641dd /drivers/net/ethernet/chelsio/cxgb4
parent:     Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert... (diff)
parent:     tls: Add support for encryption using async offload accelerator (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Significantly shrink the core networking routing structures. Result of http://vger.kernel.org/~davem/seoul2017_netdev_keynote.pdf
 2) Add netdevsim driver for testing various offloads, from Jakub Kicinski.
 3) Support cross-chip FDB operations in DSA, from Vivien Didelot.
 4) Add a 2nd listener hash table for TCP, similar to what was done for UDP. From Martin KaFai Lau.
 5) Add eBPF based queue selection to tun, from Jason Wang.
 6) Lockless qdisc support, from John Fastabend.
 7) SCTP stream interleave support, from Xin Long.
 8) Smoother TCP receive autotuning, from Eric Dumazet.
 9) Lots of erspan tunneling enhancements, from William Tu.
10) Add true function call support to BPF, from Alexei Starovoitov.
11) Add explicit support for GRO HW offloading, from Michael Chan.
12) Support extack generation in more netlink subsystems. From Alexander Aring, Quentin Monnet, and Jakub Kicinski.
13) Add 1000BaseX, flow control, and EEE support to mvneta driver. From Russell King.
14) Add flow table abstraction to netfilter, from Pablo Neira Ayuso.
15) Many improvements and simplifications to the NFP driver bpf JIT, from Jakub Kicinski.
16) Support for ipv6 non-equal cost multipath routing, from Ido Schimmel.
17) Add resource abstraction to devlink, from Arkadi Sharshevsky.
18) Packet scheduler classifier shared filter block support, from Jiri Pirko.
19) Avoid locking in act_csum, from Davide Caratti.
20) devinet_ioctl() simplifications from Al Viro.
21) More TCP bpf improvements from Lawrence Brakmo.
22) Add support for onlink ipv6 route flag, similar to ipv4, from David Ahern.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1925 commits)
  tls: Add support for encryption using async offload accelerator
  ip6mr: fix stale iterator
  net/sched: kconfig: Remove blank help texts
  openvswitch: meter: Use 64-bit arithmetic instead of 32-bit
  tcp_nv: fix potential integer overflow in tcpnv_acked
  r8169: fix RTL8168EP take too long to complete driver initialization.
  qmi_wwan: Add support for Quectel EP06
  rtnetlink: enable IFLA_IF_NETNSID for RTM_NEWLINK
  ipmr: Fix ptrdiff_t print formatting
  ibmvnic: Wait for device response when changing MAC
  qlcnic: fix deadlock bug
  tcp: release sk_frag.page in tcp_disconnect
  ipv4: Get the address of interface correctly.
  net_sched: gen_estimator: fix lockdep splat
  net: macb: Handle HRESP error
  net/mlx5e: IPoIB, Fix copy-paste bug in flow steering refactoring
  ipv6: addrconf: break critical section in addrconf_verify_rtnl()
  ipv6: change route cache aging logic
  i40e/i40evf: Update DESC_NEEDED value to reflect larger value
  bnxt_en: cleanup DIM work on device shutdown
  ...
Diffstat (limited to 'drivers/net/ethernet/chelsio/cxgb4')
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile            |    2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c      |   24
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h      |  156
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h          |   10
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c         | 1173
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h         |   18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h  |    8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c        |   82
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h        |   43
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h             |   46
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c       |  117
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h       |    4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c     |  304
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c     |  107
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c      |  140
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c        |  605
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c   |   21
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c      |    8
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c               |  221
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c             |  269
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h             |   45
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h            |  164
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h     |    2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h           |   35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h          |   45
25 files changed, 2772 insertions(+), 877 deletions(-)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 8c9c6b0d2e5d..53b6a02c778e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o smt.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o \
cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o \
cxgb4_ptp.o cxgb4_tc_flower.o cxgb4_cudbg.o \
- cudbg_common.o cudbg_lib.o
+ cudbg_common.o cudbg_lib.o cudbg_zlib.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
index f78ba1743b5a..8edc49827af0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_common.c
@@ -19,7 +19,8 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff)
{
u32 offset;
@@ -28,17 +29,30 @@ int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
if (offset + size > pdbg_buff->size)
return CUDBG_STATUS_NO_MEM;
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE) {
+ if (size > pdbg_init->compress_buff_size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_init->compress_buff;
+ pin_buff->offset = 0;
+ pin_buff->size = size;
+ return 0;
+ }
+
pin_buff->data = (char *)pdbg_buff->data + offset;
pin_buff->offset = offset;
pin_buff->size = size;
- pdbg_buff->size -= size;
return 0;
}
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff)
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff)
{
- pdbg_buff->size += pin_buff->size;
+ /* Clear compression buffer for re-use */
+ if (pdbg_init->compress_type != CUDBG_COMPRESSION_NONE)
+ memset(pdbg_init->compress_buff, 0,
+ pdbg_init->compress_buff_size);
+
pin_buff->data = NULL;
pin_buff->offset = 0;
pin_buff->size = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
index 605689957496..b57acb8dc35b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_entity.h
@@ -18,17 +18,15 @@
#ifndef __CUDBG_ENTITY_H__
#define __CUDBG_ENTITY_H__
-#define EDC0_FLAG 3
-#define EDC1_FLAG 4
+#define EDC0_FLAG 0
+#define EDC1_FLAG 1
+#define MC_FLAG 2
+#define MC0_FLAG 3
+#define MC1_FLAG 4
+#define HMA_FLAG 5
#define CUDBG_ENTITY_SIGNATURE 0xCCEDB001
-struct card_mem {
- u16 size_edc0;
- u16 size_edc1;
- u16 mem_flag;
-};
-
struct cudbg_mbox_log {
struct mbox_cmd entry;
u32 hi[MBOX_LEN / 8];
@@ -87,6 +85,48 @@ struct cudbg_tp_la {
u8 data[0];
};
+static const char * const cudbg_region[] = {
+ "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
+ "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
+ "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
+ "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
+ "RQUDP region:", "PBL region:", "TXPBL region:",
+ "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
+ "On-chip queues:"
+};
+
+/* Memory region info relative to current memory (i.e. wrt 0). */
+struct cudbg_region_info {
+ bool exist; /* Does region exists in current memory? */
+ u32 start; /* Start wrt 0 */
+ u32 end; /* End wrt 0 */
+};
+
+struct cudbg_mem_desc {
+ u32 base;
+ u32 limit;
+ u32 idx;
+};
+
+struct cudbg_meminfo {
+ struct cudbg_mem_desc avail[4];
+ struct cudbg_mem_desc mem[ARRAY_SIZE(cudbg_region) + 3];
+ u32 avail_c;
+ u32 mem_c;
+ u32 up_ram_lo;
+ u32 up_ram_hi;
+ u32 up_extmem2_lo;
+ u32 up_extmem2_hi;
+ u32 rx_pages_data[3];
+ u32 tx_pages_data[4];
+ u32 p_structs;
+ u32 reserved[12];
+ u32 port_used[4];
+ u32 port_alloc[4];
+ u32 loopback_used[NCHAN];
+ u32 loopback_alloc[NCHAN];
+};
+
struct cudbg_cim_pif_la {
int size;
u8 data[0];
@@ -145,6 +185,7 @@ struct cudbg_tid_info_region_rev1 {
u32 reserved[16];
};
+#define CUDBG_LOWMEM_MAX_CTXT_QIDS 256
#define CUDBG_MAX_FL_QIDS 1024
struct cudbg_ch_cntxt {
@@ -334,6 +375,25 @@ static const u32 t5_pm_tx_array[][IREG_NUM_ELEM] = {
{0x8FF0, 0x8FF4, 0x10021, 0x1D}, /* t5_pm_tx_regs_10021_to_1003c */
};
+#define CUDBG_NUM_PCIE_CONFIG_REGS 0x61
+
+static const u32 t5_pcie_config_array[][2] = {
+ {0x0, 0x34},
+ {0x3c, 0x40},
+ {0x50, 0x64},
+ {0x70, 0x80},
+ {0x94, 0xa0},
+ {0xb0, 0xb8},
+ {0xd0, 0xd4},
+ {0x100, 0x128},
+ {0x140, 0x148},
+ {0x150, 0x164},
+ {0x170, 0x178},
+ {0x180, 0x194},
+ {0x1a0, 0x1b8},
+ {0x1c0, 0x208},
+};
+
static const u32 t6_ma_ireg_array[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xa000, 23}, /* t6_ma_regs_a000_to_a016 */
{0x78f8, 0x78fc, 0xa400, 30}, /* t6_ma_regs_a400_to_a41e */
@@ -345,37 +405,55 @@ static const u32 t6_ma_ireg_array2[][IREG_NUM_ELEM] = {
{0x78f8, 0x78fc, 0xe640, 13} /* t6_ma_regs_e640_to_e7c0 */
};
-static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x1d}, /* up_cim_2080_to_20fc */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
-
-};
-
-static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM] = {
- {0x7b50, 0x7b54, 0x2000, 0x20}, /* up_cim_2000_to_207c */
- {0x7b50, 0x7b54, 0x2080, 0x19}, /* up_cim_2080_to_20ec */
- {0x7b50, 0x7b54, 0x00, 0x20}, /* up_cim_00_to_7c */
- {0x7b50, 0x7b54, 0x80, 0x20}, /* up_cim_80_to_fc */
- {0x7b50, 0x7b54, 0x100, 0x11}, /* up_cim_100_to_14c */
- {0x7b50, 0x7b54, 0x200, 0x10}, /* up_cim_200_to_23c */
- {0x7b50, 0x7b54, 0x240, 0x2}, /* up_cim_240_to_244 */
- {0x7b50, 0x7b54, 0x250, 0x2}, /* up_cim_250_to_254 */
- {0x7b50, 0x7b54, 0x260, 0x2}, /* up_cim_260_to_264 */
- {0x7b50, 0x7b54, 0x270, 0x2}, /* up_cim_270_to_274 */
- {0x7b50, 0x7b54, 0x280, 0x20}, /* up_cim_280_to_2fc */
- {0x7b50, 0x7b54, 0x300, 0x20}, /* up_cim_300_to_37c */
- {0x7b50, 0x7b54, 0x380, 0x14}, /* up_cim_380_to_3cc */
+static const u32 t6_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x1d, 0}, /* up_cim_2080_to_20fc */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2920, 0x10, 0x10}, /* up_cim_2920_to_2a10 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2a14 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
+};
+
+static const u32 t5_up_cim_reg_array[][IREG_NUM_ELEM + 1] = {
+ {0x7b50, 0x7b54, 0x2000, 0x20, 0}, /* up_cim_2000_to_207c */
+ {0x7b50, 0x7b54, 0x2080, 0x19, 0}, /* up_cim_2080_to_20ec */
+ {0x7b50, 0x7b54, 0x00, 0x20, 0}, /* up_cim_00_to_7c */
+ {0x7b50, 0x7b54, 0x80, 0x20, 0}, /* up_cim_80_to_fc */
+ {0x7b50, 0x7b54, 0x100, 0x11, 0}, /* up_cim_100_to_14c */
+ {0x7b50, 0x7b54, 0x200, 0x10, 0}, /* up_cim_200_to_23c */
+ {0x7b50, 0x7b54, 0x240, 0x2, 0}, /* up_cim_240_to_244 */
+ {0x7b50, 0x7b54, 0x250, 0x2, 0}, /* up_cim_250_to_254 */
+ {0x7b50, 0x7b54, 0x260, 0x2, 0}, /* up_cim_260_to_264 */
+ {0x7b50, 0x7b54, 0x270, 0x2, 0}, /* up_cim_270_to_274 */
+ {0x7b50, 0x7b54, 0x280, 0x20, 0}, /* up_cim_280_to_2fc */
+ {0x7b50, 0x7b54, 0x300, 0x20, 0}, /* up_cim_300_to_37c */
+ {0x7b50, 0x7b54, 0x380, 0x14, 0}, /* up_cim_380_to_3cc */
+ {0x7b50, 0x7b54, 0x2900, 0x4, 0x4}, /* up_cim_2900_to_3d40 */
+ {0x7b50, 0x7b54, 0x2904, 0x4, 0x4}, /* up_cim_2904_to_3d44 */
+ {0x7b50, 0x7b54, 0x2908, 0x4, 0x4}, /* up_cim_2908_to_3d48 */
+ {0x7b50, 0x7b54, 0x2910, 0x4, 0x4}, /* up_cim_2910_to_3d4c */
+ {0x7b50, 0x7b54, 0x2914, 0x4, 0x4}, /* up_cim_2914_to_3d50 */
+ {0x7b50, 0x7b54, 0x2918, 0x4, 0x4}, /* up_cim_2918_to_3d54 */
+ {0x7b50, 0x7b54, 0x291c, 0x4, 0x4}, /* up_cim_291c_to_3d58 */
+ {0x7b50, 0x7b54, 0x2924, 0x10, 0x10}, /* up_cim_2924_to_2914 */
+ {0x7b50, 0x7b54, 0x2928, 0x10, 0x10}, /* up_cim_2928_to_2a18 */
+ {0x7b50, 0x7b54, 0x292c, 0x10, 0x10}, /* up_cim_292c_to_2a1c */
};
static const u32 t6_hma_ireg_array[][IREG_NUM_ELEM] = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
index e10ff1ee62c5..8568a51f6414 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_if.h
@@ -21,6 +21,7 @@
/* Error codes */
#define CUDBG_STATUS_NO_MEM -19
#define CUDBG_STATUS_ENTITY_NOT_FOUND -24
+#define CUDBG_STATUS_NOT_IMPLEMENTED -28
#define CUDBG_SYSTEM_ERROR -29
#define CUDBG_STATUS_CCLK_NOT_DEFINED -32
@@ -47,6 +48,8 @@ enum cudbg_dbg_entity_type {
CUDBG_CIM_OBQ_NCSI = 17,
CUDBG_EDC0 = 18,
CUDBG_EDC1 = 19,
+ CUDBG_MC0 = 20,
+ CUDBG_MC1 = 21,
CUDBG_RSS = 22,
CUDBG_RSS_VF_CONF = 25,
CUDBG_PATH_MTU = 27,
@@ -56,6 +59,7 @@ enum cudbg_dbg_entity_type {
CUDBG_SGE_INDIRECT = 37,
CUDBG_ULPRX_LA = 41,
CUDBG_TP_LA = 43,
+ CUDBG_MEMINFO = 44,
CUDBG_CIM_PIF_LA = 45,
CUDBG_CLK = 46,
CUDBG_CIM_OBQ_RXQ0 = 47,
@@ -63,6 +67,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PCIE_INDIRECT = 50,
CUDBG_PM_INDIRECT = 51,
CUDBG_TID_INFO = 54,
+ CUDBG_PCIE_CONFIG = 55,
CUDBG_DUMP_CONTEXT = 56,
CUDBG_MPS_TCAM = 57,
CUDBG_VPD_DATA = 58,
@@ -74,6 +79,7 @@ enum cudbg_dbg_entity_type {
CUDBG_PBT_TABLE = 65,
CUDBG_MBOX_LOG = 66,
CUDBG_HMA_INDIRECT = 67,
+ CUDBG_HMA = 68,
CUDBG_MAX_ENTITY = 70,
};
@@ -81,6 +87,10 @@ struct cudbg_init {
struct adapter *adap; /* Pointer to adapter structure */
void *outbuf; /* Output buffer */
u32 outbuf_size; /* Output buffer size */
+ u8 compress_type; /* Type of compression to use */
+ void *compress_buff; /* Compression buffer */
+ u32 compress_buff_size; /* Compression buffer size */
+ void *workspace; /* Workspace for zlib */
};
static inline unsigned int cudbg_mbytes_to_bytes(unsigned int size)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d699bf88d18f..557fd8bfd54e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -15,18 +15,65 @@
*
*/
+#include <linux/sort.h>
+
#include "t4_regs.h"
#include "cxgb4.h"
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
-#include "cudbg_lib.h"
#include "cudbg_entity.h"
+#include "cudbg_lib.h"
+#include "cudbg_zlib.h"
-static void cudbg_write_and_release_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *dbg_buff)
+static int cudbg_do_compression(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
{
- cudbg_update_buff(pin_buff, dbg_buff);
- cudbg_put_buff(pin_buff, dbg_buff);
+ struct cudbg_buffer temp_in_buff = { 0 };
+ int bytes_left, bytes_read, bytes;
+ u32 offset = dbg_buff->offset;
+ int rc;
+
+ temp_in_buff.offset = pin_buff->offset;
+ temp_in_buff.data = pin_buff->data;
+ temp_in_buff.size = pin_buff->size;
+
+ bytes_left = pin_buff->size;
+ bytes_read = 0;
+ while (bytes_left > 0) {
+ /* Do compression in smaller chunks */
+ bytes = min_t(unsigned long, bytes_left,
+ (unsigned long)CUDBG_CHUNK_SIZE);
+ temp_in_buff.data = (char *)pin_buff->data + bytes_read;
+ temp_in_buff.size = bytes;
+ rc = cudbg_compress_buff(pdbg_init, &temp_in_buff, dbg_buff);
+ if (rc)
+ return rc;
+ bytes_left -= bytes;
+ bytes_read += bytes;
+ }
+
+ pin_buff->size = dbg_buff->offset - offset;
+ return 0;
+}
+
+static int cudbg_write_and_release_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *dbg_buff)
+{
+ int rc = 0;
+
+ if (pdbg_init->compress_type == CUDBG_COMPRESSION_NONE) {
+ cudbg_update_buff(pin_buff, dbg_buff);
+ } else {
+ rc = cudbg_do_compression(pdbg_init, pin_buff, dbg_buff);
+ if (rc)
+ goto out;
+ }
+
+out:
+ cudbg_put_buff(pdbg_init, pin_buff);
+ return rc;
}
static int is_fw_attached(struct cudbg_init *pdbg_init)
@@ -84,6 +131,277 @@ static int cudbg_read_vpd_reg(struct adapter *padap, u32 addr, u32 len,
return 0;
}
+static int cudbg_mem_desc_cmp(const void *a, const void *b)
+{
+ return ((const struct cudbg_mem_desc *)a)->base -
+ ((const struct cudbg_mem_desc *)b)->base;
+}
+
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff)
+{
+ struct cudbg_mem_desc *md;
+ u32 lo, hi, used, alloc;
+ int n, i;
+
+ memset(meminfo_buff->avail, 0,
+ ARRAY_SIZE(meminfo_buff->avail) *
+ sizeof(struct cudbg_mem_desc));
+ memset(meminfo_buff->mem, 0,
+ (ARRAY_SIZE(cudbg_region) + 3) * sizeof(struct cudbg_mem_desc));
+ md = meminfo_buff->mem;
+
+ for (i = 0; i < ARRAY_SIZE(meminfo_buff->mem); i++) {
+ meminfo_buff->mem[i].limit = 0;
+ meminfo_buff->mem[i].idx = i;
+ }
+
+ /* Find and sort the populated memory ranges */
+ i = 0;
+ lo = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
+ if (lo & EDRAM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM0_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM0_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 0;
+ i++;
+ }
+
+ if (lo & EDRAM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EDRAM1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EDRAM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EDRAM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 1;
+ i++;
+ }
+
+ if (is_t5(padap->params.chip)) {
+ if (lo & EXT_MEM0_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY0_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 3;
+ i++;
+ }
+
+ if (lo & EXT_MEM1_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 4;
+ i++;
+ }
+ } else {
+ if (lo & EXT_MEM_ENABLE_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 2;
+ i++;
+ }
+
+ if (lo & HMA_MUX_F) {
+ hi = t4_read_reg(padap, MA_EXT_MEMORY1_BAR_A);
+ meminfo_buff->avail[i].base =
+ cudbg_mbytes_to_bytes(EXT_MEM1_BASE_G(hi));
+ meminfo_buff->avail[i].limit =
+ meminfo_buff->avail[i].base +
+ cudbg_mbytes_to_bytes(EXT_MEM1_SIZE_G(hi));
+ meminfo_buff->avail[i].idx = 5;
+ i++;
+ }
+ }
+
+ if (!i) /* no memory available */
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+
+ meminfo_buff->avail_c = i;
+ sort(meminfo_buff->avail, i, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+ (md++)->base = t4_read_reg(padap, SGE_DBQ_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_IMSG_CTXT_BADDR_A);
+ (md++)->base = t4_read_reg(padap, SGE_FLM_CACHE_BADDR_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TCB_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_TIMER_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_RX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_TX_FLST_BASE_A);
+ (md++)->base = t4_read_reg(padap, TP_CMM_MM_PS_FLST_BASE_A);
+
+ /* the next few have explicit upper bounds */
+ md->base = t4_read_reg(padap, TP_PMM_TX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A) *
+ PMTXMAXPAGE_G(t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A));
+ md++;
+
+ md->base = t4_read_reg(padap, TP_PMM_RX_BASE_A);
+ md->limit = md->base - 1 +
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) *
+ PMRXMAXPAGE_G(t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A));
+ md++;
+
+ if (t4_read_reg(padap, LE_DB_CONFIG_A) & HASHEN_F) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) <= CHELSIO_T5) {
+ hi = t4_read_reg(padap, LE_DB_TID_HASHBASE_A) / 4;
+ md->base = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ } else {
+ hi = t4_read_reg(padap, LE_DB_HASH_TID_BASE_A);
+ md->base = t4_read_reg(padap,
+ LE_DB_HASH_TBL_BASE_ADDR_A);
+ }
+ md->limit = 0;
+ } else {
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ }
+ md++;
+
+#define ulp_region(reg) do { \
+ md->base = t4_read_reg(padap, ULP_ ## reg ## _LLIMIT_A);\
+ (md++)->limit = t4_read_reg(padap, ULP_ ## reg ## _ULIMIT_A);\
+} while (0)
+
+ ulp_region(RX_ISCSI);
+ ulp_region(RX_TDDP);
+ ulp_region(TX_TPT);
+ ulp_region(RX_STAG);
+ ulp_region(RX_RQ);
+ ulp_region(RX_RQUDP);
+ ulp_region(RX_PBL);
+ ulp_region(TX_PBL);
+#undef ulp_region
+ md->base = 0;
+ md->idx = ARRAY_SIZE(cudbg_region);
+ if (!is_t4(padap->params.chip)) {
+ u32 fifo_size = t4_read_reg(padap, SGE_DBVFIFO_SIZE_A);
+ u32 sge_ctrl = t4_read_reg(padap, SGE_CONTROL2_A);
+ u32 size = 0;
+
+ if (is_t5(padap->params.chip)) {
+ if (sge_ctrl & VFIFO_ENABLE_F)
+ size = DBVFIFO_SIZE_G(fifo_size);
+ } else {
+ size = T6_DBVFIFO_SIZE_G(fifo_size);
+ }
+
+ if (size) {
+ md->base = BASEADDR_G(t4_read_reg(padap,
+ SGE_DBVFIFO_BADDR_A));
+ md->limit = md->base + (size << 2) - 1;
+ }
+ }
+
+ md++;
+
+ md->base = t4_read_reg(padap, ULP_RX_CTX_BASE_A);
+ md->limit = 0;
+ md++;
+ md->base = t4_read_reg(padap, ULP_TX_ERR_TABLE_BASE_A);
+ md->limit = 0;
+ md++;
+
+ md->base = padap->vres.ocq.start;
+ if (padap->vres.ocq.size)
+ md->limit = md->base + padap->vres.ocq.size - 1;
+ else
+ md->idx = ARRAY_SIZE(cudbg_region); /* hide it */
+ md++;
+
+ /* add any address-space holes, there can be up to 3 */
+ for (n = 0; n < i - 1; n++)
+ if (meminfo_buff->avail[n].limit <
+ meminfo_buff->avail[n + 1].base)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ if (meminfo_buff->avail[n].limit)
+ (md++)->base = meminfo_buff->avail[n].limit;
+
+ n = md - meminfo_buff->mem;
+ meminfo_buff->mem_c = n;
+
+ sort(meminfo_buff->mem, n, sizeof(struct cudbg_mem_desc),
+ cudbg_mem_desc_cmp, NULL);
+
+ lo = t4_read_reg(padap, CIM_SDRAM_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_ram_lo = lo;
+ meminfo_buff->up_ram_hi = hi;
+
+ lo = t4_read_reg(padap, CIM_EXTMEM2_BASE_ADDR_A);
+ hi = t4_read_reg(padap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
+ meminfo_buff->up_extmem2_lo = lo;
+ meminfo_buff->up_extmem2_hi = hi;
+
+ lo = t4_read_reg(padap, TP_PMM_RX_MAX_PAGE_A);
+ meminfo_buff->rx_pages_data[0] = PMRXMAXPAGE_G(lo);
+ meminfo_buff->rx_pages_data[1] =
+ t4_read_reg(padap, TP_PMM_RX_PAGE_SIZE_A) >> 10;
+ meminfo_buff->rx_pages_data[2] = (lo & PMRXNUMCHN_F) ? 2 : 1;
+
+ lo = t4_read_reg(padap, TP_PMM_TX_MAX_PAGE_A);
+ hi = t4_read_reg(padap, TP_PMM_TX_PAGE_SIZE_A);
+ meminfo_buff->tx_pages_data[0] = PMTXMAXPAGE_G(lo);
+ meminfo_buff->tx_pages_data[1] =
+ hi >= (1 << 20) ? (hi >> 20) : (hi >> 10);
+ meminfo_buff->tx_pages_data[2] =
+ hi >= (1 << 20) ? 'M' : 'K';
+ meminfo_buff->tx_pages_data[3] = 1 << PMTXNUMCHN_G(lo);
+
+ meminfo_buff->p_structs = t4_read_reg(padap, TP_CMM_MM_MAX_PSTRUCT_A);
+
+ for (i = 0; i < 4; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV0_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->port_used[i] = used;
+ meminfo_buff->port_alloc[i] = alloc;
+ }
+
+ for (i = 0; i < padap->params.arch.nchan; i++) {
+ if (CHELSIO_CHIP_VERSION(padap->params.chip) > CHELSIO_T5)
+ lo = t4_read_reg(padap,
+ MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
+ else
+ lo = t4_read_reg(padap, MPS_RX_PG_RSV4_A + i * 4);
+ if (is_t5(padap->params.chip)) {
+ used = T5_USED_G(lo);
+ alloc = T5_ALLOC_G(lo);
+ } else {
+ used = USED_G(lo);
+ alloc = ALLOC_G(lo);
+ }
+ meminfo_buff->loopback_used[i] = used;
+ meminfo_buff->loopback_alloc[i] = alloc;
+ }
+
+ return 0;
+}
+
int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
@@ -98,12 +416,11 @@ int cudbg_collect_reg_dump(struct cudbg_init *pdbg_init,
else if (is_t5(padap->params.chip) || is_t6(padap->params.chip))
buf_size = T5_REGMAP_SIZE;
- rc = cudbg_get_buff(dbg_buff, buf_size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, buf_size, &temp_buff);
if (rc)
return rc;
t4_get_regs(padap, (void *)temp_buff.data, temp_buff.size);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
@@ -122,7 +439,7 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
}
dparams = &padap->params.devlog;
- rc = cudbg_get_buff(dbg_buff, dparams->size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, dparams->size, &temp_buff);
if (rc)
return rc;
@@ -137,12 +454,11 @@ int cudbg_collect_fw_devlog(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
@@ -163,14 +479,14 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
}
size += sizeof(cfg);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
rc = t4_cim_read(padap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -180,11 +496,10 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
NULL);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
@@ -196,7 +511,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
int size, rc;
size = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -204,8 +519,7 @@ int cudbg_collect_cim_ma_la(struct cudbg_init *pdbg_init,
(u32 *)temp_buff.data,
(u32 *)((char *)temp_buff.data +
5 * CIM_MALA_SIZE));
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
@@ -217,7 +531,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
struct cudbg_cim_qcfg *cim_qcfg_data;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_cim_qcfg),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_cim_qcfg),
&temp_buff);
if (rc)
return rc;
@@ -228,7 +542,7 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
ARRAY_SIZE(cim_qcfg_data->stat), cim_qcfg_data->stat);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -237,14 +551,13 @@ int cudbg_collect_cim_qcfg(struct cudbg_init *pdbg_init,
cim_qcfg_data->obq_wr);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
t4_read_cimq_cfg(padap, cim_qcfg_data->base, cim_qcfg_data->size,
cim_qcfg_data->thres);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
@@ -258,7 +571,7 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
/* collect CIM IBQ */
qsize = CIM_IBQ_SIZE * 4 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -272,11 +585,10 @@ static int cudbg_read_cim_ibq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_ibq_tp0(struct cudbg_init *pdbg_init,
@@ -343,7 +655,7 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
/* collect CIM OBQ */
qsize = cudbg_cim_obq_size(padap, qid);
- rc = cudbg_get_buff(dbg_buff, qsize, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, qsize, &temp_buff);
if (rc)
return rc;
@@ -357,11 +669,10 @@ static int cudbg_read_cim_obq(struct cudbg_init *pdbg_init,
else
rc = no_of_read_words;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_obq_ulp0(struct cudbg_init *pdbg_init,
@@ -420,23 +731,211 @@ int cudbg_collect_obq_sge_rx_q1(struct cudbg_init *pdbg_init,
return cudbg_read_cim_obq(pdbg_init, dbg_buff, cudbg_err, 7);
}
+static int cudbg_meminfo_get_mem_index(struct adapter *padap,
+ struct cudbg_meminfo *mem_info,
+ u8 mem_type, u8 *idx)
+{
+ u8 i, flag;
+
+ switch (mem_type) {
+ case MEM_EDC0:
+ flag = EDC0_FLAG;
+ break;
+ case MEM_EDC1:
+ flag = EDC1_FLAG;
+ break;
+ case MEM_MC0:
+ /* Some T5 cards have both MC0 and MC1. */
+ flag = is_t5(padap->params.chip) ? MC0_FLAG : MC_FLAG;
+ break;
+ case MEM_MC1:
+ flag = MC1_FLAG;
+ break;
+ case MEM_HMA:
+ flag = HMA_FLAG;
+ break;
+ default:
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+ }
+
+ for (i = 0; i < mem_info->avail_c; i++) {
+ if (mem_info->avail[i].idx == flag) {
+ *idx = i;
+ return 0;
+ }
+ }
+
+ return CUDBG_STATUS_ENTITY_NOT_FOUND;
+}
+
+/* Fetch the @region_name's start and end from @meminfo. */
+static int cudbg_get_mem_region(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, const char *region_name,
+ struct cudbg_mem_desc *mem_desc)
+{
+ u8 mc, found = 0;
+ u32 i, idx = 0;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < ARRAY_SIZE(cudbg_region); i++) {
+ if (!strcmp(cudbg_region[i], region_name)) {
+ found = 1;
+ idx = i;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ found = 0;
+ for (i = 0; i < meminfo->mem_c; i++) {
+ if (meminfo->mem[i].idx >= ARRAY_SIZE(cudbg_region))
+ continue; /* Skip holes */
+
+ if (!(meminfo->mem[i].limit))
+ meminfo->mem[i].limit =
+ i < meminfo->mem_c - 1 ?
+ meminfo->mem[i + 1].base - 1 : ~0;
+
+ if (meminfo->mem[i].idx == idx) {
+ /* Check if the region exists in @mem_type memory */
+ if (meminfo->mem[i].base < meminfo->avail[mc].base &&
+ meminfo->mem[i].limit < meminfo->avail[mc].base)
+ return -EINVAL;
+
+ if (meminfo->mem[i].base > meminfo->avail[mc].limit)
+ return -EINVAL;
+
+ memcpy(mem_desc, &meminfo->mem[i],
+ sizeof(struct cudbg_mem_desc));
+ found = 1;
+ break;
+ }
+ }
+ if (!found)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Fetch and update the start and end of the requested memory region w.r.t 0
+ * in the corresponding EDC/MC/HMA.
+ */
+static int cudbg_get_mem_relative(struct adapter *padap,
+ struct cudbg_meminfo *meminfo,
+ u8 mem_type, u32 *out_base, u32 *out_end)
+{
+ u8 mc_idx;
+ int rc;
+
+ rc = cudbg_meminfo_get_mem_index(padap, meminfo, mem_type, &mc_idx);
+ if (rc)
+ return rc;
+
+ if (*out_base < meminfo->avail[mc_idx].base)
+ *out_base = 0;
+ else
+ *out_base -= meminfo->avail[mc_idx].base;
+
+ if (*out_end > meminfo->avail[mc_idx].limit)
+ *out_end = meminfo->avail[mc_idx].limit;
+ else
+ *out_end -= meminfo->avail[mc_idx].base;
+
+ return 0;
+}
+
+/* Get TX and RX Payload region */
+static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
+ const char *region_name,
+ struct cudbg_region_info *payload)
+{
+ struct cudbg_mem_desc mem_desc = { 0 };
+ struct cudbg_meminfo meminfo;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ rc = cudbg_get_mem_region(padap, &meminfo, mem_type, region_name,
+ &mem_desc);
+ if (rc) {
+ payload->exist = false;
+ return 0;
+ }
+
+ payload->exist = true;
+ payload->start = mem_desc.base;
+ payload->end = mem_desc.limit;
+
+ return cudbg_get_mem_relative(padap, &meminfo, mem_type,
+ &payload->start, &payload->end);
+}
+
+#define CUDBG_YIELD_ITERATION 256
+
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff, u8 mem_type,
unsigned long tot_len,
struct cudbg_error *cudbg_err)
{
+ static const char * const region_name[] = { "Tx payload:",
+ "Rx payload:" };
unsigned long bytes, bytes_left, bytes_read = 0;
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_region_info payload[2];
+ u32 yield_count = 0;
int rc = 0;
+ u8 i;
+
+ /* Get TX/RX Payload region range if they exist */
+ memset(payload, 0, sizeof(payload));
+ for (i = 0; i < ARRAY_SIZE(region_name); i++) {
+ rc = cudbg_get_payload_range(padap, mem_type, region_name[i],
+ &payload[i]);
+ if (rc)
+ return rc;
+
+ if (payload[i].exist) {
+ /* Align start and end to avoid wrap around */
+ payload[i].start = roundup(payload[i].start,
+ CUDBG_CHUNK_SIZE);
+ payload[i].end = rounddown(payload[i].end,
+ CUDBG_CHUNK_SIZE);
+ }
+ }
bytes_left = tot_len;
while (bytes_left > 0) {
+ /* As MC size is huge and read through PIO access, this
+ * loop will hold cpu for a longer time. OS may think that
+ * the process is hanged and will generate CPU stall traces.
+ * So yield the cpu regularly.
+ */
+ yield_count++;
+ if (!(yield_count % CUDBG_YIELD_ITERATION))
+ schedule();
+
bytes = min_t(unsigned long, bytes_left,
(unsigned long)CUDBG_CHUNK_SIZE);
- rc = cudbg_get_buff(dbg_buff, bytes, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, bytes, &temp_buff);
if (rc)
return rc;
+
+ for (i = 0; i < ARRAY_SIZE(payload); i++)
+ if (payload[i].exist &&
+ bytes_read >= payload[i].start &&
+ bytes_read + bytes <= payload[i].end)
+ /* TX and RX Payload regions can't overlap */
+ goto skip_read;
+
spin_lock(&padap->win0_lock);
rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
bytes_read, bytes,
@@ -445,37 +944,23 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
+
+skip_read:
bytes_left -= bytes;
bytes_read += bytes;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
+ rc = cudbg_write_and_release_buff(pdbg_init, &temp_buff,
+ dbg_buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
return rc;
}
-static void cudbg_collect_mem_info(struct cudbg_init *pdbg_init,
- struct card_mem *mem_info)
-{
- struct adapter *padap = pdbg_init->adap;
- u32 value;
-
- value = t4_read_reg(padap, MA_EDRAM0_BAR_A);
- value = EDRAM0_SIZE_G(value);
- mem_info->size_edc0 = (u16)value;
-
- value = t4_read_reg(padap, MA_EDRAM1_BAR_A);
- value = EDRAM1_SIZE_G(value);
- mem_info->size_edc1 = (u16)value;
-
- value = t4_read_reg(padap, MA_TARGET_MEM_ENABLE_A);
- if (value & EDRAM0_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC0_FLAG);
- if (value & EDRAM1_ENABLE_F)
- mem_info->mem_flag |= (1 << EDC1_FLAG);
-}
-
static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err)
{
@@ -495,37 +980,25 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
struct cudbg_error *cudbg_err,
u8 mem_type)
{
- struct card_mem mem_info = {0};
- unsigned long flag, size;
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_meminfo mem_info;
+ unsigned long size;
+ u8 mc_idx;
int rc;
+ memset(&mem_info, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(padap, &mem_info);
+ if (rc)
+ return rc;
+
cudbg_t4_fwcache(pdbg_init, cudbg_err);
- cudbg_collect_mem_info(pdbg_init, &mem_info);
- switch (mem_type) {
- case MEM_EDC0:
- flag = (1 << EDC0_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc0);
- break;
- case MEM_EDC1:
- flag = (1 << EDC1_FLAG);
- size = cudbg_mbytes_to_bytes(mem_info.size_edc1);
- break;
- default:
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
+ rc = cudbg_meminfo_get_mem_index(padap, &mem_info, mem_type, &mc_idx);
+ if (rc)
+ return rc;
- if (mem_info.mem_flag & flag) {
- rc = cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type,
- size, cudbg_err);
- if (rc)
- goto err;
- } else {
- rc = CUDBG_STATUS_ENTITY_NOT_FOUND;
- goto err;
- }
-err:
- return rc;
+ size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
+ cudbg_err);
}
int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
@@ -544,26 +1017,51 @@ int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
MEM_EDC1);
}
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC0);
+}
+
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_MC1);
+}
+
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ return cudbg_collect_mem_region(pdbg_init, dbg_buff, cudbg_err,
+ MEM_HMA);
+}
+
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
- int rc;
+ int rc, nentries;
- rc = cudbg_get_buff(dbg_buff, RSS_NENTRIES * sizeof(u16), &temp_buff);
+ nentries = t4_chip_rss_size(padap);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, nentries * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
rc = t4_read_rss(padap, (u16 *)temp_buff.data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
@@ -576,7 +1074,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
int vf, rc, vf_count;
vf_count = padap->params.arch.vfcount;
- rc = cudbg_get_buff(dbg_buff,
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
vf_count * sizeof(struct cudbg_rss_vf_conf),
&temp_buff);
if (rc)
@@ -586,8 +1084,7 @@ int cudbg_collect_rss_vf_config(struct cudbg_init *pdbg_init,
for (vf = 0; vf < vf_count; vf++)
t4_read_rss_vf_config(padap, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh, true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
@@ -598,13 +1095,13 @@ int cudbg_collect_path_mtu(struct cudbg_init *pdbg_init,
struct cudbg_buffer temp_buff = { 0 };
int rc;
- rc = cudbg_get_buff(dbg_buff, NMTUS * sizeof(u16), &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, NMTUS * sizeof(u16),
+ &temp_buff);
if (rc)
return rc;
t4_read_mtu_tbl(padap, (u16 *)temp_buff.data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
@@ -616,7 +1113,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
struct cudbg_pm_stats *pm_stats_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pm_stats),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_pm_stats),
&temp_buff);
if (rc)
return rc;
@@ -624,8 +1121,7 @@ int cudbg_collect_pm_stats(struct cudbg_init *pdbg_init,
pm_stats_buff = (struct cudbg_pm_stats *)temp_buff.data;
t4_pmtx_get_stats(padap, pm_stats_buff->tx_cnt, pm_stats_buff->tx_cyc);
t4_pmrx_get_stats(padap, pm_stats_buff->rx_cnt, pm_stats_buff->rx_cyc);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
@@ -640,7 +1136,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_hw_sched),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_hw_sched),
&temp_buff);
hw_sched_buff = (struct cudbg_hw_sched *)temp_buff.data;
hw_sched_buff->map = t4_read_reg(padap, TP_TX_MOD_QUEUE_REQ_MAP_A);
@@ -649,8 +1145,7 @@ int cudbg_collect_hw_sched(struct cudbg_init *pdbg_init,
for (i = 0; i < NTX_SCHED; ++i)
t4_get_tx_sched(padap, i, &hw_sched_buff->kbps[i],
&hw_sched_buff->ipg[i], true);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
@@ -674,7 +1169,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
n = n / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -763,8 +1258,7 @@ int cudbg_collect_tp_indirect(struct cudbg_init *pdbg_init,
tp_pio->ireg_local_offset, true);
ch_tp_pio++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
@@ -776,7 +1270,8 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
struct ireg_buf *ch_sge_dbg;
int i, rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(*ch_sge_dbg) * 2, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(*ch_sge_dbg) * 2,
+ &temp_buff);
if (rc)
return rc;
@@ -797,8 +1292,7 @@ int cudbg_collect_sge_indirect(struct cudbg_init *pdbg_init,
sge_pio->ireg_local_offset);
ch_sge_dbg++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
@@ -810,7 +1304,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
struct cudbg_ulprx_la *ulprx_la_buff;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulprx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulprx_la),
&temp_buff);
if (rc)
return rc;
@@ -818,8 +1312,7 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
ulprx_la_buff = (struct cudbg_ulprx_la *)temp_buff.data;
t4_ulprx_read_la(padap, (u32 *)ulprx_la_buff->data);
ulprx_la_buff->size = ULPRX_LA_SIZE;
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
@@ -832,15 +1325,39 @@ int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
int size, rc;
size = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tp_la_buff = (struct cudbg_tp_la *)temp_buff.data;
tp_la_buff->mode = DBGLAMODE_G(t4_read_reg(padap, TP_DBG_LA_CONFIG_A));
t4_tp_read_la(padap, (u64 *)tp_la_buff->data, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
+
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ struct cudbg_meminfo *meminfo_buff;
+ int rc;
+
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_meminfo),
+ &temp_buff);
+ if (rc)
+ return rc;
+
+ meminfo_buff = (struct cudbg_meminfo *)temp_buff.data;
+ rc = cudbg_fill_meminfo(padap, meminfo_buff);
+ if (rc) {
+ cudbg_err->sys_err = rc;
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
@@ -854,7 +1371,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_cim_pif_la) +
2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -863,8 +1380,7 @@ int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
t4_cim_read_pif_la(padap, (u32 *)cim_pif_la_buff->data,
(u32 *)cim_pif_la_buff->data + 6 * CIM_PIFLA_SIZE,
NULL, NULL);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
@@ -880,7 +1396,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
if (!padap->params.vpd.cclk)
return CUDBG_STATUS_CCLK_NOT_DEFINED;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_clk_info),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_clk_info),
&temp_buff);
if (rc)
return rc;
@@ -912,8 +1428,7 @@ int cudbg_collect_clk_info(struct cudbg_init *pdbg_init,
clk_info_buff->finwait2_timer =
tp_tick_us * t4_read_reg(padap, TP_FINWAIT2_TIMER_A);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
@@ -928,7 +1443,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -969,8 +1484,7 @@ int cudbg_collect_pcie_indirect(struct cudbg_init *pdbg_init,
pcie_pio->ireg_local_offset);
ch_pcie++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
@@ -985,7 +1499,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1026,8 +1540,7 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
pm_pio->ireg_local_offset);
ch_pm++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
@@ -1041,7 +1554,8 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
u32 para[2], val[2];
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_tid_info_region_rev1),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_tid_info_region_rev1),
&temp_buff);
if (rc)
return rc;
@@ -1053,6 +1567,12 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid1->ver_hdr.size = sizeof(struct cudbg_tid_info_region_rev1) -
sizeof(struct cudbg_ver_hdr);
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (!is_fw_attached(pdbg_init))
+ goto fill_tid;
+
#define FW_PARAM_PFVF_A(param) \
(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
@@ -1064,7 +1584,7 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
rc = t4_query_params(padap, padap->mbox, padap->pf, 0, 2, para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->uotid_base = val[0];
@@ -1083,13 +1603,16 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
para, val);
if (rc < 0) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
tid->hpftid_base = val[0];
tid->nhpftids = val[1] - val[0] + 1;
}
+#undef FW_PARAM_PFVF_A
+
+fill_tid:
tid->ntids = padap->tids.ntids;
tid->nstids = padap->tids.nstids;
tid->stid_base = padap->tids.stid_base;
@@ -1109,28 +1632,137 @@ int cudbg_collect_tid(struct cudbg_init *pdbg_init,
tid->ip_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV4_A);
tid->ipv6_users = t4_read_reg(padap, LE_DB_ACT_CNT_IPV6_A);
-#undef FW_PARAM_PFVF_A
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
+}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err)
+{
+ struct adapter *padap = pdbg_init->adap;
+ struct cudbg_buffer temp_buff = { 0 };
+ u32 size, *value, j;
+ int i, rc, n;
+
+ size = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ n = sizeof(t5_pcie_config_array) / (2 * sizeof(u32));
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
+ if (rc)
+ return rc;
+
+ value = (u32 *)temp_buff.data;
+ for (i = 0; i < n; i++) {
+ for (j = t5_pcie_config_array[i][0];
+ j <= t5_pcie_config_array[i][1]; j += 4) {
+ t4_hw_pci_read_cfg4(padap, j, value);
+ value++;
+ }
+ }
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
-int cudbg_dump_context_size(struct adapter *padap)
+static int cudbg_sge_ctxt_check_valid(u32 *buf, int type)
+{
+ int index, bit, bit_pos = 0;
+
+ switch (type) {
+ case CTXT_EGRESS:
+ bit_pos = 176;
+ break;
+ case CTXT_INGRESS:
+ bit_pos = 141;
+ break;
+ case CTXT_FLM:
+ bit_pos = 89;
+ break;
+ }
+ index = bit_pos / 32;
+ bit = bit_pos % 32;
+ return buf[index] & (1U << bit);
+}
+
+static int cudbg_get_ctxt_region_info(struct adapter *padap,
+ struct cudbg_region_info *ctx_info,
+ u8 *mem_type)
{
- u32 value, size;
+ struct cudbg_mem_desc mem_desc;
+ struct cudbg_meminfo meminfo;
+ u32 i, j, value, found;
u8 flq;
+ int rc;
+
+ rc = cudbg_fill_meminfo(padap, &meminfo);
+ if (rc)
+ return rc;
+
+ /* Get EGRESS and INGRESS context region size */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ found = 0;
+ memset(&mem_desc, 0, sizeof(struct cudbg_mem_desc));
+ for (j = 0; j < ARRAY_SIZE(meminfo.avail); j++) {
+ rc = cudbg_get_mem_region(padap, &meminfo, j,
+ cudbg_region[i],
+ &mem_desc);
+ if (!rc) {
+ found = 1;
+ rc = cudbg_get_mem_relative(padap, &meminfo, j,
+ &mem_desc.base,
+ &mem_desc.limit);
+ if (rc) {
+ ctx_info[i].exist = false;
+ break;
+ }
+ ctx_info[i].exist = true;
+ ctx_info[i].start = mem_desc.base;
+ ctx_info[i].end = mem_desc.limit;
+ mem_type[i] = j;
+ break;
+ }
+ }
+ if (!found)
+ ctx_info[i].exist = false;
+ }
+ /* Get FLM and CNM max qid. */
value = t4_read_reg(padap, SGE_FLM_CFG_A);
/* Get number of data freelist queues */
flq = HDRSTARTFLQ_G(value);
- size = CUDBG_MAX_FL_QIDS >> flq;
+ ctx_info[CTXT_FLM].exist = true;
+ ctx_info[CTXT_FLM].end = (CUDBG_MAX_FL_QIDS >> flq) * SGE_CTXT_SIZE;
- /* Add extra space for congestion manager contexts.
- * The number of CONM contexts are same as number of freelist
+ /* The number of CONM contexts are same as number of freelist
* queues.
*/
- size += size;
+ ctx_info[CTXT_CNM].exist = true;
+ ctx_info[CTXT_CNM].end = ctx_info[CTXT_FLM].end;
+
+ return 0;
+}
+
+int cudbg_dump_context_size(struct adapter *padap)
+{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
+ u32 i, size = 0;
+ int rc;
+
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < CTXT_CNM; i++) {
+ if (!region_info[i].exist) {
+ if (i == CTXT_EGRESS || i == CTXT_INGRESS)
+ size += CUDBG_LOWMEM_MAX_CTXT_QIDS *
+ SGE_CTXT_SIZE;
+ continue;
+ }
+
+ size += (region_info[i].end - region_info[i].start + 1) /
+ SGE_CTXT_SIZE;
+ }
return size * sizeof(struct cudbg_ch_cntxt);
}
@@ -1153,44 +1785,144 @@ static void cudbg_read_sge_ctxt(struct cudbg_init *pdbg_init, u32 cid,
t4_sge_ctxt_rd_bd(padap, cid, ctype, data);
}
+static void cudbg_get_sge_ctxt_fw(struct cudbg_init *pdbg_init, u32 max_qid,
+ u8 ctxt_type,
+ struct cudbg_ch_cntxt **out_buff)
+{
+ struct cudbg_ch_cntxt *buff = *out_buff;
+ int rc;
+ u32 j;
+
+ for (j = 0; j < max_qid; j++) {
+ cudbg_read_sge_ctxt(pdbg_init, j, ctxt_type, buff->data);
+ rc = cudbg_sge_ctxt_check_valid(buff->data, ctxt_type);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = ctxt_type;
+ buff->cntxt_id = j;
+ buff++;
+ if (ctxt_type == CTXT_FLM) {
+ cudbg_read_sge_ctxt(pdbg_init, j, CTXT_CNM, buff->data);
+ buff->cntxt_type = CTXT_CNM;
+ buff->cntxt_id = j;
+ buff++;
+ }
+ }
+
+ *out_buff = buff;
+}
+
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err)
{
+ struct cudbg_region_info region_info[CTXT_CNM + 1] = { {0} };
struct adapter *padap = pdbg_init->adap;
+ u32 j, size, max_ctx_size, max_ctx_qid;
+ u8 mem_type[CTXT_INGRESS + 1] = { 0 };
struct cudbg_buffer temp_buff = { 0 };
struct cudbg_ch_cntxt *buff;
- u32 size, i = 0;
+ u64 *dst_off, *src_off;
+ u8 *ctx_buf;
+ u8 i, k;
int rc;
+ /* Get max valid qid for each type of queue */
+ rc = cudbg_get_ctxt_region_info(padap, region_info, mem_type);
+ if (rc)
+ return rc;
+
rc = cudbg_dump_context_size(padap);
if (rc <= 0)
return CUDBG_STATUS_ENTITY_NOT_FOUND;
size = rc;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
+ /* Get buffer with enough space to read the biggest context
+ * region in memory.
+ */
+ max_ctx_size = max(region_info[CTXT_EGRESS].end -
+ region_info[CTXT_EGRESS].start + 1,
+ region_info[CTXT_INGRESS].end -
+ region_info[CTXT_INGRESS].start + 1);
+
+ ctx_buf = kvzalloc(max_ctx_size, GFP_KERNEL);
+ if (!ctx_buf) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return -ENOMEM;
+ }
+
buff = (struct cudbg_ch_cntxt *)temp_buff.data;
- while (size > 0) {
- buff->cntxt_type = CTXT_FLM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_FLM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
- buff->cntxt_type = CTXT_CNM;
- buff->cntxt_id = i;
- cudbg_read_sge_ctxt(pdbg_init, i, CTXT_CNM, buff->data);
- buff++;
- size -= sizeof(struct cudbg_ch_cntxt);
+ /* Collect EGRESS and INGRESS context data.
+	 * In case of failures, fall back to collecting via FW or
+ * backdoor access.
+ */
+ for (i = CTXT_EGRESS; i <= CTXT_INGRESS; i++) {
+ if (!region_info[i].exist) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
- i++;
+ max_ctx_size = region_info[i].end - region_info[i].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (is_fw_attached(pdbg_init)) {
+ t4_sge_ctxt_flush(padap, padap->mbox, i);
+
+ rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type[i],
+ region_info[i].start, max_ctx_size,
+ (__be32 *)ctx_buf, 1);
+ }
+
+ if (rc || !is_fw_attached(pdbg_init)) {
+ max_ctx_qid = CUDBG_LOWMEM_MAX_CTXT_QIDS;
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, i,
+ &buff);
+ continue;
+ }
+
+ for (j = 0; j < max_ctx_qid; j++) {
+ src_off = (u64 *)(ctx_buf + j * SGE_CTXT_SIZE);
+ dst_off = (u64 *)buff->data;
+
+ /* The data is stored in 64-bit cpu order. Convert it
+ * to big endian before parsing.
+ */
+ for (k = 0; k < SGE_CTXT_SIZE / sizeof(u64); k++)
+ dst_off[k] = cpu_to_be64(src_off[k]);
+
+ rc = cudbg_sge_ctxt_check_valid(buff->data, i);
+ if (!rc)
+ continue;
+
+ buff->cntxt_type = i;
+ buff->cntxt_id = j;
+ buff++;
+ }
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ kvfree(ctx_buf);
+
+ /* Collect FREELIST and CONGESTION MANAGER contexts */
+ max_ctx_size = region_info[CTXT_FLM].end -
+ region_info[CTXT_FLM].start + 1;
+ max_ctx_qid = max_ctx_size / SGE_CTXT_SIZE;
+ /* Since FLM and CONM are 1-to-1 mapped, the below function
+ * will fetch both FLM and CONM contexts.
+ */
+ cudbg_get_sge_ctxt_fw(pdbg_init, max_ctx_qid, CTXT_FLM, &buff);
+
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static inline void cudbg_tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
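The collector above stores each context image as big-endian 64-bit words before validity checking, and emits one struct cudbg_ch_cntxt per valid queue, with every FLM record immediately followed by its CNM twin. Below is a minimal sketch of how a dump consumer might walk those records; the record layout and the SGE_CTXT_SIZE value are assumptions inferred from how the collector uses them, not taken from this hunk.

/* Illustrative sketch, not part of the patch: walk the records written by
 * cudbg_collect_dump_context(). The struct layout and SGE_CTXT_SIZE below
 * are assumptions; use the definitions from the cudbg headers in practice.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <endian.h>		/* be64toh() */

#define SGE_CTXT_SIZE	32	/* assumed value */

struct cudbg_ch_cntxt {
	uint32_t cntxt_type;	/* CTXT_EGRESS, CTXT_INGRESS, CTXT_FLM or CTXT_CNM */
	uint32_t cntxt_id;	/* queue id within that context type */
	uint32_t data[SGE_CTXT_SIZE / sizeof(uint32_t)];
};

static void walk_sge_contexts(const struct cudbg_ch_cntxt *rec, size_t nrec)
{
	size_t i, w;

	for (i = 0; i < nrec; i++) {
		for (w = 0; w < SGE_CTXT_SIZE; w += sizeof(uint64_t)) {
			uint64_t word;

			memcpy(&word, (const uint8_t *)rec[i].data + w,
			       sizeof(word));
			word = be64toh(word);	/* collector stored big endian */
			/* ... decode context fields from 'word' here ... */
			(void)word;
		}
	}
}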
@@ -1228,9 +1960,10 @@ static void cudbg_mps_rpl_backdoor(struct adapter *padap,
mps_rplc->rplc31_0 = htonl(t4_read_reg(padap, MPS_VF_RPLCT_MAP0_A));
}
-static int cudbg_collect_tcam_index(struct adapter *padap,
+static int cudbg_collect_tcam_index(struct cudbg_init *pdbg_init,
struct cudbg_mps_tcam *tcam, u32 idx)
{
+ struct adapter *padap = pdbg_init->adap;
u64 tcamy, tcamx, val;
u32 ctl, data2;
int rc = 0;
@@ -1315,12 +2048,22 @@ static int cudbg_collect_tcam_index(struct adapter *padap,
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
FW_LDST_CMD_IDX_V(idx));
- rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
- if (rc)
+ /* If firmware is not attached/alive, use backdoor register
+ * access to collect dump.
+ */
+ if (is_fw_attached(pdbg_init))
+ rc = t4_wr_mbox(padap, padap->mbox, &ldst_cmd,
+ sizeof(ldst_cmd), &ldst_cmd);
+
+ if (rc || !is_fw_attached(pdbg_init)) {
cudbg_mps_rpl_backdoor(padap, &mps_rplc);
- else
+ /* Ignore error since we collected directly from
+ * reading registers.
+ */
+ rc = 0;
+ } else {
mps_rplc = ldst_cmd.u.mps.rplc;
+ }
tcam->rplc[0] = ntohl(mps_rplc.rplc31_0);
tcam->rplc[1] = ntohl(mps_rplc.rplc63_32);
@@ -1351,16 +2094,16 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
n = padap->params.arch.mps_tcam_size;
size = sizeof(struct cudbg_mps_tcam) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
tcam = (struct cudbg_mps_tcam *)temp_buff.data;
for (i = 0; i < n; i++) {
- rc = cudbg_collect_tcam_index(padap, tcam, i);
+ rc = cudbg_collect_tcam_index(pdbg_init, tcam, i);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
total_size += sizeof(struct cudbg_mps_tcam);
@@ -1370,11 +2113,10 @@ int cudbg_collect_mps_tcam(struct cudbg_init *pdbg_init,
if (!total_size) {
rc = CUDBG_SYSTEM_ERROR;
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
@@ -1425,7 +2167,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_vpd_data),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_vpd_data),
&temp_buff);
if (rc)
return rc;
@@ -1441,8 +2183,7 @@ int cudbg_collect_vpd_data(struct cudbg_init *pdbg_init,
vpd_data->fw_minor = FW_HDR_FW_VER_MINOR_G(fw_vers);
vpd_data->fw_micro = FW_HDR_FW_VER_MICRO_G(fw_vers);
vpd_data->fw_build = FW_HDR_FW_VER_BUILD_G(fw_vers);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
static int cudbg_read_tid(struct cudbg_init *pdbg_init, u32 tid,
@@ -1593,7 +2334,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
size = sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
size += sizeof(struct cudbg_tcam);
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1605,7 +2346,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
rc = cudbg_read_tid(pdbg_init, i, tid_data);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
@@ -1616,8 +2357,7 @@ int cudbg_collect_le_tcam(struct cudbg_init *pdbg_init,
bytes += sizeof(struct cudbg_tid_data);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
@@ -1630,13 +2370,12 @@ int cudbg_collect_cctrl(struct cudbg_init *pdbg_init,
int rc;
size = sizeof(u16) * NMTUS * NCCTRL_WIN;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
t4_read_cong_tbl(padap, (void *)temp_buff.data);
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
@@ -1654,7 +2393,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_ma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n * 2;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1690,8 +2429,7 @@ int cudbg_collect_ma_indirect(struct cudbg_init *pdbg_init,
}
ma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
@@ -1704,7 +2442,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
u32 i, j;
int rc;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_ulptx_la),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_ulptx_la),
&temp_buff);
if (rc)
return rc;
@@ -1725,8 +2463,7 @@ int cudbg_collect_ulptx_la(struct cudbg_init *pdbg_init,
t4_read_reg(padap,
ULP_TX_LA_RDDATA_0_A + 0x10 * i);
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
@@ -1735,13 +2472,23 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_buffer temp_buff = { 0 };
+ u32 local_offset, local_range;
struct ireg_buf *up_cim;
+ u32 size, j, iter;
+ u32 instance = 0;
int i, rc, n;
- u32 size;
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ if (is_t5(padap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(padap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else
+ return CUDBG_STATUS_NOT_IMPLEMENTED;
+
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1757,6 +2504,7 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t5_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t5_up_cim_reg_array[i][3];
+ instance = t5_up_cim_reg_array[i][4];
} else if (is_t6(padap->params.chip)) {
up_cim_reg->ireg_addr = t6_up_cim_reg_array[i][0];
up_cim_reg->ireg_data = t6_up_cim_reg_array[i][1];
@@ -1764,18 +2512,39 @@ int cudbg_collect_up_cim_indirect(struct cudbg_init *pdbg_init,
t6_up_cim_reg_array[i][2];
up_cim_reg->ireg_offset_range =
t6_up_cim_reg_array[i][3];
+ instance = t6_up_cim_reg_array[i][4];
}
- rc = t4_cim_read(padap, up_cim_reg->ireg_local_offset,
- up_cim_reg->ireg_offset_range, buff);
- if (rc) {
- cudbg_put_buff(&temp_buff, dbg_buff);
- return rc;
+ switch (instance) {
+ case NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x120;
+ local_range = 1;
+ break;
+ case NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES:
+ iter = up_cim_reg->ireg_offset_range;
+ local_offset = 0x10;
+ local_range = 1;
+ break;
+ default:
+ iter = 1;
+ local_offset = 0;
+ local_range = up_cim_reg->ireg_offset_range;
+ break;
+ }
+
+ for (j = 0; j < iter; j++, buff++) {
+ rc = t4_cim_read(padap,
+ up_cim_reg->ireg_local_offset +
+ (j * local_offset), local_range, buff);
+ if (rc) {
+ cudbg_put_buff(pdbg_init, &temp_buff);
+ return rc;
+ }
}
up_cim++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
@@ -1788,7 +2557,8 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
int i, rc;
u32 addr;
- rc = cudbg_get_buff(dbg_buff, sizeof(struct cudbg_pbt_tables),
+ rc = cudbg_get_buff(pdbg_init, dbg_buff,
+ sizeof(struct cudbg_pbt_tables),
&temp_buff);
if (rc)
return rc;
@@ -1801,7 +2571,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_dynamic[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1814,7 +2584,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_static[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1826,7 +2596,7 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->lrf_table[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
@@ -1838,12 +2608,11 @@ int cudbg_collect_pbt_tables(struct cudbg_init *pdbg_init,
&pbt->pbt_data[i]);
if (rc) {
cudbg_err->sys_err = rc;
- cudbg_put_buff(&temp_buff, dbg_buff);
+ cudbg_put_buff(pdbg_init, &temp_buff);
return rc;
}
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
@@ -1864,7 +2633,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
log = padap->mbox_log;
mbox_cmds = padap->mbox_log->size;
size = sizeof(struct cudbg_mbox_log) * mbox_cmds;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1887,8 +2656,7 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
}
mboxlog++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
@@ -1906,7 +2674,7 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
n = sizeof(t6_hma_ireg_array) / (IREG_NUM_ELEM * sizeof(u32));
size = sizeof(struct ireg_buf) * n;
- rc = cudbg_get_buff(dbg_buff, size, &temp_buff);
+ rc = cudbg_get_buff(pdbg_init, dbg_buff, size, &temp_buff);
if (rc)
return rc;
@@ -1924,6 +2692,5 @@ int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
hma_fli->ireg_local_offset);
hma_indr++;
}
- cudbg_write_and_release_buff(&temp_buff, dbg_buff);
- return rc;
+ return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
index caeee8e33e86..eebefe7cd18e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.h
@@ -75,6 +75,12 @@ int cudbg_collect_edc0_meminfo(struct cudbg_init *pdbg_init,
int cudbg_collect_edc1_meminfo(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_mc0_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
+int cudbg_collect_mc1_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_rss(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -102,6 +108,9 @@ int cudbg_collect_ulprx_la(struct cudbg_init *pdbg_init,
int cudbg_collect_tp_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_cim_pif_la(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -123,6 +132,9 @@ int cudbg_collect_pm_indirect(struct cudbg_init *pdbg_init,
int cudbg_collect_tid(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_pcie_config(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
int cudbg_collect_dump_context(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
@@ -156,6 +168,9 @@ int cudbg_collect_mbox_log(struct cudbg_init *pdbg_init,
int cudbg_collect_hma_indirect(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
+int cudbg_collect_hma_meminfo(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err);
struct cudbg_entity_hdr *cudbg_get_entity_hdr(void *outbuf, int i);
void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
@@ -163,7 +178,8 @@ void cudbg_align_debug_buffer(struct cudbg_buffer *dbg_buff,
u32 cudbg_cim_obq_size(struct adapter *padap, int qid);
int cudbg_dump_context_size(struct adapter *padap);
-struct cudbg_tcam;
+int cudbg_fill_meminfo(struct adapter *padap,
+ struct cudbg_meminfo *meminfo_buff);
void cudbg_fill_le_tcam_info(struct adapter *padap,
struct cudbg_tcam *tcam_region);
#endif /* __CUDBG_LIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
index 24b33f28e548..8150ea85d6a5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib_common.h
@@ -26,6 +26,7 @@ enum cudbg_dump_type {
enum cudbg_compression_type {
CUDBG_COMPRESSION_NONE = 1,
+ CUDBG_COMPRESSION_ZLIB,
};
struct cudbg_hdr {
@@ -78,10 +79,11 @@ struct cudbg_error {
#define CDUMP_MAX_COMP_BUF_SIZE ((64 * 1024) - 1)
#define CUDBG_CHUNK_SIZE ((CDUMP_MAX_COMP_BUF_SIZE / 1024) * 1024)
-int cudbg_get_buff(struct cudbg_buffer *pdbg_buff, u32 size,
+int cudbg_get_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pdbg_buff, u32 size,
struct cudbg_buffer *pin_buff);
-void cudbg_put_buff(struct cudbg_buffer *pin_buff,
- struct cudbg_buffer *pdbg_buff);
+void cudbg_put_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff);
void cudbg_update_buff(struct cudbg_buffer *pin_buff,
struct cudbg_buffer *pout_buff);
#endif /* __CUDBG_LIB_COMMON_H__ */
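The prototype changes above thread the cudbg_init context through cudbg_get_buff() and cudbg_put_buff(), so entities can be staged in a scratch buffer when compression is enabled, and collectors now propagate the return value of cudbg_write_and_release_buff() instead of discarding it. A minimal sketch of the resulting collector pattern, with struct cudbg_foo and cudbg_fill_foo() as placeholder names only:

/* Illustrative sketch, not part of the patch: the collector shape used
 * throughout cudbg_lib.c after this API change. "cudbg_foo" is a
 * placeholder entity, not a real cudbg structure or helper.
 */
static int cudbg_collect_foo(struct cudbg_init *pdbg_init,
			     struct cudbg_buffer *dbg_buff,
			     struct cudbg_error *cudbg_err)
{
	struct cudbg_buffer temp_buff = { 0 };
	int rc;

	rc = cudbg_get_buff(pdbg_init, dbg_buff, sizeof(struct cudbg_foo),
			    &temp_buff);
	if (rc)
		return rc;

	rc = cudbg_fill_foo(pdbg_init->adap, (struct cudbg_foo *)temp_buff.data);
	if (rc) {
		cudbg_err->sys_err = rc;
		/* On error only the pinned buffer is dropped; dbg_buff is no
		 * longer passed to cudbg_put_buff().
		 */
		cudbg_put_buff(pdbg_init, &temp_buff);
		return rc;
	}

	/* Compresses (when enabled) and copies into dbg_buff, returning status. */
	return cudbg_write_and_release_buff(pdbg_init, &temp_buff, dbg_buff);
}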
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
new file mode 100644
index 000000000000..25cc06d75cff
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.c
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#include <linux/zlib.h>
+
+#include "cxgb4.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_zlib.h"
+
+static int cudbg_get_compress_hdr(struct cudbg_buffer *pdbg_buff,
+ struct cudbg_buffer *pin_buff)
+{
+ if (pdbg_buff->offset + sizeof(struct cudbg_compress_hdr) >
+ pdbg_buff->size)
+ return CUDBG_STATUS_NO_MEM;
+
+ pin_buff->data = (char *)pdbg_buff->data + pdbg_buff->offset;
+ pin_buff->offset = 0;
+ pin_buff->size = sizeof(struct cudbg_compress_hdr);
+ pdbg_buff->offset += sizeof(struct cudbg_compress_hdr);
+ return 0;
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff)
+{
+ struct cudbg_buffer temp_buff = { 0 };
+ struct z_stream_s compress_stream;
+ struct cudbg_compress_hdr *c_hdr;
+ int rc;
+
+ /* Write compression header to output buffer before compression */
+ rc = cudbg_get_compress_hdr(pout_buff, &temp_buff);
+ if (rc)
+ return rc;
+
+ c_hdr = (struct cudbg_compress_hdr *)temp_buff.data;
+ c_hdr->compress_id = CUDBG_ZLIB_COMPRESS_ID;
+
+ memset(&compress_stream, 0, sizeof(struct z_stream_s));
+ compress_stream.workspace = pdbg_init->workspace;
+ rc = zlib_deflateInit2(&compress_stream, Z_DEFAULT_COMPRESSION,
+ Z_DEFLATED, CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL, Z_DEFAULT_STRATEGY);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ compress_stream.next_in = pin_buff->data;
+ compress_stream.avail_in = pin_buff->size;
+ compress_stream.next_out = pout_buff->data + pout_buff->offset;
+ compress_stream.avail_out = pout_buff->size - pout_buff->offset;
+
+ rc = zlib_deflate(&compress_stream, Z_FINISH);
+ if (rc != Z_STREAM_END)
+ return CUDBG_SYSTEM_ERROR;
+
+ rc = zlib_deflateEnd(&compress_stream);
+ if (rc != Z_OK)
+ return CUDBG_SYSTEM_ERROR;
+
+ c_hdr->compress_size = compress_stream.total_out;
+ c_hdr->decompress_size = pin_buff->size;
+ pout_buff->offset += compress_stream.total_out;
+
+ return 0;
+}
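cudbg_compress_buff() above writes a struct cudbg_compress_hdr ahead of every deflated chunk, recording both the compressed and the original sizes, then deflates directly into the remaining output space. A rough userspace sketch of the matching inflate step follows, assuming stock zlib and the header layout from cudbg_zlib.h; how consecutive chunks are located inside a dump is not covered here.

/* Illustrative sketch, not part of the patch: inflate one chunk produced by
 * cudbg_compress_buff(). Assumes userspace zlib; the header mirrors
 * cudbg_zlib.h and chunk location within the dump is the caller's problem.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <zlib.h>

#define CUDBG_ZLIB_WIN_BITS	12

struct cudbg_compress_hdr {
	uint32_t compress_id;		/* CUDBG_ZLIB_COMPRESS_ID (17) */
	uint64_t decompress_size;
	uint64_t compress_size;
	uint64_t rsvd[32];
};

static int cudbg_inflate_chunk(const struct cudbg_compress_hdr *hdr,
			       const uint8_t *payload, uint8_t *out,
			       size_t out_len)
{
	z_stream strm;
	int rc;

	if (out_len < hdr->decompress_size)
		return -1;

	memset(&strm, 0, sizeof(strm));
	/* Window bits must be >= the CUDBG_ZLIB_WIN_BITS used for deflate. */
	if (inflateInit2(&strm, CUDBG_ZLIB_WIN_BITS) != Z_OK)
		return -1;

	strm.next_in = (Bytef *)payload;
	strm.avail_in = hdr->compress_size;
	strm.next_out = out;
	strm.avail_out = hdr->decompress_size;

	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);
	return rc == Z_STREAM_END ? 0 : -1;
}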
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
new file mode 100644
index 000000000000..60d23805dfc3
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_zlib.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2018 Chelsio Communications. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ */
+
+#ifndef __CUDBG_ZLIB_H__
+#define __CUDBG_ZLIB_H__
+
+#include <linux/zlib.h>
+
+#define CUDBG_ZLIB_COMPRESS_ID 17
+#define CUDBG_ZLIB_WIN_BITS 12
+#define CUDBG_ZLIB_MEM_LVL 4
+
+struct cudbg_compress_hdr {
+ u32 compress_id;
+ u64 decompress_size;
+ u64 compress_size;
+ u64 rsvd[32];
+};
+
+static inline int cudbg_get_workspace_size(void)
+{
+ return zlib_deflate_workspacesize(CUDBG_ZLIB_WIN_BITS,
+ CUDBG_ZLIB_MEM_LVL);
+}
+
+int cudbg_compress_buff(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *pin_buff,
+ struct cudbg_buffer *pout_buff);
+#endif /* __CUDBG_ZLIB_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 351f4bf37ca9..9040e13ce4b7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -84,7 +84,8 @@ enum {
MEM_EDC1,
MEM_MC,
MEM_MC0 = MEM_MC,
- MEM_MC1
+ MEM_MC1,
+ MEM_HMA,
};
enum {
@@ -318,6 +319,7 @@ struct vpd_params {
};
struct pci_params {
+ unsigned int vpd_cap_addr;
unsigned char speed;
unsigned char width;
};
@@ -826,12 +828,17 @@ struct vf_info {
unsigned char vf_mac_addr[ETH_ALEN];
unsigned int tx_rate;
bool pf_set_mac;
+ u16 vlan;
};
struct mbox_list {
struct list_head list;
};
+struct mps_encap_entry {
+ atomic_t refcnt;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -846,6 +853,10 @@ struct adapter {
enum chip_type chip;
int msg_enable;
+ __be16 vxlan_port;
+ u8 vxlan_port_cnt;
+ __be16 geneve_port;
+ u8 geneve_port_cnt;
struct adapter_params params;
struct cxgb4_virt_res vres;
@@ -875,7 +886,10 @@ struct adapter {
unsigned int clipt_start;
unsigned int clipt_end;
struct clip_tbl *clipt;
+ unsigned int rawf_start;
+ unsigned int rawf_cnt;
struct smt_data *smt;
+ struct mps_encap_entry *mps_encap;
struct cxgb4_uld_info *uld;
void *uld_handle[CXGB4_ULD_MAX];
unsigned int num_uld;
@@ -1317,6 +1331,7 @@ void t4_sge_start(struct adapter *adap);
void t4_sge_stop(struct adapter *adap);
void cxgb4_set_ethtool_ops(struct net_device *netdev);
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb);
extern int dbfifo_int_thresh;
#define for_each_port(adapter, iter) \
@@ -1435,6 +1450,21 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
q->size = size;
}
+/**
+ * t4_is_inserted_mod_type - is a plugged in Firmware Module Type
+ * @fw_mod_type: the Firmware Module Type
+ *
+ * Return whether the Firmware Module Type represents a real Transceiver
+ * Module/Cable Module Type which has been inserted.
+ */
+static inline bool t4_is_inserted_mod_type(unsigned int fw_mod_type)
+{
+ return (fw_mod_type != FW_PORT_MOD_TYPE_NONE &&
+ fw_mod_type != FW_PORT_MOD_TYPE_NOTSUPPORTED &&
+ fw_mod_type != FW_PORT_MOD_TYPE_UNKNOWN &&
+ fw_mod_type != FW_PORT_MOD_TYPE_ERROR);
+}
+
void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
unsigned int data_reg, const u32 *vals,
unsigned int nregs, unsigned int start_idx);
@@ -1524,6 +1554,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox,
int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
+unsigned int t4_chip_rss_size(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
int start, int n, const u16 *rspq, unsigned int nrspq);
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
@@ -1633,6 +1664,12 @@ int t4_free_vi(struct adapter *adap, unsigned int mbox,
int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
int mtu, int promisc, int all_multi, int bcast, int vlanex,
bool sleep_ok);
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok);
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
@@ -1665,7 +1702,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type);
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_update_port_info(struct port_info *pi);
int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
@@ -1708,6 +1745,9 @@ void t4_uld_mem_free(struct adapter *adap);
int t4_uld_mem_alloc(struct adapter *adap);
void t4_uld_clean_up(struct adapter *adap);
void t4_register_netevent_notifier(void);
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf);
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
@@ -1722,4 +1762,6 @@ void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
struct ulptx_sgl *sgl, u64 *end, unsigned int start,
const dma_addr_t *addr);
void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan);
#endif /* __CXGB4_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 29cc625e9833..30485f9a598f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -18,11 +18,14 @@
#include "t4_regs.h"
#include "cxgb4.h"
#include "cxgb4_cudbg.h"
-#include "cudbg_entity.h"
+#include "cudbg_zlib.h"
static const struct cxgb4_collect_entity cxgb4_collect_mem_dump[] = {
{ CUDBG_EDC0, cudbg_collect_edc0_meminfo },
{ CUDBG_EDC1, cudbg_collect_edc1_meminfo },
+ { CUDBG_MC0, cudbg_collect_mc0_meminfo },
+ { CUDBG_MC1, cudbg_collect_mc1_meminfo },
+ { CUDBG_HMA, cudbg_collect_hma_meminfo },
};
static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
@@ -53,6 +56,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_SGE_INDIRECT, cudbg_collect_sge_indirect },
{ CUDBG_ULPRX_LA, cudbg_collect_ulprx_la },
{ CUDBG_TP_LA, cudbg_collect_tp_la },
+ { CUDBG_MEMINFO, cudbg_collect_meminfo },
{ CUDBG_CIM_PIF_LA, cudbg_collect_cim_pif_la },
{ CUDBG_CLK, cudbg_collect_clk_info },
{ CUDBG_CIM_OBQ_RXQ0, cudbg_collect_obq_sge_rx_q0 },
@@ -60,6 +64,7 @@ static const struct cxgb4_collect_entity cxgb4_collect_hw_dump[] = {
{ CUDBG_PCIE_INDIRECT, cudbg_collect_pcie_indirect },
{ CUDBG_PM_INDIRECT, cudbg_collect_pm_indirect },
{ CUDBG_TID_INFO, cudbg_collect_tid },
+ { CUDBG_PCIE_CONFIG, cudbg_collect_pcie_config },
{ CUDBG_DUMP_CONTEXT, cudbg_collect_dump_context },
{ CUDBG_MPS_TCAM, cudbg_collect_mps_tcam },
{ CUDBG_VPD_DATA, cudbg_collect_vpd_data },
@@ -158,8 +163,24 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
}
len = cudbg_mbytes_to_bytes(len);
break;
+ case CUDBG_MC0:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM0_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
+ len = EXT_MEM0_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
+ case CUDBG_MC1:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & EXT_MEM1_ENABLE_F) {
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
case CUDBG_RSS:
- len = RSS_NENTRIES * sizeof(u16);
+ len = t4_chip_rss_size(adap) * sizeof(u16);
break;
case CUDBG_RSS_VF_CONF:
len = adap->params.arch.vfcount *
@@ -201,6 +222,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TP_LA:
len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
break;
+ case CUDBG_MEMINFO:
+ len = sizeof(struct cudbg_meminfo);
+ break;
case CUDBG_CIM_PIF_LA:
len = sizeof(struct cudbg_cim_pif_la);
len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
@@ -219,6 +243,9 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
case CUDBG_TID_INFO:
len = sizeof(struct cudbg_tid_info_region_rev1);
break;
+ case CUDBG_PCIE_CONFIG:
+ len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
+ break;
case CUDBG_DUMP_CONTEXT:
len = cudbg_dump_context_size(adap);
break;
@@ -248,7 +275,13 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct cudbg_ulptx_la);
break;
case CUDBG_UP_CIM_INDIRECT:
- n = sizeof(t5_up_cim_reg_array) / (IREG_NUM_ELEM * sizeof(u32));
+ n = 0;
+ if (is_t5(adap->params.chip))
+ n = sizeof(t5_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
+ else if (is_t6(adap->params.chip))
+ n = sizeof(t6_up_cim_reg_array) /
+ ((IREG_NUM_ELEM + 1) * sizeof(u32));
len = sizeof(struct ireg_buf) * n;
break;
case CUDBG_PBT_TABLE:
@@ -264,6 +297,17 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
len = sizeof(struct ireg_buf) * n;
}
break;
+ case CUDBG_HMA:
+ value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
+ if (value & HMA_MUX_F) {
+ /* In T6, there's no MC1. So, HMA shares MC1
+ * address space.
+ */
+ value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
+ len = EXT_MEM1_SIZE_G(value);
+ }
+ len = cudbg_mbytes_to_bytes(len);
+ break;
default:
break;
}
@@ -275,6 +319,7 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
{
u32 i, entity;
u32 len = 0;
+ u32 wsize;
if (flag & CXGB4_ETH_DUMP_HW) {
for (i = 0; i < ARRAY_SIZE(cxgb4_collect_hw_dump); i++) {
@@ -290,6 +335,11 @@ u32 cxgb4_get_dump_length(struct adapter *adap, u32 flag)
}
}
+ /* If compression is enabled, a smaller destination buffer is enough */
+ wsize = cudbg_get_workspace_size();
+ if (wsize && len > CUDBG_DUMP_BUFF_SIZE)
+ len = CUDBG_DUMP_BUFF_SIZE;
+
return len;
}
@@ -298,22 +348,14 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
const struct cxgb4_collect_entity *e_arr,
u32 arr_size, void *buf, u32 *tot_size)
{
- struct adapter *adap = pdbg_init->adap;
struct cudbg_error cudbg_err = { 0 };
struct cudbg_entity_hdr *entity_hdr;
- u32 entity_size, i;
- u32 total_size = 0;
+ u32 i, total_size = 0;
int ret;
for (i = 0; i < arr_size; i++) {
const struct cxgb4_collect_entity *e = &e_arr[i];
- /* Skip entities that won't fit in output buffer */
- entity_size = cxgb4_get_entity_length(adap, e->entity);
- if (entity_size >
- pdbg_init->outbuf_size - *tot_size - total_size)
- continue;
-
entity_hdr = cudbg_get_entity_hdr(buf, e->entity);
entity_hdr->entity_type = e->entity;
entity_hdr->start_offset = dbg_buff->offset;
@@ -339,16 +381,40 @@ static void cxgb4_cudbg_collect_entity(struct cudbg_init *pdbg_init,
*tot_size += total_size;
}
+static int cudbg_alloc_compress_buff(struct cudbg_init *pdbg_init)
+{
+ u32 workspace_size;
+
+ workspace_size = cudbg_get_workspace_size();
+ pdbg_init->compress_buff = vzalloc(CUDBG_COMPRESS_BUFF_SIZE +
+ workspace_size);
+ if (!pdbg_init->compress_buff)
+ return -ENOMEM;
+
+ pdbg_init->compress_buff_size = CUDBG_COMPRESS_BUFF_SIZE;
+ pdbg_init->workspace = (u8 *)pdbg_init->compress_buff +
+ CUDBG_COMPRESS_BUFF_SIZE - workspace_size;
+ return 0;
+}
+
+static void cudbg_free_compress_buff(struct cudbg_init *pdbg_init)
+{
+ if (pdbg_init->compress_buff)
+ vfree(pdbg_init->compress_buff);
+}
+
int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
u32 flag)
{
- struct cudbg_init cudbg_init = { 0 };
struct cudbg_buffer dbg_buff = { 0 };
u32 size, min_size, total_size = 0;
+ struct cudbg_init cudbg_init;
struct cudbg_hdr *cudbg_hdr;
+ int rc;
size = *buf_size;
+ memset(&cudbg_init, 0, sizeof(struct cudbg_init));
cudbg_init.adap = adap;
cudbg_init.outbuf = buf;
cudbg_init.outbuf_size = size;
@@ -365,7 +431,6 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
cudbg_hdr->max_entities = CUDBG_MAX_ENTITY;
cudbg_hdr->chip_ver = adap->params.chip;
cudbg_hdr->dump_type = CUDBG_DUMP_TYPE_MINI;
- cudbg_hdr->compress_type = CUDBG_COMPRESSION_NONE;
min_size = sizeof(struct cudbg_hdr) +
sizeof(struct cudbg_entity_hdr) *
@@ -373,6 +438,24 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
if (size < min_size)
return -ENOMEM;
+ rc = cudbg_get_workspace_size();
+ if (rc) {
+ /* Zlib available. So, use zlib deflate */
+ cudbg_init.compress_type = CUDBG_COMPRESSION_ZLIB;
+ rc = cudbg_alloc_compress_buff(&cudbg_init);
+ if (rc) {
+ /* Ignore error and continue without compression. */
+ dev_warn(adap->pdev_dev,
+				 "Failed to allocate compression buffer, ret: %d. Continuing without compression.\n",
+ rc);
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ rc = 0;
+ }
+ } else {
+ cudbg_init.compress_type = CUDBG_COMPRESSION_NONE;
+ }
+
+ cudbg_hdr->compress_type = cudbg_init.compress_type;
dbg_buff.offset += min_size;
total_size = dbg_buff.offset;
@@ -390,8 +473,12 @@ int cxgb4_cudbg_collect(struct adapter *adap, void *buf, u32 *buf_size,
buf,
&total_size);
+ cudbg_free_compress_buff(&cudbg_init);
cudbg_hdr->data_len = total_size;
- *buf_size = total_size;
+ if (cudbg_init.compress_type != CUDBG_COMPRESSION_NONE)
+ *buf_size = size;
+ else
+ *buf_size = total_size;
return 0;
}
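With zlib available, collection switches to CUDBG_COMPRESSION_ZLIB and *buf_size is reported as the whole destination buffer rather than the collected total, so a dump consumer should key off cudbg_hdr->compress_type before interpreting the payload. A minimal sketch, assuming the cudbg headers are in scope:

/* Illustrative sketch, not part of the patch: decide whether per-entity
 * payloads in a collected dump need inflating first.
 */
static bool cudbg_dump_is_compressed(const void *dump)
{
	const struct cudbg_hdr *hdr = dump;

	return hdr->compress_type == CUDBG_COMPRESSION_ZLIB;
}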
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
index c099b5aa2214..ce1ac9a1c878 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.h
@@ -20,8 +20,12 @@
#include "cudbg_if.h"
#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
#include "cudbg_lib.h"
+#define CUDBG_DUMP_BUFF_SIZE (32 * 1024 * 1024) /* 32 MB */
+#define CUDBG_COMPRESS_BUFF_SIZE (4 * 1024 * 1024) /* 4 MB */
+
typedef int (*cudbg_collect_callback_t)(struct cudbg_init *pdbg_init,
struct cudbg_buffer *dbg_buff,
struct cudbg_error *cudbg_err);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index cf471831ee71..2822bbff73e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -45,6 +45,10 @@
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
+#include "cudbg_if.h"
+#include "cudbg_lib_common.h"
+#include "cudbg_entity.h"
+#include "cudbg_lib.h"
/* generic seq_file support for showing a table of size rows x width. */
static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos)
@@ -1739,7 +1743,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
*/
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI */
- vniy = ((data2 & DATAVIDH2_F) << 23) |
+ vniy = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
dip_hit = data2 & DATADIPHIT_F;
} else {
@@ -1749,6 +1753,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
port_num = DATAPORTNUM_G(data2);
/* Read tcamx. Change the control param */
+ vnix = 0;
ctl |= CTLXYBITSEL_V(1);
t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
@@ -1757,7 +1762,7 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
data2 = t4_read_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A);
if (lookup_type && (lookup_type != DATALKPTYPE_M)) {
/* Inner header VNI mask */
- vnix = ((data2 & DATAVIDH2_F) << 23) |
+ vnix = (data2 & DATAVIDH2_F) |
(DATAVIDH1_G(data2) << 16) | VIDL_G(val);
}
} else {
@@ -1830,7 +1835,8 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
addr[1], addr[2], addr[3],
addr[4], addr[5],
(unsigned long long)mask,
- vniy, vnix, dip_hit ? 'Y' : 'N',
+ vniy, (vnix | vniy),
+ dip_hit ? 'Y' : 'N',
port_num,
(cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
PORTMAP_G(cls_hi),
@@ -2017,11 +2023,12 @@ static int rss_show(struct seq_file *seq, void *v, int idx)
static int rss_open(struct inode *inode, struct file *file)
{
- int ret;
- struct seq_tab *p;
struct adapter *adap = inode->i_private;
+ int ret, nentries;
+ struct seq_tab *p;
- p = seq_open_tab(file, RSS_NENTRIES / 8, 8 * sizeof(u16), 0, rss_show);
+ nentries = t4_chip_rss_size(adap);
+ p = seq_open_tab(file, nentries / 8, 8 * sizeof(u16), 0, rss_show);
if (!p)
return -ENOMEM;
@@ -2664,10 +2671,14 @@ static const struct file_operations mem_debugfs_fops = {
static int tid_info_show(struct seq_file *seq, void *v)
{
+ unsigned int tid_start = 0;
struct adapter *adap = seq->private;
const struct tid_info *t = &adap->tids;
enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+ if (chip > CHELSIO_T5)
+ tid_start = t4_read_reg(adap, LE_DB_ACTIVE_TABLE_START_INDEX_A);
+
if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
unsigned int sb;
seq_printf(seq, "Connections in use: %u\n",
@@ -2679,8 +2690,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
sb = t4_read_reg(adap, LE_DB_SRVR_START_INDEX_A);
if (sb) {
- seq_printf(seq, "TID range: 0..%u/%u..%u", sb - 1,
- adap->tids.hash_base,
+ seq_printf(seq, "TID range: %u..%u/%u..%u", tid_start,
+ sb - 1, adap->tids.hash_base,
t->ntids - 1);
seq_printf(seq, ", in use: %u/%u\n",
atomic_read(&t->tids_in_use),
@@ -2705,7 +2716,8 @@ static int tid_info_show(struct seq_file *seq, void *v)
seq_printf(seq, "Connections in use: %u\n",
atomic_read(&t->conns_in_use));
- seq_printf(seq, "TID range: 0..%u", t->ntids - 1);
+ seq_printf(seq, "TID range: %u..%u", tid_start,
+ tid_start + t->ntids - 1);
seq_printf(seq, ", in use: %u\n",
atomic_read(&t->tids_in_use));
}
@@ -2794,18 +2806,6 @@ static const struct file_operations blocked_fl_fops = {
.llseek = generic_file_llseek,
};
-struct mem_desc {
- unsigned int base;
- unsigned int limit;
- unsigned int idx;
-};
-
-static int mem_desc_cmp(const void *a, const void *b)
-{
- return ((const struct mem_desc *)a)->base -
- ((const struct mem_desc *)b)->base;
-}
-
static void mem_region_show(struct seq_file *seq, const char *name,
unsigned int from, unsigned int to)
{
@@ -2819,250 +2819,60 @@ static void mem_region_show(struct seq_file *seq, const char *name,
static int meminfo_show(struct seq_file *seq, void *v)
{
static const char * const memory[] = { "EDC0:", "EDC1:", "MC:",
- "MC0:", "MC1:"};
- static const char * const region[] = {
- "DBQ contexts:", "IMSG contexts:", "FLM cache:", "TCBs:",
- "Pstructs:", "Timers:", "Rx FL:", "Tx FL:", "Pstruct FL:",
- "Tx payload:", "Rx payload:", "LE hash:", "iSCSI region:",
- "TDDP region:", "TPT region:", "STAG region:", "RQ region:",
- "RQUDP region:", "PBL region:", "TXPBL region:",
- "DBVFIFO region:", "ULPRX state:", "ULPTX state:",
- "On-chip queues:"
- };
-
- int i, n;
- u32 lo, hi, used, alloc;
- struct mem_desc avail[4];
- struct mem_desc mem[ARRAY_SIZE(region) + 3]; /* up to 3 holes */
- struct mem_desc *md = mem;
+ "MC0:", "MC1:", "HMA:"};
struct adapter *adap = seq->private;
+ struct cudbg_meminfo meminfo;
+ int i, rc;
- for (i = 0; i < ARRAY_SIZE(mem); i++) {
- mem[i].limit = 0;
- mem[i].idx = i;
- }
-
- /* Find and sort the populated memory ranges */
- i = 0;
- lo = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
- if (lo & EDRAM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM0_BAR_A);
- avail[i].base = EDRAM0_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM0_SIZE_G(hi) << 20);
- avail[i].idx = 0;
- i++;
- }
- if (lo & EDRAM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EDRAM1_BAR_A);
- avail[i].base = EDRAM1_BASE_G(hi) << 20;
- avail[i].limit = avail[i].base + (EDRAM1_SIZE_G(hi) << 20);
- avail[i].idx = 1;
- i++;
- }
-
- if (is_t5(adap->params.chip)) {
- if (lo & EXT_MEM0_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
- avail[i].base = EXT_MEM0_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM0_SIZE_G(hi) << 20);
- avail[i].idx = 3;
- i++;
- }
- if (lo & EXT_MEM1_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
- avail[i].base = EXT_MEM1_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM1_SIZE_G(hi) << 20);
- avail[i].idx = 4;
- i++;
- }
- } else {
- if (lo & EXT_MEM_ENABLE_F) {
- hi = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- avail[i].base = EXT_MEM_BASE_G(hi) << 20;
- avail[i].limit =
- avail[i].base + (EXT_MEM_SIZE_G(hi) << 20);
- avail[i].idx = 2;
- i++;
- }
- }
- if (!i) /* no memory available */
- return 0;
- sort(avail, i, sizeof(struct mem_desc), mem_desc_cmp, NULL);
-
- (md++)->base = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_IMSG_CTXT_BADDR_A);
- (md++)->base = t4_read_reg(adap, SGE_FLM_CACHE_BADDR_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TCB_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_TIMER_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_RX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_TX_FLST_BASE_A);
- (md++)->base = t4_read_reg(adap, TP_CMM_MM_PS_FLST_BASE_A);
-
- /* the next few have explicit upper bounds */
- md->base = t4_read_reg(adap, TP_PMM_TX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A) *
- PMTXMAXPAGE_G(t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A));
- md++;
-
- md->base = t4_read_reg(adap, TP_PMM_RX_BASE_A);
- md->limit = md->base - 1 +
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) *
- PMRXMAXPAGE_G(t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A));
- md++;
-
- if (t4_read_reg(adap, LE_DB_CONFIG_A) & HASHEN_F) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5) {
- hi = t4_read_reg(adap, LE_DB_TID_HASHBASE_A) / 4;
- md->base = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- } else {
- hi = t4_read_reg(adap, LE_DB_HASH_TID_BASE_A);
- md->base = t4_read_reg(adap,
- LE_DB_HASH_TBL_BASE_ADDR_A);
- }
- md->limit = 0;
- } else {
- md->base = 0;
- md->idx = ARRAY_SIZE(region); /* hide it */
- }
- md++;
-
-#define ulp_region(reg) do { \
- md->base = t4_read_reg(adap, ULP_ ## reg ## _LLIMIT_A);\
- (md++)->limit = t4_read_reg(adap, ULP_ ## reg ## _ULIMIT_A); \
-} while (0)
-
- ulp_region(RX_ISCSI);
- ulp_region(RX_TDDP);
- ulp_region(TX_TPT);
- ulp_region(RX_STAG);
- ulp_region(RX_RQ);
- ulp_region(RX_RQUDP);
- ulp_region(RX_PBL);
- ulp_region(TX_PBL);
-#undef ulp_region
- md->base = 0;
- md->idx = ARRAY_SIZE(region);
- if (!is_t4(adap->params.chip)) {
- u32 size = 0;
- u32 sge_ctrl = t4_read_reg(adap, SGE_CONTROL2_A);
- u32 fifo_size = t4_read_reg(adap, SGE_DBVFIFO_SIZE_A);
-
- if (is_t5(adap->params.chip)) {
- if (sge_ctrl & VFIFO_ENABLE_F)
- size = DBVFIFO_SIZE_G(fifo_size);
- } else {
- size = T6_DBVFIFO_SIZE_G(fifo_size);
- }
-
- if (size) {
- md->base = BASEADDR_G(t4_read_reg(adap,
- SGE_DBVFIFO_BADDR_A));
- md->limit = md->base + (size << 2) - 1;
- }
- }
-
- md++;
-
- md->base = t4_read_reg(adap, ULP_RX_CTX_BASE_A);
- md->limit = 0;
- md++;
- md->base = t4_read_reg(adap, ULP_TX_ERR_TABLE_BASE_A);
- md->limit = 0;
- md++;
-
- md->base = adap->vres.ocq.start;
- if (adap->vres.ocq.size)
- md->limit = md->base + adap->vres.ocq.size - 1;
- else
- md->idx = ARRAY_SIZE(region); /* hide it */
- md++;
-
- /* add any address-space holes, there can be up to 3 */
- for (n = 0; n < i - 1; n++)
- if (avail[n].limit < avail[n + 1].base)
- (md++)->base = avail[n].limit;
- if (avail[n].limit)
- (md++)->base = avail[n].limit;
-
- n = md - mem;
- sort(mem, n, sizeof(struct mem_desc), mem_desc_cmp, NULL);
+ memset(&meminfo, 0, sizeof(struct cudbg_meminfo));
+ rc = cudbg_fill_meminfo(adap, &meminfo);
+ if (rc)
+ return -ENXIO;
- for (lo = 0; lo < i; lo++)
- mem_region_show(seq, memory[avail[lo].idx], avail[lo].base,
- avail[lo].limit - 1);
+ for (i = 0; i < meminfo.avail_c; i++)
+ mem_region_show(seq, memory[meminfo.avail[i].idx],
+ meminfo.avail[i].base,
+ meminfo.avail[i].limit - 1);
seq_putc(seq, '\n');
- for (i = 0; i < n; i++) {
- if (mem[i].idx >= ARRAY_SIZE(region))
+ for (i = 0; i < meminfo.mem_c; i++) {
+ if (meminfo.mem[i].idx >= ARRAY_SIZE(cudbg_region))
continue; /* skip holes */
- if (!mem[i].limit)
- mem[i].limit = i < n - 1 ? mem[i + 1].base - 1 : ~0;
- mem_region_show(seq, region[mem[i].idx], mem[i].base,
- mem[i].limit);
+ if (!meminfo.mem[i].limit)
+ meminfo.mem[i].limit =
+ i < meminfo.mem_c - 1 ?
+ meminfo.mem[i + 1].base - 1 : ~0;
+ mem_region_show(seq, cudbg_region[meminfo.mem[i].idx],
+ meminfo.mem[i].base, meminfo.mem[i].limit);
}
seq_putc(seq, '\n');
- lo = t4_read_reg(adap, CIM_SDRAM_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_SDRAM_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP RAM:", lo, hi);
-
- lo = t4_read_reg(adap, CIM_EXTMEM2_BASE_ADDR_A);
- hi = t4_read_reg(adap, CIM_EXTMEM2_ADDR_SIZE_A) + lo - 1;
- mem_region_show(seq, "uP Extmem2:", lo, hi);
+ mem_region_show(seq, "uP RAM:", meminfo.up_ram_lo, meminfo.up_ram_hi);
+ mem_region_show(seq, "uP Extmem2:", meminfo.up_extmem2_lo,
+ meminfo.up_extmem2_hi);
- lo = t4_read_reg(adap, TP_PMM_RX_MAX_PAGE_A);
seq_printf(seq, "\n%u Rx pages of size %uKiB for %u channels\n",
- PMRXMAXPAGE_G(lo),
- t4_read_reg(adap, TP_PMM_RX_PAGE_SIZE_A) >> 10,
- (lo & PMRXNUMCHN_F) ? 2 : 1);
+ meminfo.rx_pages_data[0], meminfo.rx_pages_data[1],
+ meminfo.rx_pages_data[2]);
- lo = t4_read_reg(adap, TP_PMM_TX_MAX_PAGE_A);
- hi = t4_read_reg(adap, TP_PMM_TX_PAGE_SIZE_A);
seq_printf(seq, "%u Tx pages of size %u%ciB for %u channels\n",
- PMTXMAXPAGE_G(lo),
- hi >= (1 << 20) ? (hi >> 20) : (hi >> 10),
- hi >= (1 << 20) ? 'M' : 'K', 1 << PMTXNUMCHN_G(lo));
- seq_printf(seq, "%u p-structs\n\n",
- t4_read_reg(adap, TP_CMM_MM_MAX_PSTRUCT_A));
-
- for (i = 0; i < 4; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap, MPS_RX_MAC_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV0_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ meminfo.tx_pages_data[0], meminfo.tx_pages_data[1],
+ meminfo.tx_pages_data[2], meminfo.tx_pages_data[3]);
+
+ seq_printf(seq, "%u p-structs\n\n", meminfo.p_structs);
+
+ for (i = 0; i < 4; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq, "Port %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
- for (i = 0; i < adap->params.arch.nchan; i++) {
- if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5)
- lo = t4_read_reg(adap,
- MPS_RX_LPBK_BG_PG_CNT0_A + i * 4);
- else
- lo = t4_read_reg(adap, MPS_RX_PG_RSV4_A + i * 4);
- if (is_t5(adap->params.chip)) {
- used = T5_USED_G(lo);
- alloc = T5_ALLOC_G(lo);
- } else {
- used = USED_G(lo);
- alloc = ALLOC_G(lo);
- }
+ i, meminfo.port_used[i], meminfo.port_alloc[i]);
+
+ for (i = 0; i < adap->params.arch.nchan; i++)
/* For T6 these are MAC buffer groups */
seq_printf(seq,
"Loopback %d using %u pages out of %u allocated\n",
- i, used, alloc);
- }
+ i, meminfo.loopback_used[i],
+ meminfo.loopback_alloc[i]);
+
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index eb338212f5af..7852d98bad75 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -517,7 +517,8 @@ static int from_fw_port_mod_type(enum fw_port_type port_type,
else
return PORT_OTHER;
} else if (port_type == FW_PORT_TYPE_KR4_100G ||
- port_type == FW_PORT_TYPE_KR_SFP28) {
+ port_type == FW_PORT_TYPE_KR_SFP28 ||
+ port_type == FW_PORT_TYPE_KR_XLAUI) {
return PORT_NONE;
}
@@ -645,6 +646,13 @@ static void fw_caps_to_lmm(enum fw_port_type port_type,
FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
break;
+ case FW_PORT_TYPE_KR_XLAUI:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
+ FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
+ break;
+
case FW_PORT_TYPE_CR2_QSFP:
SET_LMM(FIBRE);
SET_LMM(50000baseSR2_Full);
@@ -1396,6 +1404,101 @@ static int get_dump_data(struct net_device *dev, struct ethtool_dump *eth_dump,
return 0;
}
+static int cxgb4_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct port_info *pi = netdev_priv(dev);
+ u8 sff8472_comp, sff_diag_type, sff_rev;
+ struct adapter *adapter = pi->adapter;
+ int ret;
+
+ if (!t4_is_inserted_mod_type(pi->mod_type))
+ return -EINVAL;
+
+ switch (pi->port_type) {
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSA:
+ case FW_PORT_TYPE_SFP28:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_8472_COMP_ADDR,
+ SFF_8472_COMP_LEN, &sff8472_comp);
+ if (ret)
+ return ret;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFP_DIAG_TYPE_ADDR,
+ SFP_DIAG_TYPE_LEN, &sff_diag_type);
+ if (ret)
+ return ret;
+
+ if (!sff8472_comp || (sff_diag_type & 4)) {
+ modinfo->type = ETH_MODULE_SFF_8079;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ }
+ break;
+
+ case FW_PORT_TYPE_QSFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_CR2_QSFP:
+ case FW_PORT_TYPE_CR4_QSFP:
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, SFF_REV_ADDR,
+ SFF_REV_LEN, &sff_rev);
+ /* For QSFP type ports, revision value >= 3
+ * means the SFP is 8636 compliant.
+ */
+ if (ret)
+ return ret;
+ if (sff_rev >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cxgb4_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eprom, u8 *data)
+{
+ int ret = 0, offset = eprom->offset, len = eprom->len;
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ memset(data, 0, eprom->len);
+ if (offset + len <= I2C_PAGE_SIZE)
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+
+ /* offset + len spans 0xa0 and 0xa1 pages */
+ if (offset <= I2C_PAGE_SIZE) {
+ /* read 0xa0 page */
+ len = I2C_PAGE_SIZE - offset;
+ ret = t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan,
+ I2C_DEV_ADDR_A0, offset, len, data);
+ if (ret)
+ return ret;
+ offset = I2C_PAGE_SIZE;
+ /* Remaining bytes to be read from second page =
+ * Total length - bytes read from first page
+ */
+ len = eprom->len - len;
+ }
+ /* Read additional optical diagnostics from page 0xa2 if supported */
+ return t4_i2c_rd(adapter, adapter->mbox, pi->tx_chan, I2C_DEV_ADDR_A2,
+ offset, len, &data[eprom->len - len]);
+}
+
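cxgb4_get_module_eeprom() above satisfies a read that crosses the first EEPROM page by splitting it: whatever fits below I2C_PAGE_SIZE is fetched from device address 0xa0 and the remainder from 0xa2. A worked sketch of that split, assuming I2C_PAGE_SIZE is 256 bytes (the constant is defined elsewhere in the driver, not in this hunk):

/* Illustrative sketch, not part of the patch: the request split performed by
 * cxgb4_get_module_eeprom(). I2C_PAGE_SIZE = 256 is assumed here.
 */
#include <stdio.h>

#define I2C_PAGE_SIZE 256

static void show_eeprom_split(unsigned int offset, unsigned int len)
{
	if (offset + len <= I2C_PAGE_SIZE) {
		printf("A0: offset %u, len %u\n", offset, len);
		return;
	}

	if (offset <= I2C_PAGE_SIZE) {
		unsigned int first = I2C_PAGE_SIZE - offset;

		printf("A0: offset %u, len %u\n", offset, first);
		offset = I2C_PAGE_SIZE;
		len -= first;
	}
	printf("A2: offset %u, len %u\n", offset, len);
}

/* Example: show_eeprom_split(0, 512) prints
 *   A0: offset 0, len 256
 *   A2: offset 256, len 256
 */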
static const struct ethtool_ops cxgb_ethtool_ops = {
.get_link_ksettings = get_link_ksettings,
.set_link_ksettings = set_link_ksettings,
@@ -1430,6 +1533,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_dump = set_dump,
.get_dump_flag = get_dump_flag,
.get_dump_data = get_dump_data,
+ .get_module_info = cxgb4_get_module_info,
+ .get_module_eeprom = cxgb4_get_module_eeprom,
};
void cxgb4_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 5980f308a253..3177b0c9bd2d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -439,19 +439,32 @@ int cxgb4_get_free_ftid(struct net_device *dev, int family)
if (ftid >= t->nftids)
ftid = -1;
} else {
- ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids, 2);
- if (ftid < 0)
- goto out_unlock;
+ if (is_t6(adap->params.chip)) {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 1);
+ if (ftid < 0)
+ goto out_unlock;
+
+ /* this is only a lookup, keep the found region
+ * unallocated
+ */
+ bitmap_release_region(t->ftid_bmap, ftid, 1);
+ } else {
+ ftid = bitmap_find_free_region(t->ftid_bmap,
+ t->nftids, 2);
+ if (ftid < 0)
+ goto out_unlock;
- /* this is only a lookup, keep the found region unallocated */
- bitmap_release_region(t->ftid_bmap, ftid, 2);
+ bitmap_release_region(t->ftid_bmap, ftid, 2);
+ }
}
out_unlock:
spin_unlock_bh(&t->ftid_lock);
return ftid;
}
-static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
+static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
@@ -460,22 +473,31 @@ static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family)
return -EBUSY;
}
- if (family == PF_INET)
+ if (family == PF_INET) {
__set_bit(fidx, t->ftid_bmap);
- else
- bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_allocate_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_allocate_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
return 0;
}
-static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family)
+static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
+ unsigned int chip_ver)
{
spin_lock_bh(&t->ftid_lock);
- if (family == PF_INET)
+ if (family == PF_INET) {
__clear_bit(fidx, t->ftid_bmap);
- else
- bitmap_release_region(t->ftid_bmap, fidx, 2);
+ } else {
+ if (chip_ver < CHELSIO_T6)
+ bitmap_release_region(t->ftid_bmap, fidx, 2);
+ else
+ bitmap_release_region(t->ftid_bmap, fidx, 1);
+ }
spin_unlock_bh(&t->ftid_lock);
}
@@ -694,7 +716,7 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
if (f->smt)
cxgb4_smt_release(f->smt);
- if (f->fs.hash && f->fs.type)
+ if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
/* The zeroing of the filter rule below clears the filter valid,
@@ -1189,6 +1211,7 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
unsigned int max_fidx, fidx;
struct filter_entry *f;
u32 iconf;
@@ -1225,12 +1248,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
* insertion.
*/
if (fs->type == 0) { /* IPv4 */
- /* If our IPv4 filter isn't being written to a
- * multiple of four filter index and there's an IPv6
- * filter at the multiple of 4 base slot, then we
- * prevent insertion.
+		/* For T6, if our IPv4 filter isn't being written to a
+ * multiple of two filter index and there's an IPv6
+ * filter at the multiple of 2 base slot, then we need
+ * to delete that IPv6 filter ...
+		 * For adapters below T6, an IPv6 filter occupies 4 entries.
+		 * Hence we need to delete the filter at the multiple-of-4 base slot.
*/
- fidx = filter_id & ~0x3;
+ if (chip_ver < CHELSIO_T6)
+ fidx = filter_id & ~0x3;
+ else
+ fidx = filter_id & ~0x1;
+
if (fidx != filter_id &&
adapter->tids.ftid_tab[fidx].fs.type) {
f = &adapter->tids.ftid_tab[fidx];
@@ -1242,23 +1271,42 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
}
}
} else { /* IPv6 */
- /* Ensure that the IPv6 filter is aligned on a
- * multiple of 4 boundary.
- */
- if (filter_id & 0x3) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
- return -EINVAL;
- }
+ if (chip_ver < CHELSIO_T6) {
+ /* Ensure that the IPv6 filter is aligned on a
+ * multiple of 4 boundary.
+ */
+ if (filter_id & 0x3) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
+ return -EINVAL;
+ }
- /* Check all except the base overlapping IPv4 filter slots. */
- for (fidx = filter_id + 1; fidx < filter_id + 4; fidx++) {
+ /* Check all except the base overlapping IPv4 filter
+ * slots.
+ */
+ for (fidx = filter_id + 1; fidx < filter_id + 4;
+ fidx++) {
+ f = &adapter->tids.ftid_tab[fidx];
+ if (f->valid) {
+ dev_err(adapter->pdev_dev,
+ "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
+ fidx);
+ return -EBUSY;
+ }
+ }
+ } else {
+			/* For T6, with CLIP enabled, an IPv6 filter occupies
+ * 2 entries.
+ */
+ if (filter_id & 0x1)
+ return -EINVAL;
+ /* Check overlapping IPv4 filter slot */
+ fidx = filter_id + 1;
f = &adapter->tids.ftid_tab[fidx];
if (f->valid) {
- dev_err(adapter->pdev_dev,
- "Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
- fidx);
- return -EINVAL;
+ pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
+ __func__, fidx);
+ return -EBUSY;
}
}
}
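
The masks above are plain power-of-two alignment: clearing the low bit(s) of filter_id yields the base index of the 2-slot (T6) or 4-slot (pre-T6) group that an IPv6 filter would occupy. A tiny stand-alone illustration (userspace C, example index chosen arbitrarily):

	#include <stdio.h>

	int main(void)
	{
		unsigned int filter_id = 7;               /* example index */
		unsigned int fidx_t5 = filter_id & ~0x3;  /* pre-T6: 4-slot group -> 4 */
		unsigned int fidx_t6 = filter_id & ~0x1;  /* T6: 2-slot group -> 6 */

		printf("pre-T6 base %u, T6 base %u\n", fidx_t5, fidx_t6);
		return 0;
	}
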
@@ -1272,16 +1320,18 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
fidx = filter_id + adapter->tids.ftid_base;
ret = cxgb4_set_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
if (ret)
return ret;
- /* Check to make sure the filter requested is writable ... */
+ /* Check to make sure the filter requested is writable ... */
ret = writable_filter(f);
if (ret) {
/* Clear the bits we have set above */
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
return ret;
}
@@ -1291,6 +1341,17 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
if (f->valid)
clear_filter(adapter, f);
+ if (is_t6(adapter->params.chip) && fs->type &&
+ ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
+ IPV6_ADDR_ANY) {
+ ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
+ if (ret) {
+ cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
+ chip_ver);
+ return ret;
+ }
+ }
+
/* Convert the filter specification into our internal format.
* We copy the PF/VF specification into the Outer VLAN field
* here so the rest of the code -- including the interface to
@@ -1316,7 +1377,8 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
ret = set_filter_wr(adapter, filter_id);
if (ret) {
cxgb4_clear_ftid(&adapter->tids, filter_id,
- fs->type ? PF_INET6 : PF_INET);
+ fs->type ? PF_INET6 : PF_INET,
+ chip_ver);
clear_filter(adapter, f);
}
@@ -1394,6 +1456,7 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx)
{
struct adapter *adapter = netdev2adap(dev);
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
struct filter_entry *f;
unsigned int max_fidx;
int ret;
@@ -1419,7 +1482,8 @@ int __cxgb4_del_filter(struct net_device *dev, int filter_id,
if (f->valid) {
f->ctx = ctx;
cxgb4_clear_ftid(&adapter->tids, filter_id,
- f->fs.type ? PF_INET6 : PF_INET);
+ f->fs.type ? PF_INET6 : PF_INET,
+ chip_ver);
return del_filter_wr(adapter, filter_id);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 05a4abfd5ec1..1ca2a39ed0f8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -65,6 +65,7 @@
#include <net/addrconf.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
+#include <net/udp_tunnel.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
@@ -101,7 +102,9 @@ const char cxgb4_driver_version[] = DRV_VERSION;
*/
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
static const struct pci_device_id cxgb4_pci_tbl[] = {
-#define CH_PCI_DEVICE_ID_FUNCTION 0x4
+#define CXGB4_UNIFIED_PF 0x4
+
+#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF
/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
* called for both.
@@ -109,7 +112,7 @@ const char cxgb4_driver_version[] = DRV_VERSION;
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0
#define CH_PCI_ID_TABLE_ENTRY(devid) \
- {PCI_VDEVICE(CHELSIO, (devid)), 4}
+ {PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
{ 0, } \
@@ -1673,7 +1676,7 @@ int cxgb4_flush_eq_cache(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- return t4_sge_ctxt_flush(adap, adap->mbox);
+ return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);
@@ -2604,7 +2607,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
}
#ifdef CONFIG_PCI_IOV
-static int dummy_open(struct net_device *dev)
+static int cxgb4_mgmt_open(struct net_device *dev)
{
/* Turn carrier off since we don't have to transmit anything on this
* interface.
@@ -2614,39 +2617,44 @@ static int dummy_open(struct net_device *dev)
}
/* Fill MAC address that will be assigned by the FW */
-static void fill_vf_station_mac_addr(struct adapter *adap)
+static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
- unsigned int i;
u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
+ unsigned int i, vf, nvfs;
+ u16 a, b;
int err;
u8 *na;
- u16 a, b;
+ adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
+ PCI_CAP_ID_VPD);
err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
- if (!err) {
- na = adap->params.vpd.na;
- for (i = 0; i < ETH_ALEN; i++)
- hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
- hex2val(na[2 * i + 1]));
- a = (hw_addr[0] << 8) | hw_addr[1];
- b = (hw_addr[1] << 8) | hw_addr[2];
- a ^= b;
- a |= 0x0200; /* locally assigned Ethernet MAC address */
- a &= ~0x0100; /* not a multicast Ethernet MAC address */
- macaddr[0] = a >> 8;
- macaddr[1] = a & 0xff;
-
- for (i = 2; i < 5; i++)
- macaddr[i] = hw_addr[i + 1];
-
- for (i = 0; i < adap->num_vfs; i++) {
- macaddr[5] = adap->pf * 16 + i;
- ether_addr_copy(adap->vfinfo[i].vf_mac_addr, macaddr);
- }
+ if (err)
+ return;
+
+ na = adap->params.vpd.na;
+ for (i = 0; i < ETH_ALEN; i++)
+ hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
+ hex2val(na[2 * i + 1]));
+
+ a = (hw_addr[0] << 8) | hw_addr[1];
+ b = (hw_addr[1] << 8) | hw_addr[2];
+ a ^= b;
+ a |= 0x0200; /* locally assigned Ethernet MAC address */
+ a &= ~0x0100; /* not a multicast Ethernet MAC address */
+ macaddr[0] = a >> 8;
+ macaddr[1] = a & 0xff;
+
+ for (i = 2; i < 5; i++)
+ macaddr[i] = hw_addr[i + 1];
+
+ for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
+ vf < nvfs; vf++) {
+ macaddr[5] = adap->pf * 16 + vf;
+ ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
}
}
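
The station MAC derivation above can be reproduced outside the driver: the VPD node-address string is parsed as hex bytes, the first two 16-bit words are XORed, the locally-administered bit is set and the multicast bit cleared, and the last byte encodes PF and VF. A userspace re-creation with a made-up node address (my_hex2val() stands in for the driver's hex2val() helper):

	#include <stdio.h>
	#include <stdint.h>

	static uint8_t my_hex2val(char c)	/* stand-in for the driver's hex2val() */
	{
		return c <= '9' ? c - '0' : (c & 0xdf) - 'A' + 10;
	}

	int main(void)
	{
		const char *na = "0007430ABCDE";	/* made-up VPD node address */
		uint8_t hw_addr[6], macaddr[6];
		unsigned int pf = 4, vf = 0, i;
		uint16_t a, b;

		for (i = 0; i < 6; i++)
			hw_addr[i] = my_hex2val(na[2 * i]) * 16 + my_hex2val(na[2 * i + 1]);

		a = (hw_addr[0] << 8) | hw_addr[1];
		b = (hw_addr[1] << 8) | hw_addr[2];
		a ^= b;
		a |= 0x0200;	/* locally assigned Ethernet MAC address */
		a &= ~0x0100;	/* not a multicast Ethernet MAC address */
		macaddr[0] = a >> 8;
		macaddr[1] = a & 0xff;
		for (i = 2; i < 5; i++)
			macaddr[i] = hw_addr[i + 1];
		macaddr[5] = pf * 16 + vf;	/* last byte encodes PF and VF */

		for (i = 0; i < 6; i++)
			printf("%02x%c", macaddr[i], i < 5 ? ':' : '\n');
		return 0;
	}
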
-static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2668,8 +2676,8 @@ static int cxgb_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
return ret;
}
-static int cxgb_get_vf_config(struct net_device *dev,
- int vf, struct ifla_vf_info *ivi)
+static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2683,8 +2691,8 @@ static int cxgb_get_vf_config(struct net_device *dev,
return 0;
}
-static int cxgb_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
{
struct port_info *pi = netdev_priv(dev);
unsigned int phy_port_id;
@@ -2695,8 +2703,8 @@ static int cxgb_get_phys_port_id(struct net_device *dev,
return 0;
}
-static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
- int max_tx_rate)
+static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
@@ -2775,7 +2783,30 @@ static int cxgb_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
return 0;
}
-#endif
+static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adap = pi->adapter;
+ int ret;
+
+ if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
+ return -EPROTONOSUPPORT;
+
+ ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
+ if (!ret) {
+ adap->vfinfo[vf].vlan = vlan;
+ return 0;
+ }
+
+ dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
+ ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
+ return ret;
+}
+#endif /* CONFIG_PCI_IOV */
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
@@ -2897,9 +2928,6 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
static int cxgb_setup_tc_flower(struct net_device *dev,
struct tc_cls_flower_offload *cls_flower)
{
- if (cls_flower->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return cxgb4_tc_flower_replace(dev, cls_flower);
@@ -2915,9 +2943,6 @@ static int cxgb_setup_tc_flower(struct net_device *dev,
static int cxgb_setup_tc_cls_u32(struct net_device *dev,
struct tc_cls_u32_offload *cls_u32)
{
- if (cls_u32->common.chain_index)
- return -EOPNOTSUPP;
-
switch (cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
@@ -2943,7 +2968,7 @@ static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
return -EINVAL;
}
- if (!tc_can_offload(dev))
+ if (!tc_cls_can_offload_and_chain0(dev, type_data))
return -EOPNOTSUPP;
switch (type) {
@@ -2987,6 +3012,174 @@ static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
}
}
+static void cxgb_del_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int ret = 0, i;
+
+ if (chip_ver < CHELSIO_T6)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!adapter->vxlan_port_cnt ||
+ adapter->vxlan_port != ti->port)
+ return; /* Invalid VxLAN destination port */
+
+ adapter->vxlan_port_cnt--;
+ if (adapter->vxlan_port_cnt)
+ return;
+
+ adapter->vxlan_port = 0;
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!adapter->geneve_port_cnt ||
+ adapter->geneve_port != ti->port)
+ return; /* Invalid GENEVE destination port */
+
+ adapter->geneve_port_cnt--;
+ if (adapter->geneve_port_cnt)
+ return;
+
+ adapter->geneve_port = 0;
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
+ break;
+ default:
+ return;
+ }
+
+ /* Matchall mac entries can be deleted only after all tunnel ports
+ * are brought down or removed.
+ */
+ if (!adapter->rawf_cnt)
+ return;
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+ ret = t4_free_raw_mac_filt(adapter, pi->viid,
+ match_all_mac, match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
+ i);
+ return;
+ }
+ atomic_dec(&adapter->mps_encap[adapter->rawf_start +
+ pi->port_id].refcnt);
+ }
+}
+
+static void cxgb_add_udp_tunnel(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ struct port_info *pi = netdev_priv(netdev);
+ struct adapter *adapter = pi->adapter;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
+ u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+ int i, ret;
+
+ if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
+ return;
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ /* Callback for adding vxlan port can be called with the same
+ * port for both IPv4 and IPv6. We should not disable the
+ * offloading when the same port for both protocols is added
+ * and later one of them is removed.
+ */
+ if (adapter->vxlan_port_cnt &&
+ adapter->vxlan_port == ti->port) {
+ adapter->vxlan_port_cnt++;
+ return;
+ }
+
+ /* We will support only one VxLAN port */
+ if (adapter->vxlan_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->vxlan_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->vxlan_port = ti->port;
+ adapter->vxlan_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
+ VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (adapter->geneve_port_cnt &&
+ adapter->geneve_port == ti->port) {
+ adapter->geneve_port_cnt++;
+ return;
+ }
+
+ /* We will support only one GENEVE port */
+ if (adapter->geneve_port_cnt) {
+ netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
+ be16_to_cpu(adapter->geneve_port),
+ be16_to_cpu(ti->port));
+ return;
+ }
+
+ adapter->geneve_port = ti->port;
+ adapter->geneve_port_cnt = 1;
+
+ t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
+ GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
+ break;
+ default:
+ return;
+ }
+
+ /* Create a 'match all' mac filter entry for inner mac,
+ * if the raw mac interface is supported. Once the Linux kernel provides
+ * driver entry points for adding/deleting the inner mac addresses,
+ * we will remove this 'match all' entry and fall back to adding
+ * exact match filters.
+ */
+ for_each_port(adapter, i) {
+ pi = adap2pinfo(adapter, i);
+
+ ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
+ match_all_mac,
+ match_all_mac,
+ adapter->rawf_start +
+ pi->port_id,
+ 1, pi->port_id, true);
+ if (ret < 0) {
+ netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
+ be16_to_cpu(ti->port));
+ cxgb_del_udp_tunnel(netdev, ti);
+ return;
+ }
+ atomic_inc(&adapter->mps_encap[ret].refcnt);
+ }
+}
+
+static netdev_features_t cxgb_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct port_info *pi = netdev_priv(dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+ return features;
+
+ /* Check if hw supports offload for this packet */
+ if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
+ return features;
+
+ /* Offload is not supported for this encapsulated packet */
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+}
+
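
The two callbacks above plug into the generic UDP tunnel port notification path: the core replays known ports through them (normally via udp_tunnel_get_rx_info()) rather than the driver polling anything. A minimal sketch, kernel context assumed, of how such a notification reaches the new ndo (example_offload_vxlan_port() is hypothetical):

	static void example_offload_vxlan_port(struct net_device *dev, u16 port)
	{
		struct udp_tunnel_info ti = {
			.type = UDP_TUNNEL_TYPE_VXLAN,
			.port = htons(port),	/* adapter->vxlan_port is kept big-endian */
		};

		if (dev->netdev_ops->ndo_udp_tunnel_add)
			dev->netdev_ops->ndo_udp_tunnel_add(dev, &ti);
	}
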
static netdev_features_t cxgb_fix_features(struct net_device *dev,
netdev_features_t features)
{
@@ -3018,20 +3211,25 @@ static const struct net_device_ops cxgb4_netdev_ops = {
#endif /* CONFIG_CHELSIO_T4_FCOE */
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
+ .ndo_udp_tunnel_add = cxgb_add_udp_tunnel,
+ .ndo_udp_tunnel_del = cxgb_del_udp_tunnel,
+ .ndo_features_check = cxgb_features_check,
.ndo_fix_features = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
- .ndo_open = dummy_open,
- .ndo_set_vf_mac = cxgb_set_vf_mac,
- .ndo_get_vf_config = cxgb_get_vf_config,
- .ndo_set_vf_rate = cxgb_set_vf_rate,
- .ndo_get_phys_port_id = cxgb_get_phys_port_id,
+ .ndo_open = cxgb4_mgmt_open,
+ .ndo_set_vf_mac = cxgb4_mgmt_set_vf_mac,
+ .ndo_get_vf_config = cxgb4_mgmt_get_vf_config,
+ .ndo_set_vf_rate = cxgb4_mgmt_set_vf_rate,
+ .ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
+ .ndo_set_vf_vlan = cxgb4_mgmt_set_vf_vlan,
};
#endif
-static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
@@ -3043,7 +3241,7 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
- .get_drvinfo = get_drvinfo,
+ .get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
void t4_fatal_err(struct adapter *adap)
@@ -4759,7 +4957,7 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
}
#ifdef CONFIG_PCI_IOV
-static void dummy_setup(struct net_device *dev)
+static void cxgb4_mgmt_setup(struct net_device *dev)
{
dev->type = ARPHRD_NONE;
dev->mtu = 0;
@@ -4775,38 +4973,6 @@ static void dummy_setup(struct net_device *dev)
dev->needs_free_netdev = true;
}
-static int config_mgmt_dev(struct pci_dev *pdev)
-{
- struct adapter *adap = pci_get_drvdata(pdev);
- struct net_device *netdev;
- struct port_info *pi;
- char name[IFNAMSIZ];
- int err;
-
- snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
- dummy_setup);
- if (!netdev)
- return -ENOMEM;
-
- pi = netdev_priv(netdev);
- pi->adapter = adap;
- pi->tx_chan = adap->pf % adap->params.nports;
- SET_NETDEV_DEV(netdev, &pdev->dev);
-
- adap->port[0] = netdev;
- pi->port_id = 0;
-
- err = register_netdev(adap->port[0]);
- if (err) {
- pr_info("Unable to register VF mgmt netdev %s\n", name);
- free_netdev(adap->port[0]);
- adap->port[0] = NULL;
- return err;
- }
- return 0;
-}
-
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
struct adapter *adap = pci_get_drvdata(pdev);
@@ -4818,7 +4984,7 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
/* Check if cxgb4 is the MASTER and fw is initialized */
if (!(pcie_fw & PCIE_FW_INIT_F) ||
!(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
- PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
dev_warn(&pdev->dev,
"cxgb4 driver needs to be MASTER to support SRIOV\n");
return -EOPNOTSUPP;
@@ -4830,46 +4996,132 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
if (current_vfs && pci_vfs_assigned(pdev)) {
dev_err(&pdev->dev,
"Cannot modify SR-IOV while VFs are assigned\n");
- num_vfs = current_vfs;
- return num_vfs;
+ return current_vfs;
}
-
- /* Disable SRIOV when zero is passed.
- * One needs to disable SRIOV before modifying it, else
- * stack throws the below warning:
- * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ /* Note that the upper-level code ensures that we're never called with
+ * a non-zero "num_vfs" when we already have VFs instantiated. But
+ * it never hurts to code defensively.
*/
+ if (num_vfs != 0 && current_vfs != 0)
+ return -EBUSY;
+
+ /* Nothing to do for no change. */
+ if (num_vfs == current_vfs)
+ return num_vfs;
+
+ /* Disable SRIOV when zero is passed. */
if (!num_vfs) {
pci_disable_sriov(pdev);
- if (adap->port[0]) {
- unregister_netdev(adap->port[0]);
- adap->port[0] = NULL;
- }
+ /* free VF Management Interface */
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+
/* free VF resources */
+ adap->num_vfs = 0;
kfree(adap->vfinfo);
adap->vfinfo = NULL;
- adap->num_vfs = 0;
- return num_vfs;
+ return 0;
}
- if (num_vfs != current_vfs) {
- err = pci_enable_sriov(pdev, num_vfs);
+ if (!current_vfs) {
+ struct fw_pfvf_cmd port_cmd, port_rpl;
+ struct net_device *netdev;
+ unsigned int pmask, port;
+ struct pci_dev *pbridge;
+ struct port_info *pi;
+ char name[IFNAMSIZ];
+ u32 devcap2;
+ u16 flags;
+ int pos;
+
+ /* If we want to instantiate Virtual Functions, then our
+ * parent bridge's PCI-E needs to support Alternative Routing
+ * ID (ARI) because our VFs will show up at function offset 8
+ * and above.
+ */
+ pbridge = pdev->bus->self;
+ pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
+ pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
+ pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);
+
+ if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
+ !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
+ /* Our parent bridge does not support ARI so issue a
+ * warning and skip instantiating the VFs. They
+ * won't be reachable.
+ */
+ dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
+ pbridge->bus->number, PCI_SLOT(pbridge->devfn),
+ PCI_FUNC(pbridge->devfn));
+ return -ENOTSUPP;
+ }
+ memset(&port_cmd, 0, sizeof(port_cmd));
+ port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_PFVF_CMD_PFN_V(adap->pf) |
+ FW_PFVF_CMD_VFN_V(0));
+ port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
+ err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
+ &port_rpl);
if (err)
return err;
+ pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
+ port = ffs(pmask) - 1;
+ /* Allocate VF Management Interface. */
+ snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
+ adap->pf);
+ netdev = alloc_netdev(sizeof(struct port_info),
+ name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
+ if (!netdev)
+ return -ENOMEM;
- adap->num_vfs = num_vfs;
- err = config_mgmt_dev(pdev);
- if (err)
+ pi = netdev_priv(netdev);
+ pi->adapter = adap;
+ pi->lport = port;
+ pi->tx_chan = port;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adap->port[0] = netdev;
+ pi->port_id = 0;
+
+ err = register_netdev(adap->port[0]);
+ if (err) {
+ pr_info("Unable to register VF mgmt netdev %s\n", name);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
return err;
+ }
+ /* Allocate and set up VF Information. */
+ adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
+ sizeof(struct vf_info), GFP_KERNEL);
+ if (!adap->vfinfo) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ return -ENOMEM;
+ }
+ cxgb4_mgmt_fill_vf_station_mac_addr(adap);
+ }
+ /* Instantiate the requested number of VFs. */
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ pr_info("Unable to instantiate %d VFs\n", num_vfs);
+ if (!current_vfs) {
+ unregister_netdev(adap->port[0]);
+ free_netdev(adap->port[0]);
+ adap->port[0] = NULL;
+ kfree(adap->vfinfo);
+ adap->vfinfo = NULL;
+ }
+ return err;
}
- adap->vfinfo = kcalloc(adap->num_vfs,
- sizeof(struct vf_info), GFP_KERNEL);
- if (adap->vfinfo)
- fill_vf_station_mac_addr(adap);
+ adap->num_vfs = num_vfs;
return num_vfs;
}
-#endif
+#endif /* CONFIG_PCI_IOV */
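
For context, .sriov_configure is the standard hook behind the sysfs sriov_numvfs attribute: writing N there makes the PCI core call the hook with num_vfs = N, and writing 0 tears the VFs down again (which is also how remove_one() below reuses cxgb4_iov_configure()). A sketch of the wiring; the member values other than .sriov_configure are illustrative, not quoted from this patch:

	static struct pci_driver example_cxgb4_driver = {
		.name            = "cxgb4-example",	/* illustrative */
		.probe           = init_one,
		.remove          = remove_one,
		.shutdown        = shutdown_one,
		.sriov_configure = cxgb4_iov_configure,	/* run on sriov_numvfs writes */
	};
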
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
@@ -4882,9 +5134,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
-#ifdef CONFIG_PCI_IOV
- u32 v, port_vec;
-#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4908,6 +5157,13 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_disable_device;
}
+ adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ if (!adapter) {
+ err = -ENOMEM;
+ goto out_unmap_bar0;
+ }
+
+ adapter->regs = regs;
err = t4_wait_dev_ready(regs);
if (err < 0)
goto out_unmap_bar0;
@@ -4918,13 +5174,29 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
chip = get_chip_type(pdev, pl_rev);
func = CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5 ?
SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
+
+ adapter->pdev = pdev;
+ adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
+ adapter->mbox = func;
+ adapter->pf = func;
+ adapter->msg_enable = DFLT_MSG_ENABLE;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+ pci_set_drvdata(pdev, adapter);
+
if (func != ent->driver_data) {
-#ifndef CONFIG_PCI_IOV
- iounmap(regs);
-#endif
pci_disable_device(pdev);
pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
+ return 0;
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
@@ -4933,53 +5205,30 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
"coherent allocations\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
} else {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_unmap_bar0;
+ goto out_free_adapter;
}
}
pci_enable_pcie_error_reporting(pdev);
pci_set_master(pdev);
pci_save_state(pdev);
-
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto out_unmap_bar0;
- }
adap_idx++;
-
adapter->workq = create_singlethread_workqueue("cxgb4");
if (!adapter->workq) {
err = -ENOMEM;
goto out_free_adapter;
}
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto out_free_adapter;
- }
adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
-
- adapter->regs = regs;
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->msg_enable = DFLT_MSG_ENABLE;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
@@ -5002,9 +5251,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&adapter->stats_lock);
spin_lock_init(&adapter->tid_release_lock);
spin_lock_init(&adapter->win0_lock);
- spin_lock_init(&adapter->mbox_lock);
-
- INIT_LIST_HEAD(&adapter->mlist.list);
INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
INIT_WORK(&adapter->db_full_task, process_db_full);
@@ -5080,6 +5326,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
+
+ if (CHELSIO_CHIP_VERSION(chip) > CHELSIO_T5)
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5273,58 +5523,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_fw_sge_queues(adapter);
return 0;
-sriov:
-#ifdef CONFIG_PCI_IOV
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto free_pci_region;
- }
-
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->name = pci_name(pdev);
- adapter->mbox = func;
- adapter->pf = func;
- adapter->regs = regs;
- adapter->adap_idx = adap_idx;
- adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
- (sizeof(struct mbox_cmd) *
- T4_OS_LOG_MBOX_CMDS),
- GFP_KERNEL);
- if (!adapter->mbox_log) {
- err = -ENOMEM;
- goto free_adapter;
- }
- spin_lock_init(&adapter->mbox_lock);
- INIT_LIST_HEAD(&adapter->mlist.list);
-
- v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
- &v, &port_vec);
- if (err < 0) {
- dev_err(adapter->pdev_dev, "Could not fetch port params\n");
- goto free_mbox_log;
- }
-
- adapter->params.nports = hweight32(port_vec);
- pci_set_drvdata(pdev, adapter);
- return 0;
-
-free_mbox_log:
- kfree(adapter->mbox_log);
- free_adapter:
- kfree(adapter);
- free_pci_region:
- iounmap(regs);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- return err;
-#else
- return 0;
-#endif
-
out_free_dev:
free_some_resources(adapter);
if (adapter->flags & USING_MSIX)
@@ -5416,14 +5614,7 @@ static void remove_one(struct pci_dev *pdev)
}
#ifdef CONFIG_PCI_IOV
else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
+ cxgb4_iov_configure(adapter->pdev, 0);
}
#endif
}
@@ -5467,18 +5658,6 @@ static void shutdown_one(struct pci_dev *pdev)
if (adapter->flags & FW_OK)
t4_fw_bye(adapter, adapter->mbox);
}
-#ifdef CONFIG_PCI_IOV
- else {
- if (adapter->port[0])
- unregister_netdev(adapter->port[0]);
- iounmap(adapter->regs);
- kfree(adapter->vfinfo);
- kfree(adapter->mbox_log);
- kfree(adapter);
- pci_disable_sriov(pdev);
- pci_release_regions(pdev);
- }
-#endif
}
static struct pci_driver cxgb4_driver = {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index a452d5a1b0f3..36563364bae7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -43,7 +43,7 @@
#define STATS_CHECK_PERIOD (HZ / 2)
-struct ch_tc_pedit_fields pedits[] = {
+static struct ch_tc_pedit_fields pedits[] = {
PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
@@ -408,9 +408,7 @@ static void cxgb4_process_flow_actions(struct net_device *in,
} else if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
} else if (is_tcf_mirred_egress_redirect(a)) {
- int ifindex = tcf_mirred_ifindex(a);
- struct net_device *out = __dev_get_by_index(dev_net(in),
- ifindex);
+ struct net_device *out = tcf_mirred_dev(a);
struct port_info *pi = netdev_priv(out);
fs->action = FILTER_SWITCH;
@@ -585,14 +583,14 @@ static int cxgb4_validate_flow_actions(struct net_device *dev,
/* Do nothing */
} else if (is_tcf_mirred_egress_redirect(a)) {
struct adapter *adap = netdev2adap(dev);
- struct net_device *n_dev;
- unsigned int i, ifindex;
+ struct net_device *n_dev, *target_dev;
+ unsigned int i;
bool found = false;
- ifindex = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (ifindex == n_dev->ifindex) {
+ if (target_dev == n_dev) {
found = true;
break;
}
@@ -768,9 +766,7 @@ static void ch_flower_stats_handler(struct work_struct *work)
rhashtable_walk_enter(&adap->flower_tbl, &iter);
do {
- flower_entry = ERR_PTR(rhashtable_walk_start(&iter));
- if (IS_ERR(flower_entry))
- goto walk_stop;
+ rhashtable_walk_start(&iter);
while ((flower_entry = rhashtable_walk_next(&iter)) &&
!IS_ERR(flower_entry)) {
@@ -789,8 +785,9 @@ static void ch_flower_stats_handler(struct work_struct *work)
spin_unlock(&flower_entry->lock);
}
}
-walk_stop:
+
rhashtable_walk_stop(&iter);
+
} while (flower_entry == ERR_PTR(-EAGAIN));
rhashtable_walk_exit(&iter);
mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
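
The stats-handler change above tracks the rhashtable API in which rhashtable_walk_start() no longer returns an error to check; a resize is instead observed as ERR_PTR(-EAGAIN) from rhashtable_walk_next() and handled by restarting the walk. The canonical loop, sketched over a hypothetical entry type (kernel context assumed):

	struct foo {			/* hypothetical entry type */
		struct rhash_head node;
		u64 packets;
	};

	static void example_walk(struct rhashtable *tbl)
	{
		struct rhashtable_iter iter;
		struct foo *e;

		rhashtable_walk_enter(tbl, &iter);
		do {
			rhashtable_walk_start(&iter);
			while ((e = rhashtable_walk_next(&iter)) && !IS_ERR(e))
				e->packets++;		/* stand-in per-entry work */
			rhashtable_walk_stop(&iter);
		} while (e == ERR_PTR(-EAGAIN));	/* table resized: restart */
		rhashtable_walk_exit(&iter);
	}
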
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
index cd0cd13a964d..ab174bcfbfb0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c
@@ -114,14 +114,14 @@ static int fill_action_fields(struct adapter *adap,
/* Re-direct to specified port in hardware. */
if (is_tcf_mirred_egress_redirect(a)) {
- struct net_device *n_dev;
- unsigned int i, index;
+ struct net_device *n_dev, *target_dev;
bool found = false;
+ unsigned int i;
- index = tcf_mirred_ifindex(a);
+ target_dev = tcf_mirred_dev(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
- if (index == n_dev->ifindex) {
+ if (target_dev == n_dev) {
fs->action = FILTER_SWITCH;
fs->eport = i;
found = true;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6c7b0ac0b48b..6e310a0da7c9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -761,12 +761,19 @@ static inline unsigned int flits_to_desc(unsigned int n)
* Returns whether an Ethernet packet is small enough to fit as
* immediate data. Return value corresponds to headroom required.
*/
-static inline int is_eth_imm(const struct sk_buff *skb)
+static inline int is_eth_imm(const struct sk_buff *skb, unsigned int chip_ver)
{
- int hdrlen = skb_shinfo(skb)->gso_size ?
- sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ int hdrlen = 0;
- hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->encapsulation && skb_shinfo(skb)->gso_size &&
+ chip_ver > CHELSIO_T5) {
+ hdrlen = sizeof(struct cpl_tx_tnl_lso);
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ } else {
+ hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ }
if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
return hdrlen;
return 0;
@@ -779,10 +786,11 @@ static inline int is_eth_imm(const struct sk_buff *skb)
* Returns the number of flits needed for a Tx WR for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
unsigned int flits;
- int hdrlen = is_eth_imm(skb);
+ int hdrlen = is_eth_imm(skb, chip_ver);
/* If the skb is small enough, we can pump it out as a work request
* with only immediate data. In that case we just have to have the
@@ -801,13 +809,20 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* with an embedded TX Packet Write CPL message.
*/
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
- if (skb_shinfo(skb)->gso_size)
- flits += (sizeof(struct fw_eth_tx_pkt_wr) +
- sizeof(struct cpl_tx_pkt_lso_core) +
- sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
- else
+ if (skb_shinfo(skb)->gso_size) {
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_tnl_lso);
+ else
+ hdrlen = sizeof(struct fw_eth_tx_pkt_wr) +
+ sizeof(struct cpl_tx_pkt_lso_core);
+
+ hdrlen += sizeof(struct cpl_tx_pkt_core);
+ flits += (hdrlen / sizeof(__be64));
+ } else {
flits += (sizeof(struct fw_eth_tx_pkt_wr) +
sizeof(struct cpl_tx_pkt_core)) / sizeof(__be64);
+ }
return flits;
}
@@ -818,9 +833,10 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
* Returns the number of Tx descriptors needed for the given Ethernet
* packet, including the needed WR and CPL headers.
*/
-static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb,
+ unsigned int chip_ver)
{
- return flits_to_desc(calc_tx_flits(skb));
+ return flits_to_desc(calc_tx_flits(skb, chip_ver));
}
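
As background (not quoted from this patch): a flit is 8 bytes and an SGE Tx descriptor is 64 bytes, so flits_to_desc() effectively rounds the flit count up to the next multiple of 8. A worked example under that assumption:

	#include <stdio.h>

	int main(void)
	{
		unsigned int flits = 11;		/* e.g. WR + CPLs + SGL */
		unsigned int ndesc = (flits + 7) / 8;	/* -> 2 descriptors */

		printf("%u flits -> %u descriptors\n", flits, ndesc);
		return 0;
	}
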
/**
@@ -1148,6 +1164,105 @@ cxgb_fcoe_offload(struct sk_buff *skb, struct adapter *adap,
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
+/* Returns the tunnel type if the hardware supports offloading it.
+ * It is called only for T6 and later adapters.
+ */
+enum cpl_tx_tnl_lso_type cxgb_encap_offload_supported(struct sk_buff *skb)
+{
+ u8 l4_hdr = 0;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+ struct port_info *pi = netdev_priv(skb->dev);
+ struct adapter *adapter = pi->adapter;
+
+ if (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB))
+ return tnl_type;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ switch (l4_hdr) {
+ case IPPROTO_UDP:
+ if (adapter->vxlan_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_VXLAN;
+ else if (adapter->geneve_port == udp_hdr(skb)->dest)
+ tnl_type = TX_TNL_TYPE_GENEVE;
+ break;
+ default:
+ return tnl_type;
+ }
+
+ return tnl_type;
+}
+
+static inline void t6_fill_tnl_lso(struct sk_buff *skb,
+ struct cpl_tx_tnl_lso *tnl_lso,
+ enum cpl_tx_tnl_lso_type tnl_type)
+{
+ u32 val;
+ int in_eth_xtra_len;
+ int l3hdr_len = skb_network_header_len(skb);
+ int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ const struct skb_shared_info *ssi = skb_shinfo(skb);
+ bool v6 = (ip_hdr(skb)->version == 6);
+
+ val = CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ CPL_TX_TNL_LSO_FIRST_F |
+ CPL_TX_TNL_LSO_LAST_F |
+ (v6 ? CPL_TX_TNL_LSO_IPV6OUT_F : 0) |
+ CPL_TX_TNL_LSO_ETHHDRLENOUT_V(eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPHDRLENOUT_V(l3hdr_len / 4) |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPHDRCHKOUT_F) |
+ CPL_TX_TNL_LSO_IPLENSETOUT_F |
+ (v6 ? 0 : CPL_TX_TNL_LSO_IPIDINCOUT_F);
+ tnl_lso->op_to_IpIdSplitOut = htonl(val);
+
+ tnl_lso->IpIdOffsetOut = 0;
+
+ /* Get the tunnel header length */
+ val = skb_inner_mac_header(skb) - skb_mac_header(skb);
+ in_eth_xtra_len = skb_inner_network_header(skb) -
+ skb_inner_mac_header(skb) - ETH_HLEN;
+
+ switch (tnl_type) {
+ case TX_TNL_TYPE_VXLAN:
+ case TX_TNL_TYPE_GENEVE:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen =
+ htons(CPL_TX_TNL_LSO_UDPCHKCLROUT_F |
+ CPL_TX_TNL_LSO_UDPLENSETOUT_F);
+ break;
+ default:
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0;
+ break;
+ }
+
+ tnl_lso->UdpLenSetOut_to_TnlHdrLen |=
+ htons(CPL_TX_TNL_LSO_TNLHDRLEN_V(val) |
+ CPL_TX_TNL_LSO_TNLTYPE_V(tnl_type));
+
+ tnl_lso->r1 = 0;
+
+ val = CPL_TX_TNL_LSO_ETHHDRLEN_V(in_eth_xtra_len / 4) |
+ CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) |
+ CPL_TX_TNL_LSO_IPHDRLEN_V(skb_inner_network_header_len(skb) / 4) |
+ CPL_TX_TNL_LSO_TCPHDRLEN_V(inner_tcp_hdrlen(skb) / 4);
+ tnl_lso->Flow_to_TcpHdrLen = htonl(val);
+
+ tnl_lso->IpIdOffset = htons(0);
+
+ tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size));
+ tnl_lso->TCPSeqOffset = htonl(0);
+ tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len));
+}
+
/**
* t4_eth_xmit - add a packet to an Ethernet Tx queue
* @skb: the packet
@@ -1171,6 +1286,9 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
bool immediate = false;
int len, max_pkt_len;
bool ptp_enabled = is_ptp_enabled(skb, dev);
+ unsigned int chip_ver;
+ enum cpl_tx_tnl_lso_type tnl_type = TX_TNL_TYPE_OPAQUE;
+
#ifdef CONFIG_CHELSIO_T4_FCOE
int err;
#endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -1227,7 +1345,8 @@ out_free: dev_kfree_skb_any(skb);
}
#endif /* CONFIG_CHELSIO_T4_FCOE */
- flits = calc_tx_flits(skb);
+ chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+ flits = calc_tx_flits(skb, chip_ver);
ndesc = flits_to_desc(flits);
credits = txq_avail(&q->q) - ndesc;
@@ -1241,9 +1360,12 @@ out_free: dev_kfree_skb_any(skb);
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
@@ -1269,33 +1391,58 @@ out_free: dev_kfree_skb_any(skb);
bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ struct cpl_tx_tnl_lso *tnl_lso = (void *)(wr + 1);
+
+ if (tnl_type)
+ len += sizeof(*tnl_lso);
+ else
+ len += sizeof(*lso);
- len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN_V(len));
- lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
- LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
- LSO_IPV6_V(v6) |
- LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
- LSO_IPHDR_LEN_V(l3hdr_len / 4) |
- LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
- lso->c.ipid_ofst = htons(0);
- lso->c.mss = htons(ssi->gso_size);
- lso->c.seqno_offset = htonl(0);
- if (is_t4(adap->params.chip))
- lso->c.len = htonl(skb->len);
- else
- lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
- cpl = (void *)(lso + 1);
+ if (tnl_type) {
+ struct iphdr *iph = ip_hdr(skb);
- if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
- cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
- else
- cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ t6_fill_tnl_lso(skb, tnl_lso, tnl_type);
+ cpl = (void *)(tnl_lso + 1);
+ /* Driver is expected to compute partial checksum that
+ * does not include the IP Total Length.
+ */
+ if (iph->version == 4) {
+ iph->check = 0;
+ iph->tot_len = 0;
+ iph->check = (u16)(~ip_fast_csum((u8 *)iph,
+ iph->ihl));
+ }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ cntrl = hwcsum(adap->params.chip, skb);
+ } else {
+ lso->c.lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) |
+ LSO_FIRST_SLICE_F | LSO_LAST_SLICE_F |
+ LSO_IPV6_V(v6) |
+ LSO_ETHHDR_LEN_V(eth_xtra_len / 4) |
+ LSO_IPHDR_LEN_V(l3hdr_len / 4) |
+ LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff));
+ lso->c.ipid_ofst = htons(0);
+ lso->c.mss = htons(ssi->gso_size);
+ lso->c.seqno_offset = htonl(0);
+ if (is_t4(adap->params.chip))
+ lso->c.len = htonl(skb->len);
+ else
+ lso->c.len =
+ htonl(LSO_T5_XFER_SIZE_V(skb->len));
+ cpl = (void *)(lso + 1);
- cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
- TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip)
+ <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
+ }
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 375ef86a84da..047609ef0515 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -195,9 +195,11 @@ static void t4_report_fw_error(struct adapter *adap)
u32 pcie_fw;
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
- if (pcie_fw & PCIE_FW_ERR_F)
+ if (pcie_fw & PCIE_FW_ERR_F) {
dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
reason[PCIE_FW_EVAL_G(pcie_fw)]);
+ adap->flags &= ~FW_OK;
+ }
}
/*
@@ -317,9 +319,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
* wait [for a while] till we're at the front [or bail out with an
* EBUSY] ...
*/
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_add_tail(&entry.list, &adap->mlist.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
delay_idx = 0;
ms = delay[0];
@@ -332,9 +334,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
*/
pcie_fw = t4_read_reg(adap, PCIE_FW_A);
if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -365,9 +367,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
if (v != MBOX_OWNER_DRV) {
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
@@ -418,9 +420,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
execute = i + ms;
t4_record_mbox(adap, cmd_rpl,
MBOX_LEN, access, execute);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
return -FW_CMD_RETVAL_G((int)res);
}
}
@@ -430,9 +432,9 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- spin_lock(&adap->mbox_lock);
+ spin_lock_bh(&adap->mbox_lock);
list_del(&entry.list);
- spin_unlock(&adap->mbox_lock);
+ spin_unlock_bh(&adap->mbox_lock);
t4_fatal_err(adap);
return ret;
}
@@ -524,11 +526,14 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* MEM_EDC1 = 1
* MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
* MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ * MEM_HMA = 4
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
- if (mtype != MEM_MC1)
+ if (mtype == MEM_HMA) {
+ memoffset = 2 * (edc_size * 1024 * 1024);
+ } else if (mtype != MEM_MC1) {
memoffset = (mtype * (edc_size * 1024 * 1024));
- else {
+ } else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
@@ -4923,6 +4928,14 @@ void t4_intr_disable(struct adapter *adapter)
t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
}
+unsigned int t4_chip_rss_size(struct adapter *adap)
+{
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ return RSS_NENTRIES;
+ else
+ return T6_RSS_NENTRIES;
+}
+
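
A sketch of the intended use: the RSS table is twice as large on T6, so callers size their buffers from t4_chip_rss_size() before calling t4_read_rss() (kernel context assumed; example_dump_rss() is hypothetical):

	static int example_dump_rss(struct adapter *adap)
	{
		unsigned int n = t4_chip_rss_size(adap);	/* 2048 or 4096 entries */
		u16 *map;
		int ret;

		map = kcalloc(n, sizeof(*map), GFP_KERNEL);
		if (!map)
			return -ENOMEM;

		ret = t4_read_rss(adap, map);	/* one u16 VI id per entry */
		kfree(map);
		return ret;
	}
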
/**
* t4_config_rss_range - configure a portion of the RSS mapping table
* @adapter: the adapter
@@ -5061,10 +5074,11 @@ static int rd_rss_row(struct adapter *adap, int row, u32 *val)
*/
int t4_read_rss(struct adapter *adapter, u16 *map)
{
+ int i, ret, nentries;
u32 val;
- int i, ret;
- for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+ nentries = t4_chip_rss_size(adapter);
+ for (i = 0; i < nentries / 2; ++i) {
ret = rd_rss_row(adapter, i, &val);
if (ret)
return ret;
@@ -5076,7 +5090,7 @@ int t4_read_rss(struct adapter *adapter, u16 *map)
static unsigned int t4_use_ldst(struct adapter *adap)
{
- return (adap->flags & FW_OK) || !adap->use_bd;
+ return (adap->flags & FW_OK) && !adap->use_bd;
}
/**
@@ -6071,6 +6085,7 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
"CR2_QSFP",
"SFP28",
"KR_SFP28",
+ "KR_XLAUI"
};
if (port_type < ARRAY_SIZE(port_type_description))
@@ -6526,18 +6541,21 @@ void t4_sge_decode_idma_state(struct adapter *adapter, int state)
* t4_sge_ctxt_flush - flush the SGE context cache
* @adap: the adapter
* @mbox: mailbox to use for the FW command
+ * @ctx_type: Egress or Ingress
*
* Issues a FW command through the given mailbox to flush the
* SGE context cache.
*/
-int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
+int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
{
int ret;
u32 ldst_addrspace;
struct fw_ldst_cmd c;
memset(&c, 0, sizeof(c));
- ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
+ ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
+ FW_LDST_ADDRSPC_SGE_EGRC :
+ FW_LDST_ADDRSPC_SGE_INGC);
c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F |
ldst_addrspace);
@@ -7451,6 +7469,112 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
}
/**
+ * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index of the entry in mps tcam
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Removes the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number on failure.
+ */
+int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(0) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+ FW_CMD_LEN16_V(val));
+
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
+ FW_VI_MAC_ID_BASED_FREE);
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+}
+
+/**
+ * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
+ * @adap: the adapter
+ * @viid: the VI id
+ * @addr: the MAC address
+ * @mask: the mask
+ * @idx: index at which to add this entry
+ * @lookup_type: MAC address for inner (1) or outer (0) header
+ * @port_id: the port index
+ * @sleep_ok: call is allowed to sleep
+ *
+ * Adds the mac entry at the specified index using raw mac interface.
+ *
+ * Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
+ const u8 *addr, const u8 *mask, unsigned int idx,
+ u8 lookup_type, u8 port_id, bool sleep_ok)
+{
+ int ret = 0;
+ struct fw_vi_mac_cmd c;
+ struct fw_vi_mac_raw *p = &c.u.raw;
+ u32 val;
+
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ val = FW_CMD_LEN16_V(1) |
+ FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
+ c.freemacs_to_len16 = cpu_to_be32(val);
+
+ /* Specify that this is an inner mac address */
+ p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
+
+ /* Lookup Type. Outer header: 0, Inner header: 1 */
+ p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
+ DATAPORTNUM_V(port_id));
+ /* Lookup mask and port mask */
+ p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
+ DATAPORTNUM_V(DATAPORTNUM_M));
+
+ /* Copy the address and the mask */
+ memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
+ memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
+
+ ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret == 0) {
+ ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
+ if (ret != idx)
+ ret = -ENOMEM;
+ }
+
+ return ret;
+}
+
+/**
* t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
* @adap: the adapter
* @mbox: mailbox to use for the FW command
@@ -8491,22 +8615,6 @@ found:
return 0;
}
-static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
-{
- u16 val;
- u32 pcie_cap;
-
- pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
- if (pcie_cap) {
- pci_read_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, &val);
- val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
- val |= range;
- pci_write_config_word(adapter->pdev,
- pcie_cap + PCI_EXP_DEVCTL2, val);
- }
-}
-
/**
* t4_prep_adapter - prepare SW and HW for operation
* @adapter: the adapter
@@ -8592,8 +8700,9 @@ int t4_prep_adapter(struct adapter *adapter)
adapter->params.portvec = 1;
adapter->params.vpd.cclk = 50000;
- /* Set pci completion timeout value to 4 seconds. */
- set_pcie_completion_timeout(adapter, 0xd);
+ /* Set PCIe completion timeout to 4 seconds. */
+ pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
+ PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
return 0;
}
@@ -9736,3 +9845,91 @@ int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
NULL, 1);
}
+
+/**
+ * t4_i2c_rd - read I2C data from adapter
+ * @adap: the adapter
+ * @port: Port number if per-port device; <0 if not
+ * @devid: per-port device ID or absolute device ID
+ * @offset: byte offset into device I2C space
+ * @len: byte length of I2C space data
+ * @buf: buffer in which to return I2C data
+ *
+ * Reads the I2C data from the indicated device and location.
+ */
+int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
+ unsigned int devid, unsigned int offset,
+ unsigned int len, u8 *buf)
+{
+ struct fw_ldst_cmd ldst_cmd, ldst_rpl;
+ unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
+ int ret = 0;
+
+ if (len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ /* Don't allow reads that span multiple pages */
+ if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
+ return -EINVAL;
+
+ memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_cmd.op_to_addrspace =
+ cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_READ_F |
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
+ ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
+ ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
+ ldst_cmd.u.i2c.did = devid;
+
+ while (len > 0) {
+ unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
+
+ ldst_cmd.u.i2c.boffset = offset;
+ ldst_cmd.u.i2c.blen = i2c_len;
+
+ ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
+ &ldst_rpl);
+ if (ret)
+ break;
+
+ memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
+ offset += i2c_len;
+ buf += i2c_len;
+ len -= i2c_len;
+ }
+
+ return ret;
+}
+
+/**
+ * t4_set_vlan_acl - Set a VLAN id for the specified VF
+ * @adap: the adapter
+ * @mbox: mailbox to use for the FW command
+ * @vf: one of the VFs instantiated by the specified PF
+ * @vlan: the VLAN id to be set
+ */
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan)
+{
+ struct fw_acl_vlan_cmd vlan_cmd;
+ unsigned int enable;
+
+ enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
+ memset(&vlan_cmd, 0, sizeof(vlan_cmd));
+ vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_F |
+ FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
+ FW_ACL_VLAN_CMD_VFN_V(vf));
+ vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
+ /* Drop all packets that do not match the vlan id */
+ vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
+ if (enable != 0) {
+ vlan_cmd.nvlan = 1;
+ vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
+ }
+
+ return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);
+}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index a964ed184356..361d5032c288 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -38,21 +38,22 @@
#include <linux/types.h>
enum {
- NCHAN = 4, /* # of HW channels */
- MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
- EEPROMSIZE = 17408, /* Serial EEPROM physical size */
- EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
- EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
- RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
- TCB_SIZE = 128, /* TCB size */
- NMTUS = 16, /* size of MTU table */
- NCCTRL_WIN = 32, /* # of congestion control windows */
- NTX_SCHED = 8, /* # of HW Tx scheduling queues */
- PM_NSTATS = 5, /* # of PM stats */
- T6_PM_NSTATS = 7, /* # of PM stats in T6 */
- MBOX_LEN = 64, /* mailbox size in bytes */
- TRACE_LEN = 112, /* length of trace data and mask */
- FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
+ NCHAN = 4, /* # of HW channels */
+ MAX_MTU = 9600, /* max MAC MTU, excluding header + FCS */
+ EEPROMSIZE = 17408, /* Serial EEPROM physical size */
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */
+ RSS_NENTRIES = 2048, /* # of entries in RSS mapping table */
+ T6_RSS_NENTRIES = 4096, /* # of entries in RSS mapping table */
+ TCB_SIZE = 128, /* TCB size */
+ NMTUS = 16, /* size of MTU table */
+ NCCTRL_WIN = 32, /* # of congestion control windows */
+ NTX_SCHED = 8, /* # of HW Tx scheduling queues */
+ PM_NSTATS = 5, /* # of PM stats */
+ T6_PM_NSTATS = 7, /* # of PM stats in T6 */
+ MBOX_LEN = 64, /* mailbox size in bytes */
+ TRACE_LEN = 112, /* length of trace data and mask */
+ FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
};
enum {
@@ -70,7 +71,9 @@ enum {
/* SGE context types */
enum ctxt_type {
- CTXT_FLM = 2,
+ CTXT_EGRESS,
+ CTXT_INGRESS,
+ CTXT_FLM,
CTXT_CNM,
};
@@ -284,4 +287,14 @@ enum {
#define SGE_TIMESTAMP_V(x) ((__u64)(x) << SGE_TIMESTAMP_S)
#define SGE_TIMESTAMP_G(x) (((__u64)(x) >> SGE_TIMESTAMP_S) & SGE_TIMESTAMP_M)
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define I2C_PAGE_SIZE 0x100
+#define SFP_DIAG_TYPE_ADDR 0x5c
+#define SFP_DIAG_TYPE_LEN 0x1
+#define SFF_8472_COMP_ADDR 0x5e
+#define SFF_8472_COMP_LEN 0x1
+#define SFF_REV_ADDR 0x1
+#define SFF_REV_LEN 0x1
+
#endif /* __T4_HW_H */
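
The new constants pair with t4_i2c_rd() added in t4_hw.c; for example, an ethtool module-EEPROM path could read the SFF-8472 compliance byte from the A0h device like this (a sketch, kernel context assumed; the surrounding ethtool plumbing is not part of this patch):

	static int example_read_sff8472_comp(struct adapter *adap, int port, u8 *comp)
	{
		/* A0h device, one byte at offset SFF_8472_COMP_ADDR (0x5e) */
		return t4_i2c_rd(adap, adap->mbox, port, I2C_DEV_ADDR_A0,
				 SFF_8472_COMP_ADDR, SFF_8472_COMP_LEN, comp);
	}
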
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 7e12f241145b..d0db4427b77e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -107,6 +107,7 @@ enum {
CPL_FW6_MSG = 0xE0,
CPL_FW6_PLD = 0xE1,
+ CPL_TX_TNL_LSO = 0xEC,
CPL_TX_PKT_LSO = 0xED,
CPL_TX_PKT_XT = 0xEE,
@@ -1479,6 +1480,169 @@ struct ulp_txpkt {
#define ULP_TXPKT_RO_V(x) ((x) << ULP_TXPKT_RO_S)
#define ULP_TXPKT_RO_F ULP_TXPKT_RO_V(1U)
+enum cpl_tx_tnl_lso_type {
+ TX_TNL_TYPE_OPAQUE,
+ TX_TNL_TYPE_NVGRE,
+ TX_TNL_TYPE_VXLAN,
+ TX_TNL_TYPE_GENEVE,
+};
+
+struct cpl_tx_tnl_lso {
+ __be32 op_to_IpIdSplitOut;
+ __be16 IpIdOffsetOut;
+ __be16 UdpLenSetOut_to_TnlHdrLen;
+ __be64 r1;
+ __be32 Flow_to_TcpHdrLen;
+ __be16 IpIdOffset;
+ __be16 IpIdSplit_to_Mss;
+ __be32 TCPSeqOffset;
+ __be32 EthLenOffset_Size;
+ /* encapsulated CPL (TX_PKT_XT) follows here */
+};
+
+#define CPL_TX_TNL_LSO_OPCODE_S 24
+#define CPL_TX_TNL_LSO_OPCODE_M 0xff
+#define CPL_TX_TNL_LSO_OPCODE_V(x) ((x) << CPL_TX_TNL_LSO_OPCODE_S)
+#define CPL_TX_TNL_LSO_OPCODE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_OPCODE_S) & CPL_TX_TNL_LSO_OPCODE_M)
+
+#define CPL_TX_TNL_LSO_FIRST_S 23
+#define CPL_TX_TNL_LSO_FIRST_M 0x1
+#define CPL_TX_TNL_LSO_FIRST_V(x) ((x) << CPL_TX_TNL_LSO_FIRST_S)
+#define CPL_TX_TNL_LSO_FIRST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_FIRST_S) & CPL_TX_TNL_LSO_FIRST_M)
+#define CPL_TX_TNL_LSO_FIRST_F CPL_TX_TNL_LSO_FIRST_V(1U)
+
+#define CPL_TX_TNL_LSO_LAST_S 22
+#define CPL_TX_TNL_LSO_LAST_M 0x1
+#define CPL_TX_TNL_LSO_LAST_V(x) ((x) << CPL_TX_TNL_LSO_LAST_S)
+#define CPL_TX_TNL_LSO_LAST_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_LAST_S) & CPL_TX_TNL_LSO_LAST_M)
+#define CPL_TX_TNL_LSO_LAST_F CPL_TX_TNL_LSO_LAST_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_S 21
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_M 0x1
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENXOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENXOUT_S) & \
+ CPL_TX_TNL_LSO_ETHHDRLENXOUT_M)
+#define CPL_TX_TNL_LSO_ETHHDRLENXOUT_F CPL_TX_TNL_LSO_ETHHDRLENXOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPV6OUT_S 20
+#define CPL_TX_TNL_LSO_IPV6OUT_M 0x1
+#define CPL_TX_TNL_LSO_IPV6OUT_V(x) ((x) << CPL_TX_TNL_LSO_IPV6OUT_S)
+#define CPL_TX_TNL_LSO_IPV6OUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6OUT_S) & CPL_TX_TNL_LSO_IPV6OUT_M)
+#define CPL_TX_TNL_LSO_IPV6OUT_F CPL_TX_TNL_LSO_IPV6OUT_V(1U)
+
+#define CPL_TX_TNL_LSO_ETHHDRLEN_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_ETHHDRLEN_S)
+#define CPL_TX_TNL_LSO_ETHHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLEN_S) & CPL_TX_TNL_LSO_ETHHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLEN_S 4
+#define CPL_TX_TNL_LSO_IPHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLEN_S)
+#define CPL_TX_TNL_LSO_IPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLEN_S) & CPL_TX_TNL_LSO_IPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_TCPHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TCPHDRLEN_M 0xf
+#define CPL_TX_TNL_LSO_TCPHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TCPHDRLEN_S)
+#define CPL_TX_TNL_LSO_TCPHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TCPHDRLEN_S) & CPL_TX_TNL_LSO_TCPHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_MSS_S 0
+#define CPL_TX_TNL_LSO_MSS_M 0x3fff
+#define CPL_TX_TNL_LSO_MSS_V(x) ((x) << CPL_TX_TNL_LSO_MSS_S)
+#define CPL_TX_TNL_LSO_MSS_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_MSS_S) & CPL_TX_TNL_LSO_MSS_M)
+
+#define CPL_TX_TNL_LSO_SIZE_S 0
+#define CPL_TX_TNL_LSO_SIZE_M 0xfffffff
+#define CPL_TX_TNL_LSO_SIZE_V(x) ((x) << CPL_TX_TNL_LSO_SIZE_S)
+#define CPL_TX_TNL_LSO_SIZE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_SIZE_S) & CPL_TX_TNL_LSO_SIZE_M)
+
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_S 16
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_M 0xf
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_ETHHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_ETHHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_ETHHDRLENOUT_S) & CPL_TX_TNL_LSO_ETHHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_S 4
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_M 0xfff
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRLENOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRLENOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRLENOUT_S) & CPL_TX_TNL_LSO_IPHDRLENOUT_M)
+
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_S 3
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPHDRCHKOUT_S)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPHDRCHKOUT_S) & CPL_TX_TNL_LSO_IPHDRCHKOUT_M)
+#define CPL_TX_TNL_LSO_IPHDRCHKOUT_F CPL_TX_TNL_LSO_IPHDRCHKOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPLENSETOUT_S 2
+#define CPL_TX_TNL_LSO_IPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPLENSETOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPLENSETOUT_S) & CPL_TX_TNL_LSO_IPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_IPLENSETOUT_F CPL_TX_TNL_LSO_IPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_IPIDINCOUT_S 1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_M 0x1
+#define CPL_TX_TNL_LSO_IPIDINCOUT_V(x) ((x) << CPL_TX_TNL_LSO_IPIDINCOUT_S)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPIDINCOUT_S) & CPL_TX_TNL_LSO_IPIDINCOUT_M)
+#define CPL_TX_TNL_LSO_IPIDINCOUT_F CPL_TX_TNL_LSO_IPIDINCOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_S 14
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPCHKCLROUT_S)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPCHKCLROUT_S) & \
+ CPL_TX_TNL_LSO_UDPCHKCLROUT_M)
+#define CPL_TX_TNL_LSO_UDPCHKCLROUT_F CPL_TX_TNL_LSO_UDPCHKCLROUT_V(1U)
+
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_S 15
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_M 0x1
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_V(x) \
+ ((x) << CPL_TX_TNL_LSO_UDPLENSETOUT_S)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_UDPLENSETOUT_S) & \
+ CPL_TX_TNL_LSO_UDPLENSETOUT_M)
+#define CPL_TX_TNL_LSO_UDPLENSETOUT_F CPL_TX_TNL_LSO_UDPLENSETOUT_V(1U)
+
+#define CPL_TX_TNL_LSO_TNLTYPE_S 12
+#define CPL_TX_TNL_LSO_TNLTYPE_M 0x3
+#define CPL_TX_TNL_LSO_TNLTYPE_V(x) ((x) << CPL_TX_TNL_LSO_TNLTYPE_S)
+#define CPL_TX_TNL_LSO_TNLTYPE_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLTYPE_S) & CPL_TX_TNL_LSO_TNLTYPE_M)
+
+#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16
+#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf
+#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN)
+#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \
+ (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN)
+
+#define CPL_TX_TNL_LSO_TNLHDRLEN_S 0
+#define CPL_TX_TNL_LSO_TNLHDRLEN_M 0xfff
+#define CPL_TX_TNL_LSO_TNLHDRLEN_V(x) ((x) << CPL_TX_TNL_LSO_TNLHDRLEN_S)
+#define CPL_TX_TNL_LSO_TNLHDRLEN_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_TNLHDRLEN_S) & CPL_TX_TNL_LSO_TNLHDRLEN_M)
+
+#define CPL_TX_TNL_LSO_IPV6_S 20
+#define CPL_TX_TNL_LSO_IPV6_M 0x1
+#define CPL_TX_TNL_LSO_IPV6_V(x) ((x) << CPL_TX_TNL_LSO_IPV6_S)
+#define CPL_TX_TNL_LSO_IPV6_G(x) \
+ (((x) >> CPL_TX_TNL_LSO_IPV6_S) & CPL_TX_TNL_LSO_IPV6_M)
+#define CPL_TX_TNL_LSO_IPV6_F CPL_TX_TNL_LSO_IPV6_V(1U)
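+/* Illustrative only: the op_to_IpIdSplitOut word of a tunnel-LSO request
+ * is built by OR-ing the field setters above, e.g.
+ *   CPL_TX_TNL_LSO_OPCODE_V(CPL_TX_TNL_LSO) |
+ *   CPL_TX_TNL_LSO_FIRST_F | CPL_TX_TNL_LSO_LAST_F | ...
+ */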
+
#define ULP_TX_SC_MORE_S 23
#define ULP_TX_SC_MORE_V(x) ((x) << ULP_TX_SC_MORE_S)
#define ULP_TX_SC_MORE_F ULP_TX_SC_MORE_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index 60cf9e02de5d..51b18035d691 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -183,6 +183,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x50a9), /* Custom T580-KR */
CH_PCI_ID_TABLE_FENTRY(0x50aa), /* Custom T580-CR */
CH_PCI_ID_TABLE_FENTRY(0x50ab), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x50ac), /* Custom T540-BT */
/* T6 adapters:
*/
@@ -206,6 +207,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x6084), /* Custom T64100-CR QSFP28 */
CH_PCI_ID_TABLE_FENTRY(0x6085), /* Custom T6240-SO */
CH_PCI_ID_TABLE_FENTRY(0x6086), /* Custom T6225-SO-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a7cfece72828..a6df73398d17 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -45,6 +45,9 @@
#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+#define NUM_CIM_CTL_TSCH_CHANNEL_INSTANCES 4
+#define NUM_CIM_CTL_TSCH_CHANNEL_TSCH_CLASS_INSTANCES 16
+
#define MYPORT_BASE 0x1c000
#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
@@ -961,6 +964,10 @@
#define MA_EXT_MEMORY1_BAR_A 0x7808
+#define HMA_MUX_S 5
+#define HMA_MUX_V(x) ((x) << HMA_MUX_S)
+#define HMA_MUX_F HMA_MUX_V(1U)
+
#define EXT_MEM1_BASE_S 16
#define EXT_MEM1_BASE_M 0xfffU
#define EXT_MEM1_BASE_G(x) (((x) >> EXT_MEM1_BASE_S) & EXT_MEM1_BASE_M)
@@ -2504,6 +2511,28 @@
#define MPS_RX_MAC_BG_PG_CNT0_A 0x11208
#define MPS_RX_LPBK_BG_PG_CNT0_A 0x11218
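+/* RX tunnel recognition: 16-bit UDP destination port plus enable bit for
+ * VXLAN and GENEVE frames.
+ */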
+#define MPS_RX_VXLAN_TYPE_A 0x11234
+
+#define VXLAN_EN_S 16
+#define VXLAN_EN_V(x) ((x) << VXLAN_EN_S)
+#define VXLAN_EN_F VXLAN_EN_V(1U)
+
+#define VXLAN_S 0
+#define VXLAN_M 0xffffU
+#define VXLAN_V(x) ((x) << VXLAN_S)
+#define VXLAN_G(x) (((x) >> VXLAN_S) & VXLAN_M)
+
+#define MPS_RX_GENEVE_TYPE_A 0x11238
+
+#define GENEVE_EN_S 16
+#define GENEVE_EN_V(x) ((x) << GENEVE_EN_S)
+#define GENEVE_EN_F GENEVE_EN_V(1U)
+
+#define GENEVE_S 0
+#define GENEVE_M 0xffffU
+#define GENEVE_V(x) ((x) << GENEVE_S)
+#define GENEVE_G(x) (((x) >> GENEVE_S) & GENEVE_M)
+
#define MPS_CLS_TCAM_Y_L_A 0xf000
#define MPS_CLS_TCAM_DATA0_A 0xf000
#define MPS_CLS_TCAM_DATA1_A 0xf004
@@ -2530,8 +2559,14 @@
#define DATAPORTNUM_S 12
#define DATAPORTNUM_M 0xfU
+#define DATAPORTNUM_V(x) ((x) << DATAPORTNUM_S)
#define DATAPORTNUM_G(x) (((x) >> DATAPORTNUM_S) & DATAPORTNUM_M)
+#define DATALKPTYPE_S 10
+#define DATALKPTYPE_M 0x3U
+#define DATALKPTYPE_V(x) ((x) << DATALKPTYPE_S)
+#define DATALKPTYPE_G(x) (((x) >> DATALKPTYPE_S) & DATALKPTYPE_M)
+
#define DATADIPHIT_S 8
#define DATADIPHIT_V(x) ((x) << DATADIPHIT_S)
#define DATADIPHIT_F DATADIPHIT_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index be3658301832..0d83b4064a78 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -835,6 +835,7 @@ enum fw_ldst_addrspc {
FW_LDST_ADDRSPC_MPS = 0x0020,
FW_LDST_ADDRSPC_FUNC = 0x0028,
FW_LDST_ADDRSPC_FUNC_PCIE = 0x0029,
+ FW_LDST_ADDRSPC_I2C = 0x0038,
};
enum fw_ldst_mps_fid {
@@ -2066,6 +2067,7 @@ struct fw_vi_cmd {
#define FW_VI_MAC_ADD_MAC 0x3FF
#define FW_VI_MAC_ADD_PERSIST_MAC 0x3FE
#define FW_VI_MAC_MAC_BASED_FREE 0x3FD
+#define FW_VI_MAC_ID_BASED_FREE 0x3FC
#define FW_CLS_TCAM_NUM_ENTRIES 336
enum fw_vi_mac_smac {
@@ -2082,6 +2084,13 @@ enum fw_vi_mac_result {
FW_VI_MAC_R_F_ACL_CHECK
};
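+/* MAC entry types selected via the 3-bit FW_VI_MAC_CMD_ENTRY_TYPE field */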
+enum fw_vi_mac_entry_types {
+ FW_VI_MAC_TYPE_EXACTMAC,
+ FW_VI_MAC_TYPE_HASHVEC,
+ FW_VI_MAC_TYPE_RAW,
+ FW_VI_MAC_TYPE_EXACTMAC_VNI,
+};
+
struct fw_vi_mac_cmd {
__be32 op_to_viid;
__be32 freemacs_to_len16;
@@ -2093,6 +2102,13 @@ struct fw_vi_mac_cmd {
struct fw_vi_mac_hash {
__be64 hashvec;
} hash;
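+ /* directly indexed entry: data words (data0/data1) and the
+ * corresponding mask words (data0m/data1m)
+ */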
+ struct fw_vi_mac_raw {
+ __be32 raw_idx_pkd;
+ __be32 data0_pkd;
+ __be32 data1[2];
+ __be64 data0m_pkd;
+ __be32 data1m[2];
+ } raw;
} u;
};
@@ -2102,6 +2118,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_FREEMACS_S 31
#define FW_VI_MAC_CMD_FREEMACS_V(x) ((x) << FW_VI_MAC_CMD_FREEMACS_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_S 23
+#define FW_VI_MAC_CMD_ENTRY_TYPE_M 0x7
+#define FW_VI_MAC_CMD_ENTRY_TYPE_V(x) ((x) << FW_VI_MAC_CMD_ENTRY_TYPE_S)
+#define FW_VI_MAC_CMD_ENTRY_TYPE_G(x) \
+ (((x) >> FW_VI_MAC_CMD_ENTRY_TYPE_S) & FW_VI_MAC_CMD_ENTRY_TYPE_M)
+
#define FW_VI_MAC_CMD_HASHVECEN_S 23
#define FW_VI_MAC_CMD_HASHVECEN_V(x) ((x) << FW_VI_MAC_CMD_HASHVECEN_S)
#define FW_VI_MAC_CMD_HASHVECEN_F FW_VI_MAC_CMD_HASHVECEN_V(1U)
@@ -2128,6 +2150,12 @@ struct fw_vi_mac_cmd {
#define FW_VI_MAC_CMD_IDX_G(x) \
(((x) >> FW_VI_MAC_CMD_IDX_S) & FW_VI_MAC_CMD_IDX_M)
+#define FW_VI_MAC_CMD_RAW_IDX_S 16
+#define FW_VI_MAC_CMD_RAW_IDX_M 0xffff
+#define FW_VI_MAC_CMD_RAW_IDX_V(x) ((x) << FW_VI_MAC_CMD_RAW_IDX_S)
+#define FW_VI_MAC_CMD_RAW_IDX_G(x) \
+ (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
+
#define FW_RXMODE_MTU_NO_CHG 65535
struct fw_vi_rxmode_cmd {
@@ -2332,14 +2360,22 @@ struct fw_acl_vlan_cmd {
#define FW_ACL_VLAN_CMD_VFN_S 0
#define FW_ACL_VLAN_CMD_VFN_V(x) ((x) << FW_ACL_VLAN_CMD_VFN_S)
-#define FW_ACL_VLAN_CMD_EN_S 31
-#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_S 31
+#define FW_ACL_VLAN_CMD_EN_M 0x1
+#define FW_ACL_VLAN_CMD_EN_V(x) ((x) << FW_ACL_VLAN_CMD_EN_S)
+#define FW_ACL_VLAN_CMD_EN_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_EN_S) & FW_ACL_VLAN_CMD_EN_M)
+#define FW_ACL_VLAN_CMD_EN_F FW_ACL_VLAN_CMD_EN_V(1U)
#define FW_ACL_VLAN_CMD_DROPNOVLAN_S 7
#define FW_ACL_VLAN_CMD_DROPNOVLAN_V(x) ((x) << FW_ACL_VLAN_CMD_DROPNOVLAN_S)
-#define FW_ACL_VLAN_CMD_FM_S 6
-#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_S 6
+#define FW_ACL_VLAN_CMD_FM_M 0x1
+#define FW_ACL_VLAN_CMD_FM_V(x) ((x) << FW_ACL_VLAN_CMD_FM_S)
+#define FW_ACL_VLAN_CMD_FM_G(x) \
+ (((x) >> FW_ACL_VLAN_CMD_FM_S) & FW_ACL_VLAN_CMD_FM_M)
+#define FW_ACL_VLAN_CMD_FM_F FW_ACL_VLAN_CMD_FM_V(1U)
/* old 16-bit port capabilities bitmap (fw_port_cap16_t) */
enum fw_port_cap {
@@ -2835,6 +2871,7 @@ enum fw_port_type {
FW_PORT_TYPE_CR2_QSFP,
FW_PORT_TYPE_SFP28,
FW_PORT_TYPE_KR_SFP28,
+ FW_PORT_TYPE_KR_XLAUI,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};