Diffstat (limited to 'src/drivers/net/bnxt')
-rw-r--r--  src/drivers/net/bnxt/bnxt.c     | 399
-rw-r--r--  src/drivers/net/bnxt/bnxt.h     | 215
-rw-r--r--  src/drivers/net/bnxt/bnxt_dbg.h |   6
-rw-r--r--  src/drivers/net/bnxt/bnxt_hsi.h | 229
4 files changed, 736 insertions, 113 deletions
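
In broad strokes, this commit adds support for the next-generation P7 (Thor2 / BCM5760X) controllers: new PCI IDs, chip-generation flags replacing the single bp->thor boolean, a 64-bit doorbell format carrying epoch and toggle bits, the V3 RX completion layout, and the HWRM "speeds2" link-speed interface up to 400G PAM4. The FLAG_SET/FLAG_TEST helpers used throughout the new code are not part of this diff; a minimal sketch of what they are assumed to look like (plain bitmask macros; the real definitions live elsewhere in bnxt.h and may differ):

/* Assumed helpers, not shown in this diff: simple bitmask operations
 * on the bp->flags word. */
#define FLAG_SET(f, b)    ( (f) |= (b) )
#define FLAG_TEST(f, b)   ( (f) & (b) )
#define FLAG_RESET(f, b)  ( (f) &= ~(b) )
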
diff --git a/src/drivers/net/bnxt/bnxt.c b/src/drivers/net/bnxt/bnxt.c
index 605aea32..a127f6ce 100644
--- a/src/drivers/net/bnxt/bnxt.c
+++ b/src/drivers/net/bnxt/bnxt.c
@@ -24,6 +24,11 @@ static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx );
void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt );
static struct pci_device_id bnxt_nics[] = {
+ PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "14e4-1604", 0 ),
+ PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "14e4-1605", 0 ),
+ PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "14e4-1606", 0 ),
+ PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "14e4-1609", 0 ),
+ PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "14e4-1614", 0 ),
PCI_ROM( 0x14e4, 0x16c0, "14e4-16C0", "14e4-16C0", 0 ),
PCI_ROM( 0x14e4, 0x16c1, "14e4-16C1", "14e4-16C1", BNXT_FLAG_PCI_VF ),
PCI_ROM( 0x14e4, 0x16c8, "14e4-16C8", "14e4-16C8", 0 ),
@@ -62,26 +67,22 @@ static struct pci_device_id bnxt_nics[] = {
PCI_ROM( 0x14e4, 0x16ef, "14e4-16EF", "14e4-16EF", 0 ),
PCI_ROM( 0x14e4, 0x16f0, "14e4-16F0", "14e4-16F0", 0 ),
PCI_ROM( 0x14e4, 0x16f1, "14e4-16F1", "14e4-16F1", 0 ),
- PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "14e4-1604", 0 ),
- PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "14e4-1605", 0 ),
- PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "14e4-1606", 0 ),
- PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "14e4-1609", 0 ),
- PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "14e4-1614", 0 ),
- PCI_ROM( 0x14e4, 0xd802, "14e4-D802", "14e4-D802", 0 ),
- PCI_ROM( 0x14e4, 0xd804, "14e4-D804", "14e4-D804", 0 ),
PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "14e4-1750", 0 ),
- PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "14e4-1802", 0 ),
- PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "14e4-1805", 0 ),
PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "14e4-1751", 0 ),
- PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "14e4-1801", 0 ),
- PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "14e4-1804", 0 ),
PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "14e4-1752", 0 ),
+ PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "14e4-1760", 0 ),
PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "14e4-1800", 0 ),
+ PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "14e4-1801", 0 ),
+ PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "14e4-1802", 0 ),
PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "14e4-1803", 0 ),
+ PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "14e4-1804", 0 ),
+ PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "14e4-1805", 0 ),
PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "14e4-1806", BNXT_FLAG_PCI_VF ),
PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "14e4-1807", BNXT_FLAG_PCI_VF ),
PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "14e4-1808", BNXT_FLAG_PCI_VF ),
PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "14e4-1809", BNXT_FLAG_PCI_VF ),
+ PCI_ROM( 0x14e4, 0xd802, "14e4-D802", "14e4-D802", 0 ),
+ PCI_ROM( 0x14e4, 0xd804, "14e4-D804", "14e4-D804", 0 ),
};
/**
@@ -181,7 +182,7 @@ static void bnxt_set_link ( struct bnxt *bp )
netdev_link_down ( bp->dev );
}
-static void thor_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
+static void dev_p5_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
{
void *off;
u64 val;
@@ -196,10 +197,28 @@ static void thor_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag )
write64 ( val, off );
}
+static void dev_p7_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle )
+{
+ void *off;
+ u64 val;
+
+ off = ( void * ) ( bp->bar1 );
+
+ val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) |
+ ( u64 )DBC_MSG_IDX ( idx ) |
+ ( u64 )DBC_MSG_EPCH ( epoch ) |
+ ( u64 )DBC_MSG_TOGGLE ( toggle );
+ write64 ( val, off );
+}
+
static void bnxt_db_nq ( struct bnxt *bp )
{
- if ( bp->thor )
- thor_db ( bp, ( u32 )bp->nq.cons_id,
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
+ dev_p7_db ( bp, ( u32 )bp->nq.cons_id,
+ ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM,
+ ( u32 )bp->nq.epoch, 0 );
+ else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
+ dev_p5_db ( bp, ( u32 )bp->nq.cons_id,
( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM );
else
write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) );
@@ -207,8 +226,12 @@ static void bnxt_db_nq ( struct bnxt *bp )
static void bnxt_db_cq ( struct bnxt *bp )
{
- if ( bp->thor )
- thor_db ( bp, ( u32 )bp->cq.cons_id,
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
+ dev_p7_db ( bp, ( u32 )bp->cq.cons_id,
+ ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL,
+ ( u32 )bp->cq.epoch, ( u32 )bp->nq.toggle );
+ else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
+ dev_p5_db ( bp, ( u32 )bp->cq.cons_id,
( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL );
else
write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ),
@@ -217,16 +240,22 @@ static void bnxt_db_cq ( struct bnxt *bp )
static void bnxt_db_rx ( struct bnxt *bp, u32 idx )
{
- if ( bp->thor )
- thor_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
+ dev_p7_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ,
+ ( u32 )bp->rx.epoch, 0 );
+ else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
+ dev_p5_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ );
else
write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) );
}
static void bnxt_db_tx ( struct bnxt *bp, u32 idx )
{
- if ( bp->thor )
- thor_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) )
+ dev_p7_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ,
+ ( u32 )bp->tx.epoch, 0 );
+ else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) )
+ dev_p5_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ );
else
write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ),
( bp->bar1 + 0 ) );
@@ -253,6 +282,31 @@ static u16 bnxt_get_pkt_vlan ( char *src )
return 0;
}
+static u16 bnxt_get_rx_vlan ( struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi )
+{
+ struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp;
+ struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * )rx_cmp_hi;
+ u16 rx_vlan;
+
+ /* Get VLAN ID from RX completion ring */
+ if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) ==
+ RX_PKT_V3_CMPL_TYPE_RX_L2_V3 ) {
+ if ( rx_cmp_hi_v3->flags2 & RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_ACT_REC_PTR )
+ rx_vlan = ( rx_cmp_hi_v3->metadata0 &
+ RX_PKT_V3_CMPL_HI_METADATA0_VID_MASK );
+ else
+ rx_vlan = 0;
+ } else {
+ if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN )
+ rx_vlan = ( rx_cmp_hi->metadata &
+ RX_PKT_CMPL_METADATA_VID_MASK );
+ else
+ rx_vlan = 0;
+ }
+
+ return rx_vlan;
+}
+
int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan )
{
if ( rx_vlan ) {
@@ -382,6 +436,9 @@ int bnxt_post_rx_buffers ( struct bnxt *bp )
}
}
cons_id = NEXT_IDX ( cons_id, bp->rx.ring_cnt );
+ /* If the ring has wrapped, flip the epoch bit */
+ if ( iob_idx > cons_id )
+ bp->rx.epoch ^= 1;
bp->rx.iob_cnt++;
}
@@ -396,14 +453,21 @@ int bnxt_post_rx_buffers ( struct bnxt *bp )
}
u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
+ struct rx_pkt_cmpl *rx_cmp,
struct rx_pkt_cmpl_hi *rx_cmp_hi, u16 rx_len )
{
+ struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp;
+ struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * )rx_cmp_hi;
u8 *rx_buf = ( u8 * )iob->data;
u16 err_flags, rx_vlan;
u8 ignore_chksum_err = 0;
int i;
- err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
+ if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) ==
+ RX_PKT_V3_CMPL_TYPE_RX_L2_V3 ) {
+ err_flags = rx_cmp_hi_v3->errors_v2 >> RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT;
+ } else
+ err_flags = rx_cmp_hi->errors_v2 >> RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT;
if ( rx_cmp_hi->errors_v2 == 0x20 || rx_cmp_hi->errors_v2 == 0x21 )
ignore_chksum_err = 1;
@@ -423,13 +487,7 @@ u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob,
return 2;
}
- /* Get VLAN ID from RX completion ring */
- if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN )
- rx_vlan = ( rx_cmp_hi->metadata &
- RX_PKT_CMPL_METADATA_VID_MASK );
- else
- rx_vlan = 0;
-
+ rx_vlan = bnxt_get_rx_vlan ( rx_cmp, rx_cmp_hi );
dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan );
if ( bnxt_vlan_drop ( bp, rx_vlan ) ) {
bp->rx.drop_vlan++;
@@ -449,10 +507,11 @@ static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt )
u16 cons_id;
cons_id = bp->cq.cons_id + cnt;
- if ( cons_id >= MAX_CQ_DESC_CNT ) {
+ if ( cons_id >= bp->cq.ring_cnt) {
/* Toggle completion bit when the ring wraps. */
bp->cq.completion_bit ^= 1;
- cons_id = cons_id - MAX_CQ_DESC_CNT;
+ bp->cq.epoch ^= 1;
+ cons_id = cons_id - bp->cq.ring_cnt;
}
bp->cq.cons_id = cons_id;
}
@@ -466,7 +525,7 @@ void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp,
dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx );
assert ( !iob );
- drop = bnxt_rx_drop ( bp, iob, rx_cmp_hi, rx_cmp->len );
+ drop = bnxt_rx_drop ( bp, iob, rx_cmp, rx_cmp_hi, rx_cmp->len );
dbg_rxp ( iob->data, rx_cmp->len, drop );
if ( drop )
netdev_rx_err ( dev, iob, -EINVAL );
@@ -531,12 +590,17 @@ void bnxt_mm_nic ( struct bnxt *bp )
memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE );
bp->nq.cons_id = 0;
bp->nq.completion_bit = 0x1;
+ bp->nq.epoch = 0;
+ bp->nq.toggle = 0;
bp->cq.cons_id = 0;
bp->cq.completion_bit = 0x1;
+ bp->cq.epoch = 0;
bp->tx.prod_id = 0;
bp->tx.cons_id = 0;
+ bp->tx.epoch = 0;
bp->rx.cons_id = 0;
bp->rx.iob_cnt = 0;
+ bp->rx.epoch = 0;
bp->link_status = STATUS_LINK_DOWN;
bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT;
@@ -722,8 +786,16 @@ static int bnxt_hwrm_ver_get ( struct bnxt *bp )
( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) )
FLAG_SET ( bp->flags, BNXT_FLAG_HWRM_SHORT_CMD_SUPP );
bp->hwrm_max_ext_req_len = resp->max_ext_req_len;
- if ( bp->chip_num == CHIP_NUM_57500 )
- bp->thor = 1;
+ if ( ( bp->chip_num == CHIP_NUM_57508 ) ||
+ ( bp->chip_num == CHIP_NUM_57504 ) ||
+ ( bp->chip_num == CHIP_NUM_57502 ) ) {
+ FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P5 );
+ FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS );
+ }
+ if ( bp->chip_num == CHIP_NUM_57608 ) {
+ FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P7 );
+ FLAG_SET ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS );
+ }
dbg_fw_ver ( resp, bp->hwrm_cmd_timeout );
return STATUS_SUCCESS;
}
@@ -915,6 +987,30 @@ static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp )
return STATUS_SUCCESS;
}
+static int bnxt_hwrm_port_phy_qcaps_req ( struct bnxt *bp )
+{
+ u16 cmd_len = ( u16 )sizeof ( struct hwrm_port_phy_qcaps_input );
+ struct hwrm_port_phy_qcaps_input *req;
+ struct hwrm_port_phy_qcaps_output *resp;
+ int rc;
+
+ DBGP ( "%s\n", __func__ );
+
+ req = ( struct hwrm_port_phy_qcaps_input * )bp->hwrm_addr_req;
+ resp = ( struct hwrm_port_phy_qcaps_output * )bp->hwrm_addr_resp;
+ hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCAPS, cmd_len );
+ rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
+ if ( rc ) {
+ DBGP ( "-s %s ( ): Failed\n", __func__ );
+ return STATUS_FAILURE;
+ }
+
+ if ( resp->flags2 & PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED )
+ FLAG_SET ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 );
+
+ return STATUS_SUCCESS;
+}
+
static int bnxt_hwrm_func_reset_req ( struct bnxt *bp )
{
u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_reset_input );
@@ -942,7 +1038,7 @@ static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp )
hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len );
req->fid = ( u16 )HWRM_NA_SIGNATURE;
bnxt_hwrm_assign_resources ( bp );
- if ( bp->thor ) {
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
req->enables |= ( FUNC_CFG_REQ_ENABLES_NUM_MSIX |
FUNC_CFG_REQ_ENABLES_NUM_VNICS |
FUNC_CFG_REQ_ENABLES_EVB_MODE );
@@ -1009,7 +1105,7 @@ static int bnxt_hwrm_set_async_event ( struct bnxt *bp )
u16 idx;
DBGP ( "%s\n", __func__ );
- if ( bp->thor )
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
idx = bp->nq_ring_id;
else
idx = bp->cq_ring_id;
@@ -1160,6 +1256,10 @@ static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx )
if ( idx & SUPPORT_SPEEDS )
bp->support_speeds = resp->support_speeds;
+ if ( idx & SUPPORT_SPEEDS2 )
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
+ bp->auto_link_speeds2_mask = resp->auto_link_speeds2;
+
if ( idx & DETECT_MEDIA )
bp->media_detect = resp->module_status;
@@ -1199,22 +1299,24 @@ static int bnxt_get_link_speed ( struct bnxt *bp )
u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma;
DBGP ( "%s\n", __func__ );
- test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4,
- ( u16 )LINK_SPEED_DRV_NUM,
- 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
- return STATUS_FAILURE;
- bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
+ if ( ! ( FLAG_TEST (bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) {
+ test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4,
+ ( u16 )LINK_SPEED_DRV_NUM,
+ 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
+ return STATUS_FAILURE;
+ bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT );
+ test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4,
+ ( u16 )D3_LINK_SPEED_FW_NUM, 1,
+ ( u16 )bp->port_idx ) != STATUS_SUCCESS )
+ return STATUS_FAILURE;
+ bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
+ D3_SPEED_FW_SHIFT );
+ }
test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4,
( u16 )LINK_SPEED_FW_NUM,
1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
return STATUS_FAILURE;
bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT );
- test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4,
- ( u16 )D3_LINK_SPEED_FW_NUM, 1,
- ( u16 )bp->port_idx ) != STATUS_SUCCESS )
- return STATUS_FAILURE;
- bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK,
- D3_SPEED_FW_SHIFT );
test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1,
( u16 )PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM,
1, ( u16 )bp->port_idx ) != STATUS_SUCCESS )
@@ -1222,32 +1324,51 @@ static int bnxt_get_link_speed ( struct bnxt *bp )
bp->link_set |= SET_LINK ( *ptr32,
MEDIA_AUTO_DETECT_MASK, MEDIA_AUTO_DETECT_SHIFT );
- switch ( bp->link_set & LINK_SPEED_DRV_MASK ) {
- case LINK_SPEED_DRV_1G:
+ /* Use LINK_SPEED_FW_xxx which is valid for CHIP_P7 and earlier devices */
+ switch ( bp->link_set & LINK_SPEED_FW_MASK ) {
+ case LINK_SPEED_FW_1G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_1000MBPS );
break;
- case LINK_SPEED_DRV_2_5G:
+ case LINK_SPEED_FW_2_5G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_2500MBPS );
break;
- case LINK_SPEED_DRV_10G:
+ case LINK_SPEED_FW_10G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_10GBPS );
break;
- case LINK_SPEED_DRV_25G:
+ case LINK_SPEED_FW_25G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_25GBPS );
break;
- case LINK_SPEED_DRV_40G:
+ case LINK_SPEED_FW_40G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_40GBPS );
break;
- case LINK_SPEED_DRV_50G:
+ case LINK_SPEED_FW_50G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50GBPS );
break;
- case LINK_SPEED_DRV_100G:
+ case LINK_SPEED_FW_50G_PAM4:
+ bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_50PAM4GBPS );
+ break;
+ case LINK_SPEED_FW_100G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100GBPS );
break;
- case LINK_SPEED_DRV_200G:
+ case LINK_SPEED_FW_100G_PAM4:
+ bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100PAM4GBPS );
+ break;
+ case LINK_SPEED_FW_100G_PAM4_112:
+ bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_100PAM4_112GBPS );
+ break;
+ case LINK_SPEED_FW_200G:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200GBPS );
break;
- case LINK_SPEED_DRV_AUTONEG:
+ case LINK_SPEED_FW_200G_PAM4_112:
+ bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_200PAM4_112GBPS );
+ break;
+ case LINK_SPEED_FW_400G_PAM4:
+ bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_400PAM4GBPS );
+ break;
+ case LINK_SPEED_FW_400G_PAM4_112:
+ bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_400PAM4_112GBPS );
+ break;
+ case LINK_SPEED_FW_AUTONEG:
bp->medium = SET_MEDIUM_SPEED ( bp, MEDIUM_SPEED_AUTONEG );
break;
default:
@@ -1266,27 +1387,29 @@ static int bnxt_get_vlan ( struct bnxt *bp )
if ( bp->vf )
return STATUS_SUCCESS;
- test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1,
- ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM, 1,
- ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
- return STATUS_FAILURE;
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) {
+ test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1,
+ ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM, 1,
+ ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
+ return STATUS_FAILURE;
- bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT );
- test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 16,
- ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM, 1,
- ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
- return STATUS_FAILURE;
+ bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT );
+ test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 16,
+ ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM, 1,
+ ( u16 )bp->ordinal_value ) != STATUS_SUCCESS )
+ return STATUS_FAILURE;
- bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT );
- if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
- bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK;
- else
- bp->vlan_id = 0;
+ bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT );
+ if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
+ bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK;
+ else
+ bp->vlan_id = 0;
- if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
- DBGP ( "VLAN MBA Enabled ( %d )\n",
- ( bp->mba_cfg2 & VLAN_VALUE_MASK ) );
+ if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED )
+ DBGP ( "VLAN MBA Enabled ( %d )\n",
+ ( bp->mba_cfg2 & VLAN_VALUE_MASK ) );
+ }
return STATUS_SUCCESS;
}
@@ -1296,7 +1419,7 @@ static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp )
struct hwrm_func_backing_store_qcfg_input *req;
DBGP ( "%s\n", __func__ );
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req;
@@ -1311,7 +1434,7 @@ static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp )
struct hwrm_func_backing_store_cfg_input *req;
DBGP ( "%s\n", __func__ );
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req;
@@ -1330,7 +1453,7 @@ static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp )
int rc;
DBGP ( "%s\n", __func__ );
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req;
@@ -1370,7 +1493,10 @@ static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
u32 flags;
u32 enables = 0;
u16 force_link_speed = 0;
+ u16 force_link_speeds2 = 0;
+ u16 force_pam4_link_speed = 0;
u16 auto_link_speed_mask = 0;
+ u16 auto_link_speeds2_mask = 0;
u8 auto_mode = 0;
u8 auto_pause = 0;
u8 auto_duplex = 0;
@@ -1385,34 +1511,111 @@ static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
break;
case MEDIUM_SPEED_10GBPS:
- force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
+ }
break;
case MEDIUM_SPEED_25GBPS:
- force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
+ }
break;
case MEDIUM_SPEED_40GBPS:
- force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
+ }
break;
case MEDIUM_SPEED_50GBPS:
- force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;
+ }
+ break;
+ case MEDIUM_SPEED_50PAM4GBPS:
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_pam4_link_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED;
+ }
break;
case MEDIUM_SPEED_100GBPS:
- force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB;
+ }
+ break;
+ case MEDIUM_SPEED_100PAM4GBPS:
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_pam4_link_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED;
+ }
+ break;
+ case MEDIUM_SPEED_100PAM4_112GBPS:
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ }
break;
case MEDIUM_SPEED_200GBPS:
- force_link_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_200GB;
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ } else {
+ force_pam4_link_speed = PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED;
+ }
+ break;
+ case MEDIUM_SPEED_200PAM4_112GBPS:
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ }
+ break;
+ case MEDIUM_SPEED_400PAM4GBPS:
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ }
break;
+ case MEDIUM_SPEED_400PAM4_112GBPS:
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) ) {
+ force_link_speeds2 = PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112;
+ enables |= PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2;
+ }
+ break;
default:
auto_mode = PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
flags &= ~PORT_PHY_CFG_REQ_FLAGS_FORCE;
enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE |
- PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK |
PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX |
PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE;
+ if ( FLAG_TEST (bp->flags, BNXT_FLAG_LINK_SPEEDS2 ) )
+ enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK;
+ else
+ enables |= PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK;
auto_pause = PORT_PHY_CFG_REQ_AUTO_PAUSE_TX |
PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_BOTH;
auto_link_speed_mask = bp->support_speeds;
+ auto_link_speeds2_mask = bp->auto_link_speeds2_mask;
break;
}
@@ -1421,10 +1624,13 @@ static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp )
req->enables = enables;
req->port_id = bp->port_idx;
req->force_link_speed = force_link_speed;
+ req->force_pam4_link_speed = force_pam4_link_speed;
+ req->force_link_speeds2 = force_link_speeds2;
req->auto_mode = auto_mode;
req->auto_duplex = auto_duplex;
req->auto_pause = auto_pause;
req->auto_link_speed_mask = auto_link_speed_mask;
+ req->auto_link_speeds2_mask = auto_link_speeds2_mask;
return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ );
}
@@ -1563,7 +1769,7 @@ static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp )
int rc;
DBGP ( "%s\n", __func__ );
- if ( bp->thor )
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) )
return STATUS_SUCCESS;
req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req;
@@ -1614,7 +1820,7 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
switch ( type ) {
case RING_ALLOC_REQ_RING_TYPE_NQ:
req->page_size = LM_PAGE_BITS ( 12 );
- req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
+ req->int_mode = BNXT_CQ_INTR_MODE ( ( (FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7) ) || bp->vf ) );
req->length = ( u32 )bp->nq.ring_cnt;
req->logical_id = 0xFFFF; // Required value for Thor FW?
req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt );
@@ -1624,7 +1830,7 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf );
req->length = ( u32 )bp->cq.ring_cnt;
req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt );
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
break;
req->enables = RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID;
req->nq_ring_id = bp->nq_ring_id;
@@ -1646,7 +1852,7 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type )
req->stat_ctx_id = ( u32 )STAT_CTX_ID;
req->cmpl_ring_id = bp->cq_ring_id;
req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt );
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
break;
req->queue_id = ( u16 )RX_RING_QID;
req->rx_buf_size = MAX_ETHERNET_PACKET_BUFFER_SIZE;
@@ -1742,7 +1948,7 @@ static int bnxt_hwrm_ring_free_rx ( struct bnxt *bp )
static int bnxt_hwrm_ring_alloc_nq ( struct bnxt *bp )
{
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
return bnxt_hwrm_ring_alloc ( bp, RING_ALLOC_REQ_RING_TYPE_NQ );
}
@@ -1751,7 +1957,7 @@ static int bnxt_hwrm_ring_free_nq ( struct bnxt *bp )
{
int ret = STATUS_SUCCESS;
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return STATUS_SUCCESS;
DBGP ( "%s\n", __func__ );
@@ -1822,7 +2028,7 @@ static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp )
req->enables = VNIC_CFG_REQ_ENABLES_MRU;
req->mru = bp->mtu;
- if ( bp->thor ) {
+ if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) {
req->enables |= ( VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID );
req->default_rx_ring_id = bp->rx_ring_id;
@@ -1876,6 +2082,7 @@ hwrm_func_t bring_up_chip[] = {
bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */
bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */
bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */
+ bnxt_hwrm_port_phy_qcaps_req, /* HWRM_PORT_PHY_QCAPS */
bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */
bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */
bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */
@@ -1968,6 +2175,9 @@ static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob )
bp->tx.iob[entry] = iob;
bnxt_set_txq ( bp, entry, mapping, len );
entry = NEXT_IDX ( entry, bp->tx.ring_cnt );
+ /* If the ring has wrapped, toggle the epoch bit */
+ if ( bp->tx.prod_id > entry )
+ bp->tx.epoch ^= 1;
dump_tx_pkt ( ( u8 * )iob->data, len, bp->tx.prod_id );
/* Packets are ready, update Tx producer idx local and on card. */
bnxt_db_tx ( bp, ( u32 )entry );
@@ -1986,6 +2196,7 @@ static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt )
if ( cons_id >= bp->nq.ring_cnt ) {
/* Toggle completion bit when the ring wraps. */
bp->nq.completion_bit ^= 1;
+ bp->nq.epoch ^= 1;
cons_id = cons_id - bp->nq.ring_cnt;
}
bp->nq.cons_id = cons_id;
@@ -2026,7 +2237,7 @@ static void bnxt_service_cq ( struct net_device *dev )
cq_type = cmp->type & CMPL_BASE_TYPE_MASK;
dump_evt ( ( u8 * )cmp, cq_type, bp->cq.cons_id, 0 );
- dump_cq ( cmp, bp->cq.cons_id );
+ dump_cq ( cmp, bp->cq.cons_id, bp->nq.toggle );
switch ( cq_type ) {
case CMPL_BASE_TYPE_TX_L2:
@@ -2037,6 +2248,7 @@ static void bnxt_service_cq ( struct net_device *dev )
bnxt_adv_cq_index ( bp, 1 );
break;
case CMPL_BASE_TYPE_RX_L2:
+ case CMPL_BASE_TYPE_RX_L2_V3:
done = bnxt_rx_complete ( dev,
( struct rx_pkt_cmpl * )cmp );
break;
@@ -2063,7 +2275,7 @@ static void bnxt_service_nq ( struct net_device *dev )
int done = SERVICE_NEXT_NQ_BD;
u32 nq_type;
- if ( !bp->thor )
+ if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) )
return;
while ( done == SERVICE_NEXT_NQ_BD ) {
@@ -2072,6 +2284,7 @@ static void bnxt_service_nq ( struct net_device *dev )
if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit )
break;
nq_type = ( nqp->type & NQ_CN_TYPE_MASK );
+ bp->nq.toggle = ( ( nqp->type & NQ_CN_TOGGLE_MASK ) >> NQ_CN_TOGGLE_SFT );
dump_evt ( ( u8 * )nqp, nq_type, bp->nq.cons_id, 1 );
dump_nq ( nqp, bp->nq.cons_id );
@@ -2096,8 +2309,8 @@ static void bnxt_service_nq ( struct net_device *dev )
static void bnxt_poll ( struct net_device *dev )
{
mb ( );
- bnxt_service_cq ( dev );
bnxt_service_nq ( dev );
+ bnxt_service_cq ( dev );
}
static void bnxt_close ( struct net_device *dev )
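
Taken together, the P5+/P7 doorbell is a single 64-bit MMIO write: the low word carries the ring index plus the new epoch and toggle bits, and the high word carries the ring XID, path, completion type and, on P7, the VALID bit (see the DBC_MSG_* / DBC_DBC_* definitions added to bnxt.h below). A worked sketch of the value dev_p7_db() writes for a CQ-arm doorbell; illustrative only, assuming the DBC_DBC_TYPE_CQ_ARMALL constant already used by the driver:

/* Illustrative composition of a P7 CQ-arm doorbell value, mirroring
 * dev_p7_db() above with the bnxt.h field definitions expanded. */
static u64 bnxt_p7_cq_doorbell ( u32 cons_id, u32 cq_ring_id,
				 u32 epoch, u32 toggle )
{
	u32 lo = ( ( cons_id << DBC_DBC_INDEX_SFT ) & DBC_DBC_INDEX_MASK ) |
		 ( epoch << DBC_DBC_EPOCH_SFT ) |
		 ( ( toggle << DBC_DBC_TOGGLE_SFT ) & DBC_DBC_TOGGLE_MASK );
	u32 hi = ( ( cq_ring_id << DBC_DBC_XID_SFT ) & DBC_DBC_XID_MASK ) |
		 DBC_DBC_PATH_L2 | DBC_DBC_VALID | DBC_DBC_TYPE_CQ_ARMALL;

	return ( ( ( u64 ) hi ) << 32 ) | lo;
}
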
diff --git a/src/drivers/net/bnxt/bnxt.h b/src/drivers/net/bnxt/bnxt.h
index 2cbaec5e..8c8a3328 100644
--- a/src/drivers/net/bnxt/bnxt.h
+++ b/src/drivers/net/bnxt/bnxt.h
@@ -52,6 +52,10 @@ union dma_addr64_t {
#define BNXT_FLAG_NPAR_MODE 0x0010
#define BNXT_FLAG_ATOMICS_ENABLE 0x0020
#define BNXT_FLAG_PCI_VF 0x0040
+#define BNXT_FLAG_LINK_SPEEDS2 0x0080
+#define BNXT_FLAG_IS_CHIP_P5 0x0100
+#define BNXT_FLAG_IS_CHIP_P5_PLUS 0x0200
+#define BNXT_FLAG_IS_CHIP_P7 0x0400
/*******************************************************************************
* Status codes.
******************************************************************************/
@@ -106,6 +110,12 @@ union dma_addr64_t {
#define MEDIUM_SPEED_50GBPS 0x0a00L
#define MEDIUM_SPEED_100GBPS 0x0b00L
#define MEDIUM_SPEED_200GBPS 0x0c00L
+#define MEDIUM_SPEED_50PAM4GBPS 0x0d00L
+#define MEDIUM_SPEED_100PAM4GBPS 0x0e00L
+#define MEDIUM_SPEED_100PAM4_112GBPS 0x0f00L
+#define MEDIUM_SPEED_200PAM4_112GBPS 0x1000L
+#define MEDIUM_SPEED_400PAM4GBPS 0x2000L
+#define MEDIUM_SPEED_400PAM4_112GBPS 0x3000L
#define MEDIUM_SPEED_AUTONEG_1G_FALLBACK 0x8000L /* Serdes */
#define MEDIUM_SPEED_AUTONEG_2_5G_FALLBACK 0x8100L /* Serdes */
#define MEDIUM_SPEED_HARDWARE_DEFAULT 0xff00L /* Serdes nvram def.*/
@@ -168,9 +178,9 @@ union dma_addr64_t {
RX_MASK_ACCEPT_MULTICAST)
#define MAX_NQ_DESC_CNT 64
#define NQ_RING_BUFFER_SIZE (MAX_NQ_DESC_CNT * sizeof(struct cmpl_base))
-#define TX_RING_QID (bp->thor ? (u16)bp->queue_id : ((u16)bp->port_idx * 10))
-#define RX_RING_QID (bp->thor ? bp->queue_id : 0)
-#define STAT_CTX_ID ((bp->vf || bp->thor) ? bp->stat_ctx_id : 0)
+#define TX_RING_QID (FLAG_TEST(bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS) ? (u16)bp->queue_id : ((u16)bp->port_idx * 10))
+#define RX_RING_QID (FLAG_TEST(bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS) ? bp->queue_id : 0)
+#define STAT_CTX_ID ((bp->vf || FLAG_TEST(bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS)) ? bp->stat_ctx_id : 0)
#define TX_AVAIL(r) (r - 1)
#define TX_IN_USE(a, b, c) ((a - b) & (c - 1))
#define NO_MORE_NQ_BD_TO_SERVICE 1
@@ -189,13 +199,19 @@ union dma_addr64_t {
((idx) << DBC_DBC_INDEX_SFT) & DBC_DBC_INDEX_MASK)
#define DBC_MSG_XID(xid, flg) (\
(((xid) << DBC_DBC_XID_SFT) & DBC_DBC_XID_MASK) | \
- DBC_DBC_PATH_L2 | (flg))
+ DBC_DBC_PATH_L2 | (FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ? DBC_DBC_VALID : 0) | (flg))
+#define DBC_MSG_EPCH(idx) (\
+ ((idx) << DBC_DBC_EPOCH_SFT))
+#define DBC_MSG_TOGGLE(idx) (\
+ ((idx) << DBC_DBC_TOGGLE_SFT) & DBC_DBC_TOGGLE_MASK)
#define PHY_STATUS 0x0001
#define PHY_SPEED 0x0002
#define DETECT_MEDIA 0x0004
#define SUPPORT_SPEEDS 0x0008
+#define SUPPORT_SPEEDS2 0x0010
#define QCFG_PHY_ALL (\
- SUPPORT_SPEEDS | DETECT_MEDIA | PHY_SPEED | PHY_STATUS)
+ SUPPORT_SPEEDS | SUPPORT_SPEEDS2 | \
+ DETECT_MEDIA | PHY_SPEED | PHY_STATUS)
#define str_mbps "Mbps"
#define str_gbps "Gbps"
/*
@@ -287,6 +303,18 @@ union dma_addr64_t {
#define NS_LINK_SPEED_FW_100G (0x6)
#define LINK_SPEED_FW_200G (0x7L << 7)
#define NS_LINK_SPEED_FW_200G (0x7)
+#define LINK_SPEED_FW_50G_PAM4 (0x8L << 7)
+#define NS_LINK_SPEED_FW_50G_PAM4 (0x8)
+#define LINK_SPEED_FW_100G_PAM4 (0x9L << 7)
+#define NS_LINK_SPEED_FW_100G_PAM4 (0x9)
+#define LINK_SPEED_FW_100G_PAM4_112 (0xAL << 7)
+#define NS_LINK_SPEED_FW_100G_PAM4_112 (0xA)
+#define LINK_SPEED_FW_200G_PAM4_112 (0xBL << 7)
+#define NS_LINK_SPEED_FW_200G_PAM4_112 (0xB)
+#define LINK_SPEED_FW_400G_PAM4 (0xCL << 7)
+#define NS_LINK_SPEED_FW_400G_PAM4 (0xC)
+#define LINK_SPEED_FW_400G_PAM4_112 (0xDL << 7)
+#define NS_LINK_SPEED_FW_400G_PAM4_112 (0xD)
#define LINK_SPEED_FW_2_5G (0xEL << 7)
#define NS_LINK_SPEED_FW_2_5G (0xE)
#define LINK_SPEED_FW_100M (0xFL << 7)
@@ -387,6 +415,10 @@ struct dbc_dbc {
__le32 index;
#define DBC_DBC_INDEX_MASK 0xffffffUL
#define DBC_DBC_INDEX_SFT 0
+ #define DBC_DBC_EPOCH 0x1000000UL
+ #define DBC_DBC_EPOCH_SFT 24
+ #define DBC_DBC_TOGGLE_MASK 0x6000000UL
+ #define DBC_DBC_TOGGLE_SFT 25
__le32 type_path_xid;
#define DBC_DBC_XID_MASK 0xfffffUL
#define DBC_DBC_XID_SFT 0
@@ -396,6 +428,7 @@ struct dbc_dbc {
#define DBC_DBC_PATH_L2 (0x1UL << 24)
#define DBC_DBC_PATH_ENGINE (0x2UL << 24)
#define DBC_DBC_PATH_LAST DBC_DBC_PATH_ENGINE
+ #define DBC_DBC_VALID 0x4000000UL
#define DBC_DBC_DEBUG_TRACE 0x8000000UL
#define DBC_DBC_TYPE_MASK 0xf0000000UL
#define DBC_DBC_TYPE_SFT 28
@@ -481,6 +514,8 @@ struct tx_info {
u16 ring_cnt;
u32 cnt; /* Tx statistics. */
u32 cnt_req;
+ u8 epoch;
+ u8 res[3];
};
struct cmpl_base {
@@ -492,6 +527,7 @@ struct cmpl_base {
#define CMPL_BASE_TYPE_RX_AGG 0x12UL
#define CMPL_BASE_TYPE_RX_TPA_START 0x13UL
#define CMPL_BASE_TYPE_RX_TPA_END 0x15UL
+#define CMPL_BASE_TYPE_RX_L2_V3 0x17UL
#define CMPL_BASE_TYPE_STAT_EJECT 0x1aUL
#define CMPL_BASE_TYPE_HWRM_DONE 0x20UL
#define CMPL_BASE_TYPE_HWRM_FWD_REQ 0x22UL
@@ -517,7 +553,8 @@ struct cmp_info {
u16 cons_id;
u16 ring_cnt;
u8 completion_bit;
- u8 res[3];
+ u8 epoch;
+ u8 res[2];
};
/* Completion Queue Notification */
@@ -533,6 +570,8 @@ struct nq_base {
*/
#define NQ_CN_TYPE_MASK 0x3fUL
#define NQ_CN_TYPE_SFT 0
+#define NQ_CN_TOGGLE_MASK 0xc0UL
+#define NQ_CN_TOGGLE_SFT 6
/* CQ Notification */
#define NQ_CN_TYPE_CQ_NOTIFICATION 0x30UL
#define NQ_CN_TYPE_LAST NQ_CN_TYPE_CQ_NOTIFICATION
@@ -561,7 +600,9 @@ struct nq_info {
u16 cons_id;
u16 ring_cnt;
u8 completion_bit;
- u8 res[3];
+ u8 epoch;
+ u8 toggle;
+ u8 res[1];
};
struct rx_pkt_cmpl {
@@ -675,6 +716,156 @@ struct rx_pkt_cmpl_hi {
#define RX_PKT_CMPL_REORDER_SFT 0
};
+struct rx_pkt_v3_cmpl {
+ u16 flags_type;
+ #define RX_PKT_V3_CMPL_TYPE_MASK 0x3fUL
+ #define RX_PKT_V3_CMPL_TYPE_SFT 0
+ /*
+ * RX L2 V3 completion:
+ * Completion of an L2 RX packet. Length = 32B
+ * This is the new version of the RX_L2 completion used in Thor2
+ * and later chips.
+ */
+ #define RX_PKT_V3_CMPL_TYPE_RX_L2_V3 0x17UL
+ #define RX_PKT_V3_CMPL_TYPE_LAST RX_PKT_V3_CMPL_TYPE_RX_L2_V3
+ #define RX_PKT_V3_CMPL_FLAGS_MASK 0xffc0UL
+ #define RX_PKT_V3_CMPL_FLAGS_SFT 6
+ #define RX_PKT_V3_CMPL_FLAGS_ERROR 0x40UL
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_MASK 0x380UL
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_SFT 7
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_NORMAL (0x0UL << 7)
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_JUMBO (0x1UL << 7)
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_HDS (0x2UL << 7)
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_TRUNCATION (0x3UL << 7)
+ #define RX_PKT_V3_CMPL_FLAGS_PLACEMENT_LAST RX_PKT_V3_CMPL_FLAGS_PLACEMENT_TRUNCATION
+ #define RX_PKT_V3_CMPL_FLAGS_RSS_VALID 0x400UL
+ #define RX_PKT_V3_CMPL_FLAGS_PKT_METADATA_PRESENT 0x800UL
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_MASK 0xf000UL
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_SFT 12
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_NOT_KNOWN (0x0UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_IP (0x1UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_TCP (0x2UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_UDP (0x3UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_FCOE (0x4UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_ROCE (0x5UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_ICMP (0x7UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP (0x8UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (0x9UL << 12)
+ #define RX_PKT_V3_CMPL_FLAGS_ITYPE_LAST RX_PKT_V3_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP
+ u16 len;
+ u32 opaque;
+ u16 rss_hash_type_agg_bufs_v1;
+ #define RX_PKT_V3_CMPL_V1 0x1UL
+ #define RX_PKT_V3_CMPL_AGG_BUFS_MASK 0x3eUL
+ #define RX_PKT_V3_CMPL_AGG_BUFS_SFT 1
+ #define RX_PKT_V3_CMPL_UNUSED1 0x40UL
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_MASK 0xff80UL
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_SFT 7
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_0 (0x0UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_1 (0x1UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_3 (0x3UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_4 (0x4UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_5 (0x5UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_6 (0x6UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_7 (0x7UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_8 (0x8UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_9 (0x9UL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_10 (0xaUL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_11 (0xbUL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_12 (0xcUL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_13 (0xdUL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_14 (0xeUL << 7)
+ #define RX_PKT_V3_CMPL_RSS_HASH_TYPE_LAST RX_PKT_V3_CMPL_RSS_HASH_TYPE_ENUM_14
+ u16 metadata1_payload_offset;
+ #define RX_PKT_V3_CMPL_PAYLOAD_OFFSET_MASK 0x1ffUL
+ #define RX_PKT_V3_CMPL_PAYLOAD_OFFSET_SFT 0
+ #define RX_PKT_V3_CMPL_METADATA1_MASK 0xf000UL
+ #define RX_PKT_V3_CMPL_METADATA1_SFT 12
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_MASK 0x7000UL
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_SFT 12
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPID88A8 (0x0UL << 12)
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPID8100 (0x1UL << 12)
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPID9100 (0x2UL << 12)
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPID9200 (0x3UL << 12)
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPID9300 (0x4UL << 12)
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPIDCFG (0x5UL << 12)
+ #define RX_PKT_V3_CMPL_METADATA1_TPID_SEL_LAST RX_PKT_V3_CMPL_METADATA1_TPID_SEL_TPIDCFG
+ #define RX_PKT_V3_CMPL_METADATA1_VALID 0x8000UL
+ u32 rss_hash;
+};
+
+struct rx_pkt_v3_cmpl_hi {
+ u32 flags2;
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_IP_CS_CALC 0x1UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_L4_CS_CALC 0x2UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_T_IP_CS_CALC 0x4UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_T_L4_CS_CALC 0x8UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_MASK 0xf0UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_SFT 4
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_NONE (0x0UL << 4)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_ACT_REC_PTR (0x1UL << 4)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_TUNNEL_ID (0x2UL << 4)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_CHDR_DATA (0x3UL << 4)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_HDR_OFFSET (0x4UL << 4)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_LAST RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_HDR_OFFSET
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_IP_TYPE 0x100UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_COMPLETE_CHECKSUM_CALC 0x200UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_T_IP_TYPE 0x400UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_T_IP_TYPE_IPV4 (0x0UL << 10)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_T_IP_TYPE_IPV6 (0x1UL << 10)
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_T_IP_TYPE_LAST RX_PKT_V3_CMPL_HI_FLAGS2_T_IP_TYPE_IPV6
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_COMPLETE_CHECKSUM_MASK 0xffff0000UL
+ #define RX_PKT_V3_CMPL_HI_FLAGS2_COMPLETE_CHECKSUM_SFT 16
+ u32 metadata2;
+ u16 errors_v2;
+ #define RX_PKT_V3_CMPL_HI_V2 0x1UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_MASK 0xfffeUL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_SFT 1
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_MASK 0xeUL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_SFT 1
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_NOT_ON_CHIP (0x2UL << 1)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_BUFFER_ERROR_FLUSH
+ #define RX_PKT_V3_CMPL_HI_ERRORS_IP_CS_ERROR 0x10UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_L4_CS_ERROR 0x20UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_IP_CS_ERROR 0x40UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_L4_CS_ERROR 0x80UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_CRC_ERROR 0x100UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_MASK 0xe00UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_SFT 9
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_NO_ERROR (0x0UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION (0x1UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN (0x2UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR (0x3UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR (0x4UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL (0x5UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR (0x6UL << 9)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_T_PKT_ERROR_T_TOTAL_ERROR
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_MASK 0xf000UL
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_SFT 12
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_NO_ERROR (0x0UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L3_BAD_VERSION (0x1UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN (0x2UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L3_BAD_TTL (0x3UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_IP_TOTAL_ERROR (0x4UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR (0x5UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN (0x6UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL (0x7UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN (0x8UL << 12)
+ #define RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_LAST RX_PKT_V3_CMPL_HI_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN
+ u16 metadata0;
+ #define RX_PKT_V3_CMPL_HI_METADATA0_VID_MASK 0xfffUL
+ #define RX_PKT_V3_CMPL_HI_METADATA0_VID_SFT 0
+ #define RX_PKT_V3_CMPL_HI_METADATA0_DE 0x1000UL
+ /* When meta_format=1, this value is the VLAN PRI. */
+ #define RX_PKT_V3_CMPL_HI_METADATA0_PRI_MASK 0xe000UL
+ #define RX_PKT_V3_CMPL_HI_METADATA0_PRI_SFT 13
+ u32 timestamp;
+};
+
struct rx_prod_pkt_bd {
u16 flags_type;
#define RX_PROD_PKT_BD_TYPE_MASK 0x3fUL
@@ -705,6 +896,8 @@ struct rx_info {
u32 drop_err;
u32 drop_lb;
u32 drop_vlan;
+ u8 epoch;
+ u8 res[3];
};
#define VALID_DRIVER_REG 0x0001
@@ -750,7 +943,6 @@ struct bnxt {
struct nq_info nq; /* completion info. */
u16 nq_ring_id;
u8 queue_id;
- u8 thor;
u16 last_resp_code;
u16 seq_id;
u32 flag_hwrm;
@@ -792,6 +984,7 @@ struct bnxt {
u32 mba_cfg2;
u32 medium;
u16 support_speeds;
+ u16 auto_link_speeds2_mask;
u32 link_set;
u8 media_detect;
u8 rsvd;
@@ -868,4 +1061,8 @@ struct bnxt {
FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR | \
FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR)
-#define CHIP_NUM_57500 0x1750
+#define CHIP_NUM_57508 0x1750
+#define CHIP_NUM_57504 0x1751
+#define CHIP_NUM_57502 0x1752
+
+#define CHIP_NUM_57608 0x1760
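
The epoch fields added to tx_info, cmp_info, nq_info and rx_info above track, modulo 2, how many times each ring has wrapped; the P7 doorbell write (dev_p7_db) carries that bit, presumably so the hardware can tell a freshly wrapped index from a stale one. A minimal sketch of the wrap pattern followed in bnxt_tx(), bnxt_post_rx_buffers() and the *_adv_*_index() helpers (hypothetical helper, not part of the driver):

/* Hypothetical helper mirroring the driver's epoch handling: advance
 * a ring index by one and flip the epoch bit when the ring wraps. */
static u16 bnxt_ring_advance ( u16 idx, u16 ring_cnt, u8 *epoch )
{
	u16 next = ( u16 ) ( ( idx + 1 ) % ring_cnt );	/* NEXT_IDX() */

	if ( next < idx )	/* wrapped back to the start of the ring */
		*epoch ^= 1;
	return next;
}
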
diff --git a/src/drivers/net/bnxt/bnxt_dbg.h b/src/drivers/net/bnxt/bnxt_dbg.h
index 188978ad..14540281 100644
--- a/src/drivers/net/bnxt/bnxt_dbg.h
+++ b/src/drivers/net/bnxt/bnxt_dbg.h
@@ -475,7 +475,7 @@ void dbg_rx_stat(struct bnxt *bp)
#endif
#if defined(DEBUG_CQ)
-static void dump_cq(struct cmpl_base *cmp, u16 cid)
+static void dump_cq(struct cmpl_base *cmp, u16 cid, u8 toggle)
{
dbg_prn("- CQ Type ");
switch (cmp->type & CMPL_BASE_TYPE_MASK) {
@@ -495,7 +495,7 @@ static void dump_cq(struct cmpl_base *cmp, u16 cid)
dbg_prn("%04x", (u16)(cmp->type & CMPL_BASE_TYPE_MASK));
break;
}
- dbg_prn(" cid %d", cid);
+ dbg_prn(" cid %d, tog %d", cid, toggle);
#if defined(DEBUG_CQ_DUMP)
dump_mem((u8 *)cmp, (u32)sizeof(struct cmpl_base), DISP_U8);
#else
@@ -513,7 +513,7 @@ static void dump_nq(struct nq_base *nqp, u16 cid)
#endif
}
#else
-#define dump_cq(cq, id)
+#define dump_cq(cq, id, toggle)
#define dump_nq(nq, id)
#endif
diff --git a/src/drivers/net/bnxt/bnxt_hsi.h b/src/drivers/net/bnxt/bnxt_hsi.h
index 086acb8b..dbcffd90 100644
--- a/src/drivers/net/bnxt/bnxt_hsi.h
+++ b/src/drivers/net/bnxt/bnxt_hsi.h
@@ -2929,7 +2929,7 @@ struct hwrm_func_drv_if_change_output {
u8 valid;
};
-/* hwrm_port_phy_cfg_input (size:448b/56B) */
+/* hwrm_port_phy_cfg_input (size:512b/64B) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2952,6 +2952,15 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
#define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN 0x4000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_ENABLE 0x8000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_1XN_DISABLE 0x10000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_ENABLE 0x20000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS544_IEEE_DISABLE 0x40000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_ENABLE 0x80000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_1XN_DISABLE 0x100000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_ENABLE 0x200000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_RS272_IEEE_DISABLE 0x400000UL
+
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -2964,6 +2973,10 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
#define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
#define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAM4_LINK_SPEED 0x800UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_PAM4_LINK_SPEED_MASK 0x1000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_FORCE_LINK_SPEEDS2 0x2000UL
+ #define PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEEDS2_MASK 0x4000UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB 0x1UL
@@ -3049,11 +3062,48 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
#define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
- u8 unused_2[2];
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
__le32 tx_lpi_timer;
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
#define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
- __le32 unused_3;
+ __le16 auto_link_pam4_speed_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_PAM4_SPEED_MASK_200G 0x4UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_LAST PORT_PHY_CFG_REQ_FORCE_LINK_SPEEDS2_400GB_PAM4_112
+ __le16 auto_link_speeds2_mask;
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_1GB 0x1UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_10GB 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_25GB 0x4UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_40GB 0x8UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB 0x10UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB 0x20UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEEDS2_MASK_400GB_PAM4_112 0x1000UL
+ u8 unused_2[6];
};
/* hwrm_port_phy_cfg_output (size:128b/16B) */
@@ -3087,7 +3137,7 @@ struct hwrm_port_phy_qcfg_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcfg_output (size:768b/96B) */
+/* hwrm_port_phy_qcfg_output (size:832b/104B) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -3098,7 +3148,23 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SIGNAL 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_LINK 0x2UL
#define PORT_PHY_QCFG_RESP_LINK_LAST PORT_PHY_QCFG_RESP_LINK_LINK
- u8 unused_0;
+ u8 active_fec_signal_mode;
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK 0xfUL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_SFT 0
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_NRZ 0x0UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4 0x1UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112 0x2UL
+ #define PORT_PHY_QCFG_RESP_SIGNAL_MODE_LAST HWRM_PORT_PHY_QCFG_RESP_SIGNAL_MODE_PAM4_112
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK 0xf0UL
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_SFT 4
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE (0x0UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE (0x1UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE (0x2UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE (0x3UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE (0x4UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE (0x5UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE (0x6UL << 4)
+ #define PORT_PHY_QCFG_RESP_ACTIVE_FEC_LAST PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
__le16 link_speed;
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100MB 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_1GB 0xaUL
@@ -3111,6 +3177,7 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_400GB 0xfa0UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_LINK_SPEED_10MB
u8 duplex_cfg;
@@ -3249,7 +3316,31 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR 0x28UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR 0x29UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR 0x2aUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER 0x2bUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2 0x2cUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2 0x2dUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2 0x2eUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2 0x2fUL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8 0x30UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8 0x31UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8 0x32UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8 0x33UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4 0x34UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4 0x35UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4 0x36UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4 0x37UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -3330,15 +3421,90 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_SUPPORTED 0x80UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_1XN_ENABLED 0x100UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_SUPPORTED 0x200UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS544_IEEE_ENABLED 0x400UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_SUPPORTED 0x800UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_1XN_ENABLED 0x1000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_SUPPORTED 0x2000UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_RS272_IEEE_ENABLED 0x4000UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_LAST PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 option_flags;
#define PORT_PHY_QCFG_RESP_OPTION_FLAGS_MEDIA_AUTO_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SIGNAL_MODE_KNOWN 0x2UL
+ #define PORT_PHY_QCFG_RESP_OPTION_FLAGS_SPEEDS2_SUPPORTED 0x4UL
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
- u8 unused_2[7];
+ __le16 support_pam4_speeds;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_PAM4_SPEEDS_200G 0x4UL
+ __le16 force_pam4_link_speed;
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB 0x7d0UL
+ #define PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_LAST PORT_PHY_QCFG_RESP_FORCE_PAM4_LINK_SPEED_200GB
+ __le16 auto_pam4_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_50G 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_100G 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAM4_LINK_SPEED_MASK_200G 0x4UL
+ u8 link_partner_pam4_adv_speeds;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_50GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_100GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_PAM4_ADV_SPEEDS_200GB 0x4UL
+ u8 link_down_reason;
+ #define PORT_PHY_QCFG_RESP_LINK_DOWN_REASON_RF 0x1UL
+ __le16 support_speeds2;
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS2_800GB_PAM4_112 0x2000UL
+ __le16 force_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_1GB 0xaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_10GB 0x64UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_25GB 0xfaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_40GB 0x190UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB 0x1f4UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB 0x3e8UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_50GB_PAM4_56 0x1f5UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_56 0x3e9UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_56 0x7d1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_56 0xfa1UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_100GB_PAM4_112 0x3eaUL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_200GB_PAM4_112 0x7d2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_400GB_PAM4_112 0xfa2UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112 0x1f42UL
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_LAST PORT_PHY_QCFG_RESP_FORCE_LINK_SPEEDS2_800GB_PAM4_112
+ __le16 auto_link_speeds2;
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_1GB 0x1UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_10GB 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_25GB 0x4UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_40GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB 0x10UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB 0x20UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEEDS2_800GB_PAM4_112 0x2000UL
+ u8 active_lanes;
u8 valid;
};
@@ -3888,7 +4054,7 @@ struct hwrm_port_phy_qcaps_input {
u8 unused_0[6];
};
-/* hwrm_port_phy_qcaps_output (size:192b/24B) */
+/* hwrm_port_phy_qcaps_output (size:320b/40B) */
struct hwrm_port_phy_qcaps_output {
__le16 error_code;
__le16 req_type;
@@ -3954,6 +4120,53 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
#define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
#define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+ __le16 supported_pam4_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_AUTO_MODE_200G 0x4UL
+ __le16 supported_pam4_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_BANK_ADDR_SUPPORTED 0x4UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_SPEEDS2_SUPPORTED 0x8UL
+ u8 internal_port_cnt;
+ u8 unused_0;
+ __le16 supported_speeds2_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_FORCE_MODE_800GB_PAM4_112 0x2000UL
+ __le16 supported_speeds2_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_1GB 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_10GB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_25GB 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_40GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_50GB_PAM4_56 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_56 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_56 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_56 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_100GB_PAM4_112 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_200GB_PAM4_112 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_400GB_PAM4_112 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS2_AUTO_MODE_800GB_PAM4_112 0x2000UL
+ u8 unused_1[3];
+ u8 valid;
};
/* hwrm_port_phy_i2c_write_input (size:832b/104B) */
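
One pattern worth noting in the FORCE_LINK_SPEEDS2 values above: they appear to encode the line rate in units of 100 Mb/s, with the last decimal digit selecting the modulation (0 = NRZ, 1 = PAM4-56, 2 = PAM4-112), e.g. 0x3e8 = 1000 = 100G NRZ, 0x3e9 = 100G PAM4-56, 0x3ea = 100G PAM4-112. A hypothetical decoder built on that observation (not a documented HWRM contract; useful only for debug prints):

/* Observation-based decode of a *_FORCE_LINK_SPEEDS2 value. */
static unsigned int speeds2_to_mbps ( u16 speeds2 )
{
	return ( ( speeds2 / 10 ) * 1000 );	/* 0x3e9 -> 100000 Mb/s */
}

static const char * speeds2_modulation ( u16 speeds2 )
{
	switch ( speeds2 % 10 ) {
	case 1:  return "PAM4-56";
	case 2:  return "PAM4-112";
	default: return "NRZ";
	}
}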