diff options
| author | Simon Rettberg | 2026-01-28 12:53:53 +0100 |
|---|---|---|
| committer | Simon Rettberg | 2026-01-28 12:53:53 +0100 |
| commit | 8e82785c584dc13e20f9229decb95bd17bbe9cd1 (patch) | |
| tree | a8b359e59196be5b2e3862bed189107f4bc9975f /src/drivers/net | |
| parent | Merge branch 'master' into openslx (diff) | |
| parent | [prefix] Make unlzma.S compatible with 386 class CPUs (diff) | |
| download | ipxe-openslx.tar.gz ipxe-openslx.tar.xz ipxe-openslx.zip | |
Merge branch 'master' into openslxopenslx
Diffstat (limited to 'src/drivers/net')
188 files changed, 7035 insertions, 1064 deletions
diff --git a/src/drivers/net/3c509-eisa.c b/src/drivers/net/3c509-eisa.c index 81c60ee91..ada34a6b9 100644 --- a/src/drivers/net/3c509-eisa.c +++ b/src/drivers/net/3c509-eisa.c @@ -35,7 +35,7 @@ static struct eisa_device_id el3_eisa_adapters[] = { EISA_DRIVER ( el3_eisa_driver, el3_eisa_adapters ); DRIVER ( "3c509 (EISA)", nic_driver, eisa_driver, el3_eisa_driver, - el3_eisa_probe, el3_eisa_disable ); + el3_eisa_probe, el3_eisa_disable, no_fake_bss ); ISA_ROM ( "3c509-eisa","3c509 (EISA)" ); diff --git a/src/drivers/net/3c509.c b/src/drivers/net/3c509.c index 4326a835c..0f721315d 100644 --- a/src/drivers/net/3c509.c +++ b/src/drivers/net/3c509.c @@ -316,7 +316,7 @@ static int t509_probe ( struct t509_device *t509 ) { DBG ( "Adding 3c509 device %02x (I/O %04x)\n", t509->tag, t509->ioaddr ); return legacy_probe ( t509, legacy_t509_set_drvdata, &t509->dev, - legacy_t509_probe, legacy_t509_disable ); + legacy_t509_probe, legacy_t509_disable, 0 ); } /** diff --git a/src/drivers/net/3c515.c b/src/drivers/net/3c515.c index 1591e0617..353d45794 100644 --- a/src/drivers/net/3c515.c +++ b/src/drivers/net/3c515.c @@ -64,7 +64,6 @@ static void t3c515_wait(unsigned int nticks) /* TJL definations */ #define HZ 100 static int if_port; -static struct corkscrew_private *vp; /* Brought directly from 3c515.c by Becker */ #define CORKSCREW 1 @@ -237,6 +236,7 @@ struct corkscrew_private { full_bus_master_tx:1, full_bus_master_rx:1, /* Boomerang */ tx_full:1; }; +#define vp NIC_FAKE_BSS_PTR ( struct corkscrew_private ) /* The action to take with a media selection timer tick. Note that we deviate from the 3Com order by checking 10base2 before AUI. 
@@ -759,6 +759,6 @@ static struct isapnp_device_id t515_adapters[] = { ISAPNP_DRIVER ( t515_driver, t515_adapters ); DRIVER ( "3c515", nic_driver, isapnp_driver, t515_driver, - t515_probe, t515_disable ); + t515_probe, t515_disable, *vp ); ISA_ROM ( "3c515", "3c515 Fast EtherLink ISAPnP" ); diff --git a/src/drivers/net/3c529.c b/src/drivers/net/3c529.c index d68f28ec1..8b408f255 100644 --- a/src/drivers/net/3c529.c +++ b/src/drivers/net/3c529.c @@ -49,7 +49,7 @@ static struct mca_device_id el3_mca_adapters[] = { MCA_DRIVER ( t529_driver, el3_mca_adapters ); DRIVER ( "3c529", nic_driver, mca_driver, t529_driver, - t529_probe, t529_disable ); + t529_probe, t529_disable, no_fake_bss ); ISA_ROM( "3c529", "3c529 == MCA 3c509" ); diff --git a/src/drivers/net/3c595.c b/src/drivers/net/3c595.c index c69831005..58cfc41ae 100644 --- a/src/drivers/net/3c595.c +++ b/src/drivers/net/3c595.c @@ -342,8 +342,7 @@ eeprom_rdy() * before */ static int -get_e(offset) -int offset; +get_e(int offset) { if (!eeprom_rdy()) return (0xffff); @@ -444,7 +443,7 @@ vxsetlink(void) GO_WINDOW(1); } -static void t595_disable ( struct nic *nic ) { +static void t595_disable ( struct nic *nic, void *hwdev __unused ) { t595_reset(nic); @@ -542,7 +541,7 @@ PCI_ROM(0x10b7, 0x9805, "3c9805-1", "3Com9805", 0), /* Dual Port Server PCI_DRIVER ( t595_driver, t595_nics, PCI_NO_CLASS ); DRIVER ( "3C595", nic_driver, pci_driver, t595_driver, - t595_probe, t595_disable ); + t595_probe, t595_disable, no_fake_bss ); /* * Local variables: diff --git a/src/drivers/net/acm.c b/src/drivers/net/acm.c index 16dab4be8..0cb2713b2 100644 --- a/src/drivers/net/acm.c +++ b/src/drivers/net/acm.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> diff --git a/src/drivers/net/acm.h b/src/drivers/net/acm.h index d4944967b..3f10f0fa2 100644 --- a/src/drivers/net/acm.h +++ b/src/drivers/net/acm.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL 
); +FILE_SECBOOT ( PERMITTED ); #include <ipxe/usb.h> #include <ipxe/cdc.h> diff --git a/src/drivers/net/amd8111e.c b/src/drivers/net/amd8111e.c index babd12d3c..c21c11835 100644 --- a/src/drivers/net/amd8111e.c +++ b/src/drivers/net/amd8111e.c @@ -105,8 +105,7 @@ struct amd8111e_priv { struct nic *nic; void *mmio; }; - -static struct amd8111e_priv amd8111e; +#define amd8111e NIC_FAKE_BSS ( struct amd8111e_priv ) /******************************************************** @@ -609,7 +608,7 @@ static int amd8111e_poll(struct nic *nic, int retrieve) return pkt_ok; } -static void amd8111e_disable(struct nic *nic) +static void amd8111e_disable(struct nic *nic, void *hwdev __unused) { struct amd8111e_priv *lp = nic->priv_data; @@ -683,7 +682,7 @@ static struct pci_device_id amd8111e_nics[] = { PCI_DRIVER ( amd8111e_driver, amd8111e_nics, PCI_NO_CLASS ); DRIVER ( "AMD8111E", nic_driver, pci_driver, amd8111e_driver, - amd8111e_probe, amd8111e_disable ); + amd8111e_probe, amd8111e_disable, amd8111e ); /* * Local variables: diff --git a/src/drivers/net/ath/ath.h b/src/drivers/net/ath/ath.h index 589bb5634..60d2ee021 100644 --- a/src/drivers/net/ath/ath.h +++ b/src/drivers/net/ath/ath.h @@ -21,8 +21,10 @@ #define ATH_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include <unistd.h> +#include <string.h> #include <ipxe/net80211.h> /* This block of functions are from kernel.h v3.0.1 */ diff --git a/src/drivers/net/ath/ath5k/ath5k.c b/src/drivers/net/ath/ath5k/ath5k.c index 643884d46..09510d3d2 100644 --- a/src/drivers/net/ath/ath5k/ath5k.c +++ b/src/drivers/net/ath/ath5k/ath5k.c @@ -44,6 +44,7 @@ */ FILE_LICENCE ( BSD3 ); +FILE_SECBOOT ( FORBIDDEN ); #include <stdlib.h> #include <ipxe/malloc.h> diff --git a/src/drivers/net/ath/ath5k/ath5k.h b/src/drivers/net/ath/ath5k/ath5k.h index fa62e8ce5..612926f17 100644 --- a/src/drivers/net/ath/ath5k/ath5k.h +++ b/src/drivers/net/ath/ath5k/ath5k.h @@ -22,8 +22,10 @@ #define _ATH5K_H FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); 
#include <stddef.h> +#include <string.h> #include <byteswap.h> #include <ipxe/io.h> #include <ipxe/netdevice.h> diff --git a/src/drivers/net/ath/ath5k/ath5k_attach.c b/src/drivers/net/ath/ath5k/ath5k_attach.c index 302536dbd..fb3382943 100644 --- a/src/drivers/net/ath/ath5k/ath5k_attach.c +++ b/src/drivers/net/ath/ath5k/ath5k_attach.c @@ -20,6 +20,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /*************************************\ * Attach/Detach Functions and helpers * diff --git a/src/drivers/net/ath/ath5k/ath5k_caps.c b/src/drivers/net/ath/ath5k/ath5k_caps.c index 9c00d15d7..bc5abc58c 100644 --- a/src/drivers/net/ath/ath5k/ath5k_caps.c +++ b/src/drivers/net/ath/ath5k/ath5k_caps.c @@ -20,6 +20,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /**************\ * Capabilities * diff --git a/src/drivers/net/ath/ath5k/ath5k_desc.c b/src/drivers/net/ath/ath5k/ath5k_desc.c index 816d26ede..a3ac340df 100644 --- a/src/drivers/net/ath/ath5k/ath5k_desc.c +++ b/src/drivers/net/ath/ath5k/ath5k_desc.c @@ -20,6 +20,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /******************************\ Hardware Descriptor Functions diff --git a/src/drivers/net/ath/ath5k/ath5k_dma.c b/src/drivers/net/ath/ath5k/ath5k_dma.c index fa1e0d013..f27ea8fd2 100644 --- a/src/drivers/net/ath/ath5k/ath5k_dma.c +++ b/src/drivers/net/ath/ath5k/ath5k_dma.c @@ -19,6 +19,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /*************************************\ * DMA and interrupt masking functions * diff --git a/src/drivers/net/ath/ath5k/ath5k_eeprom.c b/src/drivers/net/ath/ath5k/ath5k_eeprom.c index 46f33d1e8..5219ef148 100644 --- a/src/drivers/net/ath/ath5k/ath5k_eeprom.c +++ b/src/drivers/net/ath/ath5k/ath5k_eeprom.c @@ -20,6 +20,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /*************************************\ * EEPROM access functions and helpers * diff --git a/src/drivers/net/ath/ath5k/ath5k_gpio.c b/src/drivers/net/ath/ath5k/ath5k_gpio.c 
index 2301ec70b..7f9652b1e 100644 --- a/src/drivers/net/ath/ath5k/ath5k_gpio.c +++ b/src/drivers/net/ath/ath5k/ath5k_gpio.c @@ -19,6 +19,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /****************\ GPIO Functions diff --git a/src/drivers/net/ath/ath5k/ath5k_initvals.c b/src/drivers/net/ath/ath5k/ath5k_initvals.c index 8f3bd2034..f2fd23bb4 100644 --- a/src/drivers/net/ath/ath5k/ath5k_initvals.c +++ b/src/drivers/net/ath/ath5k/ath5k_initvals.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); #include <unistd.h> diff --git a/src/drivers/net/ath/ath5k/ath5k_pcu.c b/src/drivers/net/ath/ath5k/ath5k_pcu.c index c8165da79..6821a01d5 100644 --- a/src/drivers/net/ath/ath5k/ath5k_pcu.c +++ b/src/drivers/net/ath/ath5k/ath5k_pcu.c @@ -23,6 +23,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /*********************************\ * Protocol Control Unit Functions * diff --git a/src/drivers/net/ath/ath5k/ath5k_phy.c b/src/drivers/net/ath/ath5k/ath5k_phy.c index c2a66a4d3..d220b255b 100644 --- a/src/drivers/net/ath/ath5k/ath5k_phy.c +++ b/src/drivers/net/ath/ath5k/ath5k_phy.c @@ -23,6 +23,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); #define _ATH5K_PHY diff --git a/src/drivers/net/ath/ath5k/ath5k_qcu.c b/src/drivers/net/ath/ath5k/ath5k_qcu.c index e38dba9e2..ff4b9e5e3 100644 --- a/src/drivers/net/ath/ath5k/ath5k_qcu.c +++ b/src/drivers/net/ath/ath5k/ath5k_qcu.c @@ -19,6 +19,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); /********************************************\ Queue Control Unit, DFS Control Unit Functions diff --git a/src/drivers/net/ath/ath5k/ath5k_reset.c b/src/drivers/net/ath/ath5k/ath5k_reset.c index 73765a7b0..98c729cf0 100644 --- a/src/drivers/net/ath/ath5k/ath5k_reset.c +++ b/src/drivers/net/ath/ath5k/ath5k_reset.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); #define _ATH5K_RESET diff --git a/src/drivers/net/ath/ath5k/ath5k_rfkill.c 
b/src/drivers/net/ath/ath5k/ath5k_rfkill.c index 752ef70b9..74708af9e 100644 --- a/src/drivers/net/ath/ath5k/ath5k_rfkill.c +++ b/src/drivers/net/ath/ath5k/ath5k_rfkill.c @@ -35,6 +35,7 @@ */ FILE_LICENCE ( MIT ); +FILE_SECBOOT ( FORBIDDEN ); #include "base.h" diff --git a/src/drivers/net/ath/ath5k/base.h b/src/drivers/net/ath/ath5k/base.h index 976a3f306..1155da68e 100644 --- a/src/drivers/net/ath/ath5k/base.h +++ b/src/drivers/net/ath/ath5k/base.h @@ -45,6 +45,7 @@ #define _DEV_ATH_ATHVAR_H FILE_LICENCE ( BSD3 ); +FILE_SECBOOT ( FORBIDDEN ); #include "ath5k.h" #include <ipxe/iobuf.h> diff --git a/src/drivers/net/ath/ath5k/desc.h b/src/drivers/net/ath/ath5k/desc.h index 6e11b0d43..bcbd291f4 100644 --- a/src/drivers/net/ath/ath5k/desc.h +++ b/src/drivers/net/ath/ath5k/desc.h @@ -16,6 +16,8 @@ * */ +FILE_SECBOOT ( FORBIDDEN ); + /* * Internal RX/TX descriptor structures * (rX: reserved fields possibily used by future versions of the ar5k chipset) diff --git a/src/drivers/net/ath/ath5k/eeprom.h b/src/drivers/net/ath/ath5k/eeprom.h index da4543393..e7b25b09f 100644 --- a/src/drivers/net/ath/ath5k/eeprom.h +++ b/src/drivers/net/ath/ath5k/eeprom.h @@ -16,6 +16,8 @@ * */ +FILE_SECBOOT ( FORBIDDEN ); + /* * Common ar5xxx EEPROM data offsets (set these on AR5K_EEPROM_BASE) */ diff --git a/src/drivers/net/ath/ath5k/reg.h b/src/drivers/net/ath/ath5k/reg.h index 7070d1543..fd9aa7c8e 100644 --- a/src/drivers/net/ath/ath5k/reg.h +++ b/src/drivers/net/ath/ath5k/reg.h @@ -17,6 +17,8 @@ * */ +FILE_SECBOOT ( FORBIDDEN ); + /* * Register values for Atheros 5210/5211/5212 cards from OpenBSD's ar5k * maintained by Reyk Floeter diff --git a/src/drivers/net/ath/ath5k/rfbuffer.h b/src/drivers/net/ath/ath5k/rfbuffer.h index e50baff66..bcbaf1db9 100644 --- a/src/drivers/net/ath/ath5k/rfbuffer.h +++ b/src/drivers/net/ath/ath5k/rfbuffer.h @@ -17,6 +17,7 @@ * */ +FILE_SECBOOT ( FORBIDDEN ); /* * There are some special registers on the RF chip diff --git a/src/drivers/net/ath/ath5k/rfgain.h 
b/src/drivers/net/ath/ath5k/rfgain.h index 1354d8c39..a220d8661 100644 --- a/src/drivers/net/ath/ath5k/rfgain.h +++ b/src/drivers/net/ath/ath5k/rfgain.h @@ -18,6 +18,8 @@ * */ +FILE_SECBOOT ( FORBIDDEN ); + /* * Mode-specific RF Gain table (64bytes) for RF5111/5112 * (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial diff --git a/src/drivers/net/ath/ath9k/ani.h b/src/drivers/net/ath/ath9k/ani.h index ba87ba0fd..2aeb5654d 100644 --- a/src/drivers/net/ath/ath9k/ani.h +++ b/src/drivers/net/ath/ath9k/ani.h @@ -21,6 +21,7 @@ #define ANI_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #define HAL_PROCESS_ANI 0x00000001 diff --git a/src/drivers/net/ath/ath9k/ar5008_initvals.h b/src/drivers/net/ath/ath9k/ar5008_initvals.h index fcc155654..a4466b286 100644 --- a/src/drivers/net/ath/ath9k/ar5008_initvals.h +++ b/src/drivers/net/ath/ath9k/ar5008_initvals.h @@ -15,6 +15,7 @@ */ FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); static const u32 ar5416Modes[][6] = { {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, diff --git a/src/drivers/net/ath/ath9k/ar9001_initvals.h b/src/drivers/net/ath/ath9k/ar9001_initvals.h index 6c1ccd50e..5bf2d5dd4 100644 --- a/src/drivers/net/ath/ath9k/ar9001_initvals.h +++ b/src/drivers/net/ath/ath9k/ar9001_initvals.h @@ -15,6 +15,7 @@ */ FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); static const u32 ar5416Modes_9100[][6] = { {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, diff --git a/src/drivers/net/ath/ath9k/ar9002_initvals.h b/src/drivers/net/ath/ath9k/ar9002_initvals.h index f9a92c9b7..82bd94a5e 100644 --- a/src/drivers/net/ath/ath9k/ar9002_initvals.h +++ b/src/drivers/net/ath/ath9k/ar9002_initvals.h @@ -15,6 +15,7 @@ */ FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); static __unused const u32 ar9280Modes_9280_2[][6] = { {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0}, diff --git a/src/drivers/net/ath/ath9k/ar9002_phy.h 
b/src/drivers/net/ath/ath9k/ar9002_phy.h index 71d9162c9..a6f5db97a 100644 --- a/src/drivers/net/ath/ath9k/ar9002_phy.h +++ b/src/drivers/net/ath/ath9k/ar9002_phy.h @@ -17,6 +17,7 @@ #define AR9002_PHY_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #define AR_PHY_TEST 0x9800 #define PHY_AGC_CLR 0x10000000 diff --git a/src/drivers/net/ath/ath9k/ar9003_2p2_initvals.h b/src/drivers/net/ath/ath9k/ar9003_2p2_initvals.h index b1303bbaa..b2d30fff5 100644 --- a/src/drivers/net/ath/ath9k/ar9003_2p2_initvals.h +++ b/src/drivers/net/ath/ath9k/ar9003_2p2_initvals.h @@ -17,6 +17,8 @@ #ifndef INITVALS_9003_2P2_H #define INITVALS_9003_2P2_H +FILE_SECBOOT ( FORBIDDEN ); + /* AR9003 2.2 */ static __unused const u32 ar9300_2p2_radio_postamble[][5] = { diff --git a/src/drivers/net/ath/ath9k/ar9003_eeprom.h b/src/drivers/net/ath/ath9k/ar9003_eeprom.h index f03879236..092227564 100644 --- a/src/drivers/net/ath/ath9k/ar9003_eeprom.h +++ b/src/drivers/net/ath/ath9k/ar9003_eeprom.h @@ -21,6 +21,7 @@ #define AR9003_EEPROM_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #define AR9300_EEP_VER 0xD000 #define AR9300_EEP_VER_MINOR_MASK 0xFFF diff --git a/src/drivers/net/ath/ath9k/ar9003_mac.h b/src/drivers/net/ath/ath9k/ar9003_mac.h index 6442bb779..a5f7e0432 100644 --- a/src/drivers/net/ath/ath9k/ar9003_mac.h +++ b/src/drivers/net/ath/ath9k/ar9003_mac.h @@ -20,6 +20,8 @@ #ifndef AR9003_MAC_H #define AR9003_MAC_H +FILE_SECBOOT ( FORBIDDEN ); + #define AR_DescId 0xffff0000 #define AR_DescId_S 16 #define AR_CtrlStat 0x00004000 diff --git a/src/drivers/net/ath/ath9k/ar9003_phy.h b/src/drivers/net/ath/ath9k/ar9003_phy.h index 443090d27..130b5c65b 100644 --- a/src/drivers/net/ath/ath9k/ar9003_phy.h +++ b/src/drivers/net/ath/ath9k/ar9003_phy.h @@ -17,6 +17,8 @@ #ifndef AR9003_PHY_H #define AR9003_PHY_H +FILE_SECBOOT ( FORBIDDEN ); + /* * Channel Register Map */ diff --git a/src/drivers/net/ath/ath9k/ar9340_initvals.h b/src/drivers/net/ath/ath9k/ar9340_initvals.h index 
784080b16..5ee400050 100644 --- a/src/drivers/net/ath/ath9k/ar9340_initvals.h +++ b/src/drivers/net/ath/ath9k/ar9340_initvals.h @@ -17,6 +17,8 @@ #ifndef INITVALS_9340_H #define INITVALS_9340_H +FILE_SECBOOT ( FORBIDDEN ); + static __unused const u32 ar9340_1p0_radio_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ {0x000160ac, 0xa4646800, 0xa4646800, 0xa4646800, 0xa4646800}, diff --git a/src/drivers/net/ath/ath9k/ar9485_initvals.h b/src/drivers/net/ath/ath9k/ar9485_initvals.h index c854398aa..8892bb1b7 100644 --- a/src/drivers/net/ath/ath9k/ar9485_initvals.h +++ b/src/drivers/net/ath/ath9k/ar9485_initvals.h @@ -17,6 +17,8 @@ #ifndef INITVALS_9485_H #define INITVALS_9485_H +FILE_SECBOOT ( FORBIDDEN ); + static __unused const u32 ar9485_1_1_mac_core[][2] = { /* Addr allmodes */ {0x00000008, 0x00000000}, diff --git a/src/drivers/net/ath/ath9k/ath9k.c b/src/drivers/net/ath/ath9k/ath9k.c index 98b7ecd5a..a3020bc65 100644 --- a/src/drivers/net/ath/ath9k/ath9k.c +++ b/src/drivers/net/ath/ath9k/ath9k.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/pci.h> #include "ath9k.h" diff --git a/src/drivers/net/ath/ath9k/ath9k.h b/src/drivers/net/ath/ath9k/ath9k.h index 36dc97e99..9d62d9b82 100644 --- a/src/drivers/net/ath/ath9k/ath9k.h +++ b/src/drivers/net/ath/ath9k/ath9k.h @@ -21,6 +21,7 @@ #define ATH9K_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "common.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ani.c b/src/drivers/net/ath/ath9k/ath9k_ani.c index 76ca79cba..2b0f11c3f 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ani.c +++ b/src/drivers/net/ath/ath9k/ath9k_ani.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +FILE_SECBOOT ( FORBIDDEN ); + #include "hw.h" #include "hw-ops.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c b/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c index a98e4bb66..622955368 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar5008_phy.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/malloc.h> #include <ipxe/io.h> diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9002_calib.c b/src/drivers/net/ath/ath9k/ath9k_ar9002_calib.c index f8978a558..b03da98b3 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9002_calib.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9002_calib.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include "hw.h" #include "hw-ops.h" #include "ar9002_phy.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9002_hw.c b/src/drivers/net/ath/ath9k/ath9k_ar9002_hw.c index 85d0c7de6..0477af4d1 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9002_hw.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9002_hw.c @@ -18,6 +18,7 @@ */ FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "hw.h" #include "ar5008_initvals.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9002_mac.c b/src/drivers/net/ath/ath9k/ath9k_ar9002_mac.c index 057756b2e..b6308ffc4 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9002_mac.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9002_mac.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c b/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c index 65cfad597..396763533 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9002_phy.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +FILE_SECBOOT ( FORBIDDEN ); + /** * DOC: Programming Atheros 802.11n analog front end radios * diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9003_calib.c b/src/drivers/net/ath/ath9k/ath9k_ar9003_calib.c index c37168bd2..4b6b7f907 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9003_calib.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9003_calib.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9003_eeprom.c b/src/drivers/net/ath/ath9k/ath9k_ar9003_eeprom.c index 95e54b9b2..96aa9ef2b 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9003_eeprom.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9003_eeprom.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include <ipxe/malloc.h> diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9003_hw.c b/src/drivers/net/ath/ath9k/ath9k_ar9003_hw.c index f3020fd7e..2276ded25 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9003_hw.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9003_hw.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include "hw.h" #include "ar9003_mac.h" #include "ar9003_2p2_initvals.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9003_mac.c b/src/drivers/net/ath/ath9k/ath9k_ar9003_mac.c index 1fa4039cc..7f1b26182 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9003_mac.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9003_mac.c @@ -16,6 +16,9 @@ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ + +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c b/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c index b66358b92..ebc10c50f 100644 --- a/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c +++ b/src/drivers/net/ath/ath9k/ath9k_ar9003_phy.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_calib.c b/src/drivers/net/ath/ath9k/ath9k_calib.c index 6f3e07e6d..5da8a5ba0 100644 --- a/src/drivers/net/ath/ath9k/ath9k_calib.c +++ b/src/drivers/net/ath/ath9k/ath9k_calib.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include "hw.h" #include "hw-ops.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_common.c b/src/drivers/net/ath/ath9k/ath9k_common.c index ce33afbd4..474ff6a0c 100644 --- a/src/drivers/net/ath/ath9k/ath9k_common.c +++ b/src/drivers/net/ath/ath9k/ath9k_common.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + /* * Module for common driver code between ath9k and ath9k_htc */ diff --git a/src/drivers/net/ath/ath9k/ath9k_eeprom.c b/src/drivers/net/ath/ath9k/ath9k_eeprom.c index a20423790..95e677591 100644 --- a/src/drivers/net/ath/ath9k/ath9k_eeprom.c +++ b/src/drivers/net/ath/ath9k/ath9k_eeprom.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_eeprom_4k.c b/src/drivers/net/ath/ath9k/ath9k_eeprom_4k.c index a42ad3d97..a0ba897aa 100644 --- a/src/drivers/net/ath/ath9k/ath9k_eeprom_4k.c +++ b/src/drivers/net/ath/ath9k/ath9k_eeprom_4k.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_eeprom_9287.c b/src/drivers/net/ath/ath9k/ath9k_eeprom_9287.c index ee16a6f18..323a6307e 100644 --- a/src/drivers/net/ath/ath9k/ath9k_eeprom_9287.c +++ b/src/drivers/net/ath/ath9k/ath9k_eeprom_9287.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_eeprom_def.c b/src/drivers/net/ath/ath9k/ath9k_eeprom_def.c index 9b144d70b..e4941188f 100644 --- a/src/drivers/net/ath/ath9k/ath9k_eeprom_def.c +++ b/src/drivers/net/ath/ath9k/ath9k_eeprom_def.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_hw.c b/src/drivers/net/ath/ath9k/ath9k_hw.c index 554e9be3c..8f123add6 100644 --- a/src/drivers/net/ath/ath9k/ath9k_hw.c +++ b/src/drivers/net/ath/ath9k/ath9k_hw.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/vsprintf.h> #include <ipxe/io.h> diff --git a/src/drivers/net/ath/ath9k/ath9k_init.c b/src/drivers/net/ath/ath9k/ath9k_init.c index 05ed3336a..2350f724e 100644 --- a/src/drivers/net/ath/ath9k/ath9k_init.c +++ b/src/drivers/net/ath/ath9k/ath9k_init.c @@ -18,6 +18,7 @@ */ FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include <ipxe/malloc.h> #include <ipxe/pci_io.h> diff --git a/src/drivers/net/ath/ath9k/ath9k_mac.c b/src/drivers/net/ath/ath9k/ath9k_mac.c index c2f6d630a..d8ef0682f 100644 --- a/src/drivers/net/ath/ath9k/ath9k_mac.c +++ b/src/drivers/net/ath/ath9k/ath9k_mac.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_main.c b/src/drivers/net/ath/ath9k/ath9k_main.c index 0a17b9bcb..81562c87d 100644 --- a/src/drivers/net/ath/ath9k/ath9k_main.c +++ b/src/drivers/net/ath/ath9k/ath9k_main.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "ath9k.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_recv.c b/src/drivers/net/ath/ath9k/ath9k_recv.c index 0ffe9d45a..245e1390a 100644 --- a/src/drivers/net/ath/ath9k/ath9k_recv.c +++ b/src/drivers/net/ath/ath9k/ath9k_recv.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "ath9k.h" diff --git a/src/drivers/net/ath/ath9k/ath9k_xmit.c b/src/drivers/net/ath/ath9k/ath9k_xmit.c index 7f4f28ab8..354cb37a8 100644 --- a/src/drivers/net/ath/ath9k/ath9k_xmit.c +++ b/src/drivers/net/ath/ath9k/ath9k_xmit.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "ath9k.h" diff --git a/src/drivers/net/ath/ath9k/calib.h b/src/drivers/net/ath/ath9k/calib.h index b811accf0..955721d63 100644 --- a/src/drivers/net/ath/ath9k/calib.h +++ b/src/drivers/net/ath/ath9k/calib.h @@ -21,6 +21,7 @@ #define CALIB_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/common.h b/src/drivers/net/ath/ath9k/common.h index 0fe3b5be6..b3d4fa481 100644 --- a/src/drivers/net/ath/ath9k/common.h +++ b/src/drivers/net/ath/ath9k/common.h @@ -18,6 +18,7 @@ */ FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "../ath.h" diff --git a/src/drivers/net/ath/ath9k/eeprom.h b/src/drivers/net/ath/ath9k/eeprom.h index 8a48d6e5f..86521bccf 100644 --- a/src/drivers/net/ath/ath9k/eeprom.h +++ b/src/drivers/net/ath/ath9k/eeprom.h @@ -21,6 +21,7 @@ #define EEPROM_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #define AR_EEPROM_MODAL_SPURS 5 diff --git a/src/drivers/net/ath/ath9k/hw-ops.h b/src/drivers/net/ath/ath9k/hw-ops.h index 51c7b08e4..c35be7dd0 100644 --- a/src/drivers/net/ath/ath9k/hw-ops.h +++ b/src/drivers/net/ath/ath9k/hw-ops.h @@ -18,6 +18,7 @@ #define ATH9K_HW_OPS_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "hw.h" diff --git a/src/drivers/net/ath/ath9k/hw.h b/src/drivers/net/ath/ath9k/hw.h index 051074691..02e9fc346 100644 --- a/src/drivers/net/ath/ath9k/hw.h +++ b/src/drivers/net/ath/ath9k/hw.h @@ -21,6 +21,7 @@ #define HW_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include <errno.h> diff --git a/src/drivers/net/ath/ath9k/mac.h b/src/drivers/net/ath/ath9k/mac.h index 0c0a75948..be0429169 100644 --- a/src/drivers/net/ath/ath9k/mac.h +++ b/src/drivers/net/ath/ath9k/mac.h @@ -21,6 +21,7 @@ #define MAC_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include <unistd.h> diff --git a/src/drivers/net/ath/ath9k/phy.h b/src/drivers/net/ath/ath9k/phy.h index 28f59ecd9..69adbb1a5 100644 --- 
a/src/drivers/net/ath/ath9k/phy.h +++ b/src/drivers/net/ath/ath9k/phy.h @@ -18,6 +18,7 @@ #define PHY_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #define CHANSEL_DIV 15 #define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV) diff --git a/src/drivers/net/ath/ath9k/reg.h b/src/drivers/net/ath/ath9k/reg.h index 67762b6d1..8e81f4787 100644 --- a/src/drivers/net/ath/ath9k/reg.h +++ b/src/drivers/net/ath/ath9k/reg.h @@ -18,6 +18,7 @@ #define REG_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "../reg.h" diff --git a/src/drivers/net/ath/ath_hw.c b/src/drivers/net/ath/ath_hw.c index 8e3128868..0a044ea50 100644 --- a/src/drivers/net/ath/ath_hw.c +++ b/src/drivers/net/ath/ath_hw.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include <ipxe/io.h> #include "ath.h" diff --git a/src/drivers/net/ath/ath_key.c b/src/drivers/net/ath/ath_key.c index d269a45ac..217e98ef4 100644 --- a/src/drivers/net/ath/ath_key.c +++ b/src/drivers/net/ath/ath_key.c @@ -18,6 +18,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +FILE_SECBOOT ( FORBIDDEN ); + #include "ath.h" #include "reg.h" diff --git a/src/drivers/net/ath/ath_regd.c b/src/drivers/net/ath/ath_regd.c index 190b1f9f5..0dba257bc 100644 --- a/src/drivers/net/ath/ath_regd.c +++ b/src/drivers/net/ath/ath_regd.c @@ -17,6 +17,8 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ +FILE_SECBOOT ( FORBIDDEN ); + #include "regd.h" #include "regd_common.h" diff --git a/src/drivers/net/ath/reg.h b/src/drivers/net/ath/reg.h index 7982f4344..c4833a515 100644 --- a/src/drivers/net/ath/reg.h +++ b/src/drivers/net/ath/reg.h @@ -21,6 +21,7 @@ #define ATH_REGISTERS_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #define AR_MIBC 0x0040 #define AR_MIBC_COW 0x00000001 diff --git a/src/drivers/net/ath/regd.h b/src/drivers/net/ath/regd.h index fd09a0c8d..a0634cd73 100644 --- a/src/drivers/net/ath/regd.h +++ b/src/drivers/net/ath/regd.h @@ -21,6 +21,7 @@ #define REGD_H FILE_LICENCE ( BSD2 ); +FILE_SECBOOT ( FORBIDDEN ); #include "ath.h" diff --git a/src/drivers/net/ath/regd_common.h b/src/drivers/net/ath/regd_common.h index ee1ac3f40..1e219c1dd 100644 --- a/src/drivers/net/ath/regd_common.h +++ b/src/drivers/net/ath/regd_common.h @@ -20,6 +20,8 @@ #ifndef REGD_COMMON_H #define REGD_COMMON_H +FILE_SECBOOT ( FORBIDDEN ); + enum EnumRd { NO_ENUMRD = 0x00, NULL1_WORLD = 0x03, diff --git a/src/drivers/net/axge.c b/src/drivers/net/axge.c index fb274d24f..922c94d91 100644 --- a/src/drivers/net/axge.c +++ b/src/drivers/net/axge.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> diff --git a/src/drivers/net/axge.h b/src/drivers/net/axge.h index e22e0ec47..c30ca5950 100644 --- a/src/drivers/net/axge.h +++ b/src/drivers/net/axge.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <ipxe/usb.h> #include <ipxe/usbnet.h> diff --git a/src/drivers/net/b44.c b/src/drivers/net/b44.c index 30ece5574..c6ca99865 100644 --- a/src/drivers/net/b44.c +++ b/src/drivers/net/b44.c @@ -31,6 +31,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); +#include <string.h> #include <errno.h> #include <assert.h> #include <stdio.h> diff --git a/src/drivers/net/bnx2.c b/src/drivers/net/bnx2.c index d5783ff99..ae94c9bed 100644 --- a/src/drivers/net/bnx2.c +++ 
b/src/drivers/net/bnx2.c @@ -36,13 +36,14 @@ FILE_LICENCE ( GPL_ANY ); /* The bnx2 seems to be picky about the alignment of the receive buffers * and possibly the status block. */ -static struct bss { +struct bss { struct tx_bd tx_desc_ring[TX_DESC_CNT]; struct rx_bd rx_desc_ring[RX_DESC_CNT]; unsigned char rx_buf[RX_BUF_CNT][RX_BUF_SIZE]; struct status_block status_blk; struct statistics_block stats_blk; -} bnx2_bss; +}; +#define bnx2_bss NIC_FAKE_BSS ( struct bss ) static struct bnx2 bnx2; @@ -2671,6 +2672,12 @@ err_out_disable: return 0; } +static void +bnx2_remove(struct nic *nic, void *hwdev __unused) +{ + bnx2_disable(nic); +} + static struct pci_device_id bnx2_nics[] = { PCI_ROM(0x14e4, 0x164a, "bnx2-5706", "Broadcom NetXtreme II BCM5706", 0), PCI_ROM(0x14e4, 0x164c, "bnx2-5708", "Broadcom NetXtreme II BCM5708", 0), @@ -2680,7 +2687,8 @@ static struct pci_device_id bnx2_nics[] = { PCI_DRIVER ( bnx2_driver, bnx2_nics, PCI_NO_CLASS ); -DRIVER ( "BNX2", nic_driver, pci_driver, bnx2_driver, bnx2_probe, bnx2_disable ); +DRIVER ( "BNX2", nic_driver, pci_driver, bnx2_driver, + bnx2_probe, bnx2_remove, bnx2_bss ); /* static struct pci_driver bnx2_driver __pci_driver = { diff --git a/src/drivers/net/bnxt/bnxt.c b/src/drivers/net/bnxt/bnxt.c index a127f6cef..703f6e384 100644 --- a/src/drivers/net/bnxt/bnxt.c +++ b/src/drivers/net/bnxt/bnxt.c @@ -3,11 +3,13 @@ FILE_LICENCE ( GPL2_ONLY ); #include <mii.h> #include <stdio.h> +#include <string.h> #include <errno.h> #include <unistd.h> #include <byteswap.h> #include <ipxe/pci.h> #include <ipxe/iobuf.h> +#include <ipxe/dma.h> #include <ipxe/timer.h> #include <ipxe/malloc.h> #include <ipxe/if_ether.h> @@ -23,66 +25,71 @@ static void bnxt_adv_cq_index ( struct bnxt *bp, u16 cnt ); static int bnxt_rx_complete ( struct net_device *dev, struct rx_pkt_cmpl *rx ); void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ); + static struct pci_device_id bnxt_nics[] = { - PCI_ROM( 0x14e4, 0x1604, "14e4-1604", 
"14e4-1604", 0 ), - PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "14e4-1605", 0 ), - PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "14e4-1606", 0 ), - PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "14e4-1609", 0 ), - PCI_ROM( 0x14e4, 0x1614, "14e4-1614", "14e4-1614", 0 ), - PCI_ROM( 0x14e4, 0x16c0, "14e4-16C0", "14e4-16C0", 0 ), - PCI_ROM( 0x14e4, 0x16c1, "14e4-16C1", "14e4-16C1", BNXT_FLAG_PCI_VF ), - PCI_ROM( 0x14e4, 0x16c8, "14e4-16C8", "14e4-16C8", 0 ), - PCI_ROM( 0x14e4, 0x16c9, "14e4-16C9", "14e4-16C9", 0 ), - PCI_ROM( 0x14e4, 0x16ca, "14e4-16CA", "14e4-16CA", 0 ), - PCI_ROM( 0x14e4, 0x16cc, "14e4-16CC", "14e4-16CC", 0 ), - PCI_ROM( 0x14e4, 0x16cd, "14e4-16CD", "14e4-16CD", 0 ), - PCI_ROM( 0x14e4, 0x16ce, "14e4-16CE", "14e4-16CE", 0 ), - PCI_ROM( 0x14e4, 0x16cf, "14e4-16CF", "14e4-16CF", 0 ), - PCI_ROM( 0x14e4, 0x16d0, "14e4-16D0", "14e4-16D0", 0 ), - PCI_ROM( 0x14e4, 0x16d1, "14e4-16D1", "14e4-16D1", 0 ), - PCI_ROM( 0x14e4, 0x16d2, "14e4-16D2", "14e4-16D2", 0 ), - PCI_ROM( 0x14e4, 0x16d4, "14e4-16D4", "14e4-16D4", 0 ), - PCI_ROM( 0x14e4, 0x16d5, "14e4-16D5", "14e4-16D5", 0 ), - PCI_ROM( 0x14e4, 0x16d6, "14e4-16D6", "14e4-16D6", 0 ), - PCI_ROM( 0x14e4, 0x16d7, "14e4-16D7", "14e4-16D7", 0 ), - PCI_ROM( 0x14e4, 0x16d8, "14e4-16D8", "14e4-16D8", 0 ), - PCI_ROM( 0x14e4, 0x16d9, "14e4-16D9", "14e4-16D9", 0 ), - PCI_ROM( 0x14e4, 0x16da, "14e4-16DA", "14e4-16DA", 0 ), - PCI_ROM( 0x14e4, 0x16db, "14e4-16DB", "14e4-16DB", 0 ), - PCI_ROM( 0x14e4, 0x16dc, "14e4-16DC", "14e4-16DC", BNXT_FLAG_PCI_VF ), - PCI_ROM( 0x14e4, 0x16de, "14e4-16DE", "14e4-16DE", 0 ), - PCI_ROM( 0x14e4, 0x16df, "14e4-16DF", "14e4-16DF", 0 ), - PCI_ROM( 0x14e4, 0x16e0, "14e4-16E0", "14e4-16E0", 0 ), - PCI_ROM( 0x14e4, 0x16e2, "14e4-16E2", "14e4-16E2", 0 ), - PCI_ROM( 0x14e4, 0x16e3, "14e4-16E3", "14e4-16E3", 0 ), - PCI_ROM( 0x14e4, 0x16e4, "14e4-16E4", "14e4-16E4", 0 ), - PCI_ROM( 0x14e4, 0x16e7, "14e4-16E7", "14e4-16E7", 0 ), - PCI_ROM( 0x14e4, 0x16e8, "14e4-16E8", "14e4-16E8", 0 ), - PCI_ROM( 0x14e4, 0x16e9, 
"14e4-16E9", "14e4-16E9", 0 ), - PCI_ROM( 0x14e4, 0x16ea, "14e4-16EA", "14e4-16EA", 0 ), - PCI_ROM( 0x14e4, 0x16eb, "14e4-16EB", "14e4-16EB", 0 ), - PCI_ROM( 0x14e4, 0x16ec, "14e4-16EC", "14e4-16EC", 0 ), - PCI_ROM( 0x14e4, 0x16ed, "14e4-16ED", "14e4-16ED", 0 ), - PCI_ROM( 0x14e4, 0x16ee, "14e4-16EE", "14e4-16EE", 0 ), - PCI_ROM( 0x14e4, 0x16ef, "14e4-16EF", "14e4-16EF", 0 ), - PCI_ROM( 0x14e4, 0x16f0, "14e4-16F0", "14e4-16F0", 0 ), - PCI_ROM( 0x14e4, 0x16f1, "14e4-16F1", "14e4-16F1", 0 ), - PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "14e4-1750", 0 ), - PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "14e4-1751", 0 ), - PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "14e4-1752", 0 ), - PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "14e4-1760", 0 ), - PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "14e4-1800", 0 ), - PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "14e4-1801", 0 ), - PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "14e4-1802", 0 ), - PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "14e4-1803", 0 ), - PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "14e4-1804", 0 ), - PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "14e4-1805", 0 ), - PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "14e4-1806", BNXT_FLAG_PCI_VF ), - PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "14e4-1807", BNXT_FLAG_PCI_VF ), - PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "14e4-1808", BNXT_FLAG_PCI_VF ), - PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "14e4-1809", BNXT_FLAG_PCI_VF ), - PCI_ROM( 0x14e4, 0xd802, "14e4-D802", "14e4-D802", 0 ), - PCI_ROM( 0x14e4, 0xd804, "14e4-D804", "14e4-D804", 0 ), + PCI_ROM( 0x14e4, 0x1604, "14e4-1604", "Broadcom BCM957454", 0 ), + PCI_ROM( 0x14e4, 0x1605, "14e4-1605", "Broadcom BCM957454 RDMA", 0 ), + PCI_ROM( 0x14e4, 0x1606, "14e4-1606", "Broadcom BCM957454 RDMA VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1607, "bcm957454-1607", "Broadcom BCM957454 HV VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1608, "bcm957454-1608", "Broadcom BCM957454 RDMA HV VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1609, "14e4-1609", "Broadcom BCM957454 VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 
0x14e4, 0x1614, "14e4-1614", "Broadcom BCM957454", 0 ), + PCI_ROM( 0x14e4, 0x16bd, "bcm95741x-16bd", "Broadcom BCM95741x RDMA_HV_VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x16c0, "14e4-16c0", "Broadcom BCM957417", 0 ), + PCI_ROM( 0x14e4, 0x16c1, "14e4-16c1", "Broadcom BCM95741x VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x16c5, "bcm95741x-16c5", "Broadcom BCM95741x HV VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x16c8, "14e4-16c8", "Broadcom BCM957301", 0 ), + PCI_ROM( 0x14e4, 0x16c9, "14e4-16c9", "Broadcom BCM957302", 0 ), + PCI_ROM( 0x14e4, 0x16ca, "14e4-16ca", "Broadcom BCM957304", 0 ), + PCI_ROM( 0x14e4, 0x16cc, "14e4-16cc", "Broadcom BCM957417 MF", 0 ), + PCI_ROM( 0x14e4, 0x16cd, "14e4-16cd", "Broadcom BCM958700", 0 ), + PCI_ROM( 0x14e4, 0x16ce, "14e4-16ce", "Broadcom BCM957311", 0 ), + PCI_ROM( 0x14e4, 0x16cf, "14e4-16cf", "Broadcom BCM957312", 0 ), + PCI_ROM( 0x14e4, 0x16d0, "14e4-16d0", "Broadcom BCM957402", 0 ), + PCI_ROM( 0x14e4, 0x16d1, "14e4-16d1", "Broadcom BCM957404", 0 ), + PCI_ROM( 0x14e4, 0x16d2, "14e4-16d2", "Broadcom BCM957406", 0 ), + PCI_ROM( 0x14e4, 0x16d4, "14e4-16d4", "Broadcom BCM957402 MF", 0 ), + PCI_ROM( 0x14e4, 0x16d5, "14e4-16d5", "Broadcom BCM957407", 0 ), + PCI_ROM( 0x14e4, 0x16d6, "14e4-16d6", "Broadcom BCM957412", 0 ), + PCI_ROM( 0x14e4, 0x16d7, "14e4-16d7", "Broadcom BCM957414", 0 ), + PCI_ROM( 0x14e4, 0x16d8, "14e4-16d8", "Broadcom BCM957416", 0 ), + PCI_ROM( 0x14e4, 0x16d9, "14e4-16d9", "Broadcom BCM957417", 0 ), + PCI_ROM( 0x14e4, 0x16da, "14e4-16da", "Broadcom BCM957402", 0 ), + PCI_ROM( 0x14e4, 0x16db, "14e4-16db", "Broadcom BCM957404", 0 ), + PCI_ROM( 0x14e4, 0x16dc, "14e4-16dc", "Broadcom BCM95741x VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x16de, "14e4-16de", "Broadcom BCM957412 MF", 0 ), + PCI_ROM( 0x14e4, 0x16df, "14e4-16df", "Broadcom BCM957314", 0 ), + PCI_ROM( 0x14e4, 0x16e0, "14e4-16e0", "Broadcom BCM957317", 0 ), + PCI_ROM( 0x14e4, 0x16e2, "14e4-16e2", "Broadcom BCM957417", 0 ), + PCI_ROM( 0x14e4, 0x16e3, 
"14e4-16e3", "Broadcom BCM957416", 0 ), + PCI_ROM( 0x14e4, 0x16e4, "14e4-16e4", "Broadcom BCM957317", 0 ), + PCI_ROM( 0x14e4, 0x16e7, "14e4-16e7", "Broadcom BCM957404 MF", 0 ), + PCI_ROM( 0x14e4, 0x16e8, "14e4-16e8", "Broadcom BCM957406 MF", 0 ), + PCI_ROM( 0x14e4, 0x16e9, "14e4-16e9", "Broadcom BCM957407", 0 ), + PCI_ROM( 0x14e4, 0x16ea, "14e4-16ea", "Broadcom BCM957407 MF", 0 ), + PCI_ROM( 0x14e4, 0x16eb, "14e4-16eb", "Broadcom BCM957412 RDMA MF", 0 ), + PCI_ROM( 0x14e4, 0x16ec, "14e4-16ec", "Broadcom BCM957414 MF", 0 ), + PCI_ROM( 0x14e4, 0x16ed, "14e4-16ed", "Broadcom BCM957414 RDMA MF", 0 ), + PCI_ROM( 0x14e4, 0x16ee, "14e4-16ee", "Broadcom BCM957416 MF", 0 ), + PCI_ROM( 0x14e4, 0x16ef, "14e4-16ef", "Broadcom BCM957416 RDMA MF", 0 ), + PCI_ROM( 0x14e4, 0x16f0, "14e4-16f0", "Broadcom BCM957320", 0 ), + PCI_ROM( 0x14e4, 0x16f1, "14e4-16f1", "Broadcom BCM957320", 0 ), + PCI_ROM( 0x14e4, 0x1750, "14e4-1750", "Broadcom BCM957508", 0 ), + PCI_ROM( 0x14e4, 0x1751, "14e4-1751", "Broadcom BCM957504", 0 ), + PCI_ROM( 0x14e4, 0x1752, "14e4-1752", "Broadcom BCM957502", 0 ), + PCI_ROM( 0x14e4, 0x1760, "14e4-1760", "Broadcom BCM957608", 0 ), + PCI_ROM( 0x14e4, 0x1800, "14e4-1800", "Broadcom BCM957502 MF", 0 ), + PCI_ROM( 0x14e4, 0x1801, "14e4-1801", "Broadcom BCM957504 MF", 0 ), + PCI_ROM( 0x14e4, 0x1802, "14e4-1802", "Broadcom BCM957508 MF", 0 ), + PCI_ROM( 0x14e4, 0x1803, "14e4-1803", "Broadcom BCM957502 RDMA MF", 0 ), + PCI_ROM( 0x14e4, 0x1804, "14e4-1804", "Broadcom BCM957504 RDMA MF", 0 ), + PCI_ROM( 0x14e4, 0x1805, "14e4-1805", "Broadcom BCM957508 RDMA MF", 0 ), + PCI_ROM( 0x14e4, 0x1806, "14e4-1806", "Broadcom BCM9575xx VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1807, "14e4-1807", "Broadcom BCM9575xx RDMA VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1808, "14e4-1808", "Broadcom BCM9575xx HV VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1809, "14e4-1809", "Broadcom BCM9575xx RDMA HV VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x1819, "bcm95760x-1819", "Broadcom 
BCM95760x VF", BNXT_FLAG_PCI_VF ), + PCI_ROM( 0x14e4, 0x181b, "bcm95760x-181b", "Broadcom BCM95760x HV VF", BNXT_FLAG_PCI_VF ), }; /** @@ -128,23 +135,23 @@ static int bnxt_get_pci_info ( struct bnxt *bp ) DBGP ( "%s\n", __func__ ); /* Disable Interrupt */ - pci_read_word16 ( bp->pdev, PCI_COMMAND, &bp->cmd_reg ); + pci_read_config_word ( bp->pdev, PCI_COMMAND, &bp->cmd_reg ); cmd_reg = bp->cmd_reg | PCI_COMMAND_INTX_DISABLE; - pci_write_word ( bp->pdev, PCI_COMMAND, cmd_reg ); - pci_read_word16 ( bp->pdev, PCI_COMMAND, &cmd_reg ); + pci_write_config_word ( bp->pdev, PCI_COMMAND, cmd_reg ); + pci_read_config_word ( bp->pdev, PCI_COMMAND, &cmd_reg ); /* SSVID */ - pci_read_word16 ( bp->pdev, + pci_read_config_word ( bp->pdev, PCI_SUBSYSTEM_VENDOR_ID, &bp->subsystem_vendor ); /* SSDID */ - pci_read_word16 ( bp->pdev, + pci_read_config_word ( bp->pdev, PCI_SUBSYSTEM_ID, &bp->subsystem_device ); /* Function Number */ - pci_read_byte ( bp->pdev, + pci_read_config_byte ( bp->pdev, PCICFG_ME_REGISTER, &bp->pf_num ); @@ -194,7 +201,7 @@ static void dev_p5_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag ) val = ( ( u64 )DBC_MSG_XID ( xid, flag ) << 32 ) | ( u64 )DBC_MSG_IDX ( idx ); - write64 ( val, off ); + writeq ( val, off ); } static void dev_p7_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, u32 toggle ) @@ -208,7 +215,7 @@ static void dev_p7_db ( struct bnxt *bp, u32 idx, u32 xid, u32 flag, u32 epoch, ( u64 )DBC_MSG_IDX ( idx ) | ( u64 )DBC_MSG_EPCH ( epoch ) | ( u64 )DBC_MSG_TOGGLE ( toggle ); - write64 ( val, off ); + writeq ( val, off ); } static void bnxt_db_nq ( struct bnxt *bp ) @@ -221,20 +228,20 @@ static void bnxt_db_nq ( struct bnxt *bp ) dev_p5_db ( bp, ( u32 )bp->nq.cons_id, ( u32 )bp->nq_ring_id, DBC_DBC_TYPE_NQ_ARM ); else - write32 ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) ); + writel ( CMPL_DOORBELL_KEY_CMPL, ( bp->bar1 + 0 ) ); } static void bnxt_db_cq ( struct bnxt *bp ) { if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) dev_p7_db 
( bp, ( u32 )bp->cq.cons_id, - ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL, + ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ, ( u32 )bp->cq.epoch, ( u32 )bp->nq.toggle ); else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) ) dev_p5_db ( bp, ( u32 )bp->cq.cons_id, - ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ_ARMALL ); + ( u32 )bp->cq_ring_id, DBC_DBC_TYPE_CQ); else - write32 ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ), + writel ( CQ_DOORBELL_KEY_IDX ( bp->cq.cons_id ), ( bp->bar1 + 0 ) ); } @@ -246,7 +253,7 @@ static void bnxt_db_rx ( struct bnxt *bp, u32 idx ) else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) ) dev_p5_db ( bp, idx, ( u32 )bp->rx_ring_id, DBC_DBC_TYPE_SRQ ); else - write32 ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) ); + writel ( RX_DOORBELL_KEY_RX | idx, ( bp->bar1 + 0 ) ); } static void bnxt_db_tx ( struct bnxt *bp, u32 idx ) @@ -257,7 +264,7 @@ static void bnxt_db_tx ( struct bnxt *bp, u32 idx ) else if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5 ) ) dev_p5_db ( bp, idx, ( u32 )bp->tx_ring_id, DBC_DBC_TYPE_SQ ); else - write32 ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ), + writel ( ( u32 ) ( TX_DOORBELL_KEY_TX | idx ), ( bp->bar1 + 0 ) ); } @@ -282,51 +289,6 @@ static u16 bnxt_get_pkt_vlan ( char *src ) return 0; } -static u16 bnxt_get_rx_vlan ( struct rx_pkt_cmpl *rx_cmp, struct rx_pkt_cmpl_hi *rx_cmp_hi ) -{ - struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp; - struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct rx_pkt_v3_cmpl_hi * )rx_cmp_hi; - u16 rx_vlan; - - /* Get VLAN ID from RX completion ring */ - if ( ( rx_cmp_v3->flags_type & RX_PKT_V3_CMPL_TYPE_MASK ) == - RX_PKT_V3_CMPL_TYPE_RX_L2_V3 ) { - if ( rx_cmp_hi_v3->flags2 & RX_PKT_V3_CMPL_HI_FLAGS2_META_FORMAT_ACT_REC_PTR ) - rx_vlan = ( rx_cmp_hi_v3->metadata0 & - RX_PKT_V3_CMPL_HI_METADATA0_VID_MASK ); - else - rx_vlan = 0; - } else { - if ( rx_cmp_hi->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN ) - rx_vlan = ( rx_cmp_hi->metadata & - RX_PKT_CMPL_METADATA_VID_MASK ); - else 
- rx_vlan = 0; - } - - return rx_vlan; -} - -int bnxt_vlan_drop ( struct bnxt *bp, u16 rx_vlan ) -{ - if ( rx_vlan ) { - if ( bp->vlan_tx ) { - if ( rx_vlan == bp->vlan_tx ) - return 0; - } else { - if ( rx_vlan == bp->vlan_id ) - return 0; - if ( rx_vlan && !bp->vlan_id ) - return 0; - } - } else { - if ( !bp->vlan_tx && !bp->vlan_id ) - return 0; - } - - return 1; -} - static inline u32 bnxt_tx_avail ( struct bnxt *bp ) { u32 avail; @@ -339,7 +301,7 @@ static inline u32 bnxt_tx_avail ( struct bnxt *bp ) return ( avail-use ); } -void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len ) +void bnxt_set_txq ( struct bnxt *bp, int entry, physaddr_t mapping, int len ) { struct tx_bd_short *prod_bd; @@ -354,7 +316,7 @@ void bnxt_set_txq ( struct bnxt *bp, int entry, dma_addr_t mapping, int len ) else prod_bd->flags_type = TX_BD_SHORT_FLAGS_LHINT_GTE2K; prod_bd->flags_type |= TX_BD_FLAGS; - prod_bd->dma.addr = mapping; + prod_bd->dma = mapping; prod_bd->len = len; prod_bd->opaque = ( u32 )entry; } @@ -382,7 +344,7 @@ int bnxt_free_rx_iob ( struct bnxt *bp ) for ( i = 0; i < bp->rx.buf_cnt; i++ ) { if ( bp->rx.iob[i] ) { - free_iob ( bp->rx.iob[i] ); + free_rx_iob ( bp->rx.iob[i] ); bp->rx.iob[i] = NULL; } } @@ -402,14 +364,14 @@ static void bnxt_set_rx_desc ( u8 *buf, struct io_buffer *iob, desc->flags_type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT; desc->len = MAX_ETHERNET_PACKET_BUFFER_SIZE; desc->opaque = idx; - desc->dma.addr = virt_to_bus ( iob->data ); + desc->dma = iob_dma ( iob ); } static int bnxt_alloc_rx_iob ( struct bnxt *bp, u16 cons_id, u16 iob_idx ) { struct io_buffer *iob; - iob = alloc_iob ( BNXT_RX_STD_DMA_SZ ); + iob = alloc_rx_iob ( BNXT_RX_STD_DMA_SZ, bp->dma ); if ( !iob ) { DBGP ( "- %s ( ): alloc_iob Failed\n", __func__ ); return -ENOMEM; @@ -459,7 +421,7 @@ u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob, struct rx_pkt_v3_cmpl *rx_cmp_v3 = ( struct rx_pkt_v3_cmpl * )rx_cmp; struct rx_pkt_v3_cmpl_hi *rx_cmp_hi_v3 = ( struct 
rx_pkt_v3_cmpl_hi * )rx_cmp_hi; u8 *rx_buf = ( u8 * )iob->data; - u16 err_flags, rx_vlan; + u16 err_flags; u8 ignore_chksum_err = 0; int i; @@ -487,17 +449,8 @@ u8 bnxt_rx_drop ( struct bnxt *bp, struct io_buffer *iob, return 2; } - rx_vlan = bnxt_get_rx_vlan ( rx_cmp, rx_cmp_hi ); - dbg_rx_vlan ( bp, rx_cmp_hi->metadata, rx_cmp_hi->flags2, rx_vlan ); - if ( bnxt_vlan_drop ( bp, rx_vlan ) ) { - bp->rx.drop_vlan++; - return 3; - } iob_put ( iob, rx_len ); - if ( rx_vlan ) - bnxt_add_vlan ( iob, rx_vlan ); - bp->rx.good++; return 0; } @@ -524,7 +477,7 @@ void bnxt_rx_process ( struct net_device *dev, struct bnxt *bp, u8 drop; dump_rx_bd ( rx_cmp, rx_cmp_hi, desc_idx ); - assert ( !iob ); + assert ( iob ); drop = bnxt_rx_drop ( bp, iob, rx_cmp, rx_cmp_hi, rx_cmp->len ); dbg_rxp ( iob->data, rx_cmp->len, drop ); if ( drop ) @@ -548,7 +501,7 @@ static int bnxt_rx_complete ( struct net_device *dev, u8 cmpl_bit = bp->cq.completion_bit; if ( bp->cq.cons_id == ( bp->cq.ring_cnt - 1 ) ) { - rx_cmp_hi = ( struct rx_pkt_cmpl_hi * )bp->cq.bd_virt; + rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) CQ_DMA_ADDR ( bp ); cmpl_bit ^= 0x1; /* Ring has wrapped. 
*/ } else rx_cmp_hi = ( struct rx_pkt_cmpl_hi * ) ( rx_cmp+1 ); @@ -560,19 +513,28 @@ static int bnxt_rx_complete ( struct net_device *dev, return NO_MORE_CQ_BD_TO_SERVICE; } -void bnxt_mm_init ( struct bnxt *bp, const char *func ) +void bnxt_mm_init_hwrm ( struct bnxt *bp, const char *func ) { DBGP ( "%s\n", __func__ ); memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE ); memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE ); memset ( bp->hwrm_addr_dma, 0, DMA_BUFFER_SIZE ); - bp->req_addr_mapping = virt_to_bus ( bp->hwrm_addr_req ); - bp->resp_addr_mapping = virt_to_bus ( bp->hwrm_addr_resp ); - bp->dma_addr_mapping = virt_to_bus ( bp->hwrm_addr_dma ); + bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + bp->hwrm_cmd_timeout = HWRM_CMD_DEFAULT_TIMEOUT; + dbg_mem ( bp, func ); +} + +void bnxt_mm_init_rings ( struct bnxt *bp, const char *func ) +{ + DBGP ( "%s\n", __func__ ); + memset ( bp->tx.bd_virt, 0, TX_RING_BUFFER_SIZE ); + memset ( bp->rx.bd_virt, 0, RX_RING_BUFFER_SIZE ); + memset ( bp->cq.bd_virt, 0, CQ_RING_BUFFER_SIZE ); + memset ( bp->nq.bd_virt, 0, NQ_RING_BUFFER_SIZE ); + bp->link_status = STATUS_LINK_DOWN; bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; - bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; bp->nq.ring_cnt = MAX_NQ_DESC_CNT; bp->cq.ring_cnt = MAX_CQ_DESC_CNT; bp->tx.ring_cnt = MAX_TX_DESC_CNT; @@ -602,10 +564,7 @@ void bnxt_mm_nic ( struct bnxt *bp ) bp->rx.iob_cnt = 0; bp->rx.epoch = 0; - bp->link_status = STATUS_LINK_DOWN; - bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; - bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; - bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; + bp->mtu = MAX_ETHERNET_PACKET_BUFFER_SIZE; bp->nq.ring_cnt = MAX_NQ_DESC_CNT; bp->cq.ring_cnt = MAX_CQ_DESC_CNT; bp->tx.ring_cnt = MAX_TX_DESC_CNT; @@ -613,73 +572,95 @@ void bnxt_mm_nic ( struct bnxt *bp ) bp->rx.buf_cnt = NUM_RX_BUFFERS; } -void bnxt_free_mem ( struct bnxt *bp ) +void bnxt_free_rings_mem ( struct bnxt *bp ) { DBGP ( "%s\n", __func__ ); if ( 
bp->nq.bd_virt ) { - free_phys ( bp->nq.bd_virt, NQ_RING_BUFFER_SIZE ); + dma_free ( &bp->nq_mapping, bp->nq.bd_virt, NQ_RING_BUFFER_SIZE ); bp->nq.bd_virt = NULL; } if ( bp->cq.bd_virt ) { - free_phys ( bp->cq.bd_virt, CQ_RING_BUFFER_SIZE ); + dma_free ( &bp->cq_mapping, bp->cq.bd_virt, CQ_RING_BUFFER_SIZE ); bp->cq.bd_virt = NULL; } if ( bp->rx.bd_virt ) { - free_phys ( bp->rx.bd_virt, RX_RING_BUFFER_SIZE ); + dma_free ( &bp->rx_mapping, bp->rx.bd_virt, RX_RING_BUFFER_SIZE ); bp->rx.bd_virt = NULL; } if ( bp->tx.bd_virt ) { - free_phys ( bp->tx.bd_virt, TX_RING_BUFFER_SIZE ); + dma_free ( &bp->tx_mapping, bp->tx.bd_virt, TX_RING_BUFFER_SIZE ); bp->tx.bd_virt = NULL; } + DBGP ( "- %s ( ): - Done\n", __func__ ); +} + +void bnxt_free_hwrm_mem ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); if ( bp->hwrm_addr_dma ) { - free_phys ( bp->hwrm_addr_dma, DMA_BUFFER_SIZE ); - bp->dma_addr_mapping = 0; + dma_free ( &bp->dma_mapped, bp->hwrm_addr_dma, DMA_BUFFER_SIZE ); bp->hwrm_addr_dma = NULL; } if ( bp->hwrm_addr_resp ) { - free_phys ( bp->hwrm_addr_resp, RESP_BUFFER_SIZE ); - bp->resp_addr_mapping = 0; + dma_free ( &bp->resp_mapping, bp->hwrm_addr_resp, RESP_BUFFER_SIZE ); bp->hwrm_addr_resp = NULL; } if ( bp->hwrm_addr_req ) { - free_phys ( bp->hwrm_addr_req, REQ_BUFFER_SIZE ); - bp->req_addr_mapping = 0; + dma_free ( &bp->req_mapping, bp->hwrm_addr_req, REQ_BUFFER_SIZE ); bp->hwrm_addr_req = NULL; } DBGP ( "- %s ( ): - Done\n", __func__ ); } -int bnxt_alloc_mem ( struct bnxt *bp ) +int bnxt_alloc_hwrm_mem ( struct bnxt *bp ) { DBGP ( "%s\n", __func__ ); - bp->hwrm_addr_req = malloc_phys ( REQ_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); - bp->hwrm_addr_resp = malloc_phys ( RESP_BUFFER_SIZE, - BNXT_DMA_ALIGNMENT ); - bp->hwrm_addr_dma = malloc_phys ( DMA_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); - bp->tx.bd_virt = malloc_phys ( TX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); - bp->rx.bd_virt = malloc_phys ( RX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); - bp->cq.bd_virt = malloc_phys ( 
CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); - bp->nq.bd_virt = malloc_phys ( NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); - test_if ( bp->hwrm_addr_req && + bp->hwrm_addr_req = dma_alloc ( bp->dma, &bp->req_mapping, + REQ_BUFFER_SIZE, REQ_BUFFER_SIZE ); + bp->hwrm_addr_resp = dma_alloc ( bp->dma, &bp->resp_mapping, + RESP_BUFFER_SIZE, RESP_BUFFER_SIZE ); + bp->hwrm_addr_dma = dma_alloc ( bp->dma, &bp->dma_mapped, + DMA_BUFFER_SIZE, DMA_BUFFER_SIZE); + + if ( bp->hwrm_addr_req && bp->hwrm_addr_resp && - bp->hwrm_addr_dma && - bp->tx.bd_virt && + bp->hwrm_addr_dma ) { + bnxt_mm_init_hwrm ( bp, __func__ ); + return STATUS_SUCCESS; + } + + DBGP ( "- %s ( ): Failed\n", __func__ ); + bnxt_free_hwrm_mem ( bp ); + return -ENOMEM; +} + +int bnxt_alloc_rings_mem ( struct bnxt *bp ) +{ + DBGP ( "%s\n", __func__ ); + bp->tx.bd_virt = dma_alloc ( bp->dma, &bp->tx_mapping, + TX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); + bp->rx.bd_virt = dma_alloc ( bp->dma, &bp->rx_mapping, + RX_RING_BUFFER_SIZE, DMA_ALIGN_4K ); + bp->cq.bd_virt = dma_alloc ( bp->dma, &bp->cq_mapping, + CQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + bp->nq.bd_virt = dma_alloc ( bp->dma, &bp->nq_mapping, + NQ_RING_BUFFER_SIZE, BNXT_DMA_ALIGNMENT ); + if ( bp->tx.bd_virt && bp->rx.bd_virt && bp->nq.bd_virt && bp->cq.bd_virt ) { - bnxt_mm_init ( bp, __func__ ); + bnxt_mm_init_rings ( bp, __func__ ); return STATUS_SUCCESS; } DBGP ( "- %s ( ): Failed\n", __func__ ); - bnxt_free_mem ( bp ); + bnxt_free_rings_mem ( bp ); return -ENOMEM; } @@ -689,7 +670,7 @@ static void hwrm_init ( struct bnxt *bp, struct input *req, u16 cmd, u16 len ) req->req_type = cmd; req->cmpl_ring = ( u16 )HWRM_NA_SIGNATURE; req->target_id = ( u16 )HWRM_NA_SIGNATURE; - req->resp_addr = bp->resp_addr_mapping; + req->resp_addr = RESP_DMA_ADDR ( bp ); req->seq_id = bp->seq_id++; } @@ -698,10 +679,10 @@ static void hwrm_write_req ( struct bnxt *bp, void *req, u32 cnt ) u32 i = 0; for ( i = 0; i < cnt; i++ ) { - write32 ( ( ( u32 * )req )[i], + writel ( ( ( u32 
* )req )[i], ( bp->bar0 + GRC_COM_CHAN_BASE + ( i * 4 ) ) ); } - write32 ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) ); + writel ( 0x1, ( bp->bar0 + GRC_COM_CHAN_BASE + GRC_COM_CHAN_TRIG ) ); } static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len ) @@ -709,10 +690,10 @@ static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len ) struct hwrm_short_input sreq; memset ( &sreq, 0, sizeof ( struct hwrm_short_input ) ); - sreq.req_type = ( u16 ) ( ( struct input * )bp->hwrm_addr_req )->req_type; + sreq.req_type = ( u16 ) ( ( struct input * ) REQ_DMA_ADDR (bp ) )->req_type; sreq.signature = SHORT_REQ_SIGNATURE_SHORT_CMD; sreq.size = len; - sreq.req_addr = bp->req_addr_mapping; + sreq.req_addr = REQ_DMA_ADDR ( bp ); mdelay ( 100 ); dbg_short_cmd ( ( u8 * )&sreq, __func__, sizeof ( struct hwrm_short_input ) ); @@ -721,8 +702,8 @@ static void short_hwrm_cmd_req ( struct bnxt *bp, u16 len ) static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func ) { - struct input *req = ( struct input * )bp->hwrm_addr_req; - struct output *resp = ( struct output * )bp->hwrm_addr_resp; + struct input *req = ( struct input * ) REQ_DMA_ADDR ( bp ); + struct output *resp = ( struct output * ) RESP_DMA_ADDR ( bp ); u8 *ptr = ( u8 * )resp; u32 idx; u32 wait_cnt = HWRM_CMD_DEFAULT_MULTIPLAYER ( ( u32 )tmo ); @@ -736,7 +717,7 @@ static int wait_resp ( struct bnxt *bp, u32 tmo, u16 len, const char *func ) for ( idx = 0; idx < wait_cnt; idx++ ) { resp_len = resp->resp_len; - test_if ( resp->seq_id == req->seq_id && + if ( resp->seq_id == req->seq_id && resp->req_type == req->req_type && ptr[resp_len - 1] == 1 ) { bp->last_resp_code = resp->error_code; @@ -757,8 +738,8 @@ static int bnxt_hwrm_ver_get ( struct bnxt *bp ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_ver_get_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_ver_get_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_ver_get_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct 
hwrm_ver_get_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VER_GET, cmd_len ); req->hwrm_intf_maj = HWRM_VERSION_MAJOR; req->hwrm_intf_min = HWRM_VERSION_MINOR; @@ -782,7 +763,7 @@ static int bnxt_hwrm_ver_get ( struct bnxt *bp ) resp->chip_bond_id << 8 | resp->chip_platform_type; bp->chip_num = resp->chip_num; - test_if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) && + if ( ( resp->dev_caps_cfg & SHORT_CMD_SUPPORTED ) && ( resp->dev_caps_cfg & SHORT_CMD_REQUIRED ) ) FLAG_SET ( bp->flags, BNXT_FLAG_HWRM_SHORT_CMD_SUPP ); bp->hwrm_max_ext_req_len = resp->max_ext_req_len; @@ -808,8 +789,8 @@ static int bnxt_hwrm_func_resource_qcaps ( struct bnxt *bp ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_func_resource_qcaps_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_func_resource_qcaps_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_func_resource_qcaps_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_func_resource_qcaps_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESOURCE_QCAPS, cmd_len ); req->fid = ( u16 )HWRM_NA_SIGNATURE; @@ -907,7 +888,7 @@ static void bnxt_hwrm_assign_resources ( struct bnxt *bp ) if ( FLAG_TEST ( bp->flags, BNXT_FLAG_RESOURCE_QCAPS_SUPPORT ) ) enables = bnxt_set_ring_info ( bp ); - req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp ); req->num_cmpl_rings = bp->num_cmpl_rings; req->num_tx_rings = bp->num_tx_rings; req->num_rx_rings = bp->num_rx_rings; @@ -927,8 +908,8 @@ static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp ) if ( bp->vf ) return STATUS_SUCCESS; - req = ( struct hwrm_func_qcaps_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_func_qcaps_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_func_qcaps_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_func_qcaps_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCAPS, cmd_len ); req->fid = ( 
u16 )HWRM_NA_SIGNATURE; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -940,9 +921,14 @@ static int bnxt_hwrm_func_qcaps_req ( struct bnxt *bp ) bp->fid = resp->fid; bp->port_idx = ( u8 )resp->port_id; + if ( resp->flags & FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE ) { + bp->err_rcvry_supported = 1; + } + /* Get MAC address for this PF */ memcpy ( &bp->mac_addr[0], &resp->mac_address[0], ETH_ALEN ); dbg_func_qcaps ( bp ); + return STATUS_SUCCESS; } @@ -954,8 +940,8 @@ static int bnxt_hwrm_func_qcfg_req ( struct bnxt *bp ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_func_qcfg_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_func_qcfg_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_func_qcfg_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_func_qcfg_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_QCFG, cmd_len ); req->fid = ( u16 )HWRM_NA_SIGNATURE; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -996,8 +982,8 @@ static int bnxt_hwrm_port_phy_qcaps_req ( struct bnxt *bp ) DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_port_phy_qcaps_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_port_phy_qcaps_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_port_phy_qcaps_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_port_phy_qcaps_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCAPS, cmd_len ); rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); if ( rc ) { @@ -1017,7 +1003,7 @@ static int bnxt_hwrm_func_reset_req ( struct bnxt *bp ) struct hwrm_func_reset_input *req; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_func_reset_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_reset_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_RESET, cmd_len ); if ( !bp->vf ) req->func_reset_level = FUNC_RESET_REQ_FUNC_RESET_LEVEL_RESETME; @@ -1034,7 +1020,7 @@ static int bnxt_hwrm_func_cfg_req ( 
struct bnxt *bp ) if ( bp->vf ) return STATUS_SUCCESS; - req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len ); req->fid = ( u16 )HWRM_NA_SIGNATURE; bnxt_hwrm_assign_resources ( bp ); @@ -1049,6 +1035,69 @@ static int bnxt_hwrm_func_cfg_req ( struct bnxt *bp ) return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); } +static int bnxt_hwrm_error_recovery_req ( struct bnxt *bp ) +{ + struct hwrm_error_recovery_qcfg_input *req; + struct hwrm_error_recovery_qcfg_output *resp; + int rc = 0; + u8 i = 0; + u16 cmd_len = ( u16 ) sizeof ( struct hwrm_error_recovery_qcfg_input ); + + DBGP ( "%s\n", __func__ ); + /* Set default error recovery heartbeat polling value (in 100ms)*/ + bp->er.drv_poll_freq = 100; + if ( ! ( bp->err_rcvry_supported ) ) { + return STATUS_SUCCESS; + } + + req = ( struct hwrm_error_recovery_qcfg_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_error_recovery_qcfg_output * ) RESP_DMA_ADDR ( bp ); + + hwrm_init ( bp, ( void * ) req, ( u16 ) HWRM_ER_QCFG, cmd_len ); + + rc = wait_resp ( bp, HWRM_CMD_WAIT ( 6 ), cmd_len, __func__ ); + if ( rc ) { + DBGP ( "- %s ( ): Failed\n", __func__ ); + return STATUS_FAILURE; + } + + bp->er.flags = resp->flags; + bp->er.drv_poll_freq = resp->driver_polling_freq; + bp->er.master_wait_period = resp->master_wait_period; + bp->er.normal_wait_period = resp->normal_wait_period; + bp->er.master_wait_post_rst = resp->master_wait_post_reset; + bp->er.max_bailout_post_rst = resp->max_bailout_time; + + bp->er.fw_status_reg = resp->fw_health_status_reg; + bp->er.fw_hb_reg = resp->fw_heartbeat_reg; + bp->er.fw_rst_cnt_reg = resp->fw_reset_cnt_reg; + bp->er.recvry_cnt_reg = resp->err_recovery_cnt_reg; + bp->er.rst_inprg_reg = resp->reset_inprogress_reg; + + bp->er.rst_inprg_reg_mask = resp->reset_inprogress_reg_mask; + bp->er.reg_array_cnt = resp->reg_array_cnt; + + DBGP ( "flags = 0x%x\n", 
resp->flags ); + DBGP ( "driver_polling_freq = 0x%x\n", resp->driver_polling_freq ); + DBGP ( "master_wait_period = 0x%x\n", resp->master_wait_period ); + DBGP ( "normal_wait_period = 0x%x\n", resp->normal_wait_period ); + DBGP ( "wait_post_reset = 0x%x\n", resp->master_wait_post_reset ); + DBGP ( "bailout_post_reset = 0x%x\n", resp->max_bailout_time ); + DBGP ( "reg_array_cnt = %x\n", resp->reg_array_cnt ); + + for ( i = 0; i < resp->reg_array_cnt; i++ ) { + bp->er.rst_reg[i] = resp->reset_reg[i]; + bp->er.rst_reg_val[i] = resp->reset_reg_val[i]; + bp->er.delay_after_rst[i] = resp->delay_after_reset[i]; + + DBGP ( "rst_reg = %x ", bp->er.rst_reg[i] ); + DBGP ( "rst_reg_val = %x ", bp->er.rst_reg_val[i] ); + DBGP ( "rst_after_reset = %x\n", bp->er.delay_after_rst[i] ); + } + + return STATUS_SUCCESS; +} + static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp ) { u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_drv_rgtr_input ); @@ -1056,14 +1105,27 @@ static int bnxt_hwrm_func_drv_rgtr ( struct bnxt *bp ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_func_drv_rgtr_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_drv_rgtr_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_RGTR, cmd_len ); /* Register with HWRM */ req->enables = FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD | FUNC_DRV_RGTR_REQ_ENABLES_VER; - req->async_event_fwd[0] |= 0x01; + req->flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; + + req->async_event_fwd[0] |= 1 << ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE; + req->async_event_fwd[0] |= 1 << ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE; + req->async_event_fwd[0] |= 1 << ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE; + req->async_event_fwd[0] |= 1 << ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE; + + if ( bp->err_rcvry_supported ) { + req->flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT; + req->flags |= FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; + 
req->async_event_fwd[0] |= 1 << ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY; + req->async_event_fwd[0] |= 1 << ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY; + } + req->os_type = FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER; req->ver_maj = IPXE_VERSION_MAJOR; req->ver_min = IPXE_VERSION_MINOR; @@ -1088,7 +1150,7 @@ static int bnxt_hwrm_func_drv_unrgtr ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_DRIVER_REG ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_func_drv_unrgtr_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_drv_unrgtr_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_DRV_UNRGTR, cmd_len ); req->flags = FUNC_DRV_UNRGTR_REQ_FLAGS_PREPARE_FOR_SHUTDOWN; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -1113,7 +1175,7 @@ static int bnxt_hwrm_set_async_event ( struct bnxt *bp ) u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_vf_cfg_input ); struct hwrm_func_vf_cfg_input *req; - req = ( struct hwrm_func_vf_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_vf_cfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_VF_CFG, cmd_len ); req->enables = VF_CFG_ENABLE_FLAGS; @@ -1127,7 +1189,7 @@ static int bnxt_hwrm_set_async_event ( struct bnxt *bp ) u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_cfg_input ); struct hwrm_func_cfg_input *req; - req = ( struct hwrm_func_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_cfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_CFG, cmd_len ); req->fid = ( u16 )HWRM_NA_SIGNATURE; req->enables = FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR; @@ -1147,8 +1209,8 @@ static int bnxt_hwrm_cfa_l2_filter_alloc ( struct bnxt *bp ) u32 enables; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_cfa_l2_filter_alloc_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_cfa_l2_filter_alloc_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_cfa_l2_filter_alloc_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct 
hwrm_cfa_l2_filter_alloc_output * ) RESP_DMA_ADDR ( bp ); if ( bp->vf ) flags |= CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST; enables = CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | @@ -1188,7 +1250,7 @@ static int bnxt_hwrm_cfa_l2_filter_free ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_L2_FILTER ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_cfa_l2_filter_free_input * )bp->hwrm_addr_req; + req = ( struct hwrm_cfa_l2_filter_free_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_FILTER_FREE, cmd_len ); req->l2_filter_id = bp->l2_filter_id; @@ -1227,7 +1289,7 @@ static int bnxt_hwrm_set_rx_mask ( struct bnxt *bp, u32 rx_mask ) struct hwrm_cfa_l2_set_rx_mask_input *req; u32 mask = set_rx_mask ( rx_mask ); - req = ( struct hwrm_cfa_l2_set_rx_mask_input * )bp->hwrm_addr_req; + req = ( struct hwrm_cfa_l2_set_rx_mask_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_CFA_L2_SET_RX_MASK, cmd_len ); req->vnic_id = bp->vnic_id; @@ -1244,8 +1306,8 @@ static int bnxt_hwrm_port_phy_qcfg ( struct bnxt *bp, u16 idx ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_port_phy_qcfg_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_port_phy_qcfg_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_port_phy_qcfg_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_port_phy_qcfg_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_PHY_QCFG, cmd_len ); rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); if ( rc ) { @@ -1282,9 +1344,9 @@ static int bnxt_hwrm_nvm_get_variable_req ( struct bnxt *bp, struct hwrm_nvm_get_variable_input *req; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_nvm_get_variable_input * )bp->hwrm_addr_req; + req = ( struct hwrm_nvm_get_variable_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_NVM_GET_VARIABLE, cmd_len ); - req->dest_data_addr = bp->dma_addr_mapping; + req->dest_data_addr = DMA_DMA_ADDR ( bp ); req->data_len = 
data_len; req->option_num = option_num; req->dimensions = dimensions; @@ -1296,28 +1358,28 @@ static int bnxt_hwrm_nvm_get_variable_req ( struct bnxt *bp, static int bnxt_get_link_speed ( struct bnxt *bp ) { - u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma; + u32 *ptr32 = ( u32 * ) DMA_DMA_ADDR ( bp ); DBGP ( "%s\n", __func__ ); if ( ! ( FLAG_TEST (bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) { - test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, ( u16 )LINK_SPEED_DRV_NUM, 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) return STATUS_FAILURE; bp->link_set = SET_LINK ( *ptr32, SPEED_DRV_MASK, SPEED_DRV_SHIFT ); - test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, ( u16 )D3_LINK_SPEED_FW_NUM, 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) return STATUS_FAILURE; bp->link_set |= SET_LINK ( *ptr32, D3_SPEED_FW_MASK, D3_SPEED_FW_SHIFT ); } - test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, + if ( bnxt_hwrm_nvm_get_variable_req ( bp, 4, ( u16 )LINK_SPEED_FW_NUM, 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) return STATUS_FAILURE; bp->link_set |= SET_LINK ( *ptr32, SPEED_FW_MASK, SPEED_FW_SHIFT ); - test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, + if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, ( u16 )PORT_CFG_LINK_SETTINGS_MEDIA_AUTO_DETECT_NUM, 1, ( u16 )bp->port_idx ) != STATUS_SUCCESS ) return STATUS_FAILURE; @@ -1379,40 +1441,6 @@ static int bnxt_get_link_speed ( struct bnxt *bp ) return STATUS_SUCCESS; } -static int bnxt_get_vlan ( struct bnxt *bp ) -{ - u32 *ptr32 = ( u32 * )bp->hwrm_addr_dma; - - /* If VF is set to TRUE, Do not issue this command */ - if ( bp->vf ) - return STATUS_SUCCESS; - - if ( ! 
( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7 ) ) ) { - test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 1, - ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_NUM, 1, - ( u16 )bp->ordinal_value ) != STATUS_SUCCESS ) - return STATUS_FAILURE; - - bp->mba_cfg2 = SET_MBA ( *ptr32, VLAN_MASK, VLAN_SHIFT ); - test_if ( bnxt_hwrm_nvm_get_variable_req ( bp, 16, - ( u16 )FUNC_CFG_PRE_BOOT_MBA_VLAN_VALUE_NUM, 1, - ( u16 )bp->ordinal_value ) != STATUS_SUCCESS ) - return STATUS_FAILURE; - - bp->mba_cfg2 |= SET_MBA ( *ptr32, VLAN_VALUE_MASK, VLAN_VALUE_SHIFT ); - if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED ) - bp->vlan_id = bp->mba_cfg2 & VLAN_VALUE_MASK; - else - bp->vlan_id = 0; - - if ( bp->mba_cfg2 & FUNC_CFG_PRE_BOOT_MBA_VLAN_ENABLED ) - DBGP ( "VLAN MBA Enabled ( %d )\n", - ( bp->mba_cfg2 & VLAN_VALUE_MASK ) ); - - } - return STATUS_SUCCESS; -} - static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp ) { u16 cmd_len = ( u16 )sizeof ( struct hwrm_func_backing_store_qcfg_input ); @@ -1422,7 +1450,7 @@ static int bnxt_hwrm_backing_store_qcfg ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_func_backing_store_qcfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_backing_store_qcfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_QCFG, cmd_len ); return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -1437,7 +1465,7 @@ static int bnxt_hwrm_backing_store_cfg ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_func_backing_store_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_func_backing_store_cfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_FUNC_BACKING_STORE_CFG, cmd_len ); req->flags = FUNC_BACKING_STORE_CFG_REQ_FLAGS_PREBOOT_MODE; @@ -1456,8 +1484,8 @@ static int bnxt_hwrm_queue_qportcfg ( struct bnxt *bp ) if ( ! 
( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_queue_qportcfg_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_queue_qportcfg_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_queue_qportcfg_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_queue_qportcfg_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_QUEUE_QPORTCFG, cmd_len ); req->flags = 0; req->port_id = 0; @@ -1480,7 +1508,7 @@ static int bnxt_hwrm_port_mac_cfg ( struct bnxt *bp ) if ( bp->vf ) return STATUS_SUCCESS; - req = ( struct hwrm_port_mac_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_port_mac_cfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_PORT_MAC_CFG, cmd_len ); req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -1502,7 +1530,7 @@ static int bnxt_hwrm_port_phy_cfg ( struct bnxt *bp ) u8 auto_duplex = 0; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_port_phy_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_port_phy_cfg_input * ) REQ_DMA_ADDR ( bp ); flags = PORT_PHY_CFG_REQ_FLAGS_FORCE | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY; @@ -1689,7 +1717,9 @@ static int bnxt_get_phy_link ( struct bnxt *bp ) mdelay ( LINK_POLL_WAIT_TIME ); } dbg_link_state ( bp, ( u32 ) ( ( i + 1 ) * 100 ) ); - bnxt_set_link ( bp ); + if ( !bp->er.er_rst_on ) { + bnxt_set_link ( bp ); + } return STATUS_SUCCESS; } @@ -1701,8 +1731,8 @@ static int bnxt_hwrm_stat_ctx_alloc ( struct bnxt *bp ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_stat_ctx_alloc_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_stat_ctx_alloc_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_stat_ctx_alloc_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_stat_ctx_alloc_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_ALLOC, cmd_len ); rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); if ( rc ) { @@ 
-1725,7 +1755,7 @@ static int bnxt_hwrm_stat_ctx_free ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_STAT_CTX ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_stat_ctx_free_input * )bp->hwrm_addr_req; + req = ( struct hwrm_stat_ctx_free_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_STAT_CTX_FREE, cmd_len ); req->stat_ctx_id = ( u32 )bp->stat_ctx_id; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -1748,7 +1778,7 @@ static int bnxt_hwrm_ring_free_grp ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_RING_GRP ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_ring_grp_free_input * )bp->hwrm_addr_req; + req = ( struct hwrm_ring_grp_free_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_FREE, cmd_len ); req->ring_group_id = ( u32 )bp->ring_grp_id; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -1772,8 +1802,8 @@ static int bnxt_hwrm_ring_alloc_grp ( struct bnxt *bp ) if ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) return STATUS_SUCCESS; - req = ( struct hwrm_ring_grp_alloc_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_ring_grp_alloc_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_ring_grp_alloc_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_ring_grp_alloc_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_GRP_ALLOC, cmd_len ); req->cr = bp->cq_ring_id; req->rr = bp->rx_ring_id; @@ -1798,7 +1828,7 @@ int bnxt_hwrm_ring_free ( struct bnxt *bp, u16 ring_id, u8 ring_type ) struct hwrm_ring_free_input *req; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_ring_free_input * )bp->hwrm_addr_req; + req = ( struct hwrm_ring_free_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_FREE, cmd_len ); req->ring_type = ring_type; req->ring_id = ring_id; @@ -1813,8 +1843,8 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type ) int rc; DBGP ( "%s\n", __func__ ); - req = 
( struct hwrm_ring_alloc_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_ring_alloc_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_ring_alloc_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_ring_alloc_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_RING_ALLOC, cmd_len ); req->ring_type = type; switch ( type ) { @@ -1823,13 +1853,13 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type ) req->int_mode = BNXT_CQ_INTR_MODE ( ( (FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P7) ) || bp->vf ) ); req->length = ( u32 )bp->nq.ring_cnt; req->logical_id = 0xFFFF; // Required value for Thor FW? - req->page_tbl_addr = virt_to_bus ( bp->nq.bd_virt ); + req->page_tbl_addr = NQ_DMA_ADDR ( bp ); break; case RING_ALLOC_REQ_RING_TYPE_L2_CMPL: req->page_size = LM_PAGE_BITS ( 8 ); req->int_mode = BNXT_CQ_INTR_MODE ( bp->vf ); req->length = ( u32 )bp->cq.ring_cnt; - req->page_tbl_addr = virt_to_bus ( bp->cq.bd_virt ); + req->page_tbl_addr = CQ_DMA_ADDR ( bp ); if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) break; req->enables = RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID; @@ -1840,10 +1870,10 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type ) req->page_size = LM_PAGE_BITS ( 8 ); req->int_mode = RING_ALLOC_REQ_INT_MODE_POLL; req->length = ( u32 )bp->tx.ring_cnt; - req->queue_id = TX_RING_QID; + req->queue_id = ( u16 )bp->queue_id; req->stat_ctx_id = ( u32 )bp->stat_ctx_id; req->cmpl_ring_id = bp->cq_ring_id; - req->page_tbl_addr = virt_to_bus ( bp->tx.bd_virt ); + req->page_tbl_addr = TX_DMA_ADDR ( bp ); break; case RING_ALLOC_REQ_RING_TYPE_RX: req->page_size = LM_PAGE_BITS ( 8 ); @@ -1851,7 +1881,7 @@ static int bnxt_hwrm_ring_alloc ( struct bnxt *bp, u8 type ) req->length = ( u32 )bp->rx.ring_cnt; req->stat_ctx_id = ( u32 )STAT_CTX_ID; req->cmpl_ring_id = bp->cq_ring_id; - req->page_tbl_addr = virt_to_bus ( bp->rx.bd_virt ); + req->page_tbl_addr = RX_DMA_ADDR ( bp ); if ( ! 
( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) break; req->queue_id = ( u16 )RX_RING_QID; @@ -1979,8 +2009,8 @@ static int bnxt_hwrm_vnic_alloc ( struct bnxt *bp ) int rc; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_vnic_alloc_input * )bp->hwrm_addr_req; - resp = ( struct hwrm_vnic_alloc_output * )bp->hwrm_addr_resp; + req = ( struct hwrm_vnic_alloc_input * ) REQ_DMA_ADDR ( bp ); + resp = ( struct hwrm_vnic_alloc_output * ) RESP_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_ALLOC, cmd_len ); req->flags = VNIC_ALLOC_REQ_FLAGS_DEFAULT; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -2004,7 +2034,7 @@ static int bnxt_hwrm_vnic_free ( struct bnxt *bp ) if ( ! ( FLAG_TEST ( bp->flag_hwrm, VALID_VNIC_ID ) ) ) return STATUS_SUCCESS; - req = ( struct hwrm_vnic_free_input * )bp->hwrm_addr_req; + req = ( struct hwrm_vnic_free_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_FREE, cmd_len ); req->vnic_id = bp->vnic_id; rc = wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); @@ -2023,7 +2053,7 @@ static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp ) struct hwrm_vnic_cfg_input *req; DBGP ( "%s\n", __func__ ); - req = ( struct hwrm_vnic_cfg_input * )bp->hwrm_addr_req; + req = ( struct hwrm_vnic_cfg_input * ) REQ_DMA_ADDR ( bp ); hwrm_init ( bp, ( void * )req, ( u16 )HWRM_VNIC_CFG, cmd_len ); req->enables = VNIC_CFG_REQ_ENABLES_MRU; req->mru = bp->mtu; @@ -2038,7 +2068,6 @@ static int bnxt_hwrm_vnic_cfg ( struct bnxt *bp ) req->dflt_ring_grp = bp->ring_grp_id; } - req->flags = VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE; req->vnic_id = bp->vnic_id; return wait_resp ( bp, bp->hwrm_cmd_timeout, cmd_len, __func__ ); } @@ -2053,8 +2082,27 @@ static int bnxt_reset_rx_mask ( struct bnxt *bp ) return bnxt_hwrm_set_rx_mask ( bp, 0 ); } +static int bnxt_get_link_state ( struct bnxt *bp ) +{ + int rc = 0; + + DBGP ( "%s \n", __func__ ); + rc = bnxt_hwrm_port_phy_qcfg ( bp, PHY_STATUS ); + + return rc; +} + 
typedef int ( *hwrm_func_t ) ( struct bnxt *bp ); +hwrm_func_t bring_up_init[] = { + bnxt_hwrm_ver_get, /* HWRM_VER_GET */ + bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */ + bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */ + bnxt_get_device_address, /* HW MAC address */ + bnxt_get_link_state, + NULL +}; + hwrm_func_t bring_down_chip[] = { bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */ NULL, @@ -2072,19 +2120,20 @@ hwrm_func_t bring_down_nic[] = { bnxt_hwrm_stat_ctx_free, /* HWRM_STAT_CTX_FREE */ bnxt_hwrm_ring_free_cq, /* HWRM_RING_FREE - CQ Ring */ bnxt_hwrm_ring_free_nq, /* HWRM_RING_FREE - NQ Ring */ + bnxt_hwrm_func_drv_unrgtr, /* HWRM_FUNC_DRV_UNRGTR */ NULL, }; hwrm_func_t bring_up_chip[] = { bnxt_hwrm_ver_get, /* HWRM_VER_GET */ bnxt_hwrm_func_reset_req, /* HWRM_FUNC_RESET */ - bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */ bnxt_hwrm_func_qcaps_req, /* HWRM_FUNC_QCAPS */ + bnxt_hwrm_func_drv_rgtr, /* HWRM_FUNC_DRV_RGTR */ + bnxt_hwrm_error_recovery_req, /* HWRM_ERROR_RECOVERY_REQ */ bnxt_hwrm_backing_store_cfg, /* HWRM_FUNC_BACKING_STORE_CFG */ bnxt_hwrm_backing_store_qcfg, /* HWRM_FUNC_BACKING_STORE_QCFG */ bnxt_hwrm_func_resource_qcaps, /* HWRM_FUNC_RESOURCE_QCAPS */ bnxt_hwrm_port_phy_qcaps_req, /* HWRM_PORT_PHY_QCAPS */ bnxt_hwrm_func_qcfg_req, /* HWRM_FUNC_QCFG */ - bnxt_get_vlan, /* HWRM_NVM_GET_VARIABLE - vlan */ bnxt_hwrm_port_mac_cfg, /* HWRM_PORT_MAC_CFG */ bnxt_hwrm_func_cfg_req, /* HWRM_FUNC_CFG */ bnxt_query_phy_link, /* HWRM_PORT_PHY_QCFG */ @@ -2116,8 +2165,8 @@ int bnxt_hwrm_run ( hwrm_func_t cmds[], struct bnxt *bp ) int ret; for ( ptr = cmds; *ptr; ++ptr ) { - memset ( bp->hwrm_addr_req, 0, REQ_BUFFER_SIZE ); - memset ( bp->hwrm_addr_resp, 0, RESP_BUFFER_SIZE ); + memset ( ( void * ) REQ_DMA_ADDR ( bp ), 0, REQ_BUFFER_SIZE ); + memset ( ( void * ) RESP_DMA_ADDR ( bp ), 0, RESP_BUFFER_SIZE ); ret = ( *ptr ) ( bp ); if ( ret ) { DBGP ( "- %s ( ): Failed\n", __func__ ); @@ -2131,14 +2180,40 @@ int bnxt_hwrm_run ( hwrm_func_t cmds[], 
struct bnxt *bp ) #define bnxt_up_chip( bp ) bnxt_hwrm_run ( bring_up_chip, bp ) #define bnxt_down_nic( bp ) bnxt_hwrm_run ( bring_down_nic, bp ) #define bnxt_up_nic( bp ) bnxt_hwrm_run ( bring_up_nic, bp ) +#define bnxt_up_init( bp ) bnxt_hwrm_run ( bring_up_init, bp ) static int bnxt_open ( struct net_device *dev ) { struct bnxt *bp = dev->priv; DBGP ( "%s\n", __func__ ); + + /* Allocate and Initialise device specific parameters */ + if ( bnxt_alloc_rings_mem ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_alloc_rings_mem Failed\n", __func__ ); + return -ENOMEM; + } + bnxt_mm_nic ( bp ); - return (bnxt_up_nic ( bp )); + + if ( bnxt_up_chip ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ ); + goto err_bnxt_open; + } + + if ( bnxt_up_nic ( bp ) != 0 ) { + DBGP ( "- %s ( ): bnxt_up_nic\n", __func__); + goto err_bnxt_open; + } + + return 0; + +err_bnxt_open: + bnxt_down_nic ( bp ); + + bnxt_free_rings_mem ( bp ); + + return -1; } static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob ) @@ -2153,24 +2228,27 @@ static void bnxt_tx_adjust_pkt ( struct bnxt *bp, struct io_buffer *iob ) if ( iob_len ( iob ) != prev_len ) prev_len = iob_len ( iob ); - iob_pad ( iob, ETH_ZLEN ); - dbg_tx_pad ( prev_len, iob_len ( iob ) ); } static int bnxt_tx ( struct net_device *dev, struct io_buffer *iob ) { struct bnxt *bp = dev->priv; u16 len, entry; - dma_addr_t mapping; + physaddr_t mapping; + + if ( bp->er.er_rst_on ) { + /* Error recovery has been initiated */ + return -EBUSY; + } if ( bnxt_tx_avail ( bp ) < 1 ) { DBGP ( "- %s ( ): Failed no bd's available\n", __func__ ); return -ENOBUFS; } + mapping = iob_dma ( iob ); bnxt_tx_adjust_pkt ( bp, iob ); entry = bp->tx.prod_id; - mapping = virt_to_bus ( iob->data ); len = iob_len ( iob ); bp->tx.iob[entry] = iob; bnxt_set_txq ( bp, entry, mapping, len ); @@ -2204,18 +2282,310 @@ static void bnxt_adv_nq_index ( struct bnxt *bp, u16 cnt ) void bnxt_link_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl 
*evt ) { - switch ( evt->event_id ) { - case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: - if ( evt->event_data1 & 0x01 ) - bp->link_status = STATUS_LINK_ACTIVE; - else - bp->link_status = STATUS_LINK_DOWN; - bnxt_set_link ( bp ); - dbg_link_status ( bp ); - break; - default: - break; + if ( evt->event_data1 & 0x01 ) + bp->link_status = STATUS_LINK_ACTIVE; + else + bp->link_status = STATUS_LINK_DOWN; + + bnxt_set_link ( bp ); + dbg_link_status ( bp ); +} + +#define BNXT_FW_HEALTH_WIN_OFF 0x3000 +#define BNXT_REG_WINDOW_BASE 0x400 +#define BNXT_GRC_BASE_MASK 0xfff +#define BNXT_GRC_OFFSET_MASK 0xffc + +u32 bnxt_er_reg_write ( struct bnxt *bp, u32 reg_addr, u32 reg_val) +{ + u32 reg_base = 0; + + reg_base = reg_addr & ~BNXT_GRC_BASE_MASK; + + writel ( reg_base, bp->bar0 + BNXT_REG_WINDOW_BASE + 8 ); + + writel ( reg_val, bp->bar0 + ( BNXT_FW_HEALTH_WIN_OFF + + ( reg_addr & BNXT_GRC_OFFSET_MASK ) ) ); + + DBGP ("bnxt_er_reg_write: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val); + return reg_val; +} + +u32 bnxt_er_reg_read ( struct bnxt *bp, u32 reg_addr) +{ + u32 reg_val = 0; + u32 reg_base = 0; + + reg_base = reg_addr & ~BNXT_GRC_BASE_MASK; + + writel ( reg_base, bp->bar0 + BNXT_REG_WINDOW_BASE + 8 ); + + reg_val = readl ( bp->bar0 + ( BNXT_FW_HEALTH_WIN_OFF + + ( reg_addr & BNXT_GRC_OFFSET_MASK ) ) ); + + DBGP ("bnxt_er_reg_read: reg_addr = %x, reg_val = %x\n", reg_addr, reg_val); + return reg_val; +} + +u32 bnxt_er_get_reg_val ( struct bnxt *bp, u32 reg_addr, u32 reg_type, u32 mask ) +{ + u32 reg_val = 0; + + switch ( reg_type ) { + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_PCIE_CFG: + pci_read_config_dword ( bp->pdev, reg_addr & mask, ®_val ); + break; + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_GRC: + reg_val = bnxt_er_reg_read ( bp, reg_addr ); + break; + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR0: + reg_val = readl ( bp->bar0 + ( reg_addr & mask ) ); + break; + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1: + reg_val = readl ( bp->bar1 + ( reg_addr & mask ) ); + 
break; + default: + break; + } + DBGP ( "read_reg_val bp %p addr %x type %x : reg_val = %x\n", bp, reg_addr, reg_type, reg_val ); + return reg_val; +} + +void bnxt_rst_reg_val ( struct bnxt *bp, u32 reg_addr, u32 reg_val ) +{ + u32 mask = ER_QCFG_RESET_REG_ADDR_MASK; + u32 reg_type = reg_addr & ER_QCFG_RESET_REG_ADDR_SPACE_MASK; + + switch ( reg_type ) { + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_PCIE_CFG: + pci_write_config_dword ( bp->pdev, reg_addr & mask, reg_val ); + break; + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_GRC: + bnxt_er_reg_write ( bp, reg_addr, reg_val ); + break; + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR0: + writel ( reg_val, bp->bar0 + ( reg_addr & mask ) ); + break; + case ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1: + writel ( reg_val, bp->bar1 + ( reg_addr & mask ) ); + break; + default: + break; + } +} + +void bnxt_rst_er_registers ( struct bnxt *bp ) +{ + u32 delay_time = 0; + u8 i; + + for ( i = 0; i < bp->er.reg_array_cnt; i++ ) { + bnxt_rst_reg_val ( bp, bp->er.rst_reg[i], bp->er.rst_reg_val[i] ); + + delay_time = bp->er.delay_after_rst[i]; + if ( delay_time ) { + udelay ( delay_time * 100000 ); + } + } + +} + +void bnxt_er_task ( struct bnxt* bp, u8 hb_task ) +{ + u32 present_hb_cnt; + unsigned short pci_command, new_command; + u8 i; + + DBGP ( "%s(hb_task: %d)\n", __func__, hb_task ); + if ( bp->er.er_rst_on ) { + if ( timer_running ( &bp->wait_timer) ) { + /* Reset already in progress */ + return; + } + } + + if ( hb_task ) { + present_hb_cnt = bnxt_er_get_reg_val ( bp, + bp->er.fw_hb_reg, + bp->er.fw_hb_reg & ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK, + ER_QCFG_FW_HB_REG_ADDR_MASK ) ; + + if ( present_hb_cnt != bp->er.last_fw_hb ) { + bp->er.last_fw_hb = present_hb_cnt; + return; + } + } + + /* Heartbeat not incrementing, trigger error recovery */ + DBGP ( "%s(): Trigger Error Recovery\n", __func__ ); + bp->er.er_rst_on = 1; + /* Set a recovery phase wait timer */ + start_timer_fixed ( &bp->wait_timer, BNXT_ER_WAIT_TIMER_INTERVAL ( bp ) ); + + /* 
Disable bus master */ + pci_read_config_word ( bp->pdev, PCI_COMMAND, &pci_command ); + new_command = pci_command & ~PCI_COMMAND_MASTER; + pci_write_config_word ( bp->pdev, PCI_COMMAND, new_command ); + + /* Free up resources */ + bnxt_free_rx_iob ( bp ); + + /* wait for firmware to be operational */ + udelay ( bp->er.rst_min_dsecs * 100000 ); + + /* Reconfigure the PCI attributes */ + pci_write_config_word ( bp->pdev, PCI_COMMAND, pci_command ); + + if ( hb_task ) { + if ( bp->er.master_pf ) { + /* wait for master func wait period */ + udelay ( bp->er.master_wait_period * 100000 ); + + /* Reset register values */ + bnxt_rst_er_registers ( bp ); + + /* wait for master wait post reset */ + udelay ( bp->er.master_wait_post_rst * 100000 ); + } else { + /* wait for normal func wait period */ + udelay ( bp->er.normal_wait_period * 100000 ); + } + } + + for ( i = 0; i < bp->er.max_bailout_post_rst; i++ ) { + bp->er.fw_health_status = bnxt_er_get_reg_val ( bp, + bp->er.fw_status_reg, + bp->er.fw_status_reg & ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK, + ER_QCFG_FW_HEALTH_REG_ADDR_MASK ); + + if ( bp->er.fw_health_status == FW_STATUS_REG_CODE_READY ) + break; + + /* wait for 1 second */ + udelay ( 1000000 ); + } + + if ( bp->er.fw_health_status == FW_STATUS_REG_CODE_READY ) { + /* Initialize resources */ + bnxt_mm_nic ( bp ); + + /* Get device specific information */ + bnxt_up_chip ( bp ); + + /* Allocate queues */ + bnxt_up_nic ( bp ); + } + + /* Clear Reset in progress flag */ + bp->er.er_rst_on = 0; + stop_timer ( &bp->wait_timer ); +} + +void bnxt_process_er_event ( struct bnxt *bp, + struct hwrm_async_event_cmpl *evt ) +{ + if ( evt->event_data1 & + ASYNC_EVENT_CMPL_ER_EVENT_DATA1_RECOVERY_ENABLED ) { + bp->er.driver_initiated_recovery = 1; + start_timer_fixed ( &bp->task_timer, BNXT_ER_TIMER_INTERVAL ( bp ) ); + + } else { + bp->er.driver_initiated_recovery = 0; + stop_timer ( &bp->task_timer ); + } + + if ( evt->event_data1 & + ASYNC_EVENT_CMPL_ER_EVENT_DATA1_MASTER_FUNC 
) { + bp->er.master_pf = 1; + } else { + bp->er.master_pf = 0; + } + + bp->er.fw_health_status = bnxt_er_get_reg_val ( bp, + bp->er.fw_status_reg, + bp->er.fw_status_reg & ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK, + ER_QCFG_FW_HEALTH_REG_ADDR_MASK ); + /* Intialize the last fw heart beat count */ + bp->er.last_fw_hb = 0; + bp->er.last_fw_rst_cnt = bnxt_er_get_reg_val ( bp, + bp->er.fw_rst_cnt_reg, + bp->er.fw_rst_cnt_reg & ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK, + ER_QCFG_FW_RESET_CNT_REG_ADDR_MASK ); + bp->er.rst_in_progress = bnxt_er_get_reg_val ( bp, + bp->er.rst_inprg_reg, + bp->er.rst_inprg_reg & ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK, + ER_QCFG_RESET_INPRG_REG_ADDR_MASK ); + bp->er.err_recovery_cnt = bnxt_er_get_reg_val ( bp, + bp->er.recvry_cnt_reg, + bp->er.recvry_cnt_reg & ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK, + ER_QCFG_RCVRY_CNT_REG_ADDR_MASK ); +} + +void bnxt_process_reset_notify_event ( struct bnxt *bp, + struct hwrm_async_event_cmpl *evt ) +{ + DBGP ( "Reset Notify Async event" ); + if ( ( ( evt->event_data1 ) & + ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_MASK ) == + ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_FATAL) { + DBGP ( " error recovery initiated\n" ); + bp->er.rst_min_dsecs = evt->timestamp_lo; + bp->er.rst_max_dsecs = evt->timestamp_hi; + + if ( bp->er.rst_min_dsecs == 0 ) + bp->er.rst_min_dsecs = ER_DFLT_FW_RST_MIN_DSECS; + + if ( bp->er.rst_max_dsecs == 0 ) + bp->er.rst_max_dsecs = ER_DFLT_FW_RST_MAX_DSECS; + + // Trigger Error recovery + bp->er.er_initiate = 1; + } +} + +void bnxt_link_speed_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ) +{ + if ( evt->event_data1 & ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_FORCE ) { + DBGP ("bnxt_link_speed_evt: event data = %lx\n", + ( evt->event_data1 & ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_MASK )); + } + + if ( bnxt_hwrm_port_phy_qcfg ( bp, QCFG_PHY_ALL ) != STATUS_SUCCESS ) { + return; + } + + bnxt_set_link ( bp ); + dbg_link_info ( bp ); + dbg_link_status ( bp ); +} + +void bnxt_link_speed_chg_evt ( 
struct bnxt *bp, struct hwrm_async_event_cmpl *evt ) +{ + if ( ( evt->event_data1 & ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_SUPPORTED_LINK_SPEEDS_CHANGE ) || + ( evt->event_data1 & ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_ILLEGAL_LINK_SPEED_CFG ) ) { + if ( bnxt_hwrm_port_phy_qcfg ( bp, QCFG_PHY_ALL ) != STATUS_SUCCESS ) { + return; + } + } + + bnxt_set_link ( bp ); + dbg_link_info ( bp ); + dbg_link_status ( bp ); +} + +void bnxt_port_phy_chg_evt ( struct bnxt *bp, struct hwrm_async_event_cmpl *evt ) +{ + if ( ( evt->event_data1 & ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_FEC_CFG_CHANGE ) || + ( evt->event_data1 & ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EEE_CFG_CHANGE ) || + ( evt->event_data1 & ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PAUSE_CFG_CHANGE)) { + if ( bnxt_hwrm_port_phy_qcfg ( bp, QCFG_PHY_ALL ) != STATUS_SUCCESS ) { + return; + } } + + bnxt_set_link ( bp ); + dbg_link_info ( bp ); + dbg_link_status ( bp ); } static void bnxt_service_cq ( struct net_device *dev ) @@ -2226,9 +2596,10 @@ static void bnxt_service_cq ( struct net_device *dev ) u16 old_cid = bp->cq.cons_id; int done = SERVICE_NEXT_CQ_BD; u32 cq_type; + struct hwrm_async_event_cmpl *evt; while ( done == SERVICE_NEXT_CQ_BD ) { - cmp = ( struct cmpl_base * )BD_NOW ( bp->cq.bd_virt, + cmp = ( struct cmpl_base * )BD_NOW ( CQ_DMA_ADDR ( bp ), bp->cq.cons_id, sizeof ( struct cmpl_base ) ); @@ -2253,8 +2624,35 @@ static void bnxt_service_cq ( struct net_device *dev ) ( struct rx_pkt_cmpl * )cmp ); break; case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: - bnxt_link_evt ( bp, - ( struct hwrm_async_event_cmpl * )cmp ); + evt = ( struct hwrm_async_event_cmpl * )cmp; + switch ( evt->event_id ) { + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + bnxt_link_evt ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: + bnxt_link_speed_evt ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: + 
bnxt_link_speed_chg_evt ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: + bnxt_port_phy_chg_evt ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: + bnxt_process_er_event ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: + bnxt_process_reset_notify_event ( bp, + ( struct hwrm_async_event_cmpl * )cmp ); + break; + default: + break; + } bnxt_adv_cq_index ( bp, 1 ); break; default: @@ -2274,13 +2672,15 @@ static void bnxt_service_nq ( struct net_device *dev ) u16 old_cid = bp->nq.cons_id; int done = SERVICE_NEXT_NQ_BD; u32 nq_type; + struct hwrm_async_event_cmpl *evt; if ( ! ( FLAG_TEST ( bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS ) ) ) return; while ( done == SERVICE_NEXT_NQ_BD ) { - nqp = ( struct nq_base * )BD_NOW ( bp->nq.bd_virt, - bp->nq.cons_id, sizeof ( struct nq_base ) ); + nqp = ( struct nq_base * )BD_NOW ( NQ_DMA_ADDR ( bp ), + bp->nq.cons_id, + sizeof ( struct nq_base ) ); if ( ( nqp->v & NQ_CN_V ) ^ bp->nq.completion_bit ) break; nq_type = ( nqp->type & NQ_CN_TYPE_MASK ); @@ -2290,9 +2690,37 @@ static void bnxt_service_nq ( struct net_device *dev ) switch ( nq_type ) { case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: - bnxt_link_evt ( bp, - ( struct hwrm_async_event_cmpl * )nqp ); - /* Fall through */ + evt = ( struct hwrm_async_event_cmpl * )nqp; + switch ( evt->event_id ) { + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: + bnxt_link_evt ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: + bnxt_link_speed_evt ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: + bnxt_link_speed_chg_evt ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE: + bnxt_port_phy_chg_evt ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + break; 
+ case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: + bnxt_process_er_event ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + break; + case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: + bnxt_process_reset_notify_event ( bp, + ( struct hwrm_async_event_cmpl * )nqp ); + break; + default: + break; + } + bnxt_adv_nq_index ( bp, 1 ); + break; case NQ_CN_TYPE_CQ_NOTIFICATION: bnxt_adv_nq_index ( bp, 1 ); break; @@ -2306,11 +2734,40 @@ static void bnxt_service_nq ( struct net_device *dev ) bnxt_db_nq ( bp ); } +static void bnxt_er_task_timer ( struct retry_timer *timer, int over __unused ) +{ + struct bnxt *bp = container_of (timer, struct bnxt, task_timer ); + + /* Restart timer */ + start_timer_fixed ( timer, BNXT_ER_TIMER_INTERVAL ( bp ) ); + if ( bp->er.driver_initiated_recovery ) { + bnxt_er_task ( bp, 1 ); + } +} + +static void bnxt_er_wait_timer ( struct retry_timer *timer, int over __unused ) +{ + struct bnxt *bp = container_of (timer, struct bnxt, wait_timer ); + /* The sole function of this timer is to wait for the specified + * amount of time to complete error recovery phase + */ + stop_timer ( &bp->wait_timer ); + return; +} + static void bnxt_poll ( struct net_device *dev ) { + struct bnxt *bp = dev->priv; + mb ( ); bnxt_service_nq ( dev ); bnxt_service_cq ( dev ); + + if ( bp->er.er_initiate ) { + bnxt_er_task ( bp, 0 ); + bp->er.er_initiate = 0; + } + } static void bnxt_close ( struct net_device *dev ) @@ -2318,15 +2775,12 @@ static void bnxt_close ( struct net_device *dev ) struct bnxt *bp = dev->priv; DBGP ( "%s\n", __func__ ); - bnxt_down_nic (bp); + stop_timer ( &bp->task_timer ); + stop_timer ( &bp->wait_timer ); - /* iounmap PCI BAR ( s ) */ - bnxt_down_pci(bp); + bnxt_down_nic (bp); - /* Get Bar Address */ - bp->bar0 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_0 ); - bp->bar1 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_2 ); - bp->bar2 = bnxt_pci_base ( bp->pdev, PCI_BASE_ADDRESS_4 ); + bnxt_free_rings_mem ( bp ); } @@ -2367,37 +2821,42 @@ static int 
bnxt_init_one ( struct pci_device *pci ) bp->dev = netdev; netdev->dev = &pci->dev; + timer_init ( &bp->task_timer, bnxt_er_task_timer, &netdev->refcnt ); + timer_init ( &bp->wait_timer, bnxt_er_wait_timer, &netdev->refcnt ); + + /* Configure DMA */ + bp->dma = &pci->dma; + netdev->dma = bp->dma; + /* Enable PCI device */ adjust_pci_device ( pci ); /* Get PCI Information */ bnxt_get_pci_info ( bp ); - /* Allocate and Initialise device specific parameters */ - if ( bnxt_alloc_mem ( bp ) != 0 ) { - DBGP ( "- %s ( ): bnxt_alloc_mem Failed\n", __func__ ); - goto err_down_pci; - } + /* Allocate HWRM memory */ + bnxt_alloc_hwrm_mem ( bp ); - /* Get device specific information */ - if ( bnxt_up_chip ( bp ) != 0 ) { - DBGP ( "- %s ( ): bnxt_up_chip Failed\n", __func__ ); + bp->link_status = STATUS_LINK_DOWN; + bp->wait_link_timeout = LINK_DEFAULT_TIMEOUT; + if ( bnxt_up_init ( bp ) != 0 ) { goto err_down_chip; } /* Register Network device */ - if ( register_netdev ( netdev ) != 0 ) { + if ( ( err = register_netdev ( netdev ) ) != 0 ) { DBGP ( "- %s ( ): register_netdev Failed\n", __func__ ); goto err_down_chip; } + /* Set Initial Link State */ + bnxt_set_link ( bp ); + return 0; -err_down_chip: - bnxt_down_chip (bp); - bnxt_free_mem ( bp ); + unregister_netdev ( netdev ); -err_down_pci: +err_down_chip: bnxt_down_pci ( bp ); netdev_nullify ( netdev ); netdev_put ( netdev ); @@ -2416,11 +2875,8 @@ static void bnxt_remove_one ( struct pci_device *pci ) /* Unregister network device */ unregister_netdev ( netdev ); - /* Bring down Chip */ - bnxt_down_chip(bp); - - /* Free Allocated resource */ - bnxt_free_mem ( bp ); + /* Free HWRM buffers */ + bnxt_free_hwrm_mem ( bp ); /* iounmap PCI BAR ( s ) */ bnxt_down_pci ( bp ); @@ -2430,6 +2886,7 @@ static void bnxt_remove_one ( struct pci_device *pci ) /* Drop refernce to network device */ netdev_put ( netdev ); + DBGP ( "%s - Done\n", __func__ ); } /* Broadcom NXE PCI driver */ diff --git a/src/drivers/net/bnxt/bnxt.h 
b/src/drivers/net/bnxt/bnxt.h index 8c8a33282..53d9fa733 100644 --- a/src/drivers/net/bnxt/bnxt.h +++ b/src/drivers/net/bnxt/bnxt.h @@ -25,13 +25,6 @@ #define __be32 u32 #define __be64 u64 -#define dma_addr_t unsigned long - -union dma_addr64_t { - dma_addr_t addr; - u64 as_u64; -}; - #include "bnxt_hsi.h" #define DRV_MODULE_NAME "bnxt" @@ -152,8 +145,8 @@ union dma_addr64_t { #define DEFAULT_NUMBER_OF_STAT_CTXS 0x01 #define NUM_RX_BUFFERS 8 #define MAX_RX_DESC_CNT 16 -#define MAX_TX_DESC_CNT 16 -#define MAX_CQ_DESC_CNT 64 +#define MAX_TX_DESC_CNT 64 +#define MAX_CQ_DESC_CNT 128 #define TX_RING_BUFFER_SIZE (MAX_TX_DESC_CNT * sizeof(struct tx_bd_short)) #define RX_RING_BUFFER_SIZE \ (MAX_RX_DESC_CNT * sizeof(struct rx_prod_pkt_bd)) @@ -178,11 +171,17 @@ union dma_addr64_t { RX_MASK_ACCEPT_MULTICAST) #define MAX_NQ_DESC_CNT 64 #define NQ_RING_BUFFER_SIZE (MAX_NQ_DESC_CNT * sizeof(struct cmpl_base)) -#define TX_RING_QID (FLAG_TEST(bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS) ? (u16)bp->queue_id : ((u16)bp->port_idx * 10)) #define RX_RING_QID (FLAG_TEST(bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS) ? bp->queue_id : 0) #define STAT_CTX_ID ((bp->vf || FLAG_TEST(bp->flags, BNXT_FLAG_IS_CHIP_P5_PLUS)) ? 
bp->stat_ctx_id : 0) #define TX_AVAIL(r) (r - 1) #define TX_IN_USE(a, b, c) ((a - b) & (c - 1)) +#define NQ_DMA_ADDR(bp) ( dma ( &bp->nq_mapping, bp->nq.bd_virt ) ) +#define CQ_DMA_ADDR(bp) ( dma ( &bp->cq_mapping, bp->cq.bd_virt ) ) +#define TX_DMA_ADDR(bp) ( dma ( &bp->tx_mapping, bp->tx.bd_virt ) ) +#define RX_DMA_ADDR(bp) ( dma ( &bp->rx_mapping, bp->rx.bd_virt ) ) +#define REQ_DMA_ADDR(bp) ( dma ( &bp->req_mapping, bp->hwrm_addr_req ) ) +#define RESP_DMA_ADDR(bp) ( dma ( &bp->resp_mapping, bp->hwrm_addr_resp ) ) +#define DMA_DMA_ADDR(bp) ( dma ( &bp->dma_mapped, bp->hwrm_addr_dma ) ) #define NO_MORE_NQ_BD_TO_SERVICE 1 #define SERVICE_NEXT_NQ_BD 0 #define NO_MORE_CQ_BD_TO_SERVICE 1 @@ -473,7 +472,7 @@ struct tx_bd_short { #define TX_BD_SHORT_FLAGS_COAL_NOW 0x8000UL u16 len; u32 opaque; - union dma_addr64_t dma; + physaddr_t dma; }; struct tx_cmpl { @@ -880,7 +879,7 @@ struct rx_prod_pkt_bd { #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 u16 len; u32 opaque; - union dma_addr64_t dma; + physaddr_t dma; }; struct rx_info { @@ -895,7 +894,6 @@ struct rx_info { u32 good; u32 drop_err; u32 drop_lb; - u32 drop_vlan; u8 epoch; u8 res[3]; }; @@ -911,6 +909,45 @@ struct rx_info { #define VALID_L2_FILTER 0x0100 #define VALID_RING_NQ 0x0200 +struct lm_error_recovery +{ + __le32 flags; + __le32 drv_poll_freq; + __le32 master_wait_period; + __le32 normal_wait_period; + __le32 master_wait_post_rst; + __le32 max_bailout_post_rst; + __le32 fw_status_reg; + __le32 fw_hb_reg; + __le32 fw_rst_cnt_reg; + __le32 rst_inprg_reg; + __le32 rst_inprg_reg_mask; + __le32 rst_reg[16]; + __le32 rst_reg_val[16]; + u8 delay_after_rst[16]; + __le32 recvry_cnt_reg; + + __le32 last_fw_hb; + __le32 last_fw_rst_cnt; + __le32 fw_health_status; + __le32 err_recovery_cnt; + __le32 rst_in_progress; + __le16 rst_max_dsecs; + + u8 master_pf; + u8 error_recvry_supported; + u8 driver_initiated_recovery; + u8 er_rst_on; + +#define ER_DFLT_FW_RST_MIN_DSECS 20 +#define ER_DFLT_FW_RST_MAX_DSECS 60 +#define 
FW_STATUS_REG_CODE_READY 0x8000UL + u8 rst_min_dsecs; + u8 reg_array_cnt; + u8 er_initiate; + u8 rsvd[3]; +}; + struct bnxt { /* begin "general, frequently-used members" cacheline section */ /* If the IRQ handler (which runs lockless) needs to be @@ -934,13 +971,22 @@ struct bnxt { void *hwrm_addr_req; void *hwrm_addr_resp; void *hwrm_addr_dma; - dma_addr_t req_addr_mapping; - dma_addr_t resp_addr_mapping; - dma_addr_t dma_addr_mapping; + struct dma_device *dma; + struct dma_mapping req_mapping; + struct dma_mapping resp_mapping; + struct dma_mapping dma_mapped; + struct dma_mapping tx_mapping; + struct dma_mapping rx_mapping; + struct dma_mapping cq_mapping; + struct dma_mapping nq_mapping; + struct tx_info tx; /* Tx info. */ struct rx_info rx; /* Rx info. */ struct cmp_info cq; /* completion info. */ struct nq_info nq; /* completion info. */ + struct lm_error_recovery er; /* error recovery. */ + struct retry_timer task_timer; + struct retry_timer wait_timer; u16 nq_ring_id; u8 queue_id; u16 last_resp_code; @@ -987,7 +1033,7 @@ struct bnxt { u16 auto_link_speeds2_mask; u32 link_set; u8 media_detect; - u8 rsvd; + u8 err_rcvry_supported; u16 max_vfs; u16 vf_res_strategy; u16 min_vnics; @@ -1015,12 +1061,6 @@ struct bnxt { }; /* defines required to rsolve checkpatch errors / warnings */ -#define test_if if -#define write32 writel -#define write64 writeq -#define pci_read_byte pci_read_config_byte -#define pci_read_word16 pci_read_config_word -#define pci_write_word pci_write_config_word #define SHORT_CMD_SUPPORTED VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED #define SHORT_CMD_REQUIRED VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED #define CQ_DOORBELL_KEY_MASK(a) (\ @@ -1066,3 +1106,5 @@ struct bnxt { #define CHIP_NUM_57502 0x1752 #define CHIP_NUM_57608 0x1760 +#define BNXT_ER_TIMER_INTERVAL(x) ( TICKS_PER_SEC * ( (x)->er.drv_poll_freq ) ) +#define BNXT_ER_WAIT_TIMER_INTERVAL(x) ( TICKS_PER_SEC * ( ( (x)->er.normal_wait_period / 10 ) ) ) diff --git 
a/src/drivers/net/bnxt/bnxt_dbg.h b/src/drivers/net/bnxt/bnxt_dbg.h index 145402818..2cda84488 100644 --- a/src/drivers/net/bnxt/bnxt_dbg.h +++ b/src/drivers/net/bnxt/bnxt_dbg.h @@ -197,7 +197,7 @@ void dbg_fw_ver(struct hwrm_ver_get_output *resp, u32 tmo) (u32)(resp->chip_metal << 16) | (u32)(resp->chip_bond_id << 8) | (u32)resp->chip_platform_type); - test_if((resp->dev_caps_cfg & SHORT_CMD_SUPPORTED) && + if((resp->dev_caps_cfg & SHORT_CMD_SUPPORTED) && (resp->dev_caps_cfg & SHORT_CMD_REQUIRED)) dbg_prn(" SHORT_CMD_SUPPORTED\n"); } @@ -418,13 +418,6 @@ void dump_rx_bd(struct rx_pkt_cmpl *rx_cmp, #endif } -void dbg_rx_vlan(struct bnxt *bp, u32 meta, u16 f2, u16 rx_vid) -{ - dbg_prn(" Rx VLAN metadata %x flags2 %x\n", meta, f2); - dbg_prn(" Rx VLAN MBA %d TX %d RX %d\n", - bp->vlan_id, bp->vlan_tx, rx_vid); -} - void dbg_alloc_rx_iob(struct io_buffer *iob, u16 id, u16 cid) { dbg_prn(" Rx alloc_iob (%d) %p bd_virt (%d)\n", @@ -460,13 +453,12 @@ void dbg_rxp(u8 *iob, u16 rx_len, u8 drop) void dbg_rx_stat(struct bnxt *bp) { - dbg_prn("- RX Stat Total %d Good %d Drop err %d LB %d VLAN %d\n", + dbg_prn("- RX Stat Total %d Good %d Drop err %d LB %d\n", bp->rx.cnt, bp->rx.good, - bp->rx.drop_err, bp->rx.drop_lb, bp->rx.drop_vlan); + bp->rx.drop_err, bp->rx.drop_lb); } #else #define dump_rx_bd(rx_cmp, rx_cmp_hi, desc_idx) -#define dbg_rx_vlan(bp, metadata, flags2, rx_vid) #define dbg_alloc_rx_iob(iob, id, cid) #define dbg_rx_cid(idx, cid) #define dbg_alloc_rx_iob_fail(iob_idx, cons_id) @@ -537,12 +529,6 @@ void dbg_tx_vlan(struct bnxt *bp, char *src, u16 plen, u16 len) dbg_prn(" old len %d new len %d\n", plen, len); } -void dbg_tx_pad(u16 plen, u16 len) -{ - if (len != plen) - dbg_prn("- Tx padded(0) old len %d new len %d\n", plen, len); -} - void dump_tx_stat(struct bnxt *bp) { dbg_prn(" TX stats cnt %d req_cnt %d", bp->tx.cnt, bp->tx.cnt_req); @@ -673,5 +659,6 @@ void dbg_link_state(struct bnxt *bp, u32 tmo) #else #define dump_evt(cq, ty, id, ring) #define 
dbg_link_status(bp) +#define dbg_link_info(bp) #define dbg_link_state(bp, tmo) #endif diff --git a/src/drivers/net/bnxt/bnxt_hsi.h b/src/drivers/net/bnxt/bnxt_hsi.h index dbcffd909..77706504c 100644 --- a/src/drivers/net/bnxt/bnxt_hsi.h +++ b/src/drivers/net/bnxt/bnxt_hsi.h @@ -96,6 +96,7 @@ struct hwrm_short_input { struct cmd_nums { __le16 req_type; #define HWRM_VER_GET 0x0UL + #define HWRM_ER_QCFG 0xcUL #define HWRM_FUNC_DRV_IF_CHANGE 0xdUL #define HWRM_FUNC_BUF_UNRGTR 0xeUL #define HWRM_FUNC_VF_CFG 0xfUL @@ -559,6 +560,7 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE 0x6UL #define ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE 0x7UL #define ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY 0x9UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD 0x10UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD 0x11UL #define ASYNC_EVENT_CMPL_EVENT_ID_FUNC_FLR_PROC_CMPLT 0x12UL @@ -583,6 +585,10 @@ struct hwrm_async_event_cmpl { u8 timestamp_lo; __le16 timestamp_hi; __le32 event_data1; + #define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_MASTER_FUNC 0x1UL + #define ASYNC_EVENT_CMPL_ER_EVENT_DATA1_RECOVERY_ENABLED 0x2UL + #define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_EVENT_DATA1_REASON_CODE_MASK 0xff00UL }; /* hwrm_async_event_cmpl_link_status_change (size:128b/16B) */ @@ -655,22 +661,22 @@ struct hwrm_async_event_cmpl_link_speed_change { u8 timestamp_lo; __le16 timestamp_hi; __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_FORCE 0x1UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_MASK 0xfffeUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_SFT 1 - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) - #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_FORCE 0x1UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_MASK 0xfffeUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_SFT 1 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_100MB (0x1UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_1GB (0xaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_2GB (0x14UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_2_5GB (0x19UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_10GB (0x64UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_20GB (0xc8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1) + #define 
ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1) + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_LAST ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_NEW_LINK_SPEED_100MBPS_100GB + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_PORT_ID_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_PORT_ID_SFT 16 }; /* hwrm_async_event_cmpl_dcb_config_change (size:128b/16B) */ @@ -775,10 +781,10 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change { u8 timestamp_lo; __le16 timestamp_hi; __le32 event_data1; - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL - #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_PORT_ID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; /* hwrm_async_event_cmpl_port_phy_cfg_change (size:128b/16B) */ @@ -799,11 +805,11 @@ struct hwrm_async_event_cmpl_port_phy_cfg_change { u8 timestamp_lo; __le16 timestamp_hi; __le32 event_data1; - #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL - #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0 - #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_FEC_CFG_CHANGE 0x10000UL - #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_EEE_CFG_CHANGE 0x20000UL - #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EVENT_DATA1_PAUSE_CFG_CHANGE 0x40000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PORT_ID_MASK 0xffffUL + #define 
ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PORT_ID_SFT 0 + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_FEC_CFG_CHANGE 0x10000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_EEE_CFG_CHANGE 0x20000UL + #define ASYNC_EVENT_CMPL_PORT_PHY_CFG_CHANGE_PAUSE_CFG_CHANGE 0x40000UL }; /* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ @@ -1357,6 +1363,7 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL + #define FUNC_QCAPS_OUTPUT_FLAGS_ERROR_RECOVERY_CAPABLE 0x800000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -1718,6 +1725,99 @@ struct hwrm_func_vf_resc_free_output { u8 valid; }; +/* hwrm_error_recovery_input (size:192b/24B) */ +struct hwrm_error_recovery_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + u8 unused_0[8]; +}; + +/* hwrm_error_recovery_qcfg_output (size:1664b/208B) */ +struct hwrm_error_recovery_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define ER_QCFG_FLAGS_HOST 0x1UL + #define ER_QCFG_FLAGS_CO_CPU 0x2UL + __le32 driver_polling_freq; + __le32 master_wait_period; + __le32 normal_wait_period; + __le32 master_wait_post_reset; + __le32 max_bailout_time; + __le32 fw_health_status_reg; + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_MASK 0x3UL + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_SFT 0 + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_GRC 0x1UL + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR0 0x2UL + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1 0x3UL + #define ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_LAST ER_QCFG_FW_HEALTH_REG_ADDR_SPACE_BAR1 + #define ER_QCFG_FW_HEALTH_REG_ADDR_MASK 0xfffffffcUL + #define ER_QCFG_FW_HEALTH_REG_ADDR_SFT 2 + __le32 fw_heartbeat_reg; + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_MASK 
0x3UL + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_SFT 0 + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_GRC 0x1UL + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_BAR0 0x2UL + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_BAR1 0x3UL + #define ER_QCFG_FW_HB_REG_ADDR_SPACE_LAST ER_QCFG_FW_HB_REG_ADDR_SPACE_BAR1 + #define ER_QCFG_FW_HB_REG_ADDR_MASK 0xfffffffcUL + #define ER_QCFG_FW_HB_REG_ADDR_SFT 2 + __le32 fw_reset_cnt_reg; + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_SFT 0 + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_LAST ER_QCFG_FW_RESET_CNT_REG_ADDR_SPACE_BAR1 + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ER_QCFG_FW_RESET_CNT_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg; + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_MASK 0x3UL + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_SFT 0 + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_GRC 0x1UL + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_BAR0 0x2UL + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_BAR1 0x3UL + #define ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_LAST ER_QCFG_RESET_INPRG_REG_ADDR_SPACE_BAR1 + #define ER_QCFG_RESET_INPRG_REG_ADDR_MASK 0xfffffffcUL + #define ER_QCFG_RESET_INPRG_REG_ADDR_SFT 2 + __le32 reset_inprogress_reg_mask; + u8 unused_0[3]; + u8 reg_array_cnt; + __le32 reset_reg[16]; + #define ER_QCFG_RESET_REG_ADDR_SPACE_MASK 0x3UL + #define ER_QCFG_RESET_REG_ADDR_SPACE_SFT 0 + #define ER_QCFG_RESET_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ER_QCFG_RESET_REG_ADDR_SPACE_GRC 0x1UL + #define ER_QCFG_RESET_REG_ADDR_SPACE_BAR0 0x2UL + #define ER_QCFG_RESET_REG_ADDR_SPACE_BAR1 0x3UL + #define ER_QCFG_RESET_REG_ADDR_SPACE_LAST 
ER_QCFG_RESET_REG_ADDR_SPACE_BAR1 + #define ER_QCFG_RESET_REG_ADDR_MASK 0xfffffffcUL + #define ER_QCFG_RESET_REG_ADDR_SFT 2 + __le32 reset_reg_val[16]; + u8 delay_after_reset[16]; + __le32 err_recovery_cnt_reg; + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_MASK 0x3UL + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_SFT 0 + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_PCIE_CFG 0x0UL + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_GRC 0x1UL + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_BAR0 0x2UL + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_BAR1 0x3UL + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_LAST ER_QCFG_RCVRY_CNT_REG_ADDR_SPACE_BAR1 + #define ER_QCFG_RCVRY_CNT_REG_ADDR_MASK 0xfffffffcUL + #define ER_QCFG_RCVRY_CNT_REG_ADDR_SFT 2 + u8 unused_1[3]; + u8 valid; +}; + /* hwrm_func_drv_rgtr_input (size:896b/112B) */ struct hwrm_func_drv_rgtr_input { __le16 req_type; @@ -1731,6 +1831,8 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL #define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL + #define FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT 0x20UL + #define FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT 0x40UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL diff --git a/src/drivers/net/cgem.c b/src/drivers/net/cgem.c new file mode 100644 index 000000000..c935c8024 --- /dev/null +++ b/src/drivers/net/cgem.c @@ -0,0 +1,711 @@ +/* + * Copyright (C) 2025 Michael Brown <mbrown@fensystems.co.uk>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <errno.h> +#include <byteswap.h> +#include <ipxe/netdevice.h> +#include <ipxe/ethernet.h> +#include <ipxe/if_ether.h> +#include <ipxe/iobuf.h> +#include <ipxe/timer.h> +#include <ipxe/devtree.h> +#include <ipxe/fdt.h> +#include "cgem.h" + +/** @file + * + * Cadence Gigabit Ethernet MAC (GEM) network driver + * + * Based primarily on the Zynq 7000 SoC Technical Reference Manual, + * available at the time of writing from: + * + * https://docs.amd.com/r/en-US/ug585-zynq-7000-SoC-TRM + * + */ + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v cgem Cadence GEM device + * @ret rc Return status code + */ +static int cgem_reset ( struct cgem_nic *cgem ) { + + /* There is no software-driven reset capability in the + * hardware. Instead we have to write the expected reset + * values to the various registers. 
+ */ + + /* Disable all interrupts */ + writel ( CGEM_IDR_ALL, ( cgem->regs + CGEM_IDR ) ); + + /* Clear network control register */ + writel ( 0, ( cgem->regs + CGEM_NWCTRL ) ); + + /* Clear statistics registers now that TX and RX are stopped */ + writel ( CGEM_NWCTRL_STATCLR, ( cgem->regs + CGEM_NWCTRL ) ); + + /* Clear TX queue base address */ + writel ( 0, ( cgem->regs + CGEM_TXQBASE ) ); + + /* Clear RX queue base address */ + writel ( 0, ( cgem->regs + CGEM_RXQBASE ) ); + + /* Configure DMA */ + writel ( ( CGEM_DMACR_RXBUF ( CGEM_RX_LEN ) | CGEM_DMACR_TXSIZE_MAX | + CGEM_DMACR_RXSIZE_MAX | CGEM_DMACR_BLENGTH_MAX ), + ( cgem->regs + CGEM_DMACR ) ); + + /* Enable MII interface */ + writel ( CGEM_NWCTRL_MDEN, ( cgem->regs + CGEM_NWCTRL ) ); + + return 0; +} + +/****************************************************************************** + * + * PHY access + * + ****************************************************************************** + */ + +/** + * Wait for MII operation to complete + * + * @v cgem Cadence GEM device + * @ret rc Return status code + */ +static int cgem_mii_wait ( struct cgem_nic *cgem ) { + uint32_t nwsr; + unsigned int i; + + /* Wait for MII interface to become idle */ + for ( i = 0 ; i < CGEM_MII_MAX_WAIT_US ; i++ ) { + + /* Check if MII interface is idle */ + nwsr = readl ( cgem->regs + CGEM_NWSR ); + if ( nwsr & CGEM_NWSR_MII_IDLE ) + return 0; + + /* Delay */ + udelay ( 1 ); + } + + DBGC ( cgem, "CGEM %s timed out waiting for MII\n", cgem->name ); + return -ETIMEDOUT; +} + +/** + * Read from MII register + * + * @v mdio MII interface + * @v phy PHY address + * @v reg Register address + * @ret data Data read, or negative error + */ +static int cgem_mii_read ( struct mii_interface *mdio, unsigned int phy, + unsigned int reg ) { + struct cgem_nic *cgem = container_of ( mdio, struct cgem_nic, mdio ); + unsigned int data; + int rc; + + /* Initiate read */ + writel ( ( CGEM_PHYMNTNC_CLAUSE22 | CGEM_PHYMNTNC_OP_READ | + CGEM_PHYMNTNC_ADDR 
( phy ) | CGEM_PHYMNTNC_REG ( reg ) | + CGEM_PHYMNTNC_FIXED ), + ( cgem->regs + CGEM_PHYMNTNC ) ); + + /* Wait for read to complete */ + if ( ( rc = cgem_mii_wait ( cgem ) ) != 0 ) + return rc; + + /* Read data */ + data = ( readl ( cgem->regs + CGEM_PHYMNTNC ) & + CGEM_PHYMNTNC_DATA_MASK ); + + return data; +} + +/** + * Write to MII register + * + * @v mdio MII interface + * @v phy PHY address + * @v reg Register address + * @v data Data to write + * @ret rc Return status code + */ +static int cgem_mii_write ( struct mii_interface *mdio, unsigned int phy, + unsigned int reg, unsigned int data ) { + struct cgem_nic *cgem = container_of ( mdio, struct cgem_nic, mdio ); + int rc; + + /* Initiate write */ + writel ( ( CGEM_PHYMNTNC_CLAUSE22 | CGEM_PHYMNTNC_OP_READ | + CGEM_PHYMNTNC_ADDR ( phy ) | CGEM_PHYMNTNC_REG ( reg ) | + CGEM_PHYMNTNC_FIXED | data ), + ( cgem->regs + CGEM_PHYMNTNC ) ); + + /* Wait for write to complete */ + if ( ( rc = cgem_mii_wait ( cgem ) ) != 0 ) + return rc; + + return 0; +} + +/** MII operations */ +static struct mii_operations cgem_mii_operations = { + .read = cgem_mii_read, + .write = cgem_mii_write, +}; + +/****************************************************************************** + * + * Link state + * + ****************************************************************************** + */ + +/** + * Initialise PHY + * + * @v cgem Cadence GEM device + * @ret rc Return status code + */ +static int cgem_init_phy ( struct cgem_nic *cgem ) { + int rc; + + /* Find PHY address */ + if ( ( rc = mii_find ( &cgem->mii ) ) != 0 ) { + DBGC ( cgem, "CGEM %s could not find PHY address: %s\n", + cgem->name, strerror ( rc ) ); + return rc; + } + + /* Reset PHY */ + if ( ( rc = mii_reset ( &cgem->mii ) ) != 0 ) { + DBGC ( cgem, "CGEM %s could not reset PHY: %s\n", + cgem->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Check link state + * + * @v netdev Network device + */ +static int cgem_check_link ( struct net_device *netdev 
) { + struct cgem_nic *cgem = netdev->priv; + int rc; + + /* Check link state */ + if ( ( rc = mii_check_link ( &cgem->mii, netdev ) ) != 0 ) { + DBGC ( cgem, "CGEM %s could not check link: %s\n", + cgem->name, strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** + * Check link state periodically + * + * @v retry Link state check timer + * @v over Failure indicator + */ +static void cgem_expired ( struct retry_timer *timer, int over __unused ) { + struct cgem_nic *cgem = container_of ( timer, struct cgem_nic, timer ); + struct net_device *netdev = cgem->netdev; + + /* Restart timer */ + start_timer_fixed ( timer, CGEM_LINK_INTERVAL ); + + /* Check link state */ + cgem_check_link ( netdev ); +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Create descriptor ring + * + * @v cgem Cadence GEM device + * @v ring Descriptor ring + * @ret rc Return status code + */ +static int cgem_create_ring ( struct cgem_nic *cgem, struct cgem_ring *ring ) { + struct cgem_descriptor *desc; + unsigned int i; + + /* Allocate descriptor ring (on its own size) */ + ring->desc = dma_alloc ( cgem->dma, &ring->map, ring->len, ring->len ); + if ( ! 
ring->desc ) + return -ENOMEM; + + /* Initialise descriptor ring */ + for ( i = 0 ; i < ring->count ; i++ ) { + desc = &ring->desc[i]; + desc->addr = cpu_to_le32 ( CGEM_RX_ADDR_OWNED ); + desc->flags = cpu_to_le32 ( CGEM_TX_FL_OWNED ); + } + desc = &ring->desc[ ring->count - 1 ]; + desc->addr |= cpu_to_le32 ( CGEM_RX_ADDR_WRAP ); + desc->flags |= cpu_to_le32 ( CGEM_TX_FL_WRAP ); + + /* Program ring address */ + writel ( dma ( &ring->map, ring->desc ), + ( cgem->regs + ring->qbase ) ); + + DBGC ( cgem, "CGEM %s ring %02x is at [%08lx,%08lx)\n", + cgem->name, ring->qbase, virt_to_phys ( ring->desc ), + ( virt_to_phys ( ring->desc ) + ring->len ) ); + return 0; +} + +/** + * Destroy descriptor ring + * + * @v cgem Cadence GEM device + * @v ring Descriptor ring + */ +static void cgem_destroy_ring ( struct cgem_nic *cgem, + struct cgem_ring *ring ) { + + /* Clear ring address */ + writel ( 0, ( cgem->regs + ring->qbase ) ); + + /* Free descriptor ring */ + dma_free ( &ring->map, ring->desc, ring->len ); + ring->desc = NULL; + ring->prod = 0; + ring->cons = 0; +} + +/** + * Refill receive descriptor ring + * + * @v cgem Cadence GEM device + */ +static void cgem_refill_rx ( struct cgem_nic *cgem ) { + struct cgem_descriptor *rx; + struct io_buffer *iobuf; + unsigned int rx_idx; + uint32_t addr; + + /* Refill ring */ + while ( ( cgem->rx.prod - cgem->rx.cons ) != CGEM_NUM_RX_DESC ) { + + /* Allocate I/O buffer */ + iobuf = alloc_rx_iob ( CGEM_RX_LEN, cgem->dma ); + if ( ! 
iobuf ) { + /* Wait for next refill */ + break; + } + + /* Get next receive descriptor */ + rx_idx = ( cgem->rx.prod++ % CGEM_NUM_RX_DESC ); + rx = &cgem->rx.desc[rx_idx]; + + /* Populate receive descriptor */ + rx->flags = 0; + addr = 0; + if ( ( cgem->rx.prod % CGEM_NUM_RX_DESC ) == 0 ) + addr |= CGEM_RX_ADDR_WRAP; + rx->addr = cpu_to_le32 ( addr | iob_dma ( iobuf ) ); + + /* Record I/O buffer */ + assert ( cgem->rx_iobuf[rx_idx] == NULL ); + cgem->rx_iobuf[rx_idx] = iobuf; + + DBGC2 ( cgem, "CGEM %s RX %d is [%08lx,%08lx)\n", + cgem->name, rx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + CGEM_RX_LEN ) ); + } +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int cgem_open ( struct net_device *netdev ) { + struct cgem_nic *cgem = netdev->priv; + union cgem_mac mac; + int rc; + + /* Create transmit descriptor ring */ + if ( ( rc = cgem_create_ring ( cgem, &cgem->tx ) ) != 0 ) + goto err_create_tx; + + /* Create receive descriptor ring */ + if ( ( rc = cgem_create_ring ( cgem, &cgem->rx ) ) != 0 ) + goto err_create_rx; + + /* Set MAC address */ + memcpy ( mac.raw, netdev->ll_addr, ETH_ALEN ); + writel ( mac.reg.low, ( cgem->regs + CGEM_LADDRL ) ); + writel ( mac.reg.high, ( cgem->regs + CGEM_LADDRH ) ); + + /* Enable transmit and receive */ + writel ( CGEM_NWCTRL_NORMAL, ( cgem->regs + CGEM_NWCTRL ) ); + + /* Refill receive descriptor ring */ + cgem_refill_rx ( cgem ); + + /* Update link state */ + cgem_check_link ( netdev ); + + /* Start link state timer */ + start_timer_fixed ( &cgem->timer, CGEM_LINK_INTERVAL ); + + return 0; + + cgem_destroy_ring ( cgem, &cgem->rx ); + err_create_rx: + cgem_destroy_ring ( cgem, &cgem->tx ); + err_create_tx: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void cgem_close ( struct net_device *netdev ) { + struct cgem_nic *cgem = netdev->priv; + unsigned int i; + + /* Stop link state timer */ + 
stop_timer ( &cgem->timer ); + + /* Reset NIC */ + cgem_reset ( cgem ); + + /* Discard unused receive buffers */ + for ( i = 0 ; i < CGEM_NUM_RX_DESC ; i++ ) { + if ( cgem->rx_iobuf[i] ) + free_rx_iob ( cgem->rx_iobuf[i] ); + cgem->rx_iobuf[i] = NULL; + } + + /* Destroy receive descriptor ring */ + cgem_destroy_ring ( cgem, &cgem->rx ); + + /* Destroy transmit descriptor ring */ + cgem_destroy_ring ( cgem, &cgem->tx ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int cgem_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct cgem_nic *cgem = netdev->priv; + struct cgem_descriptor *tx; + unsigned int tx_idx; + uint32_t flags; + int rc; + + /* Get next transmit descriptor */ + if ( ( cgem->tx.prod - cgem->tx.cons ) >= CGEM_NUM_TX_DESC ) { + DBGC ( cgem, "CGEM %s out of transmit descriptors\n", + cgem->name ); + return -ENOBUFS; + } + tx_idx = ( cgem->tx.prod % CGEM_NUM_TX_DESC ); + tx = &cgem->tx.desc[tx_idx]; + + /* Pad to minimum length */ + iob_pad ( iobuf, ETH_ZLEN ); + + /* Map I/O buffer */ + if ( ( rc = iob_map_tx ( iobuf, cgem->dma ) ) != 0 ) + return rc; + + /* Update producer index */ + cgem->tx.prod++; + + /* Populate transmit descriptor */ + flags = CGEM_TX_FL_LAST; + if ( ( cgem->tx.prod % CGEM_NUM_TX_DESC ) == 0 ) + flags |= CGEM_TX_FL_WRAP; + tx->addr = cpu_to_le32 ( iob_dma ( iobuf ) ); + wmb(); + tx->flags = cpu_to_le32 ( flags | iob_len ( iobuf ) ); + wmb(); + + /* Initiate transmission */ + writel ( ( CGEM_NWCTRL_NORMAL | CGEM_NWCTRL_STARTTX ), + ( cgem->regs + CGEM_NWCTRL ) ); + + DBGC2 ( cgem, "CGEM %s TX %d is [%08lx,%08lx)\n", + cgem->name, tx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + iob_len ( iobuf ) ) ); + return 0; +} + +/** + * Poll for completed packets + * + * @V netdev Network device + */ +static void cgem_poll_tx ( struct net_device *netdev ) { + struct cgem_nic *cgem = netdev->priv; + struct 
cgem_descriptor *tx; + unsigned int tx_idx; + + /* Check for completed packets */ + while ( cgem->tx.cons != cgem->tx.prod ) { + + /* Get next transmit descriptor */ + tx_idx = ( cgem->tx.cons % CGEM_NUM_TX_DESC ); + tx = &cgem->tx.desc[tx_idx]; + + /* Stop if descriptor is still owned by hardware */ + if ( ! ( tx->flags & cpu_to_le32 ( CGEM_TX_FL_OWNED ) ) ) + return; + DBGC2 ( cgem, "CGEM %s TX %d complete\n", + cgem->name, tx_idx ); + + /* Complete transmit descriptor */ + netdev_tx_complete_next ( netdev ); + cgem->tx.cons++; + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void cgem_poll_rx ( struct net_device *netdev ) { + struct cgem_nic *cgem = netdev->priv; + struct cgem_descriptor *rx; + struct io_buffer *iobuf; + unsigned int rx_idx; + uint32_t flags; + size_t len; + + /* Check for received packets */ + while ( cgem->rx.cons != cgem->rx.prod ) { + + /* Get next receive descriptor */ + rx_idx = ( cgem->rx.cons % CGEM_NUM_RX_DESC ); + rx = &cgem->rx.desc[rx_idx]; + + /* Stop if descriptor is still in use */ + if ( ! 
( rx->addr & cpu_to_le32 ( CGEM_RX_ADDR_OWNED ) ) ) + return; + + /* Populate I/O buffer */ + iobuf = cgem->rx_iobuf[rx_idx]; + cgem->rx_iobuf[rx_idx] = NULL; + flags = le32_to_cpu ( rx->flags ); + len = CGEM_RX_FL_LEN ( flags ); + iob_put ( iobuf, len ); + DBGC2 ( cgem, "CGEM %s RX %d complete (length %zd)\n", + cgem->name, rx_idx, len ); + + /* Hand off to network stack */ + netdev_rx ( netdev, iobuf ); + cgem->rx.cons++; + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void cgem_poll ( struct net_device *netdev ) { + struct cgem_nic *cgem = netdev->priv; + + /* Poll for TX competions */ + cgem_poll_tx ( netdev ); + + /* Poll for RX completions */ + cgem_poll_rx ( netdev ); + + /* Refill RX ring */ + cgem_refill_rx ( cgem ); +} + +/** Cadence GEM network device operations */ +static struct net_device_operations cgem_operations = { + .open = cgem_open, + .close = cgem_close, + .transmit = cgem_transmit, + .poll = cgem_poll, +}; + +/****************************************************************************** + * + * Devicetree interface + * + ****************************************************************************** + */ + +/** + * Probe devicetree device + * + * @v dt Devicetree device + * @v offset Starting node offset + * @ret rc Return status code + */ +static int cgem_probe ( struct dt_device *dt, unsigned int offset ) { + struct net_device *netdev; + struct cgem_nic *cgem; + union cgem_mac mac; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *cgem ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &cgem_operations ); + cgem = netdev->priv; + dt_set_drvdata ( dt, netdev ); + netdev->dev = &dt->dev; + memset ( cgem, 0, sizeof ( *cgem ) ); + cgem->dma = &dt->dma; + cgem->netdev = netdev; + cgem->name = netdev->dev->name; + mdio_init ( &cgem->mdio, &cgem_mii_operations ); + mii_init ( &cgem->mii, &cgem->mdio, 0 ); + timer_init ( &cgem->timer, cgem_expired, &netdev->refcnt ); + cgem_init_ring ( &cgem->tx, CGEM_NUM_TX_DESC, CGEM_TXQBASE ); + cgem_init_ring ( &cgem->rx, CGEM_NUM_RX_DESC, CGEM_RXQBASE ); + + /* Map registers */ + cgem->regs = dt_ioremap ( dt, offset, CGEM_REG_IDX, CGEM_REG_LEN ); + if ( ! cgem->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + + /* Reset the NIC */ + if ( ( rc = cgem_reset ( cgem ) ) != 0 ) + goto err_reset; + + /* Initialise the PHY */ + if ( ( rc = cgem_init_phy ( cgem ) ) != 0 ) + goto err_init_phy; + + /* Fetch devicetree MAC address */ + if ( ( rc = fdt_mac ( &sysfdt, offset, netdev ) ) != 0 ) { + DBGC ( cgem, "CGEM %s could not fetch MAC: %s\n", + cgem->name, strerror ( rc ) ); + goto err_mac; + } + + /* Fetch current MAC address, if set */ + mac.reg.low = readl ( cgem->regs + CGEM_LADDRL ); + mac.reg.high = readl ( cgem->regs + CGEM_LADDRH ); + memcpy ( netdev->ll_addr, mac.raw, ETH_ALEN ); + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Set initial link state */ + cgem_check_link ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_mac: + err_init_phy: + cgem_reset ( cgem ); + err_reset: + iounmap ( cgem->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove devicetree device + * + * @v dt Devicetree device + */ +static void cgem_remove ( struct dt_device *dt ) { + struct net_device *netdev = dt_get_drvdata ( dt ); + struct cgem_nic *cgem = netdev->priv; + + /* Unregister 
network device */ + unregister_netdev ( netdev ); + + /* Reset card */ + cgem_reset ( cgem ); + + /* Free network device */ + iounmap ( cgem->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** Cadence GEM compatible model identifiers */ +static const char * cgem_ids[] = { + "sifive,fu540-c000-gem", +}; + +/** Cadence GEM devicetree driver */ +struct dt_driver cgem_driver __dt_driver = { + .name = "cgem", + .ids = cgem_ids, + .id_count = ( sizeof ( cgem_ids ) / sizeof ( cgem_ids[0] ) ), + .probe = cgem_probe, + .remove = cgem_remove, +}; diff --git a/src/drivers/net/cgem.h b/src/drivers/net/cgem.h new file mode 100644 index 000000000..c91e3677d --- /dev/null +++ b/src/drivers/net/cgem.h @@ -0,0 +1,189 @@ +#ifndef _CGEM_H +#define _CGEM_H + +/** @file + * + * Cadence Gigabit Ethernet MAC (GEM) network driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <ipxe/if_ether.h> +#include <ipxe/mii.h> +#include <ipxe/dma.h> +#include <ipxe/retry.h> + +/** I/O region index */ +#define CGEM_REG_IDX 0 + +/** I/O region length */ +#define CGEM_REG_LEN 0x800 + +/** Network control register */ +#define CGEM_NWCTRL 0x000 +#define CGEM_NWCTRL_STARTTX 0x00000200 /**< Start transmission */ +#define CGEM_NWCTRL_STATCLR 0x00000020 /**< Clear statistics */ +#define CGEM_NWCTRL_MDEN 0x00000010 /**< MII interface enable */ +#define CGEM_NWCTRL_TXEN 0x00000008 /**< Transmit enable */ +#define CGEM_NWCTRL_RXEN 0x00000004 /**< Receive enable */ + +/** Normal value for network control register while up and running */ +#define CGEM_NWCTRL_NORMAL \ + ( CGEM_NWCTRL_MDEN | CGEM_NWCTRL_TXEN | CGEM_NWCTRL_RXEN ) + +/** Network configuration register */ +#define CGEM_NWCFG 0x004 + +/** Network status register */ +#define CGEM_NWSR 0x008 +#define CGEM_NWSR_MII_IDLE 0x00000004 /**< MII interface is idle */ + +/** DMA configuration register */ +#define CGEM_DMACR 0x010 +#define CGEM_DMACR_RXBUF( x ) ( ( (x) / 64 ) << 16 ) /**< RX buffer size */ +#define 
CGEM_DMACR_TXSIZE( x ) ( (x) << 10 ) /**< TX memory size */ +#define CGEM_DMACR_TXSIZE_MAX \ + CGEM_DMACR_TXSIZE ( 0x1 ) /**< Max TX memory size */ +#define CGEM_DMACR_RXSIZE( x ) ( (x) << 8 ) /**< RX memory size */ +#define CGEM_DMACR_RXSIZE_MAX \ + CGEM_DMACR_RXSIZE ( 0x3 ) /**< Max RX memory size */ +#define CGEM_DMACR_BLENGTH( x ) ( (x) << 0 ) /**< DMA burst length */ +#define CGEM_DMACR_BLENGTH_MAX \ + CGEM_DMACR_BLENGTH ( 0x10 ) /**< Max DMA burst length */ + +/** RX queue base address register */ +#define CGEM_RXQBASE 0x018 + +/** TX queue base address register */ +#define CGEM_TXQBASE 0x01c + +/** Interrupt disable register */ +#define CGEM_IDR 0x02c +#define CGEM_IDR_ALL 0xffffffff /**< Disable all interrupts */ + +/** PHY maintenance register */ +#define CGEM_PHYMNTNC 0x034 +#define CGEM_PHYMNTNC_CLAUSE22 0x40000000 /**< Clause 22 operation */ +#define CGEM_PHYMNTNC_OP_WRITE 0x10000000 /**< Write to PHY register */ +#define CGEM_PHYMNTNC_OP_READ 0x20000000 /**< Read from PHY register */ +#define CGEM_PHYMNTNC_ADDR( x ) ( (x) << 23 ) /**< PHY address */ +#define CGEM_PHYMNTNC_REG( x ) ( (x) << 18 ) /**< Register address */ +#define CGEM_PHYMNTNC_FIXED 0x00020000 /**< Fixed value to write */ +#define CGEM_PHYMNTNC_DATA_MASK 0x0000ffff /**< Data mask */ + +/** Maximum time to wait for PHY access, in microseconds */ +#define CGEM_MII_MAX_WAIT_US 500 + +/** Link state check interval */ +#define CGEM_LINK_INTERVAL ( 2 * TICKS_PER_SEC ) + +/** Local MAC address (low half) register */ +#define CGEM_LADDRL 0x088 + +/** Local MAC address (high half) register */ +#define CGEM_LADDRH 0x08c + +/** A Cadence GEM descriptor */ +struct cgem_descriptor { + /** Buffer address */ + uint32_t addr; + /** Flags */ + uint32_t flags; +} __attribute__ (( packed )); + +/** Transmit flags */ +#define CGEM_TX_FL_OWNED 0x80000000 /**< Owned by software */ +#define CGEM_TX_FL_WRAP 0x40000000 /**< End of descriptor ring */ +#define CGEM_TX_FL_LAST 0x00008000 /**< Last buffer in frame 
*/ + +/** Transmit ring length */ +#define CGEM_NUM_TX_DESC 8 + +/** Receive flags (in buffer address) */ +#define CGEM_RX_ADDR_OWNED 0x00000001 /**< Owned by software */ +#define CGEM_RX_ADDR_WRAP 0x00000002 /**< End of descriptor ring */ + +/** Receive flags */ +#define CGEM_RX_FL_LEN( x ) ( (x) & 0x1fff ) /**< RX packet length */ + +/** Receive ring length */ +#define CGEM_NUM_RX_DESC 8 + +/** Length of receive buffers + * + * Must be a multiple of 64. + */ +#define CGEM_RX_LEN 1536 + +/** A Cadence GEM MAC address */ +union cgem_mac { + struct { + uint32_t low; + uint32_t high; + } __attribute__ (( packed )) reg; + uint8_t raw[ETH_ALEN]; +}; + +/** A Cadence GEM descriptor ring */ +struct cgem_ring { + /** Descriptors */ + struct cgem_descriptor *desc; + /** Descriptor ring DMA mapping */ + struct dma_mapping map; + /** Producer index */ + unsigned int prod; + /** Consumer index */ + unsigned int cons; + + /** Queue base address register */ + uint8_t qbase; + /** Number of descriptors */ + uint8_t count; + /** Length of descriptors */ + uint16_t len; +}; + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v count Number of descriptors + * @v qbase Queue base address register + */ +static inline __attribute__ (( always_inline )) void +cgem_init_ring ( struct cgem_ring *ring, unsigned int count, + unsigned int qbase ) { + + ring->qbase = qbase; + ring->count = count; + ring->len = ( count * sizeof ( ring->desc[0] ) ); +} + +/** A Cadence GEM network card */ +struct cgem_nic { + /** Registers */ + void *regs; + /** DMA device */ + struct dma_device *dma; + /** Network device */ + struct net_device *netdev; + /** Device name (for debugging) */ + const char *name; + + /** PHY interface */ + struct mii_interface mdio; + /** PHY device */ + struct mii_device mii; + /** Link state timer */ + struct retry_timer timer; + + /** Transmit ring */ + struct cgem_ring tx; + /** Receive ring */ + struct cgem_ring rx; + /** Receive I/O buffers */ + struct 
io_buffer *rx_iobuf[CGEM_NUM_RX_DESC]; +}; + +#endif /* _CGEM_H */ diff --git a/src/drivers/net/cs89x0.c b/src/drivers/net/cs89x0.c index 17b7157a1..fc311a18a 100644 --- a/src/drivers/net/cs89x0.c +++ b/src/drivers/net/cs89x0.c @@ -725,7 +725,7 @@ ISA_DRIVER ( cs89x0_driver, cs89x0_probe_addrs, cs89x0_probe_addr, ISAPNP_VENDOR('C','S','C'), 0x0007 ); DRIVER ( "cs89x0", nic_driver, isa_driver, cs89x0_driver, - cs89x0_probe, cs89x0_disable ); + cs89x0_probe, cs89x0_disable, no_fake_bss ); ISA_ROM ( "cs89x0", "Crystal Semiconductor CS89x0" ); diff --git a/src/drivers/net/davicom.c b/src/drivers/net/davicom.c index 0c96796df..2b063ad1b 100644 --- a/src/drivers/net/davicom.c +++ b/src/drivers/net/davicom.c @@ -134,12 +134,13 @@ static unsigned long ioaddr; /* transmit descriptor and buffer */ #define NTXD 2 #define NRXD 4 -struct { +struct davicom_bss { struct txdesc txd[NTXD] __attribute__ ((aligned(4))); unsigned char txb[BUFLEN] __attribute__ ((aligned(4))); struct rxdesc rxd[NRXD] __attribute__ ((aligned(4))); unsigned char rxb[NRXD * BUFLEN] __attribute__ ((aligned(4))); -} davicom_bufs __shared; +}; +#define davicom_bufs NIC_FAKE_BSS ( struct davicom_bss ) #define txd davicom_bufs.txd #define txb davicom_bufs.txb #define rxd davicom_bufs.rxd @@ -159,7 +160,7 @@ static void davicom_reset(struct nic *nic); static void davicom_transmit(struct nic *nic, const char *d, unsigned int t, unsigned int s, const char *p); static int davicom_poll(struct nic *nic, int retrieve); -static void davicom_disable(struct nic *nic); +static void davicom_disable(struct nic *nic, void *hwdev); static void davicom_wait(unsigned int nticks); static int phy_read(int); static void phy_write(int, u16); @@ -601,7 +602,7 @@ static int davicom_poll(struct nic *nic, int retrieve) /*********************************************************************/ /* eth_disable - Disable the interface */ /*********************************************************************/ -static void davicom_disable ( 
struct nic *nic ) { +static void davicom_disable ( struct nic *nic, void *hwdev __unused ) { whereami("davicom_disable\n"); @@ -698,7 +699,7 @@ PCI_ROM(0x1282, 0x9132, "davicom9132", "Davicom 9132", 0), /* Needs probably som PCI_DRIVER ( davicom_driver, davicom_nics, PCI_NO_CLASS ); DRIVER ( "DAVICOM", nic_driver, pci_driver, davicom_driver, - davicom_probe, davicom_disable ); + davicom_probe, davicom_disable, davicom_bufs ); /* * Local variables: diff --git a/src/drivers/net/depca.c b/src/drivers/net/depca.c index 016f28bb2..335928667 100644 --- a/src/drivers/net/depca.c +++ b/src/drivers/net/depca.c @@ -644,7 +644,7 @@ static void depca_transmit( /************************************************************************** DISABLE - Turn off ethernet interface ***************************************************************************/ -static void depca_disable ( struct nic *nic ) { +static void depca_disable ( struct nic *nic, void *hwdev __unused ) { depca_reset(nic); STOP_DEPCA(nic->ioaddr); @@ -789,7 +789,7 @@ ISA_DRIVER ( depca_driver, depca_probe_addrs, depca_probe1, GENERIC_ISAPNP_VENDOR, 0x80f7 ); DRIVER ( "depce", nic_driver, isa_driver, depca_driver, - depca_probe, depca_disable ); + depca_probe, depca_disable, no_fake_bss ); ISA_ROM ( "depca", "Digital DE100 and DE200" ); diff --git a/src/drivers/net/dm96xx.c b/src/drivers/net/dm96xx.c index 61b957be9..193980a40 100644 --- a/src/drivers/net/dm96xx.c +++ b/src/drivers/net/dm96xx.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/dm96xx.h b/src/drivers/net/dm96xx.h index 43a1a4e30..33e404e17 100644 --- a/src/drivers/net/dm96xx.h +++ b/src/drivers/net/dm96xx.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <ipxe/usb.h> #include <ipxe/usbnet.h> diff --git a/src/drivers/net/dmfe.c b/src/drivers/net/dmfe.c index 53b05815b..2ed108e8c 100644 --- 
a/src/drivers/net/dmfe.c +++ b/src/drivers/net/dmfe.c @@ -209,14 +209,15 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control /********************************************** * Descriptor Ring and Buffer defination ***********************************************/ -struct { +struct dmfe_bss { struct tx_desc txd[TX_DESC_CNT] __attribute__ ((aligned(32))); unsigned char txb[TX_BUF_ALLOC * TX_DESC_CNT] __attribute__ ((aligned(32))); struct rx_desc rxd[RX_DESC_CNT] __attribute__ ((aligned(32))); unsigned char rxb[RX_ALLOC_SIZE * RX_DESC_CNT] __attribute__ ((aligned(32))); -} dmfe_bufs __shared; +}; +#define dmfe_bufs NIC_FAKE_BSS ( struct dmfe_bss ) #define txd dmfe_bufs.txd #define txb dmfe_bufs.txb #define rxd dmfe_bufs.rxd @@ -435,7 +436,7 @@ static void dmfe_transmit(struct nic *nic, /************************************************************************** DISABLE - Turn off ethernet interface ***************************************************************************/ -static void dmfe_disable ( struct nic *nic __unused ) { +static void dmfe_disable ( struct nic *nic __unused, void *hwdev __unused ) { /* Reset & stop DM910X board */ outl(DM910X_RESET, BASE + DCR0); udelay(5); @@ -1217,7 +1218,7 @@ static struct pci_device_id dmfe_nics[] = { PCI_DRIVER ( dmfe_driver, dmfe_nics, PCI_NO_CLASS ); DRIVER ( "DMFE/PCI", nic_driver, pci_driver, dmfe_driver, - dmfe_probe, dmfe_disable ); + dmfe_probe, dmfe_disable, dmfe_bufs ); /* * Local variables: diff --git a/src/drivers/net/dwmac.c b/src/drivers/net/dwmac.c new file mode 100644 index 000000000..f581a48a4 --- /dev/null +++ b/src/drivers/net/dwmac.c @@ -0,0 +1,663 @@ +/* + * Copyright (C) 2025 Michael Brown <mbrown@fensystems.co.uk>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <stdint.h> +#include <string.h> +#include <errno.h> +#include <byteswap.h> +#include <ipxe/netdevice.h> +#include <ipxe/ethernet.h> +#include <ipxe/if_ether.h> +#include <ipxe/iobuf.h> +#include <ipxe/timer.h> +#include <ipxe/devtree.h> +#include <ipxe/fdt.h> +#include "dwmac.h" + +/** @file + * + * Synopsys DesignWare MAC network driver + * + */ + +/****************************************************************************** + * + * Debug + * + ****************************************************************************** + */ + +/** + * Dump MAC registers (for debugging) + * + * @v dwmac DesignWare MAC device + */ +static void dwmac_dump_mac ( struct dwmac *dwmac ) { + + /* Do nothing unless debugging is enabled */ + if ( ! 
DBG_LOG ) + return; + + /* Dump MAC registers */ + DBGC ( dwmac, "DWMAC %s ver %08x cfg %08x flt %08x flo %08x\n", + dwmac->name, readl ( dwmac->regs + DWMAC_VER ), + readl ( dwmac->regs + DWMAC_CFG ), + readl ( dwmac->regs + DWMAC_FILTER ), + readl ( dwmac->regs + DWMAC_FLOW ) ); + DBGC ( dwmac, "DWMAC %s isr %08x dbg %08x gmi %08x\n", + dwmac->name, readl ( dwmac->regs + DWMAC_ISR ), + readl ( dwmac->regs + DWMAC_DEBUG ), + readl ( dwmac->regs + DWMAC_GMII ) ); +} + +/** + * Dump DMA registers (for debugging) + * + * @v dwmac DesignWare MAC device + */ +static void dwmac_dump_dma ( struct dwmac *dwmac ) { + uint32_t status; + + /* Do nothing unless debugging is enabled */ + if ( ! DBG_LOG ) + return; + + /* Dump DMA registers */ + status = readl ( dwmac->regs + DWMAC_STATUS ); + DBGC ( dwmac, "DWMAC %s bus %08x fea %08x axi %08x ahb %08x\n", + dwmac->name, readl ( dwmac->regs + DWMAC_BUS ), + readl ( dwmac->regs + DWMAC_FEATURE ), + readl ( dwmac->regs + DWMAC_AXI ), + readl ( dwmac->regs + DWMAC_AHB ) ); + DBGC ( dwmac, "DWMAC %s opm %08x sta %08x drp %08x\n", + dwmac->name, readl ( dwmac->regs + DWMAC_OP ), + status, readl ( dwmac->regs + DWMAC_DROP ) ); + DBGC ( dwmac, "DWMAC %s txb %08x txd %08x txb %08x\n", + dwmac->name, readl ( dwmac->regs + DWMAC_TXBASE ), + readl ( dwmac->regs + DWMAC_TXDESC ), + readl ( dwmac->regs + DWMAC_TXBUF ) ); + DBGC ( dwmac, "DWMAC %s rxb %08x rxd %08x rxb %08x\n", + dwmac->name, readl ( dwmac->regs + DWMAC_RXBASE ), + readl ( dwmac->regs + DWMAC_RXDESC ), + readl ( dwmac->regs + DWMAC_RXBUF ) ); + + /* Clear sticky bits in status register, since nothing else will */ + writel ( status, ( dwmac->regs + DWMAC_STATUS ) ); +} + +/** + * Dump all registers (for debugging) + * + * @v dwmac DesignWare MAC device + */ +static void __attribute__ (( unused )) dwmac_dump ( struct dwmac *dwmac ) { + + /* Dump MAC and DMA registers */ + dwmac_dump_mac ( dwmac ); + dwmac_dump_dma ( dwmac ); +} + 
+/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v dwmac DesignWare MAC device + * @ret rc Return status code + */ +static int dwmac_reset ( struct dwmac *dwmac ) { + unsigned int i; + uint32_t bus; + + /* Trigger software reset */ + writel ( DWMAC_BUS_SWR, ( dwmac->regs + DWMAC_BUS ) ); + + /* Wait for reset to complete */ + for ( i = 0 ; i < DWMAC_RESET_MAX_WAIT_MS ; i++ ) { + + /* Delay */ + mdelay ( 1 ); + + /* Check for reset completion */ + bus = readl ( dwmac->regs + DWMAC_BUS ); + if ( ! ( bus & DWMAC_BUS_SWR ) ) + return 0; + } + + DBGC ( dwmac, "DWMAC %s timed out waiting for reset\n", + dwmac->name ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Link state + * + ****************************************************************************** + */ + +/** + * Check link state + * + * @v netdev Network device + */ +static void dwmac_check_link ( struct net_device *netdev ) { + struct dwmac *dwmac = netdev->priv; + uint32_t gmii; + + /* Read SGMII/RGMII link status */ + gmii = readl ( dwmac->regs + DWMAC_GMII ); + DBGC ( dwmac, "DWMAC %s GMII link status %#08x\n", dwmac->name, gmii ); + + /* Update network device */ + if ( gmii & DWMAC_GMII_LINK ) { + netdev_link_up ( netdev ); + } else { + netdev_link_down ( netdev ); + } +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Create descriptor ring + * + * @v dwmac DesignWare MAC device + * @v ring Descriptor ring + * @ret rc Return status code + */ +static int dwmac_create_ring ( struct dwmac *dwmac, struct dwmac_ring *ring ) { + struct dwmac_descriptor *desc; + struct dwmac_descriptor *next; + 
physaddr_t base; + unsigned int i; + + /* Allocate descriptor ring (on its own size) */ + ring->desc = dma_alloc ( dwmac->dma, &ring->map, ring->len, ring->len ); + if ( ! ring->desc ) + return -ENOMEM; + + /* Initialise descriptor ring */ + memset ( ring->desc, 0, ring->len ); + for ( i = 0 ; i < ring->count ; i++ ) { + desc = &ring->desc[i]; + desc->size = cpu_to_le16 ( DWMAC_RX_LEN | + DWMAC_SIZE_RX_CHAIN ); + desc->ctrl = ring->ctrl; + assert ( desc->ctrl & DWMAC_CTRL_CHAIN ); + next = &ring->desc[ ( i + 1 ) & ( ring->count - 1 ) ]; + desc->next = dma ( &ring->map, next ); + } + wmb(); + + /* Program ring address */ + base = dma ( &ring->map, ring->desc ); + assert ( base == ( ( uint32_t ) base ) ); + writel ( base, ( dwmac->regs + DWMAC_DMA + ring->qbase ) ); + + DBGC ( dwmac, "DWMAC %s ring %02x is at [%08lx,%08lx)\n", + dwmac->name, ring->qbase, virt_to_phys ( ring->desc ), + ( virt_to_phys ( ring->desc ) + ring->len ) ); + return 0; +} + +/** + * Destroy descriptor ring + * + * @v dwmac DesignWare MAC device + * @v ring Descriptor ring + */ +static void dwmac_destroy_ring ( struct dwmac *dwmac, + struct dwmac_ring *ring ) { + + /* Clear ring address */ + writel ( 0, ( dwmac->regs + DWMAC_DMA + ring->qbase ) ); + + /* Free descriptor ring */ + dma_free ( &ring->map, ring->desc, ring->len ); + ring->desc = NULL; + ring->prod = 0; + ring->cons = 0; +} + +/** + * Refill receive descriptor ring + * + * @v dwmac DesignWare MAC device + */ +static void dwmac_refill_rx ( struct dwmac *dwmac ) { + struct dwmac_descriptor *rx; + struct io_buffer *iobuf; + unsigned int rx_idx; + unsigned int refilled = 0; + + /* Refill ring */ + while ( ( dwmac->rx.prod - dwmac->rx.cons ) != DWMAC_NUM_RX_DESC ) { + + /* Allocate I/O buffer */ + iobuf = alloc_rx_iob ( DWMAC_RX_LEN, dwmac->dma ); + if ( ! 
iobuf ) { + /* Wait for next refill */ + break; + } + + /* Get next receive descriptor */ + rx_idx = ( dwmac->rx.prod++ % DWMAC_NUM_RX_DESC ); + rx = &dwmac->rx.desc[rx_idx]; + + /* Populate receive descriptor */ + rx->addr = cpu_to_le32 ( iob_dma ( iobuf ) ); + wmb(); + rx->stat = cpu_to_le32 ( DWMAC_STAT_OWN ); + + /* Record I/O buffer */ + assert ( dwmac->rx_iobuf[rx_idx] == NULL ); + dwmac->rx_iobuf[rx_idx] = iobuf; + + DBGC2 ( dwmac, "DWMAC %s RX %d is [%08lx,%08lx)\n", + dwmac->name, rx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + DWMAC_RX_LEN ) ); + refilled++; + } + + /* Trigger poll */ + if ( refilled ) { + wmb(); + writel ( 0, ( dwmac->regs + DWMAC_RXPOLL ) ); + } +} + +/** + * Open network device + * + * @v netdev Network device + * @ret rc Return status code + */ +static int dwmac_open ( struct net_device *netdev ) { + struct dwmac *dwmac = netdev->priv; + union dwmac_mac mac; + int rc; + + /* Create transmit descriptor ring */ + if ( ( rc = dwmac_create_ring ( dwmac, &dwmac->tx ) ) != 0 ) + goto err_create_tx; + + /* Create receive descriptor ring */ + if ( ( rc = dwmac_create_ring ( dwmac, &dwmac->rx ) ) != 0 ) + goto err_create_rx; + + /* Set MAC address */ + memcpy ( mac.raw, netdev->ll_addr, ETH_ALEN ); + writel ( mac.reg.addrl, ( dwmac->regs + DWMAC_ADDRL ) ); + writel ( mac.reg.addrh, ( dwmac->regs + DWMAC_ADDRH ) ); + + /* Enable promiscuous mode */ + writel ( DWMAC_FILTER_PR, ( dwmac->regs + DWMAC_FILTER ) ); + + /* Enable transmit and receive */ + writel ( ( DWMAC_OP_TXSF | DWMAC_OP_RXSF | + DWMAC_OP_TXEN | DWMAC_OP_RXEN ), + ( dwmac->regs + DWMAC_OP ) ); + writel ( ( DWMAC_CFG_DO | DWMAC_CFG_FD | + DWMAC_CFG_TXEN | DWMAC_CFG_RXEN ), + ( dwmac->regs + DWMAC_CFG ) ); + + /* Refill receive descriptor ring */ + dwmac_refill_rx ( dwmac ); + + /* Update link state */ + dwmac_check_link ( netdev ); + + return 0; + + dwmac_destroy_ring ( dwmac, &dwmac->rx ); + err_create_rx: + dwmac_destroy_ring ( dwmac, &dwmac->tx ); + 
err_create_tx: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void dwmac_close ( struct net_device *netdev ) { + struct dwmac *dwmac = netdev->priv; + unsigned int i; + + /* Reset NIC */ + dwmac_reset ( dwmac ); + + /* Discard unused receive buffers */ + for ( i = 0 ; i < DWMAC_NUM_RX_DESC ; i++ ) { + if ( dwmac->rx_iobuf[i] ) + free_rx_iob ( dwmac->rx_iobuf[i] ); + dwmac->rx_iobuf[i] = NULL; + } + + /* Destroy receive descriptor ring */ + dwmac_destroy_ring ( dwmac, &dwmac->rx ); + + /* Destroy transmit descriptor ring */ + dwmac_destroy_ring ( dwmac, &dwmac->tx ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int dwmac_transmit ( struct net_device *netdev, + struct io_buffer *iobuf ) { + struct dwmac *dwmac = netdev->priv; + struct dwmac_descriptor *tx; + unsigned int tx_idx; + + /* Get next transmit descriptor */ + if ( ( dwmac->tx.prod - dwmac->tx.cons ) >= DWMAC_NUM_TX_DESC ) { + DBGC ( dwmac, "DWMAC %s out of transmit descriptors\n", + dwmac->name ); + return -ENOBUFS; + } + tx_idx = ( dwmac->tx.prod % DWMAC_NUM_TX_DESC ); + tx = &dwmac->tx.desc[tx_idx]; + + /* Update producer index */ + dwmac->tx.prod++; + + /* Populate transmit descriptor */ + tx->size = cpu_to_le16 ( iob_len ( iobuf ) ); + tx->addr = cpu_to_le32 ( iob_dma ( iobuf ) ); + wmb(); + tx->stat = cpu_to_le32 ( DWMAC_STAT_OWN | DWMAC_STAT_TX_LAST | + DWMAC_STAT_TX_FIRST | DWMAC_STAT_TX_CHAIN ); + wmb(); + + /* Initiate transmission */ + writel ( 0, ( dwmac->regs + DWMAC_TXPOLL ) ); + + DBGC2 ( dwmac, "DWMAC %s TX %d is [%08lx,%08lx)\n", + dwmac->name, tx_idx, virt_to_phys ( iobuf->data ), + ( virt_to_phys ( iobuf->data ) + iob_len ( iobuf ) ) ); + return 0; +} + +/** + * Poll for completed packets + * + * @V netdev Network device + */ +static void dwmac_poll_tx ( struct net_device *netdev ) { + struct dwmac *dwmac = netdev->priv; + struct dwmac_descriptor *tx; + 
unsigned int tx_idx; + + /* Check for completed packets */ + while ( dwmac->tx.cons != dwmac->tx.prod ) { + + /* Get next transmit descriptor */ + tx_idx = ( dwmac->tx.cons % DWMAC_NUM_TX_DESC ); + tx = &dwmac->tx.desc[tx_idx]; + + /* Stop if descriptor is still owned by hardware */ + if ( tx->stat & cpu_to_le32 ( DWMAC_STAT_OWN ) ) + return; + dwmac->tx.cons++; + + /* Report completion */ + if ( tx->stat & cpu_to_le32 ( DWMAC_STAT_ERR ) ) { + DBGC ( dwmac, "DWMAC %s TX %d error %#08x\n", + dwmac->name, tx_idx, le32_to_cpu ( tx->stat ) ); + dwmac_dump ( dwmac ); + netdev_tx_complete_next_err ( netdev, -EIO ); + } else { + DBGC2 ( dwmac, "DWMAC %s TX %d complete\n", + dwmac->name, tx_idx ); + netdev_tx_complete_next ( netdev ); + } + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void dwmac_poll_rx ( struct net_device *netdev ) { + struct dwmac *dwmac = netdev->priv; + struct dwmac_descriptor *rx; + struct io_buffer *iobuf; + unsigned int rx_idx; + uint32_t stat; + size_t len; + + /* Check for received packets */ + while ( dwmac->rx.cons != dwmac->rx.prod ) { + + /* Get next receive descriptor */ + rx_idx = ( dwmac->rx.cons % DWMAC_NUM_RX_DESC ); + rx = &dwmac->rx.desc[rx_idx]; + + /* Stop if descriptor is still in use */ + if ( rx->stat & cpu_to_le32 ( DWMAC_STAT_OWN ) ) + return; + dwmac->rx.cons++; + + /* Consume I/O buffer */ + iobuf = dwmac->rx_iobuf[rx_idx]; + assert ( iobuf != NULL ); + dwmac->rx_iobuf[rx_idx] = NULL; + + /* Hand off to network stack */ + stat = le32_to_cpu ( rx->stat ); + assert ( stat & DWMAC_STAT_RX_FIRST ); + assert ( stat & DWMAC_STAT_RX_LAST ); + if ( stat & DWMAC_STAT_ERR ) { + DBGC ( dwmac, "DWMAC %s RX %d error %#08x\n", + dwmac->name, rx_idx, stat ); + dwmac_dump ( dwmac ); + netdev_rx_err ( netdev, iobuf, -EIO ); + } else { + len = ( DWMAC_STAT_RX_LEN ( stat ) - 4 /* CRC */ ); + iob_put ( iobuf, len ); + DBGC2 ( dwmac, "DWMAC %s RX %d complete (length " + "%zd)\n", dwmac->name, rx_idx, len 
); + netdev_rx ( netdev, iobuf ); + } + } +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void dwmac_poll ( struct net_device *netdev ) { + struct dwmac *dwmac = netdev->priv; + uint32_t status; + + /* Check for link status changes */ + status = readl ( dwmac->regs + DWMAC_STATUS ); + if ( status & DWMAC_STATUS_LINK ) + dwmac_check_link ( netdev ); + + /* Poll for TX competions, if applicable */ + dwmac_poll_tx ( netdev ); + + /* Poll for RX completions */ + dwmac_poll_rx ( netdev ); + + /* Refill RX ring */ + dwmac_refill_rx ( dwmac ); +} + +/** DesignWare MAC network device operations */ +static struct net_device_operations dwmac_operations = { + .open = dwmac_open, + .close = dwmac_close, + .transmit = dwmac_transmit, + .poll = dwmac_poll, +}; + +/****************************************************************************** + * + * Devicetree interface + * + ****************************************************************************** + */ + +/** + * Probe devicetree device + * + * @v dt Devicetree device + * @v offset Starting node offset + * @ret rc Return status code + */ +static int dwmac_probe ( struct dt_device *dt, unsigned int offset ) { + struct net_device *netdev; + struct dwmac *dwmac; + union dwmac_mac mac; + uint32_t version; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *dwmac ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &dwmac_operations ); + dwmac = netdev->priv; + dt_set_drvdata ( dt, netdev ); + netdev->dev = &dt->dev; + netdev->dma = &dt->dma; + memset ( dwmac, 0, sizeof ( *dwmac ) ); + dwmac->dma = &dt->dma; + dwmac->name = netdev->dev->name; + dwmac_init_ring ( &dwmac->tx, DWMAC_NUM_TX_DESC, DWMAC_TXBASE, + ( DWMAC_CTRL_TX_FIRST | DWMAC_CTRL_TX_LAST | + DWMAC_CTRL_CHAIN ) ); + dwmac_init_ring ( &dwmac->rx, DWMAC_NUM_RX_DESC, DWMAC_RXBASE, + DWMAC_CTRL_CHAIN ); + + /* Map registers */ + dwmac->regs = dt_ioremap ( dt, offset, DWMAC_REG_IDX, DWMAC_REG_LEN ); + if ( ! dwmac->regs ) { + rc = -ENODEV; + goto err_ioremap; + } + version = readl ( dwmac->regs + DWMAC_VER ); + DBGC ( dwmac, "DWMAC %s version %x.%x (user %x.%x)\n", dwmac->name, + DWMAC_VER_CORE_MAJOR ( version ), + DWMAC_VER_CORE_MINOR ( version ), + DWMAC_VER_USER_MAJOR ( version ), + DWMAC_VER_USER_MINOR ( version ) ); + + /* Fetch devicetree MAC address */ + if ( ( rc = fdt_mac ( &sysfdt, offset, netdev ) ) != 0 ) { + DBGC ( dwmac, "DWMAC %s could not fetch MAC: %s\n", + dwmac->name, strerror ( rc ) ); + goto err_mac; + } + + /* Fetch current MAC address, if set */ + mac.reg.addrl = readl ( dwmac->regs + DWMAC_ADDRL ); + mac.reg.addrh = readl ( dwmac->regs + DWMAC_ADDRH ); + memcpy ( netdev->ll_addr, mac.raw, ETH_ALEN ); + + /* Reset the NIC */ + if ( ( rc = dwmac_reset ( dwmac ) ) != 0 ) + goto err_reset; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + /* Update link state */ + dwmac_check_link ( netdev ); + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + dwmac_reset ( dwmac ); + err_reset: + err_mac: + iounmap ( dwmac->regs ); + err_ioremap: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove devicetree device + * + * @v dt Devicetree device + */ +static void dwmac_remove ( struct dt_device *dt ) { + 
struct net_device *netdev = dt_get_drvdata ( dt ); + struct dwmac *dwmac = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Reset card */ + dwmac_reset ( dwmac ); + + /* Free network device */ + iounmap ( dwmac->regs ); + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** DesignWare MAC compatible model identifiers */ +static const char * dwmac_ids[] = { + "thead,light-dwmac", + "snps,dwmac", +}; + +/** DesignWare MAC devicetree driver */ +struct dt_driver dwmac_driver __dt_driver = { + .name = "dwmac", + .ids = dwmac_ids, + .id_count = ( sizeof ( dwmac_ids ) / sizeof ( dwmac_ids[0] ) ), + .probe = dwmac_probe, + .remove = dwmac_remove, +}; diff --git a/src/drivers/net/dwmac.h b/src/drivers/net/dwmac.h new file mode 100644 index 000000000..4de62b0ce --- /dev/null +++ b/src/drivers/net/dwmac.h @@ -0,0 +1,246 @@ +#ifndef _DWMAC_H +#define _DWMAC_H + +/** @file + * + * Synopsys DesignWare MAC network driver + * + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <stdint.h> +#include <ipxe/if_ether.h> + +/** I/O region index */ +#define DWMAC_REG_IDX 0 + +/** I/O region length */ +#define DWMAC_REG_LEN 0x2000 + +/** MAC register block */ +#define DWMAC_MAC 0x0000 +#define DWMAC_MAC_REG( n ) ( DWMAC_MAC + ( (n) * 4 ) ) + +/** MAC configuration register */ +#define DWMAC_CFG DWMAC_MAC_REG ( 0 ) +#define DWMAC_CFG_DO 0x00002000 /**< Disable RX own frames */ +#define DWMAC_CFG_FD 0x00000800 /**< Full duplex */ +#define DWMAC_CFG_TXEN 0x00000008 /**< TX enabled */ +#define DWMAC_CFG_RXEN 0x00000004 /**< RX enabled */ + +/** MAC filter register */ +#define DWMAC_FILTER DWMAC_MAC_REG ( 1 ) +#define DWMAC_FILTER_PR 0x00000001 /**< Promiscuous mode */ + +/** Flow control register */ +#define DWMAC_FLOW DWMAC_MAC_REG ( 6 ) + +/** Version register */ +#define DWMAC_VER DWMAC_MAC_REG ( 8 ) +#define DWMAC_VER_USER_MAJOR( x ) \ + ( ( (x) >> 12 ) & 0xf ) /**< User major version */ +#define DWMAC_VER_USER_MINOR( x ) \ + ( ( 
(x) >> 8 ) & 0xf ) /**< User minor version */ +#define DWMAC_VER_CORE_MAJOR( x ) \ + ( ( (x) >> 4 ) & 0xf ) /**< Core major version */ +#define DWMAC_VER_CORE_MINOR( x ) \ + ( ( (x) >> 0 ) & 0xf ) /**< Core minor version */ + +/** Debug register */ +#define DWMAC_DEBUG DWMAC_MAC_REG ( 9 ) + +/** Interrupt status register */ +#define DWMAC_ISR DWMAC_MAC_REG ( 14 ) + +/** MAC address high register */ +#define DWMAC_ADDRH DWMAC_MAC_REG ( 16 ) + +/** MAC address low register */ +#define DWMAC_ADDRL DWMAC_MAC_REG ( 17 ) + +/** A DesignWare MAC address */ +union dwmac_mac { + struct { + uint32_t addrl; + uint32_t addrh; + } __attribute__ (( packed )) reg; + uint8_t raw[ETH_ALEN]; +}; + +/** SGMII/RGMII status register */ +#define DWMAC_GMII DWMAC_MAC_REG ( 54 ) +#define DWMAC_GMII_LINK 0x00000008 /**< Link up */ + +/** DMA register block */ +#define DWMAC_DMA 0x1000 +#define DWMAC_DMA_REG( n ) ( DWMAC_DMA + ( (n) * 4 ) ) + +/** Bus mode register */ +#define DWMAC_BUS DWMAC_DMA_REG ( 0 ) +#define DWMAC_BUS_PBL4 0x01000000 /**< 4x PBL mode */ +#define DWMAC_BUS_USP 0x00800000 /**< Use separate PBL */ +#define DWMAC_BUS_RPBL(x) ( (x) << 17 ) /**< RX DMA PBL */ +#define DWMAC_BUS_FB 0x00010000 /**< Fixed burst */ +#define DWMAC_BUS_PBL(x) ( (x) << 8 ) /**< (TX) DMA PBL */ +#define DWMAC_BUS_SWR 0x00000001 /**< Software reset */ + +/** Time to wait for software reset to complete */ +#define DWMAC_RESET_MAX_WAIT_MS 500 + +/** Transmit poll demand register */ +#define DWMAC_TXPOLL DWMAC_DMA_REG ( 1 ) + +/** Receive poll demand register */ +#define DWMAC_RXPOLL DWMAC_DMA_REG ( 2 ) + +/** Receive descriptor list address register */ +#define DWMAC_RXBASE DWMAC_DMA_REG ( 3 ) + +/** Transmit descriptor list address register */ +#define DWMAC_TXBASE DWMAC_DMA_REG ( 4 ) + +/** Status register */ +#define DWMAC_STATUS DWMAC_DMA_REG ( 5 ) +#define DWMAC_STATUS_LINK 0x04000000 /**< Link status change */ + +/** Operation mode register */ +#define DWMAC_OP DWMAC_DMA_REG ( 6 ) +#define 
DWMAC_OP_RXSF 0x02000000 /**< RX store and forward */ +#define DWMAC_OP_TXSF 0x00200000 /**< TX store and forward */ +#define DWMAC_OP_TXEN 0x00002000 /**< TX enabled */ +#define DWMAC_OP_RXEN 0x00000002 /**< RX enabled */ + +/** Packet drop counter register */ +#define DWMAC_DROP DWMAC_DMA_REG ( 8 ) + +/** AXI bus mode register */ +#define DWMAC_AXI DWMAC_DMA_REG ( 10 ) + +/** AHB or AXI status register */ +#define DWMAC_AHB DWMAC_DMA_REG ( 11 ) + +/** Current transmit descriptor register */ +#define DWMAC_TXDESC DWMAC_DMA_REG ( 18 ) + +/** Current receive descriptor register */ +#define DWMAC_RXDESC DWMAC_DMA_REG ( 19 ) + +/** Current transmit buffer address register */ +#define DWMAC_TXBUF DWMAC_DMA_REG ( 20 ) + +/** Current receive buffer address register */ +#define DWMAC_RXBUF DWMAC_DMA_REG ( 21 ) + +/** Hardware feature register */ +#define DWMAC_FEATURE DWMAC_DMA_REG ( 22 ) + +/** A frame descriptor + * + * We populate the descriptor with values that are valid for both + * normal and enhanced descriptor formats, to avoid needing to care + * about which version of the hardware we have. 
+ */ +struct dwmac_descriptor { + /** Completion status */ + uint32_t stat; + /** Buffer size */ + uint16_t size; + /** Reserved */ + uint8_t reserved_a; + /** Ring control */ + uint8_t ctrl; + /** Buffer address */ + uint32_t addr; + /** Next descriptor address */ + uint32_t next; +} __attribute__ (( packed )); + +/* Completion status */ +#define DWMAC_STAT_OWN 0x80000000 /**< Owned by hardware */ +#define DWMAC_STAT_TX_LAST 0x20000000 /**< Last segment (TX) */ +#define DWMAC_STAT_TX_FIRST 0x10000000 /**< First segment (TX) */ +#define DWMAC_STAT_TX_CHAIN 0x00100000 /**< Chained descriptor (TX) */ +#define DWMAC_STAT_ERR 0x00008000 /**< Error summary */ +#define DWMAC_STAT_RX_FIRST 0x00000200 /**< First segment (RX) */ +#define DWMAC_STAT_RX_LAST 0x00000100 /**< Last segment (RX) */ +#define DWMAC_STAT_RX_LEN(x) \ + ( ( (x) >> 16 ) & 0x3fff ) /**< Frame length (RX) */ + +/** Buffer size */ +#define DWMAC_SIZE_RX_CHAIN 0x4000 /**< Chained descriptor (RX) */ + +/* Ring control */ +#define DWMAC_CTRL_TX_LAST 0x40 /**< Last segment (TX) */ +#define DWMAC_CTRL_TX_FIRST 0x20 /**< First segment (TX) */ +#define DWMAC_CTRL_CHAIN 0x01 /**< Chained descriptor */ + +/** A DesignWare descriptor ring */ +struct dwmac_ring { + /** Descriptors */ + struct dwmac_descriptor *desc; + /** Descriptor ring DMA mapping */ + struct dma_mapping map; + /** Producer index */ + unsigned int prod; + /** Consumer index */ + unsigned int cons; + + /** Queue base address register (within DMA block) */ + uint8_t qbase; + /** Number of descriptors */ + uint8_t count; + /** Default control flags */ + uint8_t ctrl; + /** Length of descriptors */ + size_t len; +}; + +/** Number of transmit descriptors */ +#define DWMAC_NUM_TX_DESC 16 + +/** Number of receive descriptors */ +#define DWMAC_NUM_RX_DESC 16 + +/** Length of receive buffers + * + * Must be a multiple of 16. 
+ */ +#define DWMAC_RX_LEN 1536 + +/** + * Initialise descriptor ring + * + * @v ring Descriptor ring + * @v count Number of descriptors + * @v qbase Queue base address register + * @v ctrl Default descriptor control flags + */ +static inline __attribute__ (( always_inline )) void +dwmac_init_ring ( struct dwmac_ring *ring, unsigned int count, + unsigned int qbase, unsigned int ctrl ) { + + ring->qbase = ( qbase - DWMAC_DMA ); + ring->count = count; + ring->ctrl = ctrl; + ring->len = ( count * sizeof ( ring->desc[0] ) ); +} + +/** A DesignWare MAC network card */ +struct dwmac { + /** Registers */ + void *regs; + /** DMA device */ + struct dma_device *dma; + /** Device name (for debugging) */ + const char *name; + + /** Transmit ring */ + struct dwmac_ring tx; + /** Receive ring */ + struct dwmac_ring rx; + /** Receive I/O buffers */ + struct io_buffer *rx_iobuf[DWMAC_NUM_RX_DESC]; +}; + +#endif /* _DWMAC_H */ diff --git a/src/drivers/net/ecm.c b/src/drivers/net/ecm.c index ab1f98370..9a13b68a5 100644 --- a/src/drivers/net/ecm.c +++ b/src/drivers/net/ecm.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <errno.h> @@ -97,8 +98,9 @@ int ecm_fetch_mac ( struct usb_function *func, int rc; /* Fetch MAC address string */ + buf[ sizeof ( buf ) - 1 ] = '\0'; len = usb_get_string_descriptor ( usb, desc->mac, 0, buf, - sizeof ( buf ) ); + ( sizeof ( buf ) - 1 ) ); if ( len < 0 ) { rc = len; return rc; diff --git a/src/drivers/net/ecm.h b/src/drivers/net/ecm.h index a7d03cf94..d77b0c64f 100644 --- a/src/drivers/net/ecm.h +++ b/src/drivers/net/ecm.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <ipxe/usb.h> #include <ipxe/usbnet.h> diff --git a/src/drivers/net/eepro.c b/src/drivers/net/eepro.c index 97b4c4061..3863553fa 100644 --- a/src/drivers/net/eepro.c +++ b/src/drivers/net/eepro.c @@ -635,7 +635,7 @@ ISA_DRIVER ( eepro_driver, eepro_probe_addrs, 
eepro_probe1, GENERIC_ISAPNP_VENDOR, 0x828a ); DRIVER ( "eepro", nic_driver, isa_driver, eepro_driver, - eepro_probe, eepro_disable ); + eepro_probe, eepro_disable, no_fake_bss ); ISA_ROM ( "eepro", "Intel Etherexpress Pro/10" ); diff --git a/src/drivers/net/eepro100.c b/src/drivers/net/eepro100.c index 49b00d443..318db1883 100644 --- a/src/drivers/net/eepro100.c +++ b/src/drivers/net/eepro100.c @@ -101,6 +101,7 @@ FILE_LICENCE ( GPL2_OR_LATER ); */ #include <stdint.h> +#include <string.h> #include <byteswap.h> #include <errno.h> #include <stdio.h> diff --git a/src/drivers/net/efi/mnp.c b/src/drivers/net/efi/mnp.c index 33218fb10..212c712df 100644 --- a/src/drivers/net/efi/mnp.c +++ b/src/drivers/net/efi/mnp.c @@ -44,11 +44,11 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); static int mnp_supported ( EFI_HANDLE device ) { EFI_GUID *binding = &efi_managed_network_service_binding_protocol_guid; - return snpnet_supported ( device, binding ); + return snpnet_supported ( device, binding, 0 ); } /** EFI MNP driver */ -struct efi_driver mnp_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { +struct efi_driver mnp_driver __efi_driver ( EFI_DRIVER_MNP ) = { .name = "MNP", .supported = mnp_supported, .start = mnpnet_start, diff --git a/src/drivers/net/efi/mnpnet.c b/src/drivers/net/efi/mnpnet.c index eb4b129c7..fe0ebaadb 100644 --- a/src/drivers/net/efi/mnpnet.c +++ b/src/drivers/net/efi/mnpnet.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); /** @file * @@ -367,14 +368,9 @@ static struct net_device_operations mnpnet_operations = { * @ret rc Return status code */ int mnpnet_start ( struct efi_device *efidev ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_HANDLE device = efidev->device; EFI_GUID *binding = &efi_managed_network_service_binding_protocol_guid; EFI_SIMPLE_NETWORK_MODE mode; - union { - EFI_MANAGED_NETWORK_PROTOCOL *mnp; - void *interface; - } u; struct net_device *netdev; struct mnp_nic *mnp; EFI_STATUS efirc; @@ 
-408,18 +404,13 @@ int mnpnet_start ( struct efi_device *efidev ) { } /* Open MNP protocol */ - if ( ( efirc = bs->OpenProtocol ( efidev->child, - &efi_managed_network_protocol_guid, - &u.interface, efi_image_handle, - efidev->child, - ( EFI_OPEN_PROTOCOL_BY_DRIVER | - EFI_OPEN_PROTOCOL_EXCLUSIVE )))!=0){ - rc = -EEFI ( efirc ); + if ( ( rc = efi_open_by_driver ( efidev->child, + &efi_managed_network_protocol_guid, + &mnp->mnp ) ) != 0 ) { DBGC ( mnp, "MNP %s could not open MNP protocol: %s\n", efi_handle_name ( device ), strerror ( rc ) ); goto err_open; } - mnp->mnp = u.mnp; /* Get configuration */ efirc = mnp->mnp->GetModeData ( mnp->mnp, NULL, &mode ); @@ -464,8 +455,8 @@ int mnpnet_start ( struct efi_device *efidev ) { err_ll_addr_len: err_hw_addr_len: err_mode: - bs->CloseProtocol ( efidev->child, &efi_managed_network_protocol_guid, - efi_image_handle, efidev->child ); + efi_close_by_driver ( efidev->child, + &efi_managed_network_protocol_guid ); err_open: efi_service_del ( device, binding, efidev->child ); err_service: @@ -482,7 +473,6 @@ int mnpnet_start ( struct efi_device *efidev ) { * @v efidev EFI device */ void mnpnet_stop ( struct efi_device *efidev ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_GUID *binding = &efi_managed_network_service_binding_protocol_guid; struct net_device *netdev = efidev_get_drvdata ( efidev ); struct mnp_nic *mnp = netdev->priv; @@ -491,8 +481,8 @@ void mnpnet_stop ( struct efi_device *efidev ) { unregister_netdev ( netdev ); /* Close MNP protocol */ - bs->CloseProtocol ( efidev->child, &efi_managed_network_protocol_guid, - efi_image_handle, efidev->child ); + efi_close_by_driver ( efidev->child, + &efi_managed_network_protocol_guid ); /* Remove MNP child (unless whole system shutdown is in progress) */ if ( ! 
efi_shutdown_in_progress ) diff --git a/src/drivers/net/efi/nii.c b/src/drivers/net/efi/nii.c index 16e9e10df..d1adf3d44 100644 --- a/src/drivers/net/efi/nii.c +++ b/src/drivers/net/efi/nii.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <strings.h> @@ -177,7 +178,7 @@ struct nii_nic { size_t mtu; /** Hardware transmit/receive buffer */ - userptr_t buffer; + void *buffer; /** Hardware transmit/receive buffer length */ size_t buffer_len; @@ -210,10 +211,6 @@ static int nii_pci_open ( struct nii_nic *nii ) { EFI_HANDLE device = nii->efidev->device; EFI_HANDLE pci_device; union { - EFI_PCI_IO_PROTOCOL *pci_io; - void *interface; - } pci_io; - union { EFI_ACPI_ADDRESS_SPACE_DESCRIPTOR *acpi; void *resource; } desc; @@ -230,17 +227,18 @@ static int nii_pci_open ( struct nii_nic *nii ) { } nii->pci_device = pci_device; - /* Open PCI I/O protocol */ - if ( ( efirc = bs->OpenProtocol ( pci_device, &efi_pci_io_protocol_guid, - &pci_io.interface, efi_image_handle, - device, - EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ - rc = -EEFI ( efirc ); + /* Open PCI I/O protocol + * + * We cannot open this safely as a by-driver open, since doing + * so would disconnect the underlying NII driver. We must + * therefore use an unsafe open. 
+ */ + if ( ( rc = efi_open_unsafe ( pci_device, &efi_pci_io_protocol_guid, + &nii->pci_io ) ) != 0 ) { DBGC ( nii, "NII %s could not open PCI I/O protocol: %s\n", nii->dev.name, strerror ( rc ) ); goto err_open; } - nii->pci_io = pci_io.pci_io; /* Identify memory and I/O BARs */ nii->mem_bar = PCI_MAX_BAR; @@ -280,8 +278,7 @@ static int nii_pci_open ( struct nii_nic *nii ) { return 0; err_get_bar_attributes: - bs->CloseProtocol ( pci_device, &efi_pci_io_protocol_guid, - efi_image_handle, device ); + efi_close_unsafe ( pci_device, &efi_pci_io_protocol_guid ); err_open: err_locate: return rc; @@ -294,7 +291,6 @@ static int nii_pci_open ( struct nii_nic *nii ) { * @ret rc Return status code */ static void nii_pci_close ( struct nii_nic *nii ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct nii_mapping *map; struct nii_mapping *tmp; @@ -308,8 +304,7 @@ static void nii_pci_close ( struct nii_nic *nii ) { } /* Close protocols */ - bs->CloseProtocol ( nii->pci_device, &efi_pci_io_protocol_guid, - efi_image_handle, nii->efidev->device ); + efi_close_unsafe ( nii->pci_device, &efi_pci_io_protocol_guid ); } /** @@ -1264,18 +1259,35 @@ static struct net_device_operations nii_operations = { }; /** + * Exclude existing drivers + * + * @v device EFI device handle + * @ret rc Return status code + */ +int nii_exclude ( EFI_HANDLE device ) { + EFI_GUID *protocol = &efi_nii31_protocol_guid; + int rc; + + /* Exclude existing NII protocol drivers */ + if ( ( rc = efi_driver_exclude ( device, protocol ) ) != 0 ) { + DBGC ( device, "NII %s could not exclude drivers: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + return rc; + } + + return 0; +} + +/** * Attach driver to device * * @v efidev EFI device * @ret rc Return status code */ int nii_start ( struct efi_device *efidev ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_HANDLE device = efidev->device; struct net_device *netdev; struct nii_nic *nii; - void *interface; - EFI_STATUS efirc; int rc; /* 
Allocate and initialise structure */ @@ -1300,17 +1312,13 @@ int nii_start ( struct efi_device *efidev ) { netdev->dev = &nii->dev; /* Open NII protocol */ - if ( ( efirc = bs->OpenProtocol ( device, &efi_nii31_protocol_guid, - &interface, efi_image_handle, device, - ( EFI_OPEN_PROTOCOL_BY_DRIVER | - EFI_OPEN_PROTOCOL_EXCLUSIVE )))!=0){ - rc = -EEFI ( efirc ); + if ( ( rc = efi_open_by_driver ( device, &efi_nii31_protocol_guid, + &nii->nii ) ) != 0 ) { DBGC ( nii, "NII %s cannot open NII protocol: %s\n", nii->dev.name, strerror ( rc ) ); DBGC_EFI_OPENERS ( device, device, &efi_nii31_protocol_guid ); goto err_open_protocol; } - nii->nii = interface; /* Locate UNDI and entry point */ nii->undi = ( ( void * ) ( intptr_t ) nii->nii->Id ); @@ -1373,8 +1381,7 @@ int nii_start ( struct efi_device *efidev ) { err_pci_open: err_hw_undi: err_no_undi: - bs->CloseProtocol ( device, &efi_nii31_protocol_guid, - efi_image_handle, device ); + efi_close_by_driver ( device, &efi_nii31_protocol_guid ); err_open_protocol: list_del ( &nii->dev.siblings ); netdev_nullify ( netdev ); @@ -1389,7 +1396,6 @@ int nii_start ( struct efi_device *efidev ) { * @v efidev EFI device */ void nii_stop ( struct efi_device *efidev ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct net_device *netdev = efidev_get_drvdata ( efidev ); struct nii_nic *nii = netdev->priv; EFI_HANDLE device = efidev->device; @@ -1404,8 +1410,7 @@ void nii_stop ( struct efi_device *efidev ) { nii_pci_close ( nii ); /* Close NII protocol */ - bs->CloseProtocol ( device, &efi_nii31_protocol_guid, - efi_image_handle, device ); + efi_close_by_driver ( device, &efi_nii31_protocol_guid ); /* Free network device */ list_del ( &nii->dev.siblings ); diff --git a/src/drivers/net/efi/nii.h b/src/drivers/net/efi/nii.h index c10be9db5..e0b07f0a5 100644 --- a/src/drivers/net/efi/nii.h +++ b/src/drivers/net/efi/nii.h @@ -8,9 +8,11 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); struct efi_device; 
+extern int nii_exclude ( EFI_HANDLE device ); extern int nii_start ( struct efi_device *efidev ); extern void nii_stop ( struct efi_device *efidev ); diff --git a/src/drivers/net/efi/snp.c b/src/drivers/net/efi/snp.c index cac8b38e2..854fa872d 100644 --- a/src/drivers/net/efi/snp.c +++ b/src/drivers/net/efi/snp.c @@ -41,8 +41,9 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * @ret rc Return status code */ static int snp_supported ( EFI_HANDLE device ) { + EFI_GUID *protocol = &efi_simple_network_protocol_guid; - return snpnet_supported ( device, &efi_simple_network_protocol_guid ); + return snpnet_supported ( device, protocol, 1 ); } /** @@ -52,22 +53,29 @@ static int snp_supported ( EFI_HANDLE device ) { * @ret rc Return status code */ static int nii_supported ( EFI_HANDLE device ) { + EFI_GUID *protocol = &efi_nii31_protocol_guid; - return snpnet_supported ( device, &efi_nii31_protocol_guid ); + return snpnet_supported ( device, protocol, 1 ); } /** EFI SNP driver */ -struct efi_driver snp_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { +struct efi_driver snp_driver __efi_driver ( EFI_DRIVER_SNP ) = { .name = "SNP", .supported = snp_supported, + .exclude = snpnet_exclude, .start = snpnet_start, .stop = snpnet_stop, }; /** EFI NII driver */ -struct efi_driver nii_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { +struct efi_driver nii_driver __efi_driver ( EFI_DRIVER_NII ) = { .name = "NII", .supported = nii_supported, + .exclude = nii_exclude, .start = nii_start, .stop = nii_stop, }; + +/** Drag in MNP driver */ +REQUIRING_SYMBOL ( snp_driver ); +REQUIRE_SYMBOL ( mnp_driver ); diff --git a/src/drivers/net/efi/snpnet.c b/src/drivers/net/efi/snpnet.c index 6ce731d78..6046f0a1e 100644 --- a/src/drivers/net/efi/snpnet.c +++ b/src/drivers/net/efi/snpnet.c @@ -18,6 +18,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER ); +FILE_SECBOOT ( PERMITTED ); #include <stdlib.h> #include <stdio.h> @@ -86,6 +87,14 @@ struct snp_nic { */ #define SNP_RX_PAD 8 +/** An SNP interface patch to inhibit 
shutdown for insomniac devices */ +struct snp_insomniac_patch { + /** Original Shutdown() method */ + EFI_SIMPLE_NETWORK_SHUTDOWN shutdown; + /** Original Stop() method */ + EFI_SIMPLE_NETWORK_STOP stop; +}; + /** * Format SNP MAC address (for debugging) * @@ -387,9 +396,10 @@ static int snpnet_open ( struct net_device *netdev ) { /* Initialise NIC, retrying multiple times if link stays down */ for ( retry = 0 ; ; ) { - /* Initialise NIC */ - if ( ( efirc = snp->snp->Initialize ( snp->snp, - 0, 0 ) ) != 0 ) { + /* Initialise NIC, if not already initialised */ + if ( ( mode->State != EfiSimpleNetworkInitialized ) && + ( ( efirc = snp->snp->Initialize ( snp->snp, + 0, 0 ) ) != 0 ) ) { rc = -EEFI ( efirc ); snpnet_dump_mode ( netdev ); DBGC ( snp, "SNP %s could not initialise: %s\n", @@ -413,11 +423,13 @@ static int snpnet_open ( struct net_device *netdev ) { /* Delay to allow time for link to establish */ mdelay ( SNP_INITIALIZE_RETRY_DELAY_MS ); - /* Shut down and retry; this is sometimes necessary in - * order to persuade the underlying SNP driver to - * actually update the link state. + /* Shut down and retry (unless device is insomniac); + * this is sometimes necessary in order to persuade + * the underlying SNP driver to actually update the + * link state. */ - if ( ( efirc = snp->snp->Shutdown ( snp->snp ) ) != 0 ) { + if ( ( ! netdev_insomniac ( netdev ) ) && + ( ( efirc = snp->snp->Shutdown ( snp->snp ) ) != 0 ) ) { rc = -EEFI ( efirc ); snpnet_dump_mode ( netdev ); DBGC ( snp, "SNP %s could not shut down: %s\n", @@ -455,8 +467,11 @@ static void snpnet_close ( struct net_device *netdev ) { EFI_STATUS efirc; int rc; - /* Shut down NIC (unless whole system shutdown is in progress) */ + /* Shut down NIC (unless whole system shutdown is in progress, + * or device is insomniac). + */ if ( ( ! efi_shutdown_in_progress ) && + ( ! 
netdev_insomniac ( netdev ) ) && ( ( efirc = snp->snp->Shutdown ( snp->snp ) ) != 0 ) ) { rc = -EEFI ( efirc ); DBGC ( snp, "SNP %s could not shut down: %s\n", @@ -490,12 +505,12 @@ static struct net_device_operations snpnet_operations = { * * @v device EFI device handle * @v protocol Protocol GUID + * @v inhibit_wifi Inhibit wireless devices * @ret rc Return status code */ -int snpnet_supported ( EFI_HANDLE device, EFI_GUID *protocol ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; +int snpnet_supported ( EFI_HANDLE device, EFI_GUID *protocol, + int inhibit_wifi ) { EFI_HANDLE parent; - EFI_STATUS efirc; int rc; /* Check that this is not a device we are providing ourselves */ @@ -506,13 +521,11 @@ int snpnet_supported ( EFI_HANDLE device, EFI_GUID *protocol ) { } /* Test for presence of protocol */ - if ( ( efirc = bs->OpenProtocol ( device, protocol, - NULL, efi_image_handle, device, - EFI_OPEN_PROTOCOL_TEST_PROTOCOL))!=0){ + if ( ( rc = efi_test ( device, protocol ) ) != 0 ) { DBGCP ( device, "HANDLE %s is not a %s device\n", efi_handle_name ( device ), efi_guid_ntoa ( protocol ) ); - return -EEFI ( efirc ); + return rc; } /* Check that there are no instances of this protocol further @@ -526,35 +539,205 @@ int snpnet_supported ( EFI_HANDLE device, EFI_GUID *protocol ) { DBGC2 ( device, "%s\n", efi_handle_name ( parent ) ); return -ENOTTY; } - DBGC ( device, "HANDLE %s is a %s device\n", efi_handle_name ( device ), efi_guid_ntoa ( protocol ) ); + + /* Check for wireless devices, if applicable */ + if ( inhibit_wifi && + ( ( efi_test ( device, &efi_wifi2_protocol_guid ) ) == 0 ) ) { + DBGC ( device, "HANDLE %s is wireless: assuming vendor %s " + "driver is too unreliable to use\n", + efi_handle_name ( device ), + efi_guid_ntoa ( protocol ) ); + return -ENOTTY; + } + + return 0; +} + +/** + * Check if device must be insomniac + * + * @v device EFI device handle + * @v is_insomniac Device must be insomniac + */ +static int snpnet_is_insomniac ( EFI_HANDLE 
device ) { + int rc; + + /* Check for wireless devices + * + * The UEFI model for wireless network configuration is + * somewhat underdefined. At the time of writing, the EDK2 + * "UEFI WiFi Connection Manager" driver provides only one way + * to configure wireless network credentials, which is to + * enter them interactively via an HII form. Credentials are + * not stored (or exposed via any protocol interface), and so + * any temporary disconnection from the wireless network will + * inevitably leave the interface in an unusable state that + * cannot be recovered without user intervention. + * + * Experimentation shows that at least some wireless network + * drivers will disconnect from the wireless network when the + * SNP Shutdown() method is called, or if the device is not + * polled sufficiently frequently to maintain its association + * to the network. We therefore inhibit calls to Shutdown() + * and Stop() for any such SNP protocol interfaces, and mark + * our network device as insomniac so that it will be polled + * even when closed. 
+ */ + if ( ( rc = efi_test ( device, &efi_wifi2_protocol_guid ) ) == 0 ) { + DBGC ( device, "SNP %s is wireless: assuming insomniac\n", + efi_handle_name ( device ) ); + return 1; + } + return 0; } /** + * Ignore shutdown attempt + * + * @v snp SNP interface + * @ret efirc EFI status code + */ +static EFI_STATUS EFIAPI +snpnet_do_nothing ( EFI_SIMPLE_NETWORK_PROTOCOL *snp __unused ) { + + return 0; +} + +/** + * Patch SNP protocol interface to prevent shutdown + * + * @v device EFI device handle + * @v patch Interface patch + * @ret rc Return status code + */ +static int snpnet_insomniac_patch ( EFI_HANDLE device, + struct snp_insomniac_patch *patch ) { + EFI_SIMPLE_NETWORK_PROTOCOL *interface; + int rc; + + /* Open interface for ephemeral use */ + if ( ( rc = efi_open ( device, &efi_simple_network_protocol_guid, + &interface ) ) != 0 ) { + DBGC ( device, "SNP %s cannot open SNP protocol for patching: " + "%s\n", efi_handle_name ( device ), strerror ( rc ) ); + return rc; + } + + /* Record original Shutdown() and Stop() methods */ + patch->shutdown = interface->Shutdown; + patch->stop = interface->Stop; + + /* Inhibit other UEFI drivers' calls to Shutdown() and Stop() + * + * This is necessary since disconnecting the MnpDxe driver + * will attempt to shut down the SNP device, which would leave + * us with an unusable device. 
+ */ + interface->Shutdown = snpnet_do_nothing; + interface->Stop = snpnet_do_nothing; + DBGC ( device, "SNP %s patched to inhibit shutdown\n", + efi_handle_name ( device ) ); + + return 0; +} + +/** + * Restore patched SNP protocol interface + * + * @v device EFI device handle + * @v patch Interface patch to fill in + * @ret rc Return status code + */ +static int snpnet_insomniac_restore ( EFI_HANDLE device, + struct snp_insomniac_patch *patch ) { + EFI_SIMPLE_NETWORK_PROTOCOL *interface; + int rc; + + /* Avoid returning uninitialised data on error */ + memset ( patch, 0, sizeof ( *patch ) ); + + /* Open interface for ephemeral use */ + if ( ( rc = efi_open ( device, &efi_simple_network_protocol_guid, + &interface ) ) != 0 ) { + DBGC ( device, "SNP %s cannot open patched SNP protocol: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + return rc; + } + + /* Restore original Shutdown() and Stop() methods, if possible */ + if ( interface->Shutdown == snpnet_do_nothing ) + interface->Shutdown = patch->shutdown; + if ( interface->Stop == snpnet_do_nothing ) + interface->Stop = patch->stop; + + /* Check that original methods were restored (by us or others) */ + if ( ( interface->Shutdown != patch->shutdown ) || + ( interface->Stop != patch->stop ) ) { + DBGC ( device, "SNP %s could not restore patched SNP " + "protocol\n", efi_handle_name ( device ) ); + return -EBUSY; + } + + return 0; +} + +/** + * Exclude existing drivers + * + * @v device EFI device handle + * @ret rc Return status code + */ +int snpnet_exclude ( EFI_HANDLE device ) { + EFI_GUID *protocol = &efi_simple_network_protocol_guid; + struct snp_insomniac_patch patch; + int insomniac; + int rc; + + /* Check if this is a device that must not ever be shut down */ + insomniac = snpnet_is_insomniac ( device ); + + /* Inhibit calls to Shutdown() and Stop(), if applicable */ + if ( insomniac && + ( ( rc = snpnet_insomniac_patch ( device, &patch ) ) != 0 ) ) { + goto err_patch; + } + + /* Exclude existing 
SNP drivers */ + if ( ( rc = efi_driver_exclude ( device, protocol ) ) != 0 ) { + DBGC ( device, "SNP %s could not exclude drivers: %s\n", + efi_handle_name ( device ), strerror ( rc ) ); + goto err_exclude; + } + + err_exclude: + if ( insomniac ) + snpnet_insomniac_restore ( device, &patch ); + err_patch: + return rc; +} + +/** * Attach driver to device * * @v efidev EFI device * @ret rc Return status code */ int snpnet_start ( struct efi_device *efidev ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_HANDLE device = efidev->device; + EFI_SIMPLE_NETWORK_PROTOCOL *interface; EFI_SIMPLE_NETWORK_MODE *mode; struct net_device *netdev; struct snp_nic *snp; - void *interface; EFI_STATUS efirc; int rc; /* Open SNP protocol */ - if ( ( efirc = bs->OpenProtocol ( device, - &efi_simple_network_protocol_guid, - &interface, efi_image_handle, device, - ( EFI_OPEN_PROTOCOL_BY_DRIVER | - EFI_OPEN_PROTOCOL_EXCLUSIVE )))!=0){ - rc = -EEFI ( efirc ); + if ( ( rc = efi_open_by_driver ( device, + &efi_simple_network_protocol_guid, + &interface ) ) != 0 ) { DBGC ( device, "SNP %s cannot open SNP protocol: %s\n", efi_handle_name ( device ), strerror ( rc ) ); DBGC_EFI_OPENERS ( device, device, @@ -583,7 +766,11 @@ int snpnet_start ( struct efi_device *efidev ) { INIT_LIST_HEAD ( &snp->dev.children ); netdev->dev = &snp->dev; - /* Bring to the Started state */ + /* Check if device is insomniac */ + if ( snpnet_is_insomniac ( device ) ) + netdev->state |= NETDEV_INSOMNIAC; + + /* Bring to the correct state for a closed interface */ if ( ( mode->State == EfiSimpleNetworkStopped ) && ( ( efirc = snp->snp->Start ( snp->snp ) ) != 0 ) ) { rc = -EEFI ( efirc ); @@ -592,6 +779,7 @@ int snpnet_start ( struct efi_device *efidev ) { goto err_start; } if ( ( mode->State == EfiSimpleNetworkInitialized ) && + ( ! 
netdev_insomniac ( netdev ) ) && ( ( efirc = snp->snp->Shutdown ( snp->snp ) ) != 0 ) ) { rc = -EEFI ( efirc ); DBGC ( device, "SNP %s could not shut down: %s\n", @@ -644,8 +832,7 @@ int snpnet_start ( struct efi_device *efidev ) { netdev_nullify ( netdev ); netdev_put ( netdev ); err_alloc: - bs->CloseProtocol ( device, &efi_simple_network_protocol_guid, - efi_image_handle, device ); + efi_close_by_driver ( device, &efi_simple_network_protocol_guid ); err_open_protocol: return rc; } @@ -656,7 +843,6 @@ int snpnet_start ( struct efi_device *efidev ) { * @v efidev EFI device */ void snpnet_stop ( struct efi_device *efidev ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; struct net_device *netdev = efidev_get_drvdata ( efidev ); struct snp_nic *snp = netdev->priv; EFI_HANDLE device = efidev->device; @@ -681,6 +867,5 @@ void snpnet_stop ( struct efi_device *efidev ) { netdev_put ( netdev ); /* Close SNP protocol */ - bs->CloseProtocol ( device, &efi_simple_network_protocol_guid, - efi_image_handle, device ); + efi_close_by_driver ( device, &efi_simple_network_protocol_guid ); } diff --git a/src/drivers/net/efi/snpnet.h b/src/drivers/net/efi/snpnet.h index 4699c7892..a361a99c0 100644 --- a/src/drivers/net/efi/snpnet.h +++ b/src/drivers/net/efi/snpnet.h @@ -8,10 +8,13 @@ */ FILE_LICENCE ( GPL2_OR_LATER ); +FILE_SECBOOT ( PERMITTED ); struct efi_device; -extern int snpnet_supported ( EFI_HANDLE device, EFI_GUID *protocol ); +extern int snpnet_supported ( EFI_HANDLE device, EFI_GUID *protocol, + int inhibit_wifi ); +extern int snpnet_exclude ( EFI_HANDLE device ); extern int snpnet_start ( struct efi_device *efidev ); extern void snpnet_stop ( struct efi_device *efidev ); diff --git a/src/drivers/net/efi/snponly.c b/src/drivers/net/efi/snponly.c index 2ae63fc06..b7231ce01 100644 --- a/src/drivers/net/efi/snponly.c +++ b/src/drivers/net/efi/snponly.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> 
#include <errno.h> @@ -63,16 +64,20 @@ struct chained_protocol { * reinstalling the protocol instance. */ EFI_HANDLE device; + /** Assume wireless devices are unusable */ + int inhibit_wifi; }; /** Chainloaded SNP protocol */ static struct chained_protocol chained_snp = { .protocol = &efi_simple_network_protocol_guid, + .inhibit_wifi = 1, }; /** Chainloaded NII protocol */ static struct chained_protocol chained_nii = { .protocol = &efi_nii31_protocol_guid, + .inhibit_wifi = 1, }; /** Chainloaded MNP protocol */ @@ -86,13 +91,11 @@ static struct chained_protocol chained_mnp = { * @v chained Chainloaded protocol */ static void chained_locate ( struct chained_protocol *chained ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; EFI_HANDLE device = efi_loaded_image->DeviceHandle; EFI_HANDLE handle; void *match = NULL; void *interface; unsigned int skip; - EFI_STATUS efirc; int rc; /* Identify target device handle */ @@ -111,11 +114,8 @@ static void chained_locate ( struct chained_protocol *chained ) { } /* Get protocol instance */ - if ( ( efirc = bs->OpenProtocol ( - handle, chained->protocol, &interface, - efi_image_handle, handle, - EFI_OPEN_PROTOCOL_GET_PROTOCOL )) != 0){ - rc = -EEFI ( efirc ); + if ( ( rc = efi_open ( handle, chained->protocol, + &interface ) ) != 0 ) { DBGC ( device, "CHAINED %s could not open %s on ", efi_handle_name ( device ), efi_guid_ntoa ( chained->protocol ) ); @@ -123,8 +123,6 @@ static void chained_locate ( struct chained_protocol *chained ) { efi_handle_name ( handle ), strerror ( rc ) ); break; } - bs->CloseProtocol ( handle, chained->protocol, - efi_image_handle, handle ); /* Stop if we reach a non-matching protocol instance */ if ( match && ( match != interface ) ) { @@ -154,20 +152,16 @@ static void chained_locate ( struct chained_protocol *chained ) { */ static int chained_supported ( EFI_HANDLE device, struct chained_protocol *chained ) { - EFI_BOOT_SERVICES *bs = efi_systab->BootServices; void *interface; - EFI_STATUS efirc; 
int rc; /* Get protocol */ - if ( ( efirc = bs->OpenProtocol ( device, chained->protocol, &interface, - efi_image_handle, device, - EFI_OPEN_PROTOCOL_GET_PROTOCOL ))!=0){ - rc = -EEFI ( efirc ); + if ( ( rc = efi_open ( device, chained->protocol, + &interface ) ) != 0 ) { DBGCP ( device, "CHAINED %s is not a %s device\n", efi_handle_name ( device ), efi_guid_ntoa ( chained->protocol ) ); - goto err_open_protocol; + return rc; } /* Ignore non-matching handles */ @@ -175,21 +169,23 @@ static int chained_supported ( EFI_HANDLE device, DBGC2 ( device, "CHAINED %s is not the chainloaded %s\n", efi_handle_name ( device ), efi_guid_ntoa ( chained->protocol ) ); - rc = -ENOTTY; - goto err_no_match; + return -ENOTTY; } - - /* Success */ - rc = 0; DBGC ( device, "CHAINED %s is the chainloaded %s\n", efi_handle_name ( device ), efi_guid_ntoa ( chained->protocol ) ); - err_no_match: - bs->CloseProtocol ( device, chained->protocol, efi_image_handle, - device ); - err_open_protocol: - return rc; + /* Check for wireless devices, if applicable */ + if ( chained->inhibit_wifi && + ( ( efi_test ( device, &efi_wifi2_protocol_guid ) ) == 0 ) ) { + DBGC ( device, "CHAINED %s is wireless: assuming vendor %s " + "driver is too unreliable to use\n", + efi_handle_name ( device ), + efi_guid_ntoa ( chained->protocol ) ); + return -ENOTTY; + } + + return 0; } /** @@ -226,23 +222,25 @@ static int mnponly_supported ( EFI_HANDLE device ) { } /** EFI SNP chainloading-device-only driver */ -struct efi_driver snponly_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { +struct efi_driver snponly_driver __efi_driver ( EFI_DRIVER_SNP ) = { .name = "SNPONLY", .supported = snponly_supported, + .exclude = snpnet_exclude, .start = snpnet_start, .stop = snpnet_stop, }; /** EFI NII chainloading-device-only driver */ -struct efi_driver niionly_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { +struct efi_driver niionly_driver __efi_driver ( EFI_DRIVER_NII ) = { .name = "NIIONLY", .supported = niionly_supported, + 
.exclude = nii_exclude, .start = nii_start, .stop = nii_stop, }; /** EFI MNP chainloading-device-only driver */ -struct efi_driver mnponly_driver __efi_driver ( EFI_DRIVER_NORMAL ) = { +struct efi_driver mnponly_driver __efi_driver ( EFI_DRIVER_MNP ) = { .name = "MNPONLY", .supported = mnponly_supported, .start = mnpnet_start, @@ -262,5 +260,6 @@ static void chained_init ( void ) { /** EFI chainloaded-device-only initialisation function */ struct init_fn chained_init_fn __init_fn ( INIT_LATE ) = { + .name = "chained", .initialise = chained_init, }; diff --git a/src/drivers/net/ena.c b/src/drivers/net/ena.c index 7ce5b9eb9..56984d142 100644 --- a/src/drivers/net/ena.c +++ b/src/drivers/net/ena.c @@ -371,8 +371,11 @@ static int ena_set_aenq_config ( struct ena_nic *ena, uint32_t enabled ) { feature->aenq.enabled = cpu_to_le32 ( enabled ); /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not set AENQ configuration: %s\n", + ena, strerror ( rc ) ); return rc; + } return 0; } @@ -447,6 +450,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq, union ena_aq_req *req; union ena_acq_rsp *rsp; unsigned int i; + size_t llqe; int rc; /* Allocate submission queue entries */ @@ -461,26 +465,39 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq, req = ena_admin_req ( ena ); req->header.opcode = ENA_CREATE_SQ; req->create_sq.direction = sq->direction; - req->create_sq.policy = cpu_to_le16 ( ENA_SQ_HOST_MEMORY | - ENA_SQ_CONTIGUOUS ); + req->create_sq.policy = cpu_to_le16 ( sq->policy ); req->create_sq.cq_id = cpu_to_le16 ( cq->id ); req->create_sq.count = cpu_to_le16 ( sq->count ); - req->create_sq.address = cpu_to_le64 ( virt_to_bus ( sq->sqe.raw ) ); + if ( ! 
( sq->policy & ENA_SQ_DEVICE_MEMORY ) ) { + req->create_sq.address = + cpu_to_le64 ( virt_to_bus ( sq->sqe.raw ) ); + } /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not create %s SQ: %s\n", + ena, ena_direction ( sq->direction ), strerror ( rc ) ); goto err_admin; + } /* Parse response */ sq->id = le16_to_cpu ( rsp->create_sq.id ); sq->doorbell = le32_to_cpu ( rsp->create_sq.doorbell ); + llqe = le32_to_cpu ( rsp->create_sq.llqe ); + if ( sq->policy & ENA_SQ_DEVICE_MEMORY ) { + assert ( ena->mem != NULL ); + assert ( sq->len >= sizeof ( *sq->sqe.llq ) ); + sq->llqe = ( ena->mem + llqe ); + } else { + sq->llqe = NULL; + } /* Reset producer counter and phase */ sq->prod = 0; sq->phase = ENA_SQE_PHASE; /* Calculate fill level */ - sq->fill = sq->max; + sq->fill = sq->count; if ( sq->fill > cq->actual ) sq->fill = cq->actual; @@ -488,10 +505,16 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq, for ( i = 0 ; i < sq->count ; i++ ) sq->ids[i] = i; - DBGC ( ena, "ENA %p %s SQ%d at [%08lx,%08lx) fill %d db +%04x CQ%d\n", - ena, ena_direction ( sq->direction ), sq->id, - virt_to_phys ( sq->sqe.raw ), - ( virt_to_phys ( sq->sqe.raw ) + sq->len ), + DBGC ( ena, "ENA %p %s SQ%d at ", + ena, ena_direction ( sq->direction ), sq->id ); + if ( sq->policy & ENA_SQ_DEVICE_MEMORY ) { + DBGC ( ena, "LLQ [+%08zx,+%08zx)", llqe, + ( llqe + ( sq->count * sizeof ( sq->sqe.llq[0] ) ) ) ); + } else { + DBGC ( ena, "[%08lx,%08lx)", virt_to_phys ( sq->sqe.raw ), + ( virt_to_phys ( sq->sqe.raw ) + sq->len ) ); + } + DBGC ( ena, " fill %d db +%04x CQ%d\n", sq->fill, sq->doorbell, cq->id ); return 0; @@ -520,8 +543,12 @@ static int ena_destroy_sq ( struct ena_nic *ena, struct ena_sq *sq ) { req->destroy_sq.direction = sq->direction; /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, 
"ENA %p could not destroy %s SQ%d: %s\n", + ena, ena_direction ( sq->direction ), sq->id, + strerror ( rc ) ); return rc; + } /* Free submission queue entries */ free_phys ( sq->sqe.raw, sq->len ); @@ -560,8 +587,11 @@ static int ena_create_cq ( struct ena_nic *ena, struct ena_cq *cq ) { req->create_cq.address = cpu_to_le64 ( virt_to_bus ( cq->cqe.raw ) ); /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not create CQ (broken firmware?): " + "%s\n", ena, strerror ( rc ) ); goto err_admin; + } /* Parse response */ cq->id = le16_to_cpu ( rsp->create_cq.id ); @@ -606,8 +636,11 @@ static int ena_destroy_cq ( struct ena_nic *ena, struct ena_cq *cq ) { req->destroy_cq.id = cpu_to_le16 ( cq->id ); /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not destroy CQ%d: %s\n", + ena, cq->id, strerror ( rc ) ); return rc; + } /* Free completion queue entries */ free_phys ( cq->cqe.raw, cq->len ); @@ -680,17 +713,22 @@ static int ena_get_device_attributes ( struct net_device *netdev ) { req->get_feature.id = ENA_DEVICE_ATTRIBUTES; /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not get device attributes: %s\n", + ena, strerror ( rc ) ); return rc; + } /* Parse response */ feature = &rsp->get_feature.feature; memcpy ( netdev->hw_addr, feature->device.mac, ETH_ALEN ); netdev->max_pkt_len = le32_to_cpu ( feature->device.mtu ); netdev->mtu = ( netdev->max_pkt_len - ETH_HLEN ); + ena->features = le32_to_cpu ( feature->device.features ); - DBGC ( ena, "ENA %p MAC %s MTU %zd\n", - ena, eth_ntoa ( netdev->hw_addr ), netdev->max_pkt_len ); + DBGC ( ena, "ENA %p MAC %s MTU %zd features %#08x\n", + ena, eth_ntoa ( netdev->hw_addr ), netdev->max_pkt_len, + ena->features ); return 0; 
} @@ -714,8 +752,106 @@ static int ena_set_host_attributes ( struct ena_nic *ena ) { feature->host.info = cpu_to_le64 ( virt_to_bus ( ena->info ) ); /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not set host attributes: %s\n", + ena, strerror ( rc ) ); return rc; + } + + return 0; +} + +/** + * Configure low latency queues + * + * @v ena ENA device + * @ret rc Return status code + */ +static int ena_llq_config ( struct ena_nic *ena ) { + union ena_aq_req *req; + union ena_acq_rsp *rsp; + union ena_feature *feature; + uint16_t header; + uint16_t size; + uint16_t desc; + uint16_t stride; + uint16_t mode; + int rc; + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_GET_FEATURE; + req->get_feature.id = ENA_LLQ_CONFIG; + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not get LLQ configuration: %s\n", + ena, strerror ( rc ) ); + return rc; + } + + /* Parse response */ + feature = &rsp->get_feature.feature; + header = le16_to_cpu ( feature->llq.header.supported ); + size = le16_to_cpu ( feature->llq.size.supported ); + desc = le16_to_cpu ( feature->llq.desc.supported ); + stride = le16_to_cpu ( feature->llq.stride.supported ); + mode = le16_to_cpu ( feature->llq.mode ); + DBGC ( ena, "ENA %p LLQ supports %02x:%02x:%02x:%02x:%02x with %dx%d " + "entries\n", ena, header, size, desc, stride, mode, + le32_to_cpu ( feature->llq.queues ), + le32_to_cpu ( feature->llq.count ) ); + + /* Check for a supported configuration */ + if ( ! feature->llq.queues ) { + DBGC ( ena, "ENA %p LLQ has no queues\n", ena ); + return -ENOTSUP; + } + if ( ! ( header & ENA_LLQ_HEADER_INLINE ) ) { + DBGC ( ena, "ENA %p LLQ does not support inline headers\n", + ena ); + return -ENOTSUP; + } + if ( ! 
( size & ENA_LLQ_SIZE_128 ) ) { + DBGC ( ena, "ENA %p LLQ does not support 128-byte entries\n", + ena ); + return -ENOTSUP; + } + if ( ! ( desc & ENA_LLQ_DESC_2 ) ) { + DBGC ( ena, "ENA %p LLQ does not support two-descriptor " + "entries\n", ena ); + return -ENOTSUP; + } + + /* Enable a minimal configuration */ + header = ENA_LLQ_HEADER_INLINE; + size = ENA_LLQ_SIZE_128; + desc = ENA_LLQ_DESC_2; + stride &= ( -stride ); /* Don't care: use first supported option */ + DBGC ( ena, "ENA %p LLQ enabling %02x:%02x:%02x:%02x:%02x\n", + ena, header, size, desc, stride, mode ); + + /* Construct request */ + req = ena_admin_req ( ena ); + req->header.opcode = ENA_SET_FEATURE; + req->set_feature.id = ENA_LLQ_CONFIG; + feature = &req->set_feature.feature; + feature->llq.header.enabled = cpu_to_le16 ( header ); + feature->llq.size.enabled = cpu_to_le16 ( size ); + feature->llq.desc.enabled = cpu_to_le16 ( desc ); + feature->llq.stride.enabled = cpu_to_le16 ( stride ); + feature->llq.mode = cpu_to_le16 ( mode ); + + /* Issue request */ + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not set LLQ configuration: %s\n", + ena, strerror ( rc ) ); + return rc; + } + + /* Use on-device memory for transmit queue */ + ena->tx.sq.policy |= ENA_SQ_DEVICE_MEMORY; + ena->tx.sq.inlined = sizeof ( ena->tx.sq.sqe.llq->inlined ); return 0; } @@ -744,8 +880,11 @@ static int ena_get_stats ( struct ena_nic *ena ) { req->get_stats.device = ENA_DEVICE_MINE; /* Issue request */ - if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) + if ( ( rc = ena_admin ( ena, req, &rsp ) ) != 0 ) { + DBGC ( ena, "ENA %p could not get statistics: %s\n", + ena, strerror ( rc ) ); return rc; + } /* Parse response */ stats = &rsp->get_stats; @@ -834,12 +973,32 @@ static void ena_refill_rx ( struct net_device *netdev ) { * @v ena ENA device */ static void ena_empty_rx ( struct ena_nic *ena ) { + struct io_buffer *iobuf; unsigned int i; for ( i = 0 ; i < ENA_RX_COUNT ; i++ ) { - if ( 
ena->rx_iobuf[i] ) - free_iob ( ena->rx_iobuf[i] ); + iobuf = ena->rx_iobuf[i]; ena->rx_iobuf[i] = NULL; + if ( iobuf ) + free_iob ( iobuf ); + } +} + +/** + * Cancel uncompleted transmit I/O buffers + * + * @v netdev Network device + */ +static void ena_cancel_tx ( struct net_device *netdev ) { + struct ena_nic *ena = netdev->priv; + struct io_buffer *iobuf; + unsigned int i; + + for ( i = 0 ; i < ENA_TX_COUNT ; i++ ) { + iobuf = ena->tx_iobuf[i]; + ena->tx_iobuf[i] = NULL; + if ( iobuf ) + netdev_tx_complete_err ( netdev, iobuf, -ECANCELED ); } } @@ -892,6 +1051,9 @@ static void ena_close ( struct net_device *netdev ) { /* Destroy transmit queue pair */ ena_destroy_qp ( ena, &ena->tx ); + + /* Cancel any uncompleted transmit buffers */ + ena_cancel_tx ( netdev ); } /** @@ -904,9 +1066,15 @@ static void ena_close ( struct net_device *netdev ) { static int ena_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { struct ena_nic *ena = netdev->priv; struct ena_tx_sqe *sqe; + struct ena_tx_llqe *llqe; + const uint64_t *src; + uint64_t *dest; physaddr_t address; unsigned int index; unsigned int id; + unsigned int i; + uint8_t flags; + size_t inlined; size_t len; /* Get next submission queue entry */ @@ -918,17 +1086,51 @@ static int ena_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { sqe = &ena->tx.sq.sqe.tx[index]; id = ena->tx_ids[index]; - /* Construct submission queue entry */ + /* Construct submission queue entry values */ address = virt_to_bus ( iobuf->data ); len = iob_len ( iobuf ); + inlined = ena->tx.sq.inlined; + if ( inlined > len ) + inlined = len; + len -= inlined; + address += inlined; + flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL | + ena->tx.sq.phase ); + + /* Prepare low-latency queue bounce buffer, if applicable */ + llqe = ena->tx.sq.sqe.llq; + if ( ena->tx.sq.llqe ) { + + /* Construct zero-information metadata queue entry */ + llqe->meta.meta = ENA_TX_SQE_META; + llqe->meta.flags = ( flags & ~( ENA_SQE_LAST | 
ENA_SQE_CPL ) ); + + /* Copy inlined data */ + memcpy ( llqe->inlined, iobuf->data, inlined ); + + /* Place submission queue entry within bounce buffer */ + sqe = &llqe->sqe; + flags &= ~ENA_SQE_FIRST; + } + + /* Construct submission queue entry */ sqe->len = cpu_to_le16 ( len ); sqe->id = cpu_to_le16 ( id ); sqe->address = cpu_to_le64 ( address ); + sqe->inlined = inlined; wmb(); - sqe->flags = ( ENA_SQE_FIRST | ENA_SQE_LAST | ENA_SQE_CPL | - ena->tx.sq.phase ); + sqe->flags = flags; wmb(); + /* Copy bounce buffer to on-device memory, if applicable */ + if ( ena->tx.sq.llqe ) { + src = ( ( const void * ) llqe ); + dest = ( ena->tx.sq.llqe + ( index * sizeof ( *llqe ) ) ); + for ( i = 0 ; i < ( sizeof ( *llqe ) / sizeof ( *src ) ); i++ ) + writeq ( *(src++), dest++ ); + wmb(); + } + /* Increment producer counter */ ena->tx.sq.prod++; if ( ( ena->tx.sq.prod % ENA_TX_COUNT ) == 0 ) @@ -1070,10 +1272,12 @@ static struct net_device_operations ena_operations = { */ /** - * Assign memory BAR + * Assign memory BARs * * @v ena ENA device * @v pci PCI device + * @v prefmembase On-device memory base address to fill in + * @v prefmemsize On-device memory size to fill in * @ret rc Return status code * * Some BIOSes in AWS EC2 are observed to fail to assign a base @@ -1081,15 +1285,27 @@ static struct net_device_operations ena_operations = { * its bridge, and the BIOS does assign a memory window to the bridge. * We therefore place the device at the start of the memory window. */ -static int ena_membase ( struct ena_nic *ena, struct pci_device *pci ) { +static int ena_membases ( struct ena_nic *ena, struct pci_device *pci, + unsigned long *prefmembase, + unsigned long *prefmemsize ) { struct pci_bridge *bridge; + /* Get on-device memory base address and size */ + *prefmembase = pci_bar_start ( pci, ENA_MEM_BAR ); + *prefmemsize = pci_bar_size ( pci, ENA_MEM_BAR ); + + /* Do nothing if addresses are already assigned */ + if ( pci->membase && ( *prefmembase || ( ! 
*prefmemsize ) ) ) + return 0; + /* Locate PCI bridge */ bridge = pcibridge_find ( pci ); if ( ! bridge ) { DBGC ( ena, "ENA %p found no PCI bridge\n", ena ); return -ENOTCONN; } + DBGC ( ena, "ENA %p at " PCI_FMT " claiming bridge " PCI_FMT "\n", + ena, PCI_ARGS ( pci ), PCI_ARGS ( bridge->pci ) ); /* Sanity check */ if ( PCI_SLOT ( pci->busdevfn ) || PCI_FUNC ( pci->busdevfn ) ) { @@ -1098,12 +1314,21 @@ static int ena_membase ( struct ena_nic *ena, struct pci_device *pci ) { return -ENOTSUP; } - /* Place device at start of memory window */ - pci_write_config_dword ( pci, PCI_BASE_ADDRESS_0, bridge->membase ); - pci->membase = bridge->membase; - DBGC ( ena, "ENA %p at " PCI_FMT " claiming bridge " PCI_FMT " mem " - "%08x\n", ena, PCI_ARGS ( pci ), PCI_ARGS ( bridge->pci ), - bridge->membase ); + /* Place register BAR at start of memory window, if applicable */ + if ( ! pci->membase ) { + pci_bar_set ( pci, ENA_REGS_BAR, bridge->membase ); + pci->membase = bridge->membase; + DBGC ( ena, "ENA %p at " PCI_FMT " claiming mem %08lx\n", + ena, PCI_ARGS ( pci ), pci->membase ); + } + + /* Place memory BAR at start of prefetchable window, if applicable */ + if ( *prefmemsize && ( ! 
*prefmembase ) ) { + pci_bar_set ( pci, ENA_MEM_BAR, bridge->prefmembase ); + *prefmembase = bridge->prefmembase; + DBGC ( ena, "ENA %p at " PCI_FMT " claiming prefmem %08lx\n", + ena, PCI_ARGS ( pci ), *prefmembase ); + } return 0; } @@ -1118,6 +1343,8 @@ static int ena_probe ( struct pci_device *pci ) { struct net_device *netdev; struct ena_nic *ena; struct ena_host_info *info; + unsigned long prefmembase; + unsigned long prefmemsize; int rc; /* Allocate and initialise net device */ @@ -1134,25 +1361,38 @@ static int ena_probe ( struct pci_device *pci ) { ena->acq.phase = ENA_ACQ_PHASE; ena_cq_init ( &ena->tx.cq, ENA_TX_COUNT, sizeof ( ena->tx.cq.cqe.tx[0] ) ); - ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT, ENA_TX_COUNT, + ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT, sizeof ( ena->tx.sq.sqe.tx[0] ), ena->tx_ids ); ena_cq_init ( &ena->rx.cq, ENA_RX_COUNT, sizeof ( ena->rx.cq.cqe.rx[0] ) ); - ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT, ENA_RX_FILL, + ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT, sizeof ( ena->rx.sq.sqe.rx[0] ), ena->rx_ids ); /* Fix up PCI device */ adjust_pci_device ( pci ); /* Fix up PCI BAR if left unassigned by BIOS */ - if ( ( ! pci->membase ) && ( ( rc = ena_membase ( ena, pci ) ) != 0 ) ) - goto err_membase; + if ( ( rc = ena_membases ( ena, pci, &prefmembase, + &prefmemsize ) ) != 0 ) { + goto err_membases; + } /* Map registers */ - ena->regs = pci_ioremap ( pci, pci->membase, ENA_BAR_SIZE ); + ena->regs = pci_ioremap ( pci, pci->membase, ENA_REGS_SIZE ); if ( ! ena->regs ) { rc = -ENODEV; - goto err_ioremap; + goto err_regs; + } + + /* Map device memory */ + if ( prefmemsize ) { + ena->mem = pci_ioremap ( pci, prefmembase, prefmemsize ); + if ( ! 
ena->mem ) { + rc = -ENODEV; + goto err_mem; + } + DBGC ( ena, "ENA %p has %ldkB of on-device memory\n", + ena, ( prefmemsize >> 10 ) ); } /* Allocate and initialise host info */ @@ -1163,7 +1403,7 @@ static int ena_probe ( struct pci_device *pci ) { } ena->info = info; memset ( info, 0, PAGE_SIZE ); - info->type = cpu_to_le32 ( ENA_HOST_INFO_TYPE_LINUX ); + info->type = cpu_to_le32 ( ENA_HOST_INFO_TYPE_IPXE ); snprintf ( info->dist_str, sizeof ( info->dist_str ), "%s", ( product_name[0] ? product_name : product_short_name ) ); snprintf ( info->kernel_str, sizeof ( info->kernel_str ), "%s", @@ -1194,6 +1434,12 @@ static int ena_probe ( struct pci_device *pci ) { if ( ( rc = ena_get_device_attributes ( netdev ) ) != 0 ) goto err_get_device_attributes; + /* Attempt to configure low latency queues, if applicable. + * Ignore any errors and continue without using LLQs. + */ + if ( ena->mem && ( ena->features & ENA_FEATURE_LLQ ) ) + ena_llq_config ( ena ); + /* Register network device */ if ( ( rc = register_netdev ( netdev ) ) != 0 ) goto err_register_netdev; @@ -1217,9 +1463,12 @@ static int ena_probe ( struct pci_device *pci ) { err_reset: free_phys ( ena->info, PAGE_SIZE ); err_info: + if ( ena->mem ) + iounmap ( ena->mem ); + err_mem: iounmap ( ena->regs ); - err_ioremap: - err_membase: + err_regs: + err_membases: netdev_nullify ( netdev ); netdev_put ( netdev ); err_alloc: @@ -1250,8 +1499,12 @@ static void ena_remove ( struct pci_device *pci ) { /* Free host info */ free_phys ( ena->info, PAGE_SIZE ); - /* Free network device */ + /* Unmap registers and on-device memory */ + if ( ena->mem ) + iounmap ( ena->mem ); iounmap ( ena->regs ); + + /* Free network device */ netdev_nullify ( netdev ); netdev_put ( netdev ); } diff --git a/src/drivers/net/ena.h b/src/drivers/net/ena.h index 0f280c700..240e64480 100644 --- a/src/drivers/net/ena.h +++ b/src/drivers/net/ena.h @@ -12,8 +12,14 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include <stdint.h> #include 
<ipxe/if_ether.h> -/** BAR size */ -#define ENA_BAR_SIZE 16384 +/** Register BAR */ +#define ENA_REGS_BAR PCI_BASE_ADDRESS_0 + +/** Register BAR size */ +#define ENA_REGS_SIZE 16384 + +/** On-device memory BAR */ +#define ENA_MEM_BAR PCI_BASE_ADDRESS_2 /** Queue alignment */ #define ENA_ALIGN 4096 @@ -28,13 +34,10 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #define ENA_AENQ_COUNT 2 /** Number of transmit queue entries */ -#define ENA_TX_COUNT 16 +#define ENA_TX_COUNT 32 /** Number of receive queue entries */ -#define ENA_RX_COUNT 128 - -/** Receive queue maximum fill level */ -#define ENA_RX_FILL 16 +#define ENA_RX_COUNT 32 /** Base address low register offset */ #define ENA_BASE_LO 0x0 @@ -139,6 +142,62 @@ struct ena_device_attributes { uint32_t mtu; } __attribute__ (( packed )); +/** Device supports low latency queues */ +#define ENA_FEATURE_LLQ 0x00000010 + +/** Low latency queue config */ +#define ENA_LLQ_CONFIG 4 + +/** A low latency queue option */ +struct ena_llq_option { + /** Bitmask of supported option values */ + uint16_t supported; + /** Single-entry bitmask of the enabled option value */ + uint16_t enabled; +} __attribute__ (( packed )); + +/** Low latency queue config */ +struct ena_llq_config { + /** Maximum number of low latency queues */ + uint32_t queues; + /** Maximum queue depth */ + uint32_t count; + /** Header locations */ + struct ena_llq_option header; + /** Entry sizes */ + struct ena_llq_option size; + /** Descriptor counts */ + struct ena_llq_option desc; + /** Descriptor strides */ + struct ena_llq_option stride; + /** Reserved */ + uint8_t reserved_a[4]; + /** Acceleration mode */ + uint16_t mode; + /** Maximum burst size */ + uint16_t burst; + /** Reserved */ + uint8_t reserved_b[4]; +} __attribute__ (( packed )); + +/** Low latency queue header locations */ +enum ena_llq_header { + /** Headers are placed inline immediately after descriptors */ + ENA_LLQ_HEADER_INLINE = 0x0001, +}; + +/** Low latency queue entry sizes */ +enum 
ena_llq_size { + /** Entries are 128 bytes */ + ENA_LLQ_SIZE_128 = 0x0001, +}; + +/** Low latency queue descriptor count */ +enum ena_llq_desc { + /** Two descriptors before inline headers */ + ENA_LLQ_DESC_2 = 0x0002, +}; + /** Async event notification queue config */ #define ENA_AENQ_CONFIG 26 @@ -191,14 +250,17 @@ struct ena_host_info { uint32_t features; } __attribute__ (( packed )); -/** Linux operating system type +/** Operating system type * - * There is a defined "iPXE" operating system type (with value 5). - * However, some very broken versions of the ENA firmware will refuse - * to allow a completion queue to be created if the "iPXE" type is - * used. + * Some very broken older versions of the ENA firmware will refuse to + * allow a completion queue to be created if "iPXE" (type 5) is used, + * and require us to pretend that we are "Linux" (type 1) instead. + * + * The ENA team at AWS assures us that the entire AWS fleet has been + * upgraded to fix this bug, and that we are now safe to use the + * correct operating system type value. 
*/ -#define ENA_HOST_INFO_TYPE_LINUX 1 +#define ENA_HOST_INFO_TYPE_IPXE 5 /** Driver version * @@ -228,6 +290,8 @@ struct ena_host_info { union ena_feature { /** Device attributes */ struct ena_device_attributes device; + /** Low latency queue configuration */ + struct ena_llq_config llq; /** Async event notification queue config */ struct ena_aenq_config aenq; /** Host attributes */ @@ -271,6 +335,8 @@ struct ena_create_sq_req { enum ena_sq_policy { /** Use host memory */ ENA_SQ_HOST_MEMORY = 0x0001, + /** Use on-device memory (must be used in addition to host memory) */ + ENA_SQ_DEVICE_MEMORY = 0x0002, /** Memory is contiguous */ ENA_SQ_CONTIGUOUS = 0x0100, }; @@ -282,13 +348,13 @@ struct ena_create_sq_rsp { /** Submission queue identifier */ uint16_t id; /** Reserved */ - uint8_t reserved[2]; + uint8_t reserved_a[2]; /** Doorbell register offset */ uint32_t doorbell; /** LLQ descriptor ring offset */ - uint32_t llq_desc; - /** LLQ header offset */ - uint32_t llq_data; + uint32_t llqe; + /** Reserved */ + uint8_t reserved_b[4]; } __attribute__ (( packed )); /** Destroy submission queue */ @@ -554,18 +620,31 @@ struct ena_aenq { struct ena_tx_sqe { /** Length */ uint16_t len; - /** Reserved */ - uint8_t reserved_a; + /** Metadata flags */ + uint8_t meta; /** Flags */ uint8_t flags; /** Reserved */ uint8_t reserved_b[3]; /** Request identifier */ uint8_t id; - /** Address */ - uint64_t address; + /** Address and inlined length */ + union { + /** Address */ + uint64_t address; + /** Inlined length */ + struct { + /** Reserved */ + uint8_t reserved[7]; + /** Inlined length */ + uint8_t inlined; + } __attribute__ (( packed )); + } __attribute__ (( packed )); } __attribute__ (( packed )); +/** This is a metadata entry */ +#define ENA_TX_SQE_META 0x80 + /** Receive submission queue entry */ struct ena_rx_sqe { /** Length */ @@ -628,6 +707,16 @@ struct ena_rx_cqe { /** Completion queue ownership phase flag */ #define ENA_CQE_PHASE 0x01 +/** Low latency transmit queue 
bounce buffer */ +struct ena_tx_llqe { + /** Pointless metadata descriptor */ + struct ena_tx_sqe meta; + /** Transmit descriptor */ + struct ena_tx_sqe sqe; + /** Inlined header data */ + uint8_t inlined[96]; +} __attribute__ (( packed )); + /** Submission queue */ struct ena_sq { /** Entries */ @@ -636,11 +725,15 @@ struct ena_sq { struct ena_tx_sqe *tx; /** Receive submission queue entries */ struct ena_rx_sqe *rx; + /** Low latency queue bounce buffer */ + struct ena_tx_llqe *llq; /** Raw data */ void *raw; } sqe; /** Buffer IDs */ uint8_t *ids; + /** Low latency queue base */ + void *llqe; /** Doorbell register offset */ unsigned int doorbell; /** Total length of entries */ @@ -649,16 +742,18 @@ struct ena_sq { unsigned int prod; /** Phase */ unsigned int phase; + /** Queue policy */ + uint16_t policy; /** Submission queue identifier */ uint16_t id; /** Direction */ uint8_t direction; /** Number of entries */ uint8_t count; - /** Maximum fill level */ - uint8_t max; /** Fill level (limited to completion queue size) */ uint8_t fill; + /** Maximum inline header length */ + uint8_t inlined; }; /** @@ -667,18 +762,17 @@ struct ena_sq { * @v sq Submission queue * @v direction Direction * @v count Number of entries - * @v max Maximum fill level * @v size Size of each entry * @v ids Buffer IDs */ static inline __attribute__ (( always_inline )) void ena_sq_init ( struct ena_sq *sq, unsigned int direction, unsigned int count, - unsigned int max, size_t size, uint8_t *ids ) { + size_t size, uint8_t *ids ) { sq->len = ( count * size ); + sq->policy = ( ENA_SQ_HOST_MEMORY | ENA_SQ_CONTIGUOUS ); sq->direction = direction; sq->count = count; - sq->max = max; sq->ids = ids; } @@ -740,6 +834,10 @@ struct ena_qp { struct ena_nic { /** Registers */ void *regs; + /** On-device memory */ + void *mem; + /** Device features */ + uint32_t features; /** Host info */ struct ena_host_info *info; /** Admin queue */ diff --git a/src/drivers/net/epic100.c b/src/drivers/net/epic100.c index 
8e31a3bfa..03b394e6b 100644 --- a/src/drivers/net/epic100.c +++ b/src/drivers/net/epic100.c @@ -51,7 +51,7 @@ struct epic_tx_desc { static void epic100_open(void); static void epic100_init_ring(void); -static void epic100_disable(struct nic *nic); +static void epic100_disable(struct nic *nic, void *hwdev); static int epic100_poll(struct nic *nic, int retrieve); static void epic100_transmit(struct nic *nic, const char *destaddr, unsigned int type, unsigned int len, const char *data); @@ -86,14 +86,15 @@ static unsigned int cur_rx, cur_tx; /* The next free ring entry */ static unsigned short eeprom[64]; #endif static signed char phys[4]; /* MII device addresses. */ -struct { +struct epic100_bss { struct epic_rx_desc rx_ring[RX_RING_SIZE] __attribute__ ((aligned(4))); struct epic_tx_desc tx_ring[TX_RING_SIZE] __attribute__ ((aligned(4))); unsigned char rx_packet[PKT_BUF_SZ * RX_RING_SIZE]; unsigned char tx_packet[PKT_BUF_SZ * TX_RING_SIZE]; -} epic100_bufs __shared; +}; +#define epic100_bufs NIC_FAKE_BSS ( struct epic100_bss ) #define rx_ring epic100_bufs.rx_ring #define tx_ring epic100_bufs.tx_ring #define rx_packet epic100_bufs.rx_packet @@ -419,7 +420,7 @@ epic100_poll(struct nic *nic, int retrieve) } -static void epic100_disable ( struct nic *nic __unused ) { +static void epic100_disable ( struct nic *nic __unused, void *hwdev __unused ) { /* Soft reset the chip. 
*/ outl(GC_SOFT_RESET, genctl); } @@ -525,7 +526,7 @@ PCI_ROM(0x10b8, 0x0006, "smc-83c175", "SMC EPIC/C 83c175", 0), PCI_DRIVER ( epic100_driver, epic100_nics, PCI_NO_CLASS ); DRIVER ( "EPIC100", nic_driver, pci_driver, epic100_driver, - epic100_probe, epic100_disable ); + epic100_probe, epic100_disable, epic100_bufs ); /* * Local variables: diff --git a/src/drivers/net/etherfabric.c b/src/drivers/net/etherfabric.c index b40596bea..a58b71568 100644 --- a/src/drivers/net/etherfabric.c +++ b/src/drivers/net/etherfabric.c @@ -21,6 +21,7 @@ FILE_LICENCE ( GPL_ANY ); #include <stdint.h> #include <stdlib.h> #include <stdio.h> +#include <string.h> #include <unistd.h> #include <errno.h> #include <assert.h> @@ -2225,13 +2226,16 @@ falcon_xaui_link_ok ( struct efab_nic *efab ) sync = ( sync == FCN_XX_SYNC_STAT_DECODE_SYNCED ); link_ok = align_done && sync; - } - /* Clear link status ready for next read */ - EFAB_SET_DWORD_FIELD ( reg, FCN_XX_COMMA_DET, FCN_XX_COMMA_DET_RESET ); - EFAB_SET_DWORD_FIELD ( reg, FCN_XX_CHARERR, FCN_XX_CHARERR_RESET); - EFAB_SET_DWORD_FIELD ( reg, FCN_XX_DISPERR, FCN_XX_DISPERR_RESET); - falcon_xmac_writel ( efab, ®, FCN_XX_CORE_STAT_REG_MAC ); + /* Clear link status ready for next read */ + EFAB_SET_DWORD_FIELD ( reg, FCN_XX_COMMA_DET, + FCN_XX_COMMA_DET_RESET ); + EFAB_SET_DWORD_FIELD ( reg, FCN_XX_CHARERR, + FCN_XX_CHARERR_RESET ); + EFAB_SET_DWORD_FIELD ( reg, FCN_XX_DISPERR, + FCN_XX_DISPERR_RESET ); + falcon_xmac_writel ( efab, ®, FCN_XX_CORE_STAT_REG_MAC ); + } has_phyxs = ( efab->phy_op->mmds & ( 1 << MDIO_MMD_PHYXS ) ); if ( link_ok && has_phyxs ) { diff --git a/src/drivers/net/exanic.c b/src/drivers/net/exanic.c index aaa6a28a1..a36f7a774 100644 --- a/src/drivers/net/exanic.c +++ b/src/drivers/net/exanic.c @@ -395,7 +395,7 @@ static int exanic_open ( struct net_device *netdev ) { } /* Reset receive region contents */ - memset_user ( port->rx, 0, 0xff, EXANIC_RX_LEN ); + memset ( port->rx, 0xff, EXANIC_RX_LEN ); /* Reset transmit feedback 
region */ *(port->txf) = 0; @@ -406,7 +406,7 @@ static int exanic_open ( struct net_device *netdev ) { port->rx_cons = 0; /* Map receive region */ - exanic_write_base ( phys_to_bus ( user_to_phys ( port->rx, 0 ) ), + exanic_write_base ( phys_to_bus ( virt_to_phys ( port->rx ) ), ( port->regs + EXANIC_PORT_RX_BASE ) ); /* Enable promiscuous mode */ @@ -540,26 +540,23 @@ static void exanic_poll_tx ( struct net_device *netdev ) { static void exanic_poll_rx ( struct net_device *netdev ) { struct exanic_port *port = netdev->priv; struct exanic_rx_chunk *rx; - struct exanic_rx_descriptor desc; + unsigned int index; uint8_t current; uint8_t previous; - size_t offset; size_t len; for ( ; ; port->rx_cons++ ) { /* Fetch descriptor */ - offset = ( ( port->rx_cons * sizeof ( *rx ) ) % EXANIC_RX_LEN ); - copy_from_user ( &desc, port->rx, - ( offset + offsetof ( typeof ( *rx ), desc ) ), - sizeof ( desc ) ); + index = ( port->rx_cons % EXANIC_RX_COUNT ); + rx = &port->rx[index]; /* Calculate generation */ - current = ( port->rx_cons / ( EXANIC_RX_LEN / sizeof ( *rx ) )); + current = ( port->rx_cons / EXANIC_RX_COUNT ); previous = ( current - 1 ); /* Do nothing if no chunk is ready */ - if ( desc.generation == previous ) + if ( rx->desc.generation == previous ) break; /* Allocate I/O buffer if needed */ @@ -573,14 +570,12 @@ static void exanic_poll_rx ( struct net_device *netdev ) { } /* Calculate chunk length */ - len = ( desc.len ? desc.len : sizeof ( rx->data ) ); + len = ( rx->desc.len ? 
rx->desc.len : sizeof ( rx->data ) ); /* Append data to I/O buffer */ if ( len <= iob_tailroom ( port->rx_iobuf ) ) { - copy_from_user ( iob_put ( port->rx_iobuf, len ), - port->rx, - ( offset + offsetof ( typeof ( *rx ), - data ) ), len ); + memcpy ( iob_put ( port->rx_iobuf, len ), + rx->data, len ); } else { DBGC ( port, "EXANIC %s RX too large\n", netdev->name ); @@ -589,23 +584,19 @@ static void exanic_poll_rx ( struct net_device *netdev ) { /* Check for overrun */ rmb(); - copy_from_user ( &desc.generation, port->rx, - ( offset + offsetof ( typeof ( *rx ), - desc.generation ) ), - sizeof ( desc.generation ) ); - if ( desc.generation != current ) { + if ( rx->desc.generation != current ) { DBGC ( port, "EXANIC %s RX overrun\n", netdev->name ); port->rx_rc = -ENOBUFS; continue; } /* Wait for end of packet */ - if ( ! desc.len ) + if ( ! rx->desc.len ) continue; /* Check for receive errors */ - if ( desc.status & EXANIC_STATUS_ERROR_MASK ) { - port->rx_rc = -EIO_STATUS ( desc.status ); + if ( rx->desc.status & EXANIC_STATUS_ERROR_MASK ) { + port->rx_rc = -EIO_STATUS ( rx->desc.status ); DBGC ( port, "EXANIC %s RX %04x error: %s\n", netdev->name, port->rx_cons, strerror ( port->rx_rc ) ); @@ -729,8 +720,8 @@ static int exanic_probe_port ( struct exanic *exanic, struct device *dev, DBGC ( port, "EXANIC %s port %d TX [%#05zx,%#05zx) TXF %#02x RX " "[%#lx,%#lx)\n", netdev->name, index, port->tx_offset, ( port->tx_offset + tx_len ), port->txf_slot, - user_to_phys ( port->rx, 0 ), - user_to_phys ( port->rx, EXANIC_RX_LEN ) ); + virt_to_phys ( port->rx ), + ( virt_to_phys ( port->rx ) + EXANIC_RX_LEN ) ); /* Set initial link state */ exanic_check_link ( netdev ); diff --git a/src/drivers/net/exanic.h b/src/drivers/net/exanic.h index 041b9e21a..7c59612e0 100644 --- a/src/drivers/net/exanic.h +++ b/src/drivers/net/exanic.h @@ -12,7 +12,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include <stdint.h> #include <ipxe/pci.h> #include <ipxe/ethernet.h> -#include 
<ipxe/uaccess.h> #include <ipxe/retry.h> #include <ipxe/i2c.h> #include <ipxe/bitbash.h> @@ -158,6 +157,9 @@ struct exanic_rx_chunk { /** Receive status error mask */ #define EXANIC_STATUS_ERROR_MASK 0x0f +/** Number of receive chunks */ +#define EXANIC_RX_COUNT ( EXANIC_RX_LEN / sizeof ( struct exanic_rx_chunk ) ) + /** An ExaNIC I2C bus configuration */ struct exanic_i2c_config { /** GPIO bit for pulling SCL low */ @@ -194,7 +196,7 @@ struct exanic_port { uint16_t *txf; /** Receive region */ - userptr_t rx; + struct exanic_rx_chunk *rx; /** Receive consumer counter */ unsigned int rx_cons; /** Receive I/O buffer (if any) */ diff --git a/src/drivers/net/gve.c b/src/drivers/net/gve.c new file mode 100644 index 000000000..77eb4b674 --- /dev/null +++ b/src/drivers/net/gve.c @@ -0,0 +1,2016 @@ +/* + * Copyright (C) 2024 Michael Brown <mbrown@fensystems.co.uk>. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + * + * You can also choose to distribute this program under the terms of + * the Unmodified Binary Distribution Licence (as given in the file + * COPYING.UBDL), provided that you have satisfied its requirements. 
+ */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <stdint.h> +#include <string.h> +#include <stdio.h> +#include <unistd.h> +#include <errno.h> +#include <assert.h> +#include <byteswap.h> +#include <ipxe/netdevice.h> +#include <ipxe/ethernet.h> +#include <ipxe/if_ether.h> +#include <ipxe/iobuf.h> +#include <ipxe/dma.h> +#include <ipxe/pci.h> +#include <ipxe/fault.h> +#include "gve.h" + +/** @file + * + * Google Virtual Ethernet network driver + * + */ + +/* Disambiguate the various error causes */ +#define EINFO_EIO_ADMIN_UNSET \ + __einfo_uniqify ( EINFO_EIO, 0x00, "Uncompleted" ) +#define EIO_ADMIN_UNSET \ + __einfo_error ( EINFO_EIO_ADMIN_UNSET ) +#define EINFO_EIO_ADMIN_ABORTED \ + __einfo_uniqify ( EINFO_EIO, 0x10, "Aborted" ) +#define EIO_ADMIN_ABORTED \ + __einfo_error ( EINFO_EIO_ADMIN_ABORTED ) +#define EINFO_EIO_ADMIN_EXISTS \ + __einfo_uniqify ( EINFO_EIO, 0x11, "Already exists" ) +#define EIO_ADMIN_EXISTS \ + __einfo_error ( EINFO_EIO_ADMIN_EXISTS ) +#define EINFO_EIO_ADMIN_CANCELLED \ + __einfo_uniqify ( EINFO_EIO, 0x12, "Cancelled" ) +#define EIO_ADMIN_CANCELLED \ + __einfo_error ( EINFO_EIO_ADMIN_CANCELLED ) +#define EINFO_EIO_ADMIN_DATALOSS \ + __einfo_uniqify ( EINFO_EIO, 0x13, "Data loss" ) +#define EIO_ADMIN_DATALOSS \ + __einfo_error ( EINFO_EIO_ADMIN_DATALOSS ) +#define EINFO_EIO_ADMIN_DEADLINE \ + __einfo_uniqify ( EINFO_EIO, 0x14, "Deadline exceeded" ) +#define EIO_ADMIN_DEADLINE \ + __einfo_error ( EINFO_EIO_ADMIN_DEADLINE ) +#define EINFO_EIO_ADMIN_PRECONDITION \ + __einfo_uniqify ( EINFO_EIO, 0x15, "Failed precondition" ) +#define EIO_ADMIN_PRECONDITION \ + __einfo_error ( EINFO_EIO_ADMIN_PRECONDITION ) +#define EINFO_EIO_ADMIN_INTERNAL \ + __einfo_uniqify ( EINFO_EIO, 0x16, "Internal error" ) +#define EIO_ADMIN_INTERNAL \ + __einfo_error ( EINFO_EIO_ADMIN_INTERNAL ) +#define EINFO_EIO_ADMIN_INVAL \ + __einfo_uniqify ( EINFO_EIO, 0x17, "Invalid argument" ) +#define EIO_ADMIN_INVAL \ + __einfo_error ( EINFO_EIO_ADMIN_INVAL ) 
+#define EINFO_EIO_ADMIN_NOT_FOUND \ + __einfo_uniqify ( EINFO_EIO, 0x18, "Not found" ) +#define EIO_ADMIN_NOT_FOUND \ + __einfo_error ( EINFO_EIO_ADMIN_NOT_FOUND ) +#define EINFO_EIO_ADMIN_RANGE \ + __einfo_uniqify ( EINFO_EIO, 0x19, "Out of range" ) +#define EIO_ADMIN_RANGE \ + __einfo_error ( EINFO_EIO_ADMIN_RANGE ) +#define EINFO_EIO_ADMIN_PERM \ + __einfo_uniqify ( EINFO_EIO, 0x1a, "Permission denied" ) +#define EIO_ADMIN_PERM \ + __einfo_error ( EINFO_EIO_ADMIN_PERM ) +#define EINFO_EIO_ADMIN_UNAUTH \ + __einfo_uniqify ( EINFO_EIO, 0x1b, "Unauthenticated" ) +#define EIO_ADMIN_UNAUTH \ + __einfo_error ( EINFO_EIO_ADMIN_UNAUTH ) +#define EINFO_EIO_ADMIN_RESOURCE \ + __einfo_uniqify ( EINFO_EIO, 0x1c, "Resource exhausted" ) +#define EIO_ADMIN_RESOURCE \ + __einfo_error ( EINFO_EIO_ADMIN_RESOURCE ) +#define EINFO_EIO_ADMIN_UNAVAIL \ + __einfo_uniqify ( EINFO_EIO, 0x1d, "Unavailable" ) +#define EIO_ADMIN_UNAVAIL \ + __einfo_error ( EINFO_EIO_ADMIN_UNAVAIL ) +#define EINFO_EIO_ADMIN_NOTSUP \ + __einfo_uniqify ( EINFO_EIO, 0x1e, "Unimplemented" ) +#define EIO_ADMIN_NOTSUP \ + __einfo_error ( EINFO_EIO_ADMIN_NOTSUP ) +#define EINFO_EIO_ADMIN_UNKNOWN \ + __einfo_uniqify ( EINFO_EIO, 0x1f, "Unknown error" ) +#define EIO_ADMIN_UNKNOWN \ + __einfo_error ( EINFO_EIO_ADMIN_UNKNOWN ) +#define EIO_ADMIN( status ) \ + EUNIQ ( EINFO_EIO, ( (status) & 0x1f ), \ + EIO_ADMIN_UNSET, EIO_ADMIN_ABORTED, EIO_ADMIN_EXISTS, \ + EIO_ADMIN_CANCELLED, EIO_ADMIN_DATALOSS, \ + EIO_ADMIN_DEADLINE, EIO_ADMIN_PRECONDITION, \ + EIO_ADMIN_INTERNAL, EIO_ADMIN_INVAL, \ + EIO_ADMIN_NOT_FOUND, EIO_ADMIN_RANGE, EIO_ADMIN_PERM, \ + EIO_ADMIN_UNAUTH, EIO_ADMIN_RESOURCE, \ + EIO_ADMIN_UNAVAIL, EIO_ADMIN_NOTSUP, EIO_ADMIN_UNKNOWN ) + +/****************************************************************************** + * + * Buffer layout + * + ****************************************************************************** + */ + +/** + * Get buffer offset (within queue page list allocation) + * + * @v queue 
Descriptor queue + * @v tag Buffer tag + * @ret addr Buffer address within queue page list address space + */ +static inline __attribute__ (( always_inline)) size_t +gve_offset ( struct gve_queue *queue, unsigned int tag ) { + + /* We allocate sufficient pages for the maximum fill level of + * buffers, and reuse the buffers in strict rotation as they + * are released by the hardware. + */ + assert ( tag < queue->fill ); + return ( tag * GVE_BUF_SIZE ); +} + +/** + * Get buffer address (within queue page list address space) + * + * @v queue Descriptor queue + * @v tag Buffer tag + * @ret addr Buffer address within queue page list address space + */ +static inline __attribute__ (( always_inline)) physaddr_t +gve_address ( struct gve_queue *queue, unsigned int tag ) { + + /* Pages are allocated as a single contiguous block */ + return ( queue->qpl.base + gve_offset ( queue, tag ) ); +} + +/** + * Get buffer address + * + * @v queue Descriptor queue + * @v tag Buffer tag + * @ret addr Buffer address + */ +static inline __attribute__ (( always_inline )) void * +gve_buffer ( struct gve_queue *queue, unsigned int tag ) { + + /* Pages are allocated as a single contiguous block */ + return ( queue->qpl.data + gve_offset ( queue, tag ) ); +} + +/****************************************************************************** + * + * Device reset + * + ****************************************************************************** + */ + +/** + * Reset hardware + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_reset ( struct gve_nic *gve ) { + uint32_t pfn; + unsigned int i; + + /* Skip reset if admin queue page frame number is already + * clear. Triggering a reset on an already-reset device seems + * to cause a delayed reset to be scheduled. This can cause + * the device to end up in a reset loop, where each attempt to + * recover from reset triggers another reset a few seconds + * later. + */ + pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN ); + if ( ! 
pfn ) { + DBGC ( gve, "GVE %p skipping reset\n", gve ); + return 0; + } + + /* Clear admin queue page frame number */ + writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN ); + wmb(); + + /* Wait for device to reset */ + for ( i = 0 ; i < GVE_RESET_MAX_WAIT_MS ; i++ ) { + + /* Delay */ + mdelay ( 1 ); + + /* Check for reset completion */ + pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN ); + if ( ! pfn ) + return 0; + } + + DBGC ( gve, "GVE %p reset timed out (PFN %#08x devstat %#08x)\n", + gve, bswap_32 ( pfn ), + bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) ); + return -ETIMEDOUT; +} + +/****************************************************************************** + * + * Admin queue + * + ****************************************************************************** + */ + +/** + * Get operating mode name (for debugging) + * + * @v mode Operating mode + * @ret name Mode name + */ +static inline const char * gve_mode_name ( unsigned int mode ) { + static char buf[ 8 /* "XXX-XXX" + NUL */ ]; + + snprintf ( buf, sizeof ( buf ), "%s-%s", + ( ( mode & GVE_MODE_DQO ) ? "DQO" : "GQI" ), + ( ( mode & GVE_MODE_QPL ) ? "QPL" : "RDA" ) ); + return buf; +} + +/** + * Allocate admin queue + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_admin_alloc ( struct gve_nic *gve ) { + struct dma_device *dma = gve->dma; + struct gve_admin *admin = &gve->admin; + struct gve_scratch *scratch = &gve->scratch; + size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) ); + size_t scratch_len = sizeof ( *scratch->buf ); + int rc; + + /* Allocate admin queue */ + admin->cmd = dma_alloc ( dma, &admin->map, admin_len, GVE_ALIGN ); + if ( ! admin->cmd ) { + rc = -ENOMEM; + goto err_admin; + } + + /* Allocate scratch buffer */ + scratch->buf = dma_alloc ( dma, &scratch->map, scratch_len, GVE_ALIGN ); + if ( ! 
scratch->buf ) { + rc = -ENOMEM; + goto err_scratch; + } + + DBGC ( gve, "GVE %p AQ at [%08lx,%08lx) scratch [%08lx,%08lx)\n", + gve, virt_to_phys ( admin->cmd ), + ( virt_to_phys ( admin->cmd ) + admin_len ), + virt_to_phys ( scratch->buf ), + ( virt_to_phys ( scratch->buf ) + scratch_len ) ); + return 0; + + dma_free ( &scratch->map, scratch->buf, scratch_len ); + err_scratch: + dma_free ( &admin->map, admin->cmd, admin_len ); + err_admin: + return rc; +} + +/** + * Free admin queue + * + * @v gve GVE device + */ +static void gve_admin_free ( struct gve_nic *gve ) { + struct gve_admin *admin = &gve->admin; + struct gve_scratch *scratch = &gve->scratch; + size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) ); + size_t scratch_len = sizeof ( *scratch->buf ); + + /* Free scratch buffer */ + dma_free ( &scratch->map, scratch->buf, scratch_len ); + + /* Free admin queue */ + dma_free ( &admin->map, admin->cmd, admin_len ); +} + +/** + * Enable admin queue + * + * @v gve GVE device + */ +static void gve_admin_enable ( struct gve_nic *gve ) { + struct gve_admin *admin = &gve->admin; + size_t admin_len = ( GVE_ADMIN_COUNT * sizeof ( admin->cmd[0] ) ); + physaddr_t base; + + /* Reset queue */ + admin->prod = 0; + + /* Program queue addresses and capabilities */ + base = dma ( &admin->map, admin->cmd ); + writel ( bswap_32 ( base / GVE_PAGE_SIZE ), + gve->cfg + GVE_CFG_ADMIN_PFN ); + writel ( bswap_32 ( base & 0xffffffffUL ), + gve->cfg + GVE_CFG_ADMIN_BASE_LO ); + if ( sizeof ( base ) > sizeof ( uint32_t ) ) { + writel ( bswap_32 ( ( ( uint64_t ) base ) >> 32 ), + gve->cfg + GVE_CFG_ADMIN_BASE_HI ); + } else { + writel ( 0, gve->cfg + GVE_CFG_ADMIN_BASE_HI ); + } + writel ( bswap_16 ( admin_len ), gve->cfg + GVE_CFG_ADMIN_LEN ); + writel ( bswap_32 ( GVE_CFG_DRVSTAT_RUN ), gve->cfg + GVE_CFG_DRVSTAT ); +} + +/** + * Get next available admin queue command slot + * + * @v gve GVE device + * @ret cmd Admin queue command + */ +static union gve_admin_command * 
gve_admin_command ( struct gve_nic *gve ) { + struct gve_admin *admin = &gve->admin; + union gve_admin_command *cmd; + unsigned int index; + + /* Get next command slot */ + index = admin->prod; + cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ]; + + /* Initialise request */ + memset ( cmd, 0, sizeof ( *cmd ) ); + + return cmd; +} + +/** + * Wait for admin queue command to complete + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_admin_wait ( struct gve_nic *gve ) { + struct gve_admin *admin = &gve->admin; + uint32_t evt; + uint32_t pfn; + unsigned int i; + + /* Wait for any outstanding commands to complete */ + for ( i = 0 ; i < GVE_ADMIN_MAX_WAIT_MS ; i++ ) { + + /* Check event counter */ + rmb(); + evt = bswap_32 ( readl ( gve->cfg + GVE_CFG_ADMIN_EVT ) ); + if ( evt == admin->prod ) + return 0; + + /* Check for device reset */ + pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN ); + if ( ! pfn ) + break; + + /* Delay */ + mdelay ( 1 ); + } + + DBGC ( gve, "GVE %p AQ %#02x %s (completed %#02x, status %#08x)\n", + gve, admin->prod, ( pfn ? "timed out" : "saw reset" ), evt, + bswap_32 ( readl ( gve->cfg + GVE_CFG_DEVSTAT ) ) ); + return ( pfn ? 
-ETIMEDOUT : -ECONNRESET ); +} + +/** + * Issue admin queue command + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_admin ( struct gve_nic *gve ) { + struct gve_admin *admin = &gve->admin; + union gve_admin_command *cmd; + unsigned int index; + uint32_t opcode; + uint32_t status; + int rc; + + /* Ensure admin queue is idle */ + if ( ( rc = gve_admin_wait ( gve ) ) != 0 ) + return rc; + + /* Get next command slot */ + index = admin->prod; + cmd = &admin->cmd[ index % GVE_ADMIN_COUNT ]; + opcode = cmd->hdr.opcode; + DBGC2 ( gve, "GVE %p AQ %#02x command %#04x request:\n", + gve, index, opcode ); + DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) ); + + /* Increment producer counter */ + admin->prod++; + + /* Ring doorbell */ + wmb(); + writel ( bswap_32 ( admin->prod ), gve->cfg + GVE_CFG_ADMIN_DB ); + + /* Wait for command to complete */ + if ( ( rc = gve_admin_wait ( gve ) ) != 0 ) + return rc; + + /* Check command status */ + status = be32_to_cpu ( cmd->hdr.status ); + if ( status != GVE_ADMIN_STATUS_OK ) { + rc = -EIO_ADMIN ( status ); + DBGC ( gve, "GVE %p AQ %#02x command %#04x failed: %#08x\n", + gve, index, opcode, status ); + DBGC_HDA ( gve, 0, cmd, sizeof ( *cmd ) ); + DBGC ( gve, "GVE %p AQ error: %s\n", gve, strerror ( rc ) ); + return rc; + } + + DBGC2 ( gve, "GVE %p AQ %#02x command %#04x result:\n", + gve, index, opcode ); + DBGC2_HDA ( gve, 0, cmd, sizeof ( *cmd ) ); + return 0; +} + +/** + * Issue simple admin queue command + * + * @v gve GVE device + * @v opcode Operation code + * @v id ID parameter (or zero if not applicable) + * @ret rc Return status code + * + * Several admin queue commands take either an empty parameter list or + * a single 32-bit ID parameter. 
+ */ +static int gve_admin_simple ( struct gve_nic *gve, unsigned int opcode, + unsigned int id ) { + union gve_admin_command *cmd; + int rc; + + /* Construct request */ + cmd = gve_admin_command ( gve ); + cmd->hdr.opcode = opcode; + cmd->simple.id = cpu_to_be32 ( id ); + + /* Issue command */ + if ( ( rc = gve_admin ( gve ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Get device descriptor + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_describe ( struct gve_nic *gve ) { + struct net_device *netdev = gve->netdev; + struct gve_device_descriptor *desc = &gve->scratch.buf->desc; + union gve_admin_command *cmd; + struct gve_option *opt; + unsigned int count; + unsigned int id; + size_t offset; + size_t max; + size_t len; + int rc; + + /* Construct request */ + cmd = gve_admin_command ( gve ); + cmd->hdr.opcode = GVE_ADMIN_DESCRIBE; + cmd->desc.addr = cpu_to_be64 ( dma ( &gve->scratch.map, desc ) ); + cmd->desc.ver = cpu_to_be32 ( GVE_ADMIN_DESCRIBE_VER ); + cmd->desc.len = cpu_to_be32 ( sizeof ( *desc ) ); + + /* Issue command */ + if ( ( rc = gve_admin ( gve ) ) != 0 ) + return rc; + DBGC2 ( gve, "GVE %p device descriptor:\n", gve ); + DBGC2_HDA ( gve, 0, desc, sizeof ( *desc ) ); + + /* Extract queue parameters */ + gve->events.count = be16_to_cpu ( desc->counters ); + gve->tx.count = be16_to_cpu ( desc->tx_count ); + gve->rx.count = be16_to_cpu ( desc->rx_count ); + DBGC ( gve, "GVE %p using %d TX, %d RX, %d events\n", + gve, gve->tx.count, gve->rx.count, gve->events.count ); + + /* Extract network parameters */ + build_assert ( sizeof ( desc->mac ) == ETH_ALEN ); + memcpy ( netdev->hw_addr, &desc->mac, sizeof ( desc->mac ) ); + netdev->mtu = be16_to_cpu ( desc->mtu ); + netdev->max_pkt_len = ( netdev->mtu + ETH_HLEN ); + DBGC ( gve, "GVE %p MAC %s (\"%s\") MTU %zd\n", + gve, eth_ntoa ( netdev->hw_addr ), + inet_ntoa ( desc->mac.in ), netdev->mtu ); + + /* Parse options */ + count = be16_to_cpu ( desc->opt_count ); + max = 
be16_to_cpu ( desc->len ); + gve->options = 0; + for ( offset = offsetof ( typeof ( *desc ), opts ) ; count ; + count--, offset += len ) { + + /* Check space for option header */ + if ( ( offset + sizeof ( *opt ) ) > max ) { + DBGC ( gve, "GVE %p underlength option at +%#02zx:\n", + gve, offset ); + DBGC_HDA ( gve, 0, desc, sizeof ( *desc ) ); + return -EINVAL; + } + opt = ( ( ( void * ) desc ) + offset ); + + /* Check space for option body */ + len = ( sizeof ( *opt ) + be16_to_cpu ( opt->len ) ); + if ( ( offset + len ) > max ) { + DBGC ( gve, "GVE %p malformed option at +%#02zx:\n", + gve, offset ); + DBGC_HDA ( gve, 0, desc, sizeof ( *desc ) ); + return -EINVAL; + } + + /* Record option as supported */ + id = be16_to_cpu ( opt->id ); + if ( id < ( 8 * sizeof ( gve->options ) ) ) + gve->options |= ( 1 << id ); + } + DBGC ( gve, "GVE %p supports options %#08x\n", gve, gve->options ); + + /* Select preferred operating mode */ + if ( gve->options & ( 1 << GVE_OPT_GQI_QPL ) ) { + /* GQI-QPL: in-order queues, queue page list addressing */ + gve->mode = GVE_MODE_QPL; + } else if ( gve->options & ( 1 << GVE_OPT_GQI_RDA ) ) { + /* GQI-RDA: in-order queues, raw DMA addressing */ + gve->mode = 0; + } else if ( gve->options & ( 1 << GVE_OPT_DQO_QPL ) ) { + /* DQO-QPL: out-of-order queues, queue page list addressing */ + gve->mode = ( GVE_MODE_DQO | GVE_MODE_QPL ); + } else if ( gve->options & ( 1 << GVE_OPT_DQO_RDA ) ) { + /* DQO-RDA: out-of-order queues, raw DMA addressing */ + gve->mode = GVE_MODE_DQO; + } else { + /* No options matched: assume the original GQI-QPL mode */ + gve->mode = GVE_MODE_QPL; + } + DBGC ( gve, "GVE %p using %s mode\n", + gve, gve_mode_name ( gve->mode ) ); + + return 0; +} + +/** + * Configure device resources + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_configure ( struct gve_nic *gve ) { + struct gve_events *events = &gve->events; + struct gve_irqs *irqs = &gve->irqs; + union gve_admin_command *cmd; + uint32_t 
doorbell; + unsigned int db_off; + unsigned int i; + int rc; + + /* Construct request */ + cmd = gve_admin_command ( gve ); + cmd->hdr.opcode = GVE_ADMIN_CONFIGURE; + cmd->conf.events = + cpu_to_be64 ( dma ( &events->map, events->event ) ); + cmd->conf.irqs = + cpu_to_be64 ( dma ( &irqs->map, irqs->irq ) ); + cmd->conf.num_events = cpu_to_be32 ( events->count ); + cmd->conf.num_irqs = cpu_to_be32 ( GVE_IRQ_COUNT ); + cmd->conf.irq_stride = cpu_to_be32 ( sizeof ( irqs->irq[0] ) ); + cmd->conf.format = GVE_FORMAT ( gve->mode ); + + /* Issue command */ + if ( ( rc = gve_admin ( gve ) ) != 0 ) + return rc; + + /* Disable all interrupts */ + doorbell = ( ( gve->mode & GVE_MODE_DQO ) ? + 0 : bswap_32 ( GVE_GQI_IRQ_DISABLE ) ); + for ( i = 0 ; i < GVE_IRQ_COUNT ; i++ ) { + db_off = ( be32_to_cpu ( irqs->irq[i].db_idx ) * + sizeof ( uint32_t ) ); + DBGC ( gve, "GVE %p IRQ %d doorbell +%#04x\n", gve, i, db_off ); + irqs->db[i] = ( gve->db + db_off ); + writel ( doorbell, irqs->db[i] ); + } + + return 0; +} + +/** + * Deconfigure device resources + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_deconfigure ( struct gve_nic *gve ) { + int rc; + + /* Issue command (with meaningless ID) */ + if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_DECONFIGURE, 0 ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Register queue page list + * + * @v gve GVE device + * @v qpl Queue page list + * @ret rc Return status code + */ +static int gve_register ( struct gve_nic *gve, struct gve_qpl *qpl ) { + struct gve_pages *pages = &gve->scratch.buf->pages; + union gve_admin_command *cmd; + void *addr; + unsigned int i; + int rc; + + /* Do nothing if using raw DMA addressing */ + if ( ! 
( gve->mode & GVE_MODE_QPL ) ) + return 0; + + /* Build page address list */ + for ( i = 0 ; i < qpl->count ; i++ ) { + addr = ( qpl->data + ( i * GVE_PAGE_SIZE ) ); + pages->addr[i] = cpu_to_be64 ( dma ( &qpl->map, addr ) ); + } + + /* Construct request */ + cmd = gve_admin_command ( gve ); + cmd->hdr.opcode = GVE_ADMIN_REGISTER; + cmd->reg.id = cpu_to_be32 ( qpl->id ); + cmd->reg.count = cpu_to_be32 ( qpl->count ); + cmd->reg.addr = cpu_to_be64 ( dma ( &gve->scratch.map, pages ) ); + cmd->reg.size = cpu_to_be64 ( GVE_PAGE_SIZE ); + + /* Issue command */ + if ( ( rc = gve_admin ( gve ) ) != 0 ) + return rc; + + return 0; +} + +/** + * Unregister page list + * + * @v gve GVE device + * @v qpl Queue page list + * @ret rc Return status code + */ +static int gve_unregister ( struct gve_nic *gve, struct gve_qpl *qpl ) { + int rc; + + /* Do nothing if using raw DMA addressing */ + if ( ! ( gve->mode & GVE_MODE_QPL ) ) + return 0; + + /* Issue command */ + if ( ( rc = gve_admin_simple ( gve, GVE_ADMIN_UNREGISTER, + qpl->id ) ) != 0 ) { + return rc; + } + + return 0; +} + +/** + * Construct command to create transmit queue + * + * @v queue Transmit queue + * @v qpl Queue page list ID + * @v cmd Admin queue command + */ +static void gve_create_tx_param ( struct gve_queue *queue, uint32_t qpl, + union gve_admin_command *cmd ) { + struct gve_admin_create_tx *create = &cmd->create_tx; + const struct gve_queue_type *type = queue->type; + + /* Construct request parameters */ + create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) ); + create->desc = + cpu_to_be64 ( dma ( &queue->desc_map, queue->desc.raw ) ); + create->qpl_id = cpu_to_be32 ( qpl ); + create->notify_id = cpu_to_be32 ( type->irq ); + create->desc_count = cpu_to_be16 ( queue->count ); + if ( queue->cmplt.raw ) { + create->cmplt = cpu_to_be64 ( dma ( &queue->cmplt_map, + queue->cmplt.raw ) ); + create->cmplt_count = cpu_to_be16 ( queue->count ); + } +} + +/** + * Construct command to create receive queue 
+ * + * @v queue Receive queue + * @v qpl Queue page list ID + * @v cmd Admin queue command + */ +static void gve_create_rx_param ( struct gve_queue *queue, uint32_t qpl, + union gve_admin_command *cmd ) { + struct gve_admin_create_rx *create = &cmd->create_rx; + const struct gve_queue_type *type = queue->type; + + /* Construct request parameters */ + create->notify_id = cpu_to_be32 ( type->irq ); + create->res = cpu_to_be64 ( dma ( &queue->res_map, queue->res ) ); + create->desc = + cpu_to_be64 ( dma ( &queue->desc_map, queue->desc.raw ) ); + create->cmplt = + cpu_to_be64 ( dma ( &queue->cmplt_map, queue->cmplt.raw ) ); + create->qpl_id = cpu_to_be32 ( qpl ); + create->desc_count = cpu_to_be16 ( queue->count ); + create->bufsz = cpu_to_be16 ( GVE_BUF_SIZE ); + create->cmplt_count = cpu_to_be16 ( queue->count ); +} + +/** + * Create transmit or receive queue + * + * @v gve GVE device + * @v queue Descriptor queue + * @ret rc Return status code + */ +static int gve_create_queue ( struct gve_nic *gve, struct gve_queue *queue ) { + const struct gve_queue_type *type = queue->type; + const struct gve_queue_stride *stride = &queue->stride; + union gve_admin_command *cmd; + struct gve_buffer *buf; + unsigned int db_off; + unsigned int evt_idx; + unsigned int tag; + unsigned int i; + uint32_t qpl; + int rc; + + /* Reset queue */ + queue->prod = 0; + queue->cons = 0; + queue->done = 0; + memset ( queue->desc.raw, 0, ( queue->count * stride->desc ) ); + memset ( queue->cmplt.raw, 0, ( queue->count * stride->cmplt ) ); + for ( i = 0 ; i < queue->fill ; i++ ) + queue->tag[i] = i; + + /* Pre-populate descriptor offsets for in-order queues */ + if ( ! 
( gve->mode & GVE_MODE_DQO ) ) { + buf = ( queue->desc.raw + stride->desc - sizeof ( *buf ) ); + for ( i = 0 ; i < queue->count ; i++ ) { + tag = ( i & ( queue->fill - 1 ) ); + buf->addr = cpu_to_be64 ( gve_address ( queue, tag ) ); + buf = ( ( ( void * ) buf ) + stride->desc ); + } + } + + /* Construct request */ + cmd = gve_admin_command ( gve ); + cmd->hdr.opcode = type->create; + qpl = ( ( gve->mode & GVE_MODE_QPL ) ? type->qpl : GVE_RAW_QPL ); + type->param ( queue, qpl, cmd ); + + /* Issue command */ + if ( ( rc = gve_admin ( gve ) ) != 0 ) + return rc; + + /* Record indices */ + db_off = ( be32_to_cpu ( queue->res->db_idx ) * sizeof ( uint32_t ) ); + evt_idx = be32_to_cpu ( queue->res->evt_idx ); + DBGC ( gve, "GVE %p %s doorbell +%#04x event counter %d\n", + gve, type->name, db_off, evt_idx ); + queue->db = ( gve->db + db_off ); + assert ( evt_idx < gve->events.count ); + queue->event = &gve->events.event[evt_idx]; + assert ( queue->event->count == 0 ); + + /* Unmask dummy interrupt */ + pci_msix_unmask ( &gve->msix, type->irq ); + + /* Rearm queue interrupt if applicable */ + if ( gve->mode & GVE_MODE_DQO ) + writel ( GVE_DQO_IRQ_REARM, gve->irqs.db[type->irq] ); + + return 0; +} + +/** + * Destroy transmit or receive queue + * + * @v gve GVE device + * @v queue Descriptor queue + * @ret rc Return status code + */ +static int gve_destroy_queue ( struct gve_nic *gve, struct gve_queue *queue ) { + const struct gve_queue_type *type = queue->type; + int rc; + + /* Mask dummy interrupt */ + pci_msix_mask ( &gve->msix, type->irq ); + + /* Issue command */ + if ( ( rc = gve_admin_simple ( gve, type->destroy, 0 ) ) != 0 ) + return rc; + + return 0; +} + +/****************************************************************************** + * + * Network device interface + * + ****************************************************************************** + */ + +/** + * Allocate shared queue resources + * + * @v gve GVE device + * @ret rc Return status code + */ +static 
int gve_alloc_shared ( struct gve_nic *gve ) {
	struct dma_device *dma = gve->dma;
	struct gve_irqs *irqs = &gve->irqs;
	struct gve_events *events = &gve->events;
	size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
	size_t events_len = ( gve->events.count * sizeof ( events->event[0] ) );
	int rc;

	/* Allocate interrupt channels */
	irqs->irq = dma_alloc ( dma, &irqs->map, irqs_len, GVE_ALIGN );
	if ( ! irqs->irq ) {
		rc = -ENOMEM;
		goto err_irqs;
	}
	DBGC ( gve, "GVE %p IRQs at [%08lx,%08lx)\n",
	       gve, virt_to_phys ( irqs->irq ),
	       ( virt_to_phys ( irqs->irq ) + irqs_len ) );

	/* Allocate event counters */
	events->event = dma_alloc ( dma, &events->map, events_len, GVE_ALIGN );
	if ( ! events->event ) {
		rc = -ENOMEM;
		goto err_events;
	}
	DBGC ( gve, "GVE %p events at [%08lx,%08lx)\n",
	       gve, virt_to_phys ( events->event ),
	       ( virt_to_phys ( events->event ) + events_len ) );

	return 0;

	/* Deliberately unreachable: documents the unwind order for
	 * the error labels below (matches the error-path convention
	 * used throughout this file).
	 */
	dma_free ( &events->map, events->event, events_len );
 err_events:
	dma_free ( &irqs->map, irqs->irq, irqs_len );
 err_irqs:
	return rc;
}

/**
 * Free shared queue resources
 *
 * @v gve		GVE device
 */
static void gve_free_shared ( struct gve_nic *gve ) {
	struct gve_irqs *irqs = &gve->irqs;
	struct gve_events *events = &gve->events;
	size_t irqs_len = ( GVE_IRQ_COUNT * sizeof ( irqs->irq[0] ) );
	size_t events_len = ( gve->events.count * sizeof ( events->event[0] ) );

	/* Free event counters (freed in reverse order of allocation) */
	dma_free ( &events->map, events->event, events_len );

	/* Free interrupt channels */
	dma_free ( &irqs->map, irqs->irq, irqs_len );
}

/**
 * Allocate queue page list
 *
 * @v gve		GVE device
 * @v qpl		Queue page list
 * @v id		Queue page list ID
 * @v buffers		Number of data buffers
 * @ret rc		Return status code
 */
static int gve_alloc_qpl ( struct gve_nic *gve, struct gve_qpl *qpl,
			   uint32_t id, unsigned int buffers ) {
	size_t len;

	/* Record ID */
	qpl->id = id;

	/* Calculate number of pages required
*/ + build_assert ( GVE_BUF_SIZE <= GVE_PAGE_SIZE ); + qpl->count = ( ( buffers + GVE_BUF_PER_PAGE - 1 ) / GVE_BUF_PER_PAGE ); + assert ( qpl->count <= GVE_QPL_MAX ); + + /* Allocate pages (as a single block) */ + len = ( qpl->count * GVE_PAGE_SIZE ); + qpl->data = dma_umalloc ( gve->dma, &qpl->map, len, GVE_ALIGN ); + if ( ! qpl->data ) + return -ENOMEM; + qpl->base = ( ( gve->mode == GVE_MODE_QPL ) ? + 0 : dma ( &qpl->map, qpl->data ) ); + + DBGC ( gve, "GVE %p QPL %#08x at [%08lx,%08lx)\n", + gve, qpl->id, virt_to_phys ( qpl->data ), + ( virt_to_phys ( qpl->data ) + len ) ); + return 0; +} + +/** + * Free queue page list + * + * @v gve GVE device + * @v qpl Queue page list + */ +static void gve_free_qpl ( struct gve_nic *nic __unused, + struct gve_qpl *qpl ) { + size_t len = ( qpl->count * GVE_PAGE_SIZE ); + + /* Free pages */ + dma_ufree ( &qpl->map, qpl->data, len ); +} + +/** + * Calculate next receive sequence number + * + * @v seq Current sequence number, or zero to start sequence + * @ret next Next sequence number + */ +static inline __attribute__ (( always_inline )) unsigned int +gve_next ( unsigned int seq ) { + + /* The receive completion sequence number is a modulo 7 + * counter that cycles through the non-zero three-bit values 1 + * to 7 inclusive. + * + * Since 7 is coprime to 2^n, this ensures that the sequence + * number changes each time that a new completion is written + * to memory. + * + * Since the counter takes only non-zero values, this ensures + * that the sequence number changes whenever a new completion + * is first written to a zero-initialised completion ring. + */ + seq = ( ( seq + 1 ) & GVE_GQI_RX_SEQ_MASK ); + return ( seq ? 
		      seq : 1 );
}

/**
 * Allocate descriptor queue
 *
 * @v gve		GVE device
 * @v queue		Descriptor queue
 * @ret rc		Return status code
 */
static int gve_alloc_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
	const struct gve_queue_type *type = queue->type;
	struct gve_queue_stride *stride = &queue->stride;
	struct dma_device *dma = gve->dma;
	size_t desc_len;
	size_t cmplt_len;
	size_t res_len;
	int rc;

	/* Sanity checks: ring size must be a non-zero power of two */
	if ( ( queue->count == 0 ) ||
	     ( queue->count & ( queue->count - 1 ) ) ) {
		DBGC ( gve, "GVE %p %s invalid queue size %d\n",
		       gve, type->name, queue->count );
		rc = -EINVAL;
		goto err_sanity;
	}

	/* Set queue strides (per descriptor format) and calculate
	 * total lengths.  A zero completion stride (GQI TX) means no
	 * completion ring is allocated.
	 */
	*stride = ( ( gve->mode & GVE_MODE_DQO ) ?
		    type->stride.dqo : type->stride.gqi );
	desc_len = ( queue->count * stride->desc );
	cmplt_len = ( queue->count * stride->cmplt );
	res_len = sizeof ( *queue->res );

	/* Calculate maximum fill level (capped at the ring size) */
	assert ( ( type->fill & ( type->fill - 1 ) ) == 0 );
	queue->fill = type->fill;
	if ( queue->fill > queue->count )
		queue->fill = queue->count;
	DBGC ( gve, "GVE %p %s using QPL %#08x with %d/%d descriptors\n",
	       gve, type->name, type->qpl, queue->fill, queue->count );

	/* Allocate queue page list (sized for the fill level only) */
	if ( ( rc = gve_alloc_qpl ( gve, &queue->qpl, type->qpl,
				    queue->fill ) ) != 0 )
		goto err_qpl;

	/* Allocate descriptors */
	queue->desc.raw = dma_umalloc ( dma, &queue->desc_map, desc_len,
					GVE_ALIGN );
	if ( ! queue->desc.raw ) {
		rc = -ENOMEM;
		goto err_desc;
	}
	DBGC ( gve, "GVE %p %s descriptors at [%08lx,%08lx)\n",
	       gve, type->name, virt_to_phys ( queue->desc.raw ),
	       ( virt_to_phys ( queue->desc.raw ) + desc_len ) );

	/* Allocate completions, if this queue format uses them */
	if ( cmplt_len ) {
		queue->cmplt.raw = dma_umalloc ( dma, &queue->cmplt_map,
						 cmplt_len, GVE_ALIGN );
		if ( ! queue->cmplt.raw ) {
			rc = -ENOMEM;
			goto err_cmplt;
		}
		DBGC ( gve, "GVE %p %s completions at [%08lx,%08lx)\n",
		       gve, type->name, virt_to_phys ( queue->cmplt.raw ),
		       ( virt_to_phys ( queue->cmplt.raw ) + cmplt_len ) );
	}

	/* Allocate queue resources (zeroed; device writes indices here) */
	queue->res = dma_alloc ( dma, &queue->res_map, res_len, GVE_ALIGN );
	if ( ! queue->res ) {
		rc = -ENOMEM;
		goto err_res;
	}
	memset ( queue->res, 0, res_len );

	return 0;

	/* Deliberately unreachable: documents the unwind order for
	 * the error labels below.
	 */
	dma_free ( &queue->res_map, queue->res, res_len );
 err_res:
	if ( cmplt_len )
		dma_ufree ( &queue->cmplt_map, queue->cmplt.raw, cmplt_len );
 err_cmplt:
	dma_ufree ( &queue->desc_map, queue->desc.raw, desc_len );
 err_desc:
	gve_free_qpl ( gve, &queue->qpl );
 err_qpl:
 err_sanity:
	return rc;
}

/**
 * Free descriptor queue
 *
 * @v gve		GVE device
 * @v queue		Descriptor queue
 */
static void gve_free_queue ( struct gve_nic *gve, struct gve_queue *queue ) {
	const struct gve_queue_stride *stride = &queue->stride;
	size_t desc_len = ( queue->count * stride->desc );
	size_t cmplt_len = ( queue->count * stride->cmplt );
	size_t res_len = sizeof ( *queue->res );

	/* Free queue resources */
	dma_free ( &queue->res_map, queue->res, res_len );

	/* Free completions, if applicable */
	if ( cmplt_len )
		dma_ufree ( &queue->cmplt_map, queue->cmplt.raw, cmplt_len );

	/* Free descriptors */
	dma_ufree ( &queue->desc_map, queue->desc.raw, desc_len );

	/* Free queue page list */
	gve_free_qpl ( gve, &queue->qpl );
}

/**
 * Cancel any pending transmissions
 *
 * @v gve		GVE device
 */
static void gve_cancel_tx ( struct gve_nic *gve ) {
	struct net_device *netdev = gve->netdev;
	struct io_buffer *iobuf;
	unsigned int i;

	/* Cancel any pending transmissions, completing each
	 * outstanding I/O buffer back to the network stack with
	 * -ECANCELED.  The slot is cleared before completion.
	 */
	for ( i = 0 ; i < ( sizeof ( gve->tx_iobuf ) /
			    sizeof ( gve->tx_iobuf[0] ) ) ; i++ ) {
		iobuf = gve->tx_iobuf[i];
		gve->tx_iobuf[i] = NULL;
		if ( iobuf )
			netdev_tx_complete_err ( netdev, iobuf, -ECANCELED );
	}
}

/**
 * Start up device
 *
 * @v gve		GVE device
 * @ret rc		Return status code
 */
static int gve_start ( struct gve_nic *gve ) {
	struct gve_queue *tx = &gve->tx;
	struct gve_queue *rx = &gve->rx;
	int rc;

	/* Cancel any pending transmissions (may be a restart) */
	gve_cancel_tx ( gve );

	/* Reset receive sequence to the start-of-sequence value */
	gve->seq = gve_next ( 0 );

	/* Configure device resources */
	if ( ( rc = gve_configure ( gve ) ) != 0 )
		goto err_configure;

	/* Register transmit queue page list */
	if ( ( rc = gve_register ( gve, &tx->qpl ) ) != 0 )
		goto err_register_tx;

	/* Register receive queue page list */
	if ( ( rc = gve_register ( gve, &rx->qpl ) ) != 0 )
		goto err_register_rx;

	/* Create transmit queue */
	if ( ( rc = gve_create_queue ( gve, tx ) ) != 0 )
		goto err_create_tx;

	/* Create receive queue */
	if ( ( rc = gve_create_queue ( gve, rx ) ) != 0 )
		goto err_create_rx;

	return 0;

	/* Deliberately unreachable: documents the unwind order for
	 * the error labels below.
	 */
	gve_destroy_queue ( gve, rx );
 err_create_rx:
	gve_destroy_queue ( gve, tx );
 err_create_tx:
	gve_unregister ( gve, &rx->qpl );
 err_register_rx:
	gve_unregister ( gve, &tx->qpl );
 err_register_tx:
	gve_deconfigure ( gve );
 err_configure:
	return rc;
}

/**
 * Stop device
 *
 * @v gve		GVE device
 */
static void gve_stop ( struct gve_nic *gve ) {
	struct gve_queue *tx = &gve->tx;
	struct gve_queue *rx = &gve->rx;

	/* Destroy queues (exact reverse of gve_start() ordering) */
	gve_destroy_queue ( gve, rx );
	gve_destroy_queue ( gve, tx );

	/* Unregister page lists */
	gve_unregister ( gve, &rx->qpl );
	gve_unregister ( gve, &tx->qpl );

	/* Deconfigure device */
	gve_deconfigure ( gve );
}

/**
 * Device startup process
 *
 * Runs as a one-shot process; retried (up to GVE_RESET_MAX_RETRY
 * times) by rescheduling itself on failure.
 *
 * @v gve		GVE device
 */
static void gve_startup ( struct gve_nic *gve ) {
	struct net_device *netdev = gve->netdev;
	int rc;

	/* Reset device */
	if ( ( rc = gve_reset ( gve ) ) != 0 )
		goto err_reset;

	/* Enable admin queue */
	gve_admin_enable ( gve );

	/* Start device */
	if ( ( rc = gve_start ( gve ) ) != 0 )
		goto err_start;

	/* Reset retry count */
	gve->retries = 0;

	/* (Ab)use link status to report startup status */
	netdev_link_up ( netdev );

	return;

	/* Deliberately unreachable: documents the unwind for the
	 * error labels below.
	 */
	gve_stop ( gve );
 err_start:
 err_reset:
	DBGC ( gve, "GVE %p startup failed: %s\n", gve, strerror ( rc ) );
	netdev_link_err ( netdev, rc );
	if ( gve->retries++ < GVE_RESET_MAX_RETRY )
		process_add ( &gve->startup );
}

/**
 * Trigger startup process
 *
 * @v gve		GVE device
 */
static void gve_restart ( struct gve_nic *gve ) {
	struct net_device *netdev = gve->netdev;

	/* Mark link down to inhibit polling and transmit activity */
	netdev_link_down ( netdev );

	/* Schedule startup process */
	process_add ( &gve->startup );
}

/**
 * Reset recovery watchdog
 *
 * Periodically checks whether the host has reset the device behind
 * our back (e.g. after VM migration) and schedules a restart if so.
 *
 * @v timer		Reset recovery watchdog timer
 * @v over		Failure indicator
 */
static void gve_watchdog ( struct retry_timer *timer, int over __unused ) {
	struct gve_nic *gve = container_of ( timer, struct gve_nic, watchdog );
	uint32_t activity;
	uint32_t pfn;
	int rc;

	/* Reschedule watchdog */
	start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT );

	/* Reset device (for test purposes) if applicable: fault
	 * injection simulates a host-initiated reset by clearing the
	 * admin queue page frame number register.
	 */
	if ( ( rc = inject_fault ( VM_MIGRATED_RATE ) ) != 0 ) {
		DBGC ( gve, "GVE %p synthesising host reset\n", gve );
		writel ( 0, gve->cfg + GVE_CFG_ADMIN_PFN );
	}

	/* Check for activity since last timer invocation: any change
	 * in the combined TX/RX consumer counters means the device is
	 * still alive, so skip the reset check.
	 */
	activity = ( gve->tx.cons + gve->rx.cons );
	if ( activity != gve->activity ) {
		gve->activity = activity;
		return;
	}

	/* Check for reset: a zero admin queue PFN indicates that the
	 * host has reset the device.
	 */
	pfn = readl ( gve->cfg + GVE_CFG_ADMIN_PFN );
	if ( pfn ) {
		DBGC2 ( gve, "GVE %p idle but not in reset\n", gve );
		return;
	}

	/* Schedule restart */
	DBGC ( gve, "GVE %p watchdog detected reset by host\n", gve );
	gve_restart ( gve );
}

/**
 * Open network device
 *
 * @v netdev		Network device
 * @ret rc		Return status code
 */
static int gve_open ( struct net_device *netdev ) {
	struct gve_nic *gve = netdev->priv;
	struct
gve_queue *tx = &gve->tx; + struct gve_queue *rx = &gve->rx; + int rc; + + /* Allocate shared queue resources */ + if ( ( rc = gve_alloc_shared ( gve ) ) != 0 ) + goto err_alloc_shared; + + /* Allocate and prepopulate transmit queue */ + if ( ( rc = gve_alloc_queue ( gve, tx ) ) != 0 ) + goto err_alloc_tx; + + /* Allocate and prepopulate receive queue */ + if ( ( rc = gve_alloc_queue ( gve, rx ) ) != 0 ) + goto err_alloc_rx; + + /* Trigger startup */ + gve_restart ( gve ); + + /* Start reset recovery watchdog timer */ + start_timer_fixed ( &gve->watchdog, GVE_WATCHDOG_TIMEOUT ); + + return 0; + + gve_free_queue ( gve, rx ); + err_alloc_rx: + gve_free_queue ( gve, tx ); + err_alloc_tx: + gve_free_shared ( gve ); + err_alloc_shared: + return rc; +} + +/** + * Close network device + * + * @v netdev Network device + */ +static void gve_close ( struct net_device *netdev ) { + struct gve_nic *gve = netdev->priv; + struct gve_queue *tx = &gve->tx; + struct gve_queue *rx = &gve->rx; + + /* Stop reset recovery timer */ + stop_timer ( &gve->watchdog ); + + /* Terminate startup process */ + process_del ( &gve->startup ); + + /* Stop and reset device */ + gve_stop ( gve ); + gve_reset ( gve ); + + /* Cancel any pending transmissions */ + gve_cancel_tx ( gve ); + + /* Free queues */ + gve_free_queue ( gve, rx ); + gve_free_queue ( gve, tx ); + + /* Free shared queue resources */ + gve_free_shared ( gve ); +} + +/** + * Transmit packet + * + * @v netdev Network device + * @v iobuf I/O buffer + * @ret rc Return status code + */ +static int gve_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { + struct gve_nic *gve = netdev->priv; + struct gve_queue *tx = &gve->tx; + struct gve_gqi_tx_descriptor *gqi; + struct gve_dqo_tx_descriptor *dqo; + unsigned int count; + unsigned int index; + unsigned int tag; + unsigned int chain; + uint32_t doorbell; + size_t frag_len; + size_t offset; + size_t next; + size_t len; + + /* Do nothing if queues are not yet set up */ + if ( ! 
netdev_link_ok ( netdev ) ) + return -ENETDOWN; + + /* Defer packet if there is no space in the transmit ring */ + len = iob_len ( iobuf ); + count = ( ( len + GVE_BUF_SIZE - 1 ) / GVE_BUF_SIZE ); + if ( ( ( tx->prod - tx->cons ) + count ) > tx->fill ) { + netdev_tx_defer ( netdev, iobuf ); + return 0; + } + + /* Copy packet to queue pages and populate descriptors */ + for ( offset = 0, chain = 0 ; ; offset = next, chain = tag ) { + + /* Identify next available buffer */ + index = ( tx->prod++ & ( tx->count - 1 ) ); + tag = tx->tag[ index % GVE_TX_FILL ]; + + /* Sanity check */ + assert ( gve->tx_iobuf[tag] == NULL ); + + /* Copy packet fragment */ + frag_len = ( len - offset ); + if ( frag_len > GVE_BUF_SIZE ) + frag_len = GVE_BUF_SIZE; + memcpy ( gve_buffer ( tx, tag ), + ( iobuf->data + offset ), frag_len ); + next = ( offset + frag_len ); + + /* Populate descriptor */ + if ( gve->mode & GVE_MODE_DQO ) { + + /* Out-of-order descriptor */ + dqo = &tx->desc.tx.dqo[index]; + dqo->buf.addr = + cpu_to_le64 ( gve_address ( tx, tag ) ); + if ( next == len ) { + dqo->type = ( GVE_DQO_TX_TYPE_PACKET | + GVE_DQO_TX_TYPE_LAST ); + dqo->tag.id = tag; + dqo->tag.count = count; + } else { + dqo->type = GVE_DQO_TX_TYPE_PACKET; + dqo->tag.id = 0; + dqo->tag.count = 0; + } + dqo->len = cpu_to_le16 ( frag_len ); + gve->tx_chain[tag] = chain; + + } else { + + /* In-order descriptor */ + gqi = &tx->desc.tx.gqi[index]; + if ( offset ) { + gqi->type = GVE_GQI_TX_TYPE_CONT; + gqi->count = 0; + gqi->total = 0; + } else { + gqi->type = GVE_GQI_TX_TYPE_START; + gqi->count = count; + gqi->total = cpu_to_be16 ( len ); + } + gqi->len = cpu_to_be16 ( frag_len ); + + } + DBGC2 ( gve, "GVE %p TXD %#04x %#02x:%#02x len %#04zx/%#04zx " + "at %#08lx\n", gve, index, tag, count, frag_len, len, + gve_address ( tx, tag ) ); + + /* Record I/O buffer against final descriptor */ + if ( next == len ) { + gve->tx_iobuf[tag] = iobuf; + break; + } + } + assert ( ( tx->prod - tx->cons ) <= tx->fill ); + + /* 
Ring doorbell */ + doorbell = tx->prod; + if ( gve->mode & GVE_MODE_DQO ) { + doorbell &= ( tx->count - 1 ); + } else { + doorbell = bswap_32 ( doorbell ); + } + wmb(); + writel ( doorbell, tx->db ); + + return 0; +} + +/** + * Poll for completed transmissions + * + * @v netdev Network device + */ +static void gve_poll_tx ( struct net_device *netdev ) { + struct gve_nic *gve = netdev->priv; + struct gve_queue *tx = &gve->tx; + struct gve_dqo_tx_completion *dqo; + struct io_buffer *iobuf; + unsigned int index; + unsigned int gen; + unsigned int bit; + unsigned int tag; + uint32_t count; + + /* Process transmit completions */ + if ( gve->mode & GVE_MODE_DQO ) { + + /* Out-of-order completions */ + while ( 1 ) { + + /* Read next possible completion */ + gen = ( tx->done & tx->count ); + index = ( tx->done & ( tx->count - 1 ) ); + dqo = &tx->cmplt.tx.dqo[index]; + + /* Check generation bit */ + bit = ( dqo->flags & GVE_DQO_TXF_GEN ); + if ( ( !! bit ) == ( !! gen ) ) + break; + rmb(); + tx->done++; + + /* Ignore non-packet completions */ + if ( ( ! 
( dqo->flags & GVE_DQO_TXF_PKT ) ) || + ( dqo->tag.count < 0 ) ) { + DBGC2 ( gve, "GVE %p TXC %#04x flags %#02x " + "ignored\n", gve, index, dqo->flags ); + continue; + } + + /* Parse completion */ + tag = dqo->tag.id; + count = dqo->tag.count; + iobuf = gve->tx_iobuf[tag]; + gve->tx_iobuf[tag] = NULL; + assert ( iobuf != NULL ); + + /* Return completed descriptors to ring */ + while ( count-- ) { + DBGC2 ( gve, "GVE %p TXC %#04x %#02x:%#02x " + "complete\n", gve, index, tag, + dqo->tag.count ); + tx->tag[ tx->cons++ % GVE_TX_FILL ] = tag; + tag = gve->tx_chain[tag]; + } + + /* Hand off to network stack */ + if ( iobuf ) + netdev_tx_complete ( netdev, iobuf ); + } + + } else { + + /* Read event counter */ + count = be32_to_cpu ( tx->event->count ); + + /* Process transmit completions */ + while ( count != tx->cons ) { + DBGC2 ( gve, "GVE %p TXC %#04x complete\n", + gve, tx->cons ); + tag = ( tx->cons % GVE_TX_FILL ); + iobuf = gve->tx_iobuf[tag]; + gve->tx_iobuf[tag] = NULL; + tx->cons++; + if ( iobuf ) + netdev_tx_complete ( netdev, iobuf ); + } + } +} + +/** + * Poll for received packets + * + * @v netdev Network device + */ +static void gve_poll_rx ( struct net_device *netdev ) { + struct gve_nic *gve = netdev->priv; + struct gve_queue *rx = &gve->rx; + struct gve_gqi_rx_completion *gqi; + struct gve_dqo_rx_completion *dqo; + struct io_buffer *iobuf; + unsigned int index; + unsigned int gen; + unsigned int bit; + unsigned int seq; + unsigned int tag; + uint32_t done; + size_t total; + size_t len; + int rc; + + /* Process receive completions */ + done = rx->done; + seq = gve->seq; + total = 0; + while ( 1 ) { + + /* Read next possible completion */ + rc = 0; + gen = ( done & rx->count ); + index = ( done++ & ( rx->count - 1 ) ); + if ( gve->mode & GVE_MODE_DQO ) { + + /* Out-of-order completion */ + dqo = &rx->cmplt.rx.dqo[index]; + + /* Check generation bit */ + bit = ( dqo->len & cpu_to_le16 ( GVE_DQO_RXL_GEN ) ); + if ( ( !! bit ) == ( !! 
gen ) ) + break; + rmb(); + + /* Parse completion */ + len = ( le16_to_cpu ( dqo->len ) & + ( GVE_BUF_SIZE - 1 ) ); + tag = dqo->tag; + DBGC2 ( gve, "GVE %p RXC %#04x %#02x:%#02x len %#04zx " + "at %#08zx\n", gve, index, tag, dqo->flags, + len, gve_offset ( rx, tag ) ); + + /* Accumulate a complete packet */ + if ( dqo->status & GVE_DQO_RXS_ERROR ) { + rc = -EIO; + total = 0; + } else { + total += len; + if ( ! ( dqo->flags & GVE_DQO_RXF_LAST ) ) + continue; + } + + } else { + + /* In-order completion */ + gqi = &rx->cmplt.rx.gqi[index]; + + /* Check sequence number */ + if ( ( gqi->seq & GVE_GQI_RX_SEQ_MASK ) != seq ) + break; + rmb(); + seq = gve_next ( seq ); + + /* Parse completion */ + len = be16_to_cpu ( gqi->len ); + tag = ( index % GVE_RX_FILL ); + DBGC2 ( gve, "GVE %p RXC %#04x %#02x:%#02x len %#04zx " + "at %#08zx\n", gve, index, gqi->seq, + gqi->flags, len, gve_offset ( rx, tag ) ); + + /* Accumulate a complete packet */ + if ( gqi->flags & GVE_GQI_RXF_ERROR ) { + rc = -EIO; + total = 0; + } else { + total += len; + if ( gqi->flags & GVE_GQI_RXF_MORE ) + continue; + } + gve->seq = seq; + } + + /* Allocate and populate I/O buffer */ + iobuf = ( total ? alloc_iob ( total ) : NULL ); + for ( ; rx->done != done ; rx->done++ ) { + + /* Re-read completion and return tag to ring */ + index = ( rx->done & ( rx->count - 1 ) ); + if ( gve->mode & GVE_MODE_DQO ) { + dqo = &rx->cmplt.rx.dqo[index]; + tag = dqo->tag; + len = ( le16_to_cpu ( dqo->len ) & + ( GVE_BUF_SIZE - 1 ) ); + rx->tag[ rx->cons++ % GVE_RX_FILL ] = tag; + } else { + gqi = &rx->cmplt.rx.gqi[index]; + tag = ( index % GVE_RX_FILL ); + len = be16_to_cpu ( gqi->len ); + assert ( rx->cons == rx->done ); + rx->cons++; + } + + /* Copy data */ + if ( iobuf ) { + memcpy ( iob_put ( iobuf, len ), + gve_buffer ( rx, tag ), len ); + } + } + assert ( ( iobuf == NULL ) || ( iob_len ( iobuf ) == total ) ); + total = 0; + + /* Hand off packet to network stack */ + if ( iobuf ) { + if ( ! 
( gve->mode & GVE_MODE_DQO ) ) + iob_pull ( iobuf, GVE_GQI_RX_PAD ); + netdev_rx ( netdev, iobuf ); + } else { + netdev_rx_err ( netdev, NULL, ( rc ? rc : -ENOMEM ) ); + } + } +} + +/** + * Refill receive queue + * + * @v netdev Network device + */ +static void gve_refill_rx ( struct net_device *netdev ) { + struct gve_nic *gve = netdev->priv; + struct gve_queue *rx = &gve->rx; + struct gve_dqo_rx_descriptor *dqo; + unsigned int refill; + unsigned int index; + unsigned int tag; + uint32_t doorbell; + + /* Calculate refill quantity */ + doorbell = ( rx->cons + rx->fill ); + refill = ( doorbell - rx->prod ); + if ( ! refill ) + return; + + /* Refill ring */ + if ( gve->mode & GVE_MODE_DQO ) { + + /* Out-of-order descriptors */ + while ( refill-- ) { + + /* Identify next available buffer */ + index = ( rx->prod++ & ( rx->count - 1 ) ); + tag = rx->tag[ index % GVE_RX_FILL ]; + + /* Populate descriptor */ + dqo = &rx->desc.rx.dqo[index]; + dqo->tag = tag; + dqo->buf.addr = + cpu_to_le64 ( gve_address ( rx, tag ) ); + DBGC2 ( gve, "GVE %p RXD %#04x:%#02x at %#08llx\n", + gve, index, dqo->tag, + ( ( unsigned long long ) + le64_to_cpu ( dqo->buf.addr ) ) ); + } + wmb(); + assert ( rx->prod == doorbell ); + + } else { + + /* The in-order receive descriptors are prepopulated + * at the time of creating the receive queue (pointing + * to the preallocated queue pages). Refilling is + * therefore just a case of ringing the doorbell if + * the device is not yet aware of any available + * descriptors. 
+ */ + rx->prod += refill; + assert ( rx->prod == doorbell ); + DBGC2 ( gve, "GVE %p RXD %#04x ready\n", gve, rx->prod ); + + /* Doorbell is big-endian */ + doorbell = bswap_32 ( doorbell ); + } + + /* Ring doorbell */ + writel ( doorbell, rx->db ); +} + +/** + * Poll for completed and received packets + * + * @v netdev Network device + */ +static void gve_poll ( struct net_device *netdev ) { + struct gve_nic *gve = netdev->priv; + + /* Do nothing if queues are not yet set up */ + if ( ! netdev_link_ok ( netdev ) ) + return; + + /* Poll for transmit completions */ + gve_poll_tx ( netdev ); + + /* Poll for receive completions */ + gve_poll_rx ( netdev ); + + /* Refill receive queue */ + gve_refill_rx ( netdev ); + + /* Rearm queue interrupts if applicable */ + if ( gve->mode & GVE_MODE_DQO ) { + writel ( GVE_DQO_IRQ_REARM, gve->irqs.db[GVE_TX_IRQ] ); + writel ( GVE_DQO_IRQ_REARM, gve->irqs.db[GVE_RX_IRQ] ); + } +} + +/** GVE network device operations */ +static struct net_device_operations gve_operations = { + .open = gve_open, + .close = gve_close, + .transmit = gve_transmit, + .poll = gve_poll, +}; + +/****************************************************************************** + * + * PCI interface + * + ****************************************************************************** + */ + +/** Transmit descriptor queue type */ +static const struct gve_queue_type gve_tx_type = { + .name = "TX", + .param = gve_create_tx_param, + .qpl = GVE_TX_QPL, + .irq = GVE_TX_IRQ, + .fill = GVE_TX_FILL, + .stride = { + .gqi = { + .desc = sizeof ( struct gve_gqi_tx_descriptor ), + }, + .dqo = { + .desc = sizeof ( struct gve_dqo_tx_descriptor ), + .cmplt = sizeof ( struct gve_dqo_tx_completion ), + }, + }, + .create = GVE_ADMIN_CREATE_TX, + .destroy = GVE_ADMIN_DESTROY_TX, +}; + +/** Receive descriptor queue type */ +static const struct gve_queue_type gve_rx_type = { + .name = "RX", + .param = gve_create_rx_param, + .qpl = GVE_RX_QPL, + .irq = GVE_RX_IRQ, + .fill = GVE_RX_FILL, 
+ .stride = { + .gqi = { + .desc = sizeof ( struct gve_gqi_rx_descriptor ), + .cmplt = sizeof ( struct gve_gqi_rx_completion ), + }, + .dqo = { + .desc = sizeof ( struct gve_dqo_rx_descriptor ), + .cmplt = sizeof ( struct gve_dqo_rx_completion ), + }, + }, + .create = GVE_ADMIN_CREATE_RX, + .destroy = GVE_ADMIN_DESTROY_RX, +}; + +/** + * Set up admin queue and get device description + * + * @v gve GVE device + * @ret rc Return status code + */ +static int gve_setup ( struct gve_nic *gve ) { + unsigned int i; + int rc; + + /* Attempt several times, since the device may decide to add + * in a few spurious resets. + */ + for ( i = 0 ; i < GVE_RESET_MAX_RETRY ; i++ ) { + + /* Reset device */ + if ( ( rc = gve_reset ( gve ) ) != 0 ) + continue; + + /* Enable admin queue */ + gve_admin_enable ( gve ); + + /* Fetch MAC address */ + if ( ( rc = gve_describe ( gve ) ) != 0 ) + continue; + + /* Success */ + return 0; + } + + DBGC ( gve, "GVE %p failed to get device description: %s\n", + gve, strerror ( rc ) ); + return rc; +} + +/** Device startup process descriptor */ +static struct process_descriptor gve_startup_desc = + PROC_DESC_ONCE ( struct gve_nic, startup, gve_startup ); + +/** + * Probe PCI device + * + * @v pci PCI device + * @ret rc Return status code + */ +static int gve_probe ( struct pci_device *pci ) { + struct net_device *netdev; + struct gve_nic *gve; + unsigned long cfg_start; + unsigned long db_start; + unsigned long db_size; + int rc; + + /* Allocate and initialise net device */ + netdev = alloc_etherdev ( sizeof ( *gve ) ); + if ( ! 
netdev ) { + rc = -ENOMEM; + goto err_alloc; + } + netdev_init ( netdev, &gve_operations ); + gve = netdev->priv; + pci_set_drvdata ( pci, netdev ); + netdev->dev = &pci->dev; + memset ( gve, 0, sizeof ( *gve ) ); + gve->netdev = netdev; + gve->tx.type = &gve_tx_type; + gve->rx.type = &gve_rx_type; + gve->tx.tag = gve->tx_tag; + gve->rx.tag = gve->rx_tag; + process_init_stopped ( &gve->startup, &gve_startup_desc, + &netdev->refcnt ); + timer_init ( &gve->watchdog, gve_watchdog, &netdev->refcnt ); + + /* Fix up PCI device */ + adjust_pci_device ( pci ); + + /* Check PCI revision */ + pci_read_config_byte ( pci, PCI_REVISION, &gve->revision ); + DBGC ( gve, "GVE %p is revision %#02x\n", gve, gve->revision ); + + /* Map configuration registers */ + cfg_start = pci_bar_start ( pci, GVE_CFG_BAR ); + gve->cfg = pci_ioremap ( pci, cfg_start, GVE_CFG_SIZE ); + if ( ! gve->cfg ) { + rc = -ENODEV; + goto err_cfg; + } + + /* Map doorbell registers */ + db_start = pci_bar_start ( pci, GVE_DB_BAR ); + db_size = pci_bar_size ( pci, GVE_DB_BAR ); + gve->db = pci_ioremap ( pci, db_start, db_size ); + if ( ! 
gve->db ) { + rc = -ENODEV; + goto err_db; + } + + /* Configure DMA */ + gve->dma = &pci->dma; + dma_set_mask_64bit ( gve->dma ); + assert ( netdev->dma == NULL ); + + /* Configure dummy MSI-X interrupt */ + if ( ( rc = pci_msix_enable ( pci, &gve->msix ) ) != 0 ) + goto err_msix; + + /* Allocate admin queue */ + if ( ( rc = gve_admin_alloc ( gve ) ) != 0 ) + goto err_admin; + + /* Set up the device */ + if ( ( rc = gve_setup ( gve ) ) != 0 ) + goto err_setup; + + /* Register network device */ + if ( ( rc = register_netdev ( netdev ) ) != 0 ) + goto err_register_netdev; + + return 0; + + unregister_netdev ( netdev ); + err_register_netdev: + err_setup: + gve_reset ( gve ); + gve_admin_free ( gve ); + err_admin: + pci_msix_disable ( pci, &gve->msix ); + err_msix: + iounmap ( gve->db ); + err_db: + iounmap ( gve->cfg ); + err_cfg: + netdev_nullify ( netdev ); + netdev_put ( netdev ); + err_alloc: + return rc; +} + +/** + * Remove PCI device + * + * @v pci PCI device + */ +static void gve_remove ( struct pci_device *pci ) { + struct net_device *netdev = pci_get_drvdata ( pci ); + struct gve_nic *gve = netdev->priv; + + /* Unregister network device */ + unregister_netdev ( netdev ); + + /* Reset device */ + gve_reset ( gve ); + + /* Free admin queue */ + gve_admin_free ( gve ); + + /* Disable dummy MSI-X interrupt */ + pci_msix_disable ( pci, &gve->msix ); + + /* Unmap registers */ + iounmap ( gve->db ); + iounmap ( gve->cfg ); + + /* Free network device */ + netdev_nullify ( netdev ); + netdev_put ( netdev ); +} + +/** GVE PCI device IDs */ +static struct pci_device_id gve_nics[] = { + PCI_ROM ( 0x1ae0, 0x0042, "gve", "gVNIC", 0 ), +}; + +/** GVE PCI driver */ +struct pci_driver gve_driver __pci_driver = { + .ids = gve_nics, + .id_count = ( sizeof ( gve_nics ) / sizeof ( gve_nics[0] ) ), + .probe = gve_probe, + .remove = gve_remove, +}; diff --git a/src/drivers/net/gve.h b/src/drivers/net/gve.h new file mode 100644 index 000000000..d352306ef --- /dev/null +++ 
b/src/drivers/net/gve.h @@ -0,0 +1,915 @@ +#ifndef _GVE_H +#define _GVE_H + +/** @file + * + * Google Virtual Ethernet network driver + * + * The Google Virtual Ethernet NIC (GVE or gVNIC) is found only in + * Google Cloud instances. There is essentially zero documentation + * available beyond the mostly uncommented source code in the Linux + * kernel. + */ + +FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); + +#include <stdint.h> +#include <ipxe/dma.h> +#include <ipxe/pci.h> +#include <ipxe/pcimsix.h> +#include <ipxe/in.h> +#include <ipxe/process.h> +#include <ipxe/retry.h> + +struct gve_nic; + +/** + * A Google Cloud MAC address + * + * Google Cloud locally assigned MAC addresses encode the local IPv4 + * address in the trailing 32 bits, presumably as a performance + * optimisation to allow ARP resolution to be skipped by a suitably + * aware network stack. + */ +struct google_mac { + /** Reserved */ + uint8_t reserved[2]; + /** Local IPv4 address */ + struct in_addr in; +} __attribute__ (( packed )); + +/** Page size */ +#define GVE_PAGE_SIZE 0x1000 + +/** + * Address alignment + * + * All DMA data structure base addresses seem to need to be aligned to + * a page boundary. (This is not documented anywhere, but is inferred + * from existing source code and experimentation.) + */ +#define GVE_ALIGN GVE_PAGE_SIZE + +/** Configuration BAR */ +#define GVE_CFG_BAR PCI_BASE_ADDRESS_0 + +/** + * Configuration BAR size + * + * All registers within the configuration BAR are big-endian. 
+ */ +#define GVE_CFG_SIZE 0x1000 + +/** Device status */ +#define GVE_CFG_DEVSTAT 0x0000 +#define GVE_CFG_DEVSTAT_RESET 0x00000010UL /**< Device is reset */ + +/** Driver status */ +#define GVE_CFG_DRVSTAT 0x0004 +#define GVE_CFG_DRVSTAT_RUN 0x00000001UL /**< Run admin queue */ + +/** Maximum time to wait for reset */ +#define GVE_RESET_MAX_WAIT_MS 500 + +/** Admin queue page frame number (for older devices) */ +#define GVE_CFG_ADMIN_PFN 0x0010 + +/** Admin queue doorbell */ +#define GVE_CFG_ADMIN_DB 0x0014 + +/** Admin queue event counter */ +#define GVE_CFG_ADMIN_EVT 0x0018 + +/** Driver version (8-bit register) */ +#define GVE_CFG_VERSION 0x001f + +/** Admin queue base address high 32 bits */ +#define GVE_CFG_ADMIN_BASE_HI 0x0020 + +/** Admin queue base address low 32 bits */ +#define GVE_CFG_ADMIN_BASE_LO 0x0024 + +/** Admin queue base address length (16-bit register) */ +#define GVE_CFG_ADMIN_LEN 0x0028 + +/** Doorbell BAR */ +#define GVE_DB_BAR PCI_BASE_ADDRESS_2 + +/** + * Admin queue entry header + * + * All values within admin queue entries are big-endian. 
+ */ +struct gve_admin_header { + /** Reserved */ + uint8_t reserved[3]; + /** Operation code */ + uint8_t opcode; + /** Status */ + uint32_t status; +} __attribute__ (( packed )); + +/** Command succeeded */ +#define GVE_ADMIN_STATUS_OK 0x00000001 + +/** Simple admin command */ +struct gve_admin_simple { + /** Header */ + struct gve_admin_header hdr; + /** ID */ + uint32_t id; +} __attribute__ (( packed )); + +/** Describe device command */ +#define GVE_ADMIN_DESCRIBE 0x0001 + +/** Describe device command */ +struct gve_admin_describe { + /** Header */ + struct gve_admin_header hdr; + /** Descriptor buffer address */ + uint64_t addr; + /** Descriptor version */ + uint32_t ver; + /** Descriptor maximum length */ + uint32_t len; +} __attribute__ (( packed )); + +/** Device descriptor version */ +#define GVE_ADMIN_DESCRIBE_VER 1 + +/** Device descriptor */ +struct gve_device_descriptor { + /** Reserved */ + uint8_t reserved_a[10]; + /** Number of transmit queue entries */ + uint16_t tx_count; + /** Number of receive queue entries */ + uint16_t rx_count; + /** Reserved */ + uint8_t reserved_b[2]; + /** Maximum transmit unit */ + uint16_t mtu; + /** Number of event counters */ + uint16_t counters; + /** Reserved */ + uint8_t reserved_c[4]; + /** MAC address */ + struct google_mac mac; + /** Number of device options */ + uint16_t opt_count; + /** Total length (including this header) */ + uint16_t len; + /** Reserved */ + uint8_t reserved_d[6]; + /** Space for options + * + * There is no specified upper limit, and no negotiation + * mechanism for the amount of space required. We allow space + * for seems like a reasonable number of options. + */ + uint8_t opts[216]; +} __attribute__ (( packed )); + +/** Device option header */ +struct gve_option { + /** Option ID */ + uint16_t id; + /** Length (excluding this header) */ + uint16_t len; + /** Required feature mask + * + * The purpose of this field is remarkably unclear. 
The Linux + * kernel driver does define enum gve_dev_opt_req_feat_mask, + * but every member of this enum has a zero value. + */ + uint32_t required; +} __attribute__ (( packed )); + +/** In-order descriptor queues with raw DMA addressing */ +#define GVE_OPT_GQI_RDA 0x02 + +/** In-order descriptor queues with queue page list addressing */ +#define GVE_OPT_GQI_QPL 0x03 + +/** Out-of-order descriptor queues with raw DMA addressing */ +#define GVE_OPT_DQO_RDA 0x04 + +/** Out-of-order descriptor queues with queue page list addressing */ +#define GVE_OPT_DQO_QPL 0x07 + +/** Configure device resources command */ +#define GVE_ADMIN_CONFIGURE 0x0002 + +/** Configure device resources command */ +struct gve_admin_configure { + /** Header */ + struct gve_admin_header hdr; + /** Event counter array */ + uint64_t events; + /** IRQ doorbell address */ + uint64_t irqs; + /** Number of event counters */ + uint32_t num_events; + /** Number of IRQ doorbells */ + uint32_t num_irqs; + /** IRQ doorbell stride */ + uint32_t irq_stride; + /** MSI-X base index */ + uint32_t msix_base; + /** Descriptor queue format */ + uint8_t format; + /** Reserved */ + uint8_t reserved[7]; +} __attribute__ (( packed )); + +/** Descriptor queue format */ +#define GVE_FORMAT( mode ) ( (mode) + 1 ) + +/** Register page list command */ +#define GVE_ADMIN_REGISTER 0x0003 + +/** Register page list command */ +struct gve_admin_register { + /** Header */ + struct gve_admin_header hdr; + /** Page list ID */ + uint32_t id; + /** Number of pages */ + uint32_t count; + /** Address list address */ + uint64_t addr; + /** Page size */ + uint64_t size; +} __attribute__ (( packed )); + +/** + * Maximum number of pages per queue + * + * This is a policy decision. Must be sufficient to allow for both + * the transmit and receive queue fill levels. 
+ */ +#define GVE_QPL_MAX 32 + +/** Page list */ +struct gve_pages { + /** Page address */ + uint64_t addr[GVE_QPL_MAX]; +} __attribute__ (( packed )); + +/** Unregister page list command */ +#define GVE_ADMIN_UNREGISTER 0x0004 + +/** Create transmit queue command */ +#define GVE_ADMIN_CREATE_TX 0x0005 + +/** Create transmit queue command */ +struct gve_admin_create_tx { + /** Header */ + struct gve_admin_header hdr; + /** Queue ID */ + uint32_t id; + /** Reserved */ + uint8_t reserved_a[4]; + /** Queue resources address */ + uint64_t res; + /** Descriptor ring address */ + uint64_t desc; + /** Queue page list ID */ + uint32_t qpl_id; + /** Notification channel ID */ + uint32_t notify_id; + /** Completion ring address */ + uint64_t cmplt; + /** Number of descriptor ring entries */ + uint16_t desc_count; + /** Number of completion ring entries */ + uint16_t cmplt_count; + /** Reserved */ + uint8_t reserved_b[4]; +} __attribute__ (( packed )); + +/** Create receive queue command */ +#define GVE_ADMIN_CREATE_RX 0x0006 + +/** Create receive queue command */ +struct gve_admin_create_rx { + /** Header */ + struct gve_admin_header hdr; + /** Queue ID */ + uint32_t id; + /** Index */ + uint32_t index; + /** Reserved */ + uint8_t reserved_a[4]; + /** Notification channel ID */ + uint32_t notify_id; + /** Queue resources address */ + uint64_t res; + /** Completion ring address */ + uint64_t cmplt; + /** Descriptor ring address */ + uint64_t desc; + /** Queue page list ID */ + uint32_t qpl_id; + /** Number of descriptor ring entries */ + uint16_t desc_count; + /** Packet buffer size */ + uint16_t bufsz; + /** Number of completion ring entries */ + uint16_t cmplt_count; + /** Reserved */ + uint8_t reserved[6]; +} __attribute__ (( packed )); + +/** Destroy transmit queue command */ +#define GVE_ADMIN_DESTROY_TX 0x0007 + +/** Destroy receive queue command */ +#define GVE_ADMIN_DESTROY_RX 0x0008 + +/** Deconfigure device resources command */ +#define GVE_ADMIN_DECONFIGURE 0x0009 
+ +/** An admin queue command */ +union gve_admin_command { + /** Header */ + struct gve_admin_header hdr; + /** Simple command */ + struct gve_admin_simple simple; + /** Describe device */ + struct gve_admin_describe desc; + /** Configure device resources */ + struct gve_admin_configure conf; + /** Register page list */ + struct gve_admin_register reg; + /** Create transmit queue */ + struct gve_admin_create_tx create_tx; + /** Create receive queue */ + struct gve_admin_create_rx create_rx; + /** Padding */ + uint8_t pad[64]; +}; + +/** + * Number of admin queue commands + * + * This is theoretically a policy decision. However, older revisions + * of the hardware seem to have only the "admin queue page frame + * number" register and no "admin queue length" register, with the + * implication that the admin queue must be exactly one page in + * length. + * + * Choose to use a one page (4kB) admin queue for both older and newer + * versions of the hardware, to minimise variability. + */ +#define GVE_ADMIN_COUNT ( GVE_PAGE_SIZE / sizeof ( union gve_admin_command ) ) + +/** Admin queue */ +struct gve_admin { + /** Commands */ + union gve_admin_command *cmd; + /** Producer counter */ + uint32_t prod; + /** DMA mapping */ + struct dma_mapping map; +}; + +/** Scratch buffer for admin queue commands */ +struct gve_scratch { + /** Buffer contents */ + union { + /** Device descriptor */ + struct gve_device_descriptor desc; + /** Page address list */ + struct gve_pages pages; + } *buf; + /** DMA mapping */ + struct dma_mapping map; +}; + +/** + * An event counter + * + * Written by the device to indicate completions. The device chooses + * which counter to use for each transmit queue, and stores the index + * of the chosen counter in the queue resources. 
+ */ +struct gve_event { + /** Number of events that have occurred */ + volatile uint32_t count; +} __attribute__ (( packed )); + +/** Event counter array */ +struct gve_events { + /** Event counters */ + struct gve_event *event; + /** DMA mapping */ + struct dma_mapping map; + /** Actual number of event counters */ + unsigned int count; +}; + +/** An interrupt channel */ +struct gve_irq { + /** Interrupt doorbell index (within doorbell BAR) */ + uint32_t db_idx; + /** Reserved */ + uint8_t reserved[60]; +} __attribute__ (( packed )); + +/** + * Number of interrupt channels + * + * We tell the device how many interrupt channels we have provided via + * the "configure device resources" admin queue command. The device + * will accept being given zero interrupt channels, but will + * subsequently fail to create more than a single queue (either + * transmit or receive). + * + * There is, of course, no documentation indicating how may interrupt + * channels actually need to be provided. In the absence of evidence + * to the contrary, assume that two channels (one for transmit, one + * for receive) will be sufficient. + */ +#define GVE_IRQ_COUNT 2 + +/** Interrupt channel array */ +struct gve_irqs { + /** Interrupt channels */ + struct gve_irq *irq; + /** DMA mapping */ + struct dma_mapping map; + /** Interrupt doorbells */ + volatile uint32_t *db[GVE_IRQ_COUNT]; +}; + +/** Disable in-order queue interrupt */ +#define GVE_GQI_IRQ_DISABLE 0x40000000UL + +/** Rearm out-of-order queue interrupt */ +#define GVE_DQO_IRQ_REARM 0x00000019UL + +/** + * Queue resources + * + * Written by the device to indicate the indices of the chosen event + * counter and descriptor doorbell register. 
+ * + * This appears to be a largely pointless data structure: the relevant + * information is static for the lifetime of the queue and could + * trivially have been returned in the response for the "create + * transmit/receive queue" command, instead of requiring yet another + * page-aligned coherent DMA buffer allocation. + */ +struct gve_resources { + /** Descriptor doorbell index (within doorbell BAR) */ + uint32_t db_idx; + /** Event counter index (within event counter array) */ + uint32_t evt_idx; + /** Reserved */ + uint8_t reserved[56]; +} __attribute__ (( packed )); + +/** + * Queue data buffer size + * + * In theory, we may specify the size of receive buffers. However, + * the original version of the device seems not to have a parameter + * for this, and assumes the use of half-page (2kB) buffers. Choose + * to use this as the buffer size, on the assumption that older + * devices will not support any other buffer size. + */ +#define GVE_BUF_SIZE ( GVE_PAGE_SIZE / 2 ) + +/** Number of data buffers per page */ +#define GVE_BUF_PER_PAGE ( GVE_PAGE_SIZE / GVE_BUF_SIZE ) + +/** + * Queue page list + * + * The device uses preregistered pages for fast-path DMA operations + * (i.e. transmit and receive buffers). A list of device addresses + * for each page must be registered before the transmit or receive + * queue is created, and cannot subsequently be modified. + * + * The Linux driver allocates pages as DMA_TO_DEVICE or + * DMA_FROM_DEVICE as appropriate, and uses dma_sync_single_for_cpu() + * etc to ensure that data is copied to/from bounce buffers as needed. + * + * Unfortunately there is no such sync operation available within our + * DMA API, since we are constrained by the limitations imposed by + * EFI_PCI_IO_PROTOCOL. There is no way to synchronise a buffer + * without also [un]mapping it, and no way to force the reuse of the + * same device address for a subsequent remapping. 
We are therefore + * constrained to use only DMA-coherent buffers, since this is the + * only way we can repeatedly reuse the same device address. + * + * Newer versions of the gVNIC device support "raw DMA addressing + * (RDA)", which is essentially a prebuilt queue page list covering + * the whole of the guest address space. Unfortunately we cannot rely + * on this, since older versions will not support it. + * + * Experimentation suggests that the device will accept a request to + * create a queue page list covering the whole of the guest address + * space via two giant "pages" of 2^63 bytes each. However, + * experimentation also suggests that the device will accept any old + * garbage value as the "page size". In the total absence of any + * documentation, it is probably unsafe to conclude that the device is + * bothering to look at or respect the "page size" parameter: it is + * most likely just presuming the use of 4kB pages. + */ +struct gve_qpl { + /** Page addresses */ + void *data; + /** Page mapping */ + struct dma_mapping map; + /** Number of pages */ + unsigned int count; + /** Queue page list ID */ + unsigned int id; + /** Queue page list base device address + * + * This will be zero in the GQI-QPL operating mode, or the DMA + * address of the first page in any other operating mode. + * (Despite its name, DQO-QPL still requires the use of raw + * DMA addresses in transmit and receive descriptors.) + */ + physaddr_t base; +}; + +/** Raw DMA addressing queue page list ID */ +#define GVE_RAW_QPL 0xffffffff + +/** + * Maximum number of transmit buffers + * + * This is a policy decision. 
+ */ +#define GVE_TX_FILL 8 + +/** Transmit queue page list ID */ +#define GVE_TX_QPL 0x18ae5458 + +/** Tranmsit queue interrupt channel */ +#define GVE_TX_IRQ 0 + +/** A transmit or receive buffer descriptor */ +struct gve_buffer { + /** Address (within queue page list address space) */ + uint64_t addr; +} __attribute__ (( packed )); + +/** An in-order transmit descriptor */ +struct gve_gqi_tx_descriptor { + /** Type */ + uint8_t type; + /** Reserved */ + uint8_t reserved_a[2]; + /** Number of descriptors in this packet */ + uint8_t count; + /** Total length of this packet */ + uint16_t total; + /** Length of this descriptor */ + uint16_t len; + /** Buffer descriptor */ + struct gve_buffer buf; +} __attribute__ (( packed )); + +/** Start of packet transmit descriptor type */ +#define GVE_GQI_TX_TYPE_START 0x00 + +/** Continuation of packet transmit descriptor type */ +#define GVE_GQI_TX_TYPE_CONT 0x20 + +/** An out-of-order transmit tag + * + * From the hardware perspective, this is an opaque 15-bit (sic) value + * that is simply copied from the descriptor to the corresponding + * completion. + */ +struct gve_dqo_tx_tag { + /** Buffer index within queue page list */ + uint8_t id; + /** Number of descriptors covered by this completion + * + * Note that this is a 7-bit quantity: the high bit may be + * (ab)used by the hardware to indicate that a completion is a + * terminologically undefined "miss" completion. 
+ */ + int8_t count; +} __attribute__ (( packed )); + +/** An out-of-order transmit descriptor */ +struct gve_dqo_tx_descriptor { + /** Buffer descriptor */ + struct gve_buffer buf; + /** Descriptor type and flags */ + uint8_t type; + /** Reserved */ + uint8_t reserved_a[3]; + /** Tag */ + struct gve_dqo_tx_tag tag; + /** Length of this descriptor */ + uint16_t len; +} __attribute__ (( packed )); + +/** Normal packet transmit descriptor type */ +#define GVE_DQO_TX_TYPE_PACKET 0x0c + +/** Last transmit descriptor in a packet */ +#define GVE_DQO_TX_TYPE_LAST 0x20 + +/** An out-of-order transmit completion */ +struct gve_dqo_tx_completion { + /** Reserved */ + uint8_t reserved_a[1]; + /** Completion flags */ + uint8_t flags; + /** Tag */ + struct gve_dqo_tx_tag tag; + /** Reserved */ + uint8_t reserved_b[4]; +} __attribute__ (( packed )); + +/** Transmit completion packet flag */ +#define GVE_DQO_TXF_PKT 0x10 + +/** Transmit completion generation flag */ +#define GVE_DQO_TXF_GEN 0x80 + +/** + * Maximum number of receive buffers + * + * This is a policy decision. Experiments suggest that using fewer + * than 64 receive buffers leads to excessive packet drop rates on + * some instance types. 
+ */ +#define GVE_RX_FILL 64 + +/** Receive queue page list ID */ +#define GVE_RX_QPL 0x18ae5258 + +/** Receive queue interrupt channel */ +#define GVE_RX_IRQ 1 + +/** An in-order receive descriptor */ +struct gve_gqi_rx_descriptor { + /** Buffer descriptor */ + struct gve_buffer buf; +} __attribute__ (( packed )); + +/** Receive error */ +#define GVE_GQI_RXF_ERROR 0x08 + +/** Receive packet continues into next descriptor */ +#define GVE_GQI_RXF_MORE 0x20 + +/** Receive sequence number mask */ +#define GVE_GQI_RX_SEQ_MASK 0x07 + +/** An in-order receive completion descriptor */ +struct gve_gqi_rx_completion { + /** Reserved */ + uint8_t reserved[60]; + /** Length */ + uint16_t len; + /** Flags */ + uint8_t flags; + /** Sequence number */ + uint8_t seq; +} __attribute__ (( packed )); + +/** Padding at the start of all received packets */ +#define GVE_GQI_RX_PAD 2 + +/** An out-of-order receive descriptor */ +struct gve_dqo_rx_descriptor { + /** Tag */ + uint8_t tag; + /** Reserved */ + uint8_t reserved_a[7]; + /** Buffer descriptor */ + struct gve_buffer buf; + /** Reserved */ + uint8_t reserved_b[16]; +} __attribute__ (( packed )); + +/** An out-of-order receive completion */ +struct gve_dqo_rx_completion { + /** Reserved */ + uint8_t reserved_a[1]; + /** Status */ + uint8_t status; + /** Reserved */ + uint8_t reserved_b[2]; + /** Length and generation bit */ + uint16_t len; + /** Reserved */ + uint8_t reserved_c[2]; + /** Flags */ + uint8_t flags; + /** Reserved */ + uint8_t reserved_d[3]; + /** Tag */ + uint8_t tag; + /** Reserved */ + uint8_t reserved_e[19]; +} __attribute__ (( packed )); + +/** Receive error */ +#define GVE_DQO_RXS_ERROR 0x04 + +/** Receive completion generation flag */ +#define GVE_DQO_RXL_GEN 0x4000 + +/** Last receive descriptor in a packet */ +#define GVE_DQO_RXF_LAST 0x02 + +/** Queue strides */ +struct gve_queue_stride { + /** Descriptor ring stride */ + uint8_t desc; + /** Completion ring stride */ + uint8_t cmplt; +}; + +/** A 
descriptor queue */ +struct gve_queue { + /** Descriptor ring */ + union { + /** Transmit descriptors */ + union { + /** In-order transmit descriptors */ + struct gve_gqi_tx_descriptor *gqi; + /** Out-of-order transmit descriptors */ + struct gve_dqo_tx_descriptor *dqo; + } tx; + /** Receive descriptors */ + union { + /** In-order receive descriptors */ + struct gve_gqi_rx_descriptor *gqi; + /** Out-of-order receive descriptors */ + struct gve_dqo_rx_descriptor *dqo; + } rx; + /** Raw data */ + void *raw; + } desc; + /** Completion ring */ + union { + /** Transmit completions */ + union { + /** Out-of-order transmit completions */ + struct gve_dqo_tx_completion *dqo; + } tx; + /** Receive completions */ + union { + /** In-order receive completions */ + struct gve_gqi_rx_completion *gqi; + /** Out-of-order receive completions */ + struct gve_dqo_rx_completion *dqo; + } rx; + /** Raw data */ + void *raw; + } cmplt; + /** Queue resources */ + struct gve_resources *res; + + /** Queue type */ + const struct gve_queue_type *type; + /** Queue strides */ + struct gve_queue_stride stride; + /** Number of descriptors (must be a power of two) */ + unsigned int count; + /** Maximum fill level (must be a power of two) */ + unsigned int fill; + + /** Descriptor mapping */ + struct dma_mapping desc_map; + /** Completion mapping */ + struct dma_mapping cmplt_map; + /** Queue resources mapping */ + struct dma_mapping res_map; + + /** Doorbell register */ + volatile uint32_t *db; + /** Event counter */ + struct gve_event *event; + + /** Producer counter */ + uint32_t prod; + /** Consumer counter */ + uint32_t cons; + /** Completion counter */ + uint32_t done; + /** Tag ring */ + uint8_t *tag; + + /** Queue page list */ + struct gve_qpl qpl; +}; + +/** A descriptor queue type */ +struct gve_queue_type { + /** Name */ + const char *name; + /** + * Populate command parameters to create queue + * + * @v queue Descriptor queue + * @v qpl Queue page list ID + * @v cmd Admin queue command 
+ */ + void ( * param ) ( struct gve_queue *queue, uint32_t qpl, + union gve_admin_command *cmd ); + /** Queue page list ID */ + uint32_t qpl; + /** Interrupt channel */ + uint8_t irq; + /** Maximum fill level */ + uint8_t fill; + /** Queue strides */ + struct { + /** In-order queue strides */ + struct gve_queue_stride gqi; + /** Out-of-order queue strides */ + struct gve_queue_stride dqo; + } stride; + /** Command to create queue */ + uint8_t create; + /** Command to destroy queue */ + uint8_t destroy; +}; + +/** A Google Virtual Ethernet NIC */ +struct gve_nic { + /** Configuration registers */ + void *cfg; + /** Doorbell registers */ + void *db; + /** PCI revision */ + uint8_t revision; + /** Network device */ + struct net_device *netdev; + /** DMA device */ + struct dma_device *dma; + /** Dummy MSI-X interrupt */ + struct pci_msix msix; + + /** Admin queue */ + struct gve_admin admin; + /** Interrupt channels */ + struct gve_irqs irqs; + /** Event counters */ + struct gve_events events; + /** Scratch buffer */ + struct gve_scratch scratch; + /** Supported options */ + uint32_t options; + /** Operating mode */ + unsigned int mode; + + /** Transmit queue */ + struct gve_queue tx; + /** Receive queue */ + struct gve_queue rx; + /** Transmit I/O buffers (indexed by tag) */ + struct io_buffer *tx_iobuf[GVE_TX_FILL]; + /** Transmit tag chain */ + uint8_t tx_chain[GVE_TX_FILL]; + /** Transmit tag ring */ + uint8_t tx_tag[GVE_TX_FILL]; + /** Receive tag ring */ + uint8_t rx_tag[GVE_RX_FILL]; + /** Receive sequence number */ + unsigned int seq; + + /** Startup process */ + struct process startup; + /** Startup process retry counter */ + unsigned int retries; + /** Reset recovery watchdog timer */ + struct retry_timer watchdog; + /** Reset recovery recorded activity counter */ + uint32_t activity; +}; + +/** Operating mode + * + * These values are chosen to allow for easy transformation to a queue + * format identifier as used for the "Configure device resources" + * 
command. + */ +#define GVE_MODE_QPL 0x01 /**< Use registered queue pages */ +#define GVE_MODE_DQO 0x02 /**< Use out-of-order queues */ + +/** Maximum time to wait for admin queue commands */ +#define GVE_ADMIN_MAX_WAIT_MS 500 + +/** Maximum number of times to reattempt device reset */ +#define GVE_RESET_MAX_RETRY 5 + +/** Time between reset recovery checks */ +#define GVE_WATCHDOG_TIMEOUT ( 1 * TICKS_PER_SEC ) + +#endif /* _GVE_H */ diff --git a/src/drivers/net/ice.c b/src/drivers/net/ice.c index b5d66f1bb..1abc8ecd0 100644 --- a/src/drivers/net/ice.c +++ b/src/drivers/net/ice.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> diff --git a/src/drivers/net/ice.h b/src/drivers/net/ice.h index 26291a7a1..c4b7b95be 100644 --- a/src/drivers/net/ice.h +++ b/src/drivers/net/ice.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <ipxe/if_ether.h> #include "intelxl.h" diff --git a/src/drivers/net/igbvf/igbvf_osdep.h b/src/drivers/net/igbvf/igbvf_osdep.h index 8ac179de0..dc65da6c1 100644 --- a/src/drivers/net/igbvf/igbvf_osdep.h +++ b/src/drivers/net/igbvf/igbvf_osdep.h @@ -35,8 +35,9 @@ FILE_LICENCE ( GPL2_ONLY ); #ifndef _IGBVF_OSDEP_H_ #define _IGBVF_OSDEP_H_ +#include <stdbool.h> + #define u8 unsigned char -#define bool boolean_t #define dma_addr_t unsigned long #define __le16 uint16_t #define __le32 uint32_t @@ -51,10 +52,6 @@ FILE_LICENCE ( GPL2_ONLY ); #define ETH_FCS_LEN 4 typedef int spinlock_t; -typedef enum { - false = 0, - true = 1 -} boolean_t; #define usec_delay(x) udelay(x) #define msec_delay(x) mdelay(x) diff --git a/src/drivers/net/intel.c b/src/drivers/net/intel.c index 7879714f6..57c0151a4 100644 --- a/src/drivers/net/intel.c +++ b/src/drivers/net/intel.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> @@ -1043,6 +1044,7 @@ static struct 
pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x0d4f, "i219v-10", "I219-V (10)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x0d53, "i219lm-12", "I219-LM (12)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x0d55, "i219v-12", "I219-V (12)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x0d9f, "i225it", "I225-IT", INTEL_PBSIZE_RST ), PCI_ROM ( 0x8086, 0x0dc5, "i219lm-23", "I219-LM (23)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x1000, "82542-f", "82542 (Fiber)", 0 ), PCI_ROM ( 0x8086, 0x1001, "82543gc-f", "82543GC (Fiber)", 0 ), @@ -1133,6 +1135,9 @@ static struct pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x10f0, "82578dc", "82578DC", 0 ), PCI_ROM ( 0x8086, 0x10f5, "82567lm", "82567LM", 0 ), PCI_ROM ( 0x8086, 0x10f6, "82574l", "82574L", 0 ), + PCI_ROM ( 0x8086, 0x125b, "i226lm", "I226-LM", INTEL_PBSIZE_RST ), + PCI_ROM ( 0x8086, 0x125c, "i226v", "I226-V", INTEL_PBSIZE_RST ), + PCI_ROM ( 0x8086, 0x125d, "i226it", "I226-IT", INTEL_PBSIZE_RST ), PCI_ROM ( 0x8086, 0x1501, "82567v-3", "82567V-3", INTEL_PBS_ERRATA ), PCI_ROM ( 0x8086, 0x1502, "82579lm", "82579LM", INTEL_NO_PHY_RST ), PCI_ROM ( 0x8086, 0x1503, "82579v", "82579V", 0 ), @@ -1180,6 +1185,8 @@ static struct pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x15e1, "i219lm-9", "I219-LM (9)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x15e2, "i219v-9", "I219-V (9)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x15e3, "i219lm-5", "I219-LM (5)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x15f2, "i225lm", "I225-LM", INTEL_PBSIZE_RST ), + PCI_ROM ( 0x8086, 0x15f3, "i225v", "I225-V", INTEL_PBSIZE_RST ), PCI_ROM ( 0x8086, 0x15f4, "i219lm-15", "I219-LM (15)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x15f5, "i219v-15", "I219-V (15)", INTEL_I219 ), PCI_ROM ( 0x8086, 0x15f9, "i219lm-14", "I219-LM (14)", INTEL_I219 ), @@ -1193,6 +1200,9 @@ static struct pci_device_id intel_nics[] = { PCI_ROM ( 0x8086, 0x1f41, "i354", "I354", INTEL_NO_ASDE ), PCI_ROM ( 0x8086, 0x294c, "82566dc-2", "82566DC-2", 0 ), PCI_ROM ( 0x8086, 0x2e6e, "cemedia", "CE Media Processor", 0 ), + PCI_ROM ( 0x8086, 0x5502, 
"i225lmvp", "I225-LMvP", INTEL_PBSIZE_RST ), + PCI_ROM ( 0x8086, 0x57a0, "i219lm-24", "I219-LM (24)", INTEL_I219 ), + PCI_ROM ( 0x8086, 0x57a1, "i219v-24", "I219-V (24)", INTEL_I219 ), }; /** Intel PCI driver */ diff --git a/src/drivers/net/intel.h b/src/drivers/net/intel.h index 29cf3a7d8..bfd250f00 100644 --- a/src/drivers/net/intel.h +++ b/src/drivers/net/intel.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <ipxe/if_ether.h> diff --git a/src/drivers/net/intelvf.c b/src/drivers/net/intelvf.c index 0d48b4178..e99b67626 100644 --- a/src/drivers/net/intelvf.c +++ b/src/drivers/net/intelvf.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/intelvf.h b/src/drivers/net/intelvf.h index ffb18e040..378f9b075 100644 --- a/src/drivers/net/intelvf.h +++ b/src/drivers/net/intelvf.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include "intel.h" diff --git a/src/drivers/net/intelx.c b/src/drivers/net/intelx.c index 343d01374..ceb687e4f 100644 --- a/src/drivers/net/intelx.c +++ b/src/drivers/net/intelx.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> diff --git a/src/drivers/net/intelx.h b/src/drivers/net/intelx.h index d7f3b78e8..d68f50082 100644 --- a/src/drivers/net/intelx.h +++ b/src/drivers/net/intelx.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <ipxe/if_ether.h> diff --git a/src/drivers/net/intelxl.c b/src/drivers/net/intelxl.c index 82b07833c..f8d325ead 100644 --- a/src/drivers/net/intelxl.c +++ b/src/drivers/net/intelxl.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> @@ -63,35 +64,20 @@ int intelxl_msix_enable ( 
struct intelxl_nic *intelxl, struct pci_device *pci, unsigned int vector ) { int rc; - /* Map dummy target location */ - if ( ( rc = dma_map ( intelxl->dma, &intelxl->msix.map, - virt_to_phys ( &intelxl->msix.msg ), - sizeof ( intelxl->msix.msg ), DMA_RX ) ) != 0 ) { - DBGC ( intelxl, "INTELXL %p could not map MSI-X target: %s\n", - intelxl, strerror ( rc ) ); - goto err_map; - } - /* Enable MSI-X capability */ - if ( ( rc = pci_msix_enable ( pci, &intelxl->msix.cap ) ) != 0 ) { + if ( ( rc = pci_msix_enable ( pci, &intelxl->msix ) ) != 0 ) { DBGC ( intelxl, "INTELXL %p could not enable MSI-X: %s\n", intelxl, strerror ( rc ) ); goto err_enable; } - /* Configure interrupt to write to dummy location */ - pci_msix_map ( &intelxl->msix.cap, vector, - dma ( &intelxl->msix.map, &intelxl->msix.msg ), 0 ); - /* Enable dummy interrupt */ - pci_msix_unmask ( &intelxl->msix.cap, vector ); + pci_msix_unmask ( &intelxl->msix, vector ); return 0; - pci_msix_disable ( pci, &intelxl->msix.cap ); + pci_msix_disable ( pci, &intelxl->msix ); err_enable: - dma_unmap ( &intelxl->msix.map ); - err_map: return rc; } @@ -106,13 +92,10 @@ void intelxl_msix_disable ( struct intelxl_nic *intelxl, struct pci_device *pci, unsigned int vector ) { /* Disable dummy interrupts */ - pci_msix_mask ( &intelxl->msix.cap, vector ); + pci_msix_mask ( &intelxl->msix, vector ); /* Disable MSI-X capability */ - pci_msix_disable ( pci, &intelxl->msix.cap ); - - /* Unmap dummy target location */ - dma_unmap ( &intelxl->msix.map ); + pci_msix_disable ( pci, &intelxl->msix ); } /****************************************************************************** diff --git a/src/drivers/net/intelxl.h b/src/drivers/net/intelxl.h index 6c7865aa2..4481300d3 100644 --- a/src/drivers/net/intelxl.h +++ b/src/drivers/net/intelxl.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <ipxe/if_ether.h> @@ -891,16 +892,6 @@ intelxl_init_ring ( struct intelxl_ring 
*ring, unsigned int count, size_t len, #define INTELXL_PFGEN_PORTNUM_PORT_NUM(x) \ ( ( (x) >> 0 ) & 0x3 ) /**< Port number */ -/** MSI-X interrupt */ -struct intelxl_msix { - /** PCI capability */ - struct pci_msix cap; - /** MSI-X dummy interrupt target */ - uint32_t msg; - /** DMA mapping for dummy interrupt target */ - struct dma_mapping map; -}; - /** MSI-X interrupt vector */ #define INTELXL_MSIX_VECTOR 0 @@ -934,7 +925,7 @@ struct intelxl_nic { /** PCI Express capability offset */ unsigned int exp; /** MSI-X interrupt */ - struct intelxl_msix msix; + struct pci_msix msix; /** Admin command queue */ struct intelxl_admin command; diff --git a/src/drivers/net/intelxlvf.c b/src/drivers/net/intelxlvf.c index 083195513..ab4df4c47 100644 --- a/src/drivers/net/intelxlvf.c +++ b/src/drivers/net/intelxlvf.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/intelxlvf.h b/src/drivers/net/intelxlvf.h index 95ddf9474..63ed0b202 100644 --- a/src/drivers/net/intelxlvf.h +++ b/src/drivers/net/intelxlvf.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include "intelxl.h" diff --git a/src/drivers/net/intelxvf.c b/src/drivers/net/intelxvf.c index d50bac698..70ed8efe3 100644 --- a/src/drivers/net/intelxvf.c +++ b/src/drivers/net/intelxvf.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/intelxvf.h b/src/drivers/net/intelxvf.h index 4663272aa..1dac98699 100644 --- a/src/drivers/net/intelxvf.h +++ b/src/drivers/net/intelxvf.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include "intelvf.h" diff --git a/src/drivers/net/iphone.c b/src/drivers/net/iphone.c index bbac527bd..b58017560 100644 --- a/src/drivers/net/iphone.c +++ b/src/drivers/net/iphone.c @@ -22,6 +22,7 @@ */ FILE_LICENCE 
( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> @@ -362,18 +363,8 @@ static int icert_cert ( struct icert *icert, struct asn1_cursor *subject, struct asn1_builder raw = { NULL, 0 }; uint8_t digest_ctx[SHA256_CTX_SIZE]; uint8_t digest_out[SHA256_DIGEST_SIZE]; - uint8_t pubkey_ctx[RSA_CTX_SIZE]; - int len; int rc; - /* Initialise "private" key */ - if ( ( rc = pubkey_init ( pubkey, pubkey_ctx, private->data, - private->len ) ) != 0 ) { - DBGC ( icert, "ICERT %p could not initialise private key: " - "%s\n", icert, strerror ( rc ) ); - goto err_pubkey_init; - } - /* Construct subjectPublicKeyInfo */ if ( ( rc = ( asn1_prepend_raw ( &spki, public->data, public->len ), asn1_prepend_raw ( &spki, icert_nul, @@ -407,21 +398,13 @@ static int icert_cert ( struct icert *icert, struct asn1_cursor *subject, digest_update ( digest, digest_ctx, tbs.data, tbs.len ); digest_final ( digest, digest_ctx, digest_out ); - /* Construct signature */ - if ( ( rc = asn1_grow ( &raw, pubkey_max_len ( pubkey, - pubkey_ctx ) ) ) != 0 ) { - DBGC ( icert, "ICERT %p could not build signature: %s\n", - icert, strerror ( rc ) ); - goto err_grow; - } - if ( ( len = pubkey_sign ( pubkey, pubkey_ctx, digest, digest_out, - raw.data ) ) < 0 ) { - rc = len; + /* Construct signature using "private" key */ + if ( ( rc = pubkey_sign ( pubkey, private, digest, digest_out, + &raw ) ) != 0 ) { DBGC ( icert, "ICERT %p could not sign: %s\n", icert, strerror ( rc ) ); goto err_pubkey_sign; } - assert ( ( ( size_t ) len ) == raw.len ); /* Construct raw certificate data */ if ( ( rc = ( asn1_prepend_raw ( &raw, icert_nul, @@ -447,14 +430,11 @@ static int icert_cert ( struct icert *icert, struct asn1_cursor *subject, err_x509: err_raw: err_pubkey_sign: + err_tbs: + err_spki: free ( raw.data ); - err_grow: free ( tbs.data ); - err_tbs: free ( spki.data ); - err_spki: - pubkey_final ( pubkey, pubkey_ctx ); - err_pubkey_init: return rc; } @@ -1487,7 +1467,7 @@ static int 
ipair_rx_pubkey ( struct ipair *ipair, char *msg ) { } /* Decode inner layer of Base64 */ - next = pem_asn1 ( virt_to_user ( decoded ), len, 0, &key ); + next = pem_asn1 ( decoded, len, 0, &key ); if ( next < 0 ) { rc = next; DBGC ( ipair, "IPAIR %p invalid inner public key:\n%s\n", diff --git a/src/drivers/net/iphone.h b/src/drivers/net/iphone.h index 2db6da7bd..3448af37f 100644 --- a/src/drivers/net/iphone.h +++ b/src/drivers/net/iphone.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <ipxe/usb.h> diff --git a/src/drivers/net/lan78xx.c b/src/drivers/net/lan78xx.c index 3f4f21b60..32333e787 100644 --- a/src/drivers/net/lan78xx.c +++ b/src/drivers/net/lan78xx.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/lan78xx.h b/src/drivers/net/lan78xx.h index 39422aec0..ea6d7ce52 100644 --- a/src/drivers/net/lan78xx.h +++ b/src/drivers/net/lan78xx.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include "smscusb.h" #include "smsc75xx.h" diff --git a/src/drivers/net/legacy.c b/src/drivers/net/legacy.c index 73a80194f..b86f77fc8 100644 --- a/src/drivers/net/legacy.c +++ b/src/drivers/net/legacy.c @@ -19,7 +19,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); -struct nic nic; +struct nic legacy_nic; static int legacy_registered = 0; @@ -42,7 +42,8 @@ static void legacy_poll ( struct net_device *netdev ) { struct nic *nic = netdev->priv; struct io_buffer *iobuf; - iobuf = alloc_iob ( ETH_FRAME_LEN ); + iobuf = alloc_iob ( ETH_FRAME_LEN + 4 /* possible VLAN */ + + 4 /* possible CRC */ ); if ( ! 
iobuf ) return; @@ -83,26 +84,42 @@ int legacy_probe ( void *hwdev, void ( * set_drvdata ) ( void *hwdev, void *priv ), struct device *dev, int ( * probe ) ( struct nic *nic, void *hwdev ), - void ( * disable ) ( struct nic *nic, void *hwdev ) ) { + void ( * disable ) ( struct nic *nic, void *hwdev ), + size_t fake_bss_len ) { struct net_device *netdev; + struct nic *nic; int rc; - if ( legacy_registered ) - return -EBUSY; - + if ( legacy_registered ) { + rc = -EBUSY; + goto err_registered; + } + netdev = alloc_etherdev ( 0 ); - if ( ! netdev ) - return -ENOMEM; + if ( ! netdev ) { + rc = -ENOMEM; + goto err_alloc; + } netdev_init ( netdev, &legacy_operations ); - netdev->priv = &nic; - memset ( &nic, 0, sizeof ( nic ) ); + nic = &legacy_nic; + netdev->priv = nic; + memset ( nic, 0, sizeof ( *nic ) ); set_drvdata ( hwdev, netdev ); netdev->dev = dev; - nic.node_addr = netdev->hw_addr; - nic.irqno = dev->desc.irq; + nic->node_addr = netdev->hw_addr; + nic->irqno = dev->desc.irq; - if ( ! probe ( &nic, hwdev ) ) { + if ( fake_bss_len ) { + nic->fake_bss = malloc_phys ( fake_bss_len, PAGE_SIZE ); + if ( ! nic->fake_bss ) { + rc = -ENOMEM; + goto err_fake_bss; + } + } + nic->fake_bss_len = fake_bss_len; + + if ( ! probe ( nic, hwdev ) ) { rc = -ENODEV; goto err_probe; } @@ -112,7 +129,7 @@ int legacy_probe ( void *hwdev, * don't support interrupts; doing this allows the timer * interrupt to be used instead. 
*/ - dev->desc.irq = nic.irqno; + dev->desc.irq = nic->irqno; if ( ( rc = register_netdev ( netdev ) ) != 0 ) goto err_register; @@ -122,16 +139,21 @@ int legacy_probe ( void *hwdev, /* Do not remove this message */ printf ( "WARNING: Using legacy NIC wrapper on %s\n", - netdev->ll_protocol->ntoa ( nic.node_addr ) ); + netdev->ll_protocol->ntoa ( nic->node_addr ) ); legacy_registered = 1; return 0; err_register: - disable ( &nic, hwdev ); + disable ( nic, hwdev ); err_probe: + if ( fake_bss_len ) + free_phys ( nic->fake_bss, fake_bss_len ); + err_fake_bss: netdev_nullify ( netdev ); netdev_put ( netdev ); + err_alloc: + err_registered: return rc; } @@ -143,6 +165,8 @@ void legacy_remove ( void *hwdev, unregister_netdev ( netdev ); disable ( nic, hwdev ); + if ( nic->fake_bss_len ) + free_phys ( nic->fake_bss, nic->fake_bss_len ); netdev_nullify ( netdev ); netdev_put ( netdev ); legacy_registered = 0; diff --git a/src/drivers/net/marvell/aqc1xx.c b/src/drivers/net/marvell/aqc1xx.c index 42b8164ac..b108bf75d 100644 --- a/src/drivers/net/marvell/aqc1xx.c +++ b/src/drivers/net/marvell/aqc1xx.c @@ -2,7 +2,7 @@ * * Marvell AQtion family network card driver. 
* - * Copyright(C) 2017-2021 Marvell + * Copyright(C) 2017-2024 Marvell * * SPDX-License-Identifier: BSD-2-Clause * @@ -56,8 +56,7 @@ extern struct atl_hw_ops atl2_hw; */ static int atl_ring_alloc ( const struct atl_nic *nic, struct atl_ring *ring, - uint32_t desc_size, uint32_t reg_base ) -{ + uint32_t desc_size, uint32_t reg_base ) { physaddr_t phy_addr; /* Allocate ring buffer.*/ @@ -75,8 +74,8 @@ static int atl_ring_alloc ( const struct atl_nic *nic, struct atl_ring *ring, phy_addr = dma ( &ring->map, ring->ring ); /* Write ring address (hi & low parts).*/ - ATL_WRITE_REG ( (uint32_t)phy_addr, reg_base ); - ATL_WRITE_REG ( (uint32_t)(((uint64_t)phy_addr) >> 32), reg_base + 4 ); + ATL_WRITE_REG ( ( uint32_t )phy_addr, reg_base ); + ATL_WRITE_REG ( ( uint32_t ) ( ( ( uint64_t )phy_addr ) >> 32 ), reg_base + 4 ); /* Write ring length.*/ ATL_WRITE_REG ( ATL_RING_SIZE, reg_base + 8 ); @@ -84,35 +83,31 @@ static int atl_ring_alloc ( const struct atl_nic *nic, struct atl_ring *ring, ring->sw_head = ring->sw_tail = 0; DBGC ( nic, "AQUANTIA: %p ring is at [%08llx,%08llx), reg base %#x\n", - nic, ((unsigned long long)phy_addr), - ((unsigned long long) phy_addr + ring->length), reg_base ); + nic, ( ( unsigned long long )phy_addr ), + ( ( unsigned long long ) phy_addr + ring->length ), reg_base ); return 0; } -static void atl_ring_free ( struct atl_ring *ring ) -{ +static void atl_ring_free ( struct atl_ring *ring ) { dma_free ( &ring->map, ring->ring, ring->length ); ring->ring = NULL; ring->length = 0; } -static void atl_ring_next_dx ( unsigned int *val ) -{ +static void atl_ring_next_dx ( unsigned int *val ) { ++( *val ); if ( *val == ATL_RING_SIZE ) *val = 0; } -int atl_ring_full ( const struct atl_ring *ring ) -{ +int atl_ring_full ( const struct atl_ring *ring ) { unsigned int tail = ring->sw_tail; atl_ring_next_dx ( &tail ); return tail == ring->sw_head; } -void atl_rx_ring_fill ( struct atl_nic *nic ) -{ +void atl_rx_ring_fill ( struct atl_nic *nic ) { struct 
atl_desc_rx *rx; struct io_buffer *iobuf; physaddr_t address; @@ -143,8 +138,8 @@ void atl_rx_ring_fill ( struct atl_nic *nic ) DBGC( nic, "AQUANTIA: RX[%d] is [%llx,%llx)\n", nic->rx_ring.sw_tail, - ( (unsigned long long)address), - ( (unsigned long long)address + ATL_RX_MAX_LEN) ); + ( ( unsigned long long )address ), + ( ( unsigned long long )address + ATL_RX_MAX_LEN ) ); atl_ring_next_dx ( &nic->rx_ring.sw_tail ); refilled++; @@ -163,23 +158,22 @@ void atl_rx_ring_fill ( struct atl_nic *nic ) * @v netdev Network device * @ret rc Return status code */ -static int atl_open ( struct net_device *netdev ) -{ +static int atl_open ( struct net_device *netdev ) { struct atl_nic *nic = netdev->priv; uint32_t ctrl = 0; /* Tx ring */ - if ( atl_ring_alloc ( nic, &nic->tx_ring, sizeof(struct atl_desc_tx), + if ( atl_ring_alloc ( nic, &nic->tx_ring, sizeof ( struct atl_desc_tx ), ATL_TX_DMA_DESC_ADDR ) != 0 ) goto err_tx_alloc; /* Rx ring */ - if ( atl_ring_alloc ( nic, &nic->rx_ring, sizeof(struct atl_desc_rx), + if ( atl_ring_alloc ( nic, &nic->rx_ring, sizeof ( struct atl_desc_rx ), ATL_RX_DMA_DESC_ADDR ) != 0 ) goto err_rx_alloc; /* Allocate interrupt vectors */ - ATL_WRITE_REG ( (ATL_IRQ_CTRL_COR_EN | ATL_IRQ_CTRL_REG_RST_DIS), + ATL_WRITE_REG ( ( ATL_IRQ_CTRL_COR_EN | ATL_IRQ_CTRL_REG_RST_DIS ), ATL_IRQ_CTRL ); /*TX & RX Interruprt Mapping*/ @@ -197,7 +191,7 @@ static int atl_open ( struct net_device *netdev ) ctrl = ATL_IRQ_TX | ATL_IRQ_RX; /* itr mask */ ATL_WRITE_REG ( ctrl, ATL_ITR_MSKS ); - ATL_WRITE_REG ( (uint32_t)ATL_RX_MAX_LEN / 1024U, + ATL_WRITE_REG ( ( uint32_t )ATL_RX_MAX_LEN / 1024U, ATL_RX_DMA_DESC_BUF_SIZE ); /*filter global ctrl */ @@ -219,8 +213,8 @@ static int atl_open ( struct net_device *netdev ) ATL_WRITE_REG ( ctrl, ATL_RPB0_CTRL2 ); /*RPB global ctrl*/ - ctrl = ATL_READ_REG(ATL_RPB_CTRL); - ctrl |= (ATL_RPB_CTRL_EN | ATL_RPB_CTRL_FC); + ctrl = ATL_READ_REG ( ATL_RPB_CTRL ); + ctrl |= ( ATL_RPB_CTRL_EN | ATL_RPB_CTRL_FC ); ATL_WRITE_REG ( ctrl, 
ATL_RPB_CTRL ); /*TX data path*/ @@ -266,8 +260,7 @@ err_tx_alloc: * * @v netdev Network device */ -static void atl_close ( struct net_device *netdev ) -{ +static void atl_close ( struct net_device *netdev ) { struct atl_nic *nic = netdev->priv; nic->hw_ops->stop ( nic ); @@ -276,9 +269,9 @@ static void atl_close ( struct net_device *netdev ) /* tgb global ctrl */ ATL_WRITE_REG ( ATL_TPB_CTRL_DIS, ATL_TPB_CTRL); - ATL_WRITE_REG ( ATL_READ_REG(ATL_RING_TX_CTRL) | (~ATL_RING_TX_CTRL_EN), + ATL_WRITE_REG ( ATL_READ_REG ( ATL_RING_TX_CTRL ) | ( ~ATL_RING_TX_CTRL_EN ), ATL_RING_TX_CTRL ); - ATL_WRITE_REG ( ATL_READ_REG(ATL_RING_RX_CTRL) | (~ATL_RING_RX_CTRL_EN), + ATL_WRITE_REG ( ATL_READ_REG ( ATL_RING_RX_CTRL ) | ( ~ATL_RING_RX_CTRL_EN ), ATL_RING_RX_CTRL ); /* clear itr mask */ @@ -298,8 +291,7 @@ static void atl_close ( struct net_device *netdev ) * @v iobuf I/O buffer * @ret rc Return status code */ -int atl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) -{ +int atl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) { struct atl_nic *nic = netdev->priv; struct atl_desc_tx *tx; physaddr_t address; @@ -311,7 +303,7 @@ int atl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) return -ENOBUFS; } - tx = (struct atl_desc_tx *)nic->tx_ring.ring + nic->tx_ring.sw_tail; + tx = ( struct atl_desc_tx * )nic->tx_ring.ring + nic->tx_ring.sw_tail; /* Populate transmit descriptor */ memset ( tx, 0, sizeof ( *tx ) ); @@ -320,17 +312,17 @@ int atl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) len = iob_len ( iobuf ); tx->status = 0x1; - tx->status = ( (tx->status) & ~ATL_DESC_TX_BUF_LEN_MASK) | - ((len << ATL_DESC_TX_BUF_LEN_OFFSET) & - ATL_DESC_TX_BUF_LEN_MASK ); - tx->status = ((tx->status) & ~ATL_DESC_TX_EOP_MASK) | + tx->status = ( (tx->status) & ~ATL_DESC_TX_BUF_LEN_MASK) | + ( (len << ATL_DESC_TX_BUF_LEN_OFFSET) & + ATL_DESC_TX_BUF_LEN_MASK ); + tx->status = ((tx->status) & ~ATL_DESC_TX_EOP_MASK) | ( 
(ATL_DESC_TX_DX_EOP_VALUE << ATL_DESC_TX_EOP_OFFSET) & ATL_DESC_TX_EOP_MASK ); - tx->status = ( (tx->status) & ~ATL_DESC_TX_CMD_MASK) | - ((ATL_DESC_TX_CMD_VALUE << ATL_DESC_TX_CMD_OFFSET) & - ATL_DESC_TX_CMD_MASK ); - tx->flag = ( (tx->flag) & ~ATL_DESC_TX_PAY_LEN_MASK) | - ((len << ATL_DESC_TX_PAY_LEN_OFFSET) & + tx->status = ( (tx->status) & ~ATL_DESC_TX_CMD_MASK) | + ( (ATL_DESC_TX_CMD_VALUE << ATL_DESC_TX_CMD_OFFSET) & + ATL_DESC_TX_CMD_MASK ); + tx->flag = ( (tx->flag) & ~ATL_DESC_TX_PAY_LEN_MASK) | + ( (len << ATL_DESC_TX_PAY_LEN_OFFSET) & ATL_DESC_TX_PAY_LEN_MASK ); wmb(); @@ -345,8 +337,7 @@ int atl_transmit ( struct net_device *netdev, struct io_buffer *iobuf ) return 0; } -void atl_check_link ( struct net_device *netdev ) -{ +void atl_check_link ( struct net_device *netdev ) { struct atl_nic *nic = netdev->priv; uint32_t link_state; @@ -372,8 +363,7 @@ void atl_check_link ( struct net_device *netdev ) * * @v netdev Network device */ -void atl_poll_tx ( struct net_device *netdev ) -{ +void atl_poll_tx ( struct net_device *netdev ) { struct atl_nic *nic = netdev->priv; struct atl_desc_tx_wb *tx; @@ -385,7 +375,7 @@ void atl_poll_tx ( struct net_device *netdev ) nic->tx_ring.sw_head; /* Stop if descriptor is still in use */ - if ( !(tx->status & cpu_to_le32 ( ATL_TX_DESC_STATUS_DD ) ) ) + if ( !( tx->status & cpu_to_le32 ( ATL_TX_DESC_STATUS_DD ) ) ) return; DBGC2 ( nic, "AQUANTIA: %p TX[%d] complete\n", @@ -402,8 +392,7 @@ void atl_poll_tx ( struct net_device *netdev ) * * @v netdev Network device */ -void atl_poll_rx ( struct net_device *netdev ) -{ +void atl_poll_rx ( struct net_device *netdev ) { struct atl_nic *nic = netdev->priv; struct atl_desc_rx_wb *rx; struct io_buffer *iobuf; @@ -413,11 +402,11 @@ void atl_poll_rx ( struct net_device *netdev ) while ( nic->rx_ring.sw_head != nic->rx_ring.sw_tail ) { /* Get next receive descriptor */ - rx = (struct atl_desc_rx_wb *)nic->rx_ring.ring + + rx = ( struct atl_desc_rx_wb * )nic->rx_ring.ring + 
nic->rx_ring.sw_head; /* Stop if descriptor is still in use */ - if ( !(rx->status & cpu_to_le16(ATL_RX_DESC_STATUS_DD)) ) + if ( !( rx->status & cpu_to_le16( ATL_RX_DESC_STATUS_DD ) ) ) return; /* Populate I/O buffer */ @@ -441,8 +430,7 @@ void atl_poll_rx ( struct net_device *netdev ) * * @v netdev Network device */ -static void atl_poll ( struct net_device *netdev ) -{ +static void atl_poll ( struct net_device *netdev ) { struct atl_nic *nic = netdev->priv; /* Check link state */ @@ -464,8 +452,7 @@ static void atl_poll ( struct net_device *netdev ) * @v netdev Network device * @v enable Interrupts should be enabled */ -static void atl_irq ( struct net_device *netdev, int enable ) -{ +static void atl_irq ( struct net_device *netdev, int enable ) { struct atl_nic *nic = netdev->priv; uint32_t mask; @@ -498,8 +485,7 @@ static struct net_device_operations atl_operations = { * @v pci PCI device * @ret rc Return status code */ -static int atl_probe ( struct pci_device *pci ) -{ +static int atl_probe ( struct pci_device *pci ) { struct net_device *netdev; struct atl_nic *nic; int rc = ENOERR; @@ -515,7 +501,7 @@ static int atl_probe ( struct pci_device *pci ) nic = netdev->priv; pci_set_drvdata ( pci, netdev ); netdev->dev = &pci->dev; - memset( nic, 0, sizeof( *nic ) ); + memset( nic, 0, sizeof ( *nic ) ); nic->flags = pci->id->driver_data; /* Fix up PCI device */ @@ -580,8 +566,7 @@ err_alloc: * * @v pci PCI device */ -static void atl_remove ( struct pci_device *pci ) -{ +static void atl_remove ( struct pci_device *pci ) { struct net_device *netdev = pci_get_drvdata ( pci ); struct atl_nic *nic = netdev->priv; @@ -601,43 +586,44 @@ static void atl_remove ( struct pci_device *pci ) static struct pci_device_id atl_nics[] = { /* Atlantic 1 */ /* 10G */ - PCI_ROM(0x1D6A, 0x0001, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0xD107, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x07B1, "AQC07", 
"Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x87B1, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1), + PCI_ROM ( 0x1D6A, 0x0001, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0xD107, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x07B1, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x87B1, "AQC07", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A1 ), /* SFP */ - PCI_ROM(0x1D6A, 0xD100, "AQC00", "Felicity Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x00B1, "AQC00", "Felicity Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x80B1, "AQC00", "Felicity Network Adapter", ATL_FLAG_A1), + PCI_ROM ( 0x1D6A, 0xD100, "AQC00", "Felicity Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x00B1, "AQC00", "Felicity Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x80B1, "AQC00", "Felicity Network Adapter", ATL_FLAG_A1 ), /* 5G */ - PCI_ROM(0x1D6A, 0xD108, "AQC08", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x08B1, "AQC08", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x88B1, "AQC08", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x11B1, "AQC11", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x91B1, "AQC11", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1), + PCI_ROM ( 0x1D6A, 0xD108, "AQC08", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x08B1, "AQC08", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x88B1, "AQC08", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x11B1, "AQC11", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x91B1, "AQC11", "Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A1 ), /* 2.5G */ - PCI_ROM(0x1D6A, 0xD109, "AQC09", "Marvell AQtion 2.5Gbit Network Adapter", 
ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x09B1, "AQC09", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x89B1, "AQC09", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x12B1, "AQC12", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1), - PCI_ROM(0x1D6A, 0x92B1, "AQC12", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1), + PCI_ROM ( 0x1D6A, 0xD109, "AQC09", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x09B1, "AQC09", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x89B1, "AQC09", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x12B1, "AQC12", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1 ), + PCI_ROM ( 0x1D6A, 0x92B1, "AQC12", "Marvell AQtion 2.5Gbit Network Adapter", ATL_FLAG_A1 ), /* Atlantic 2 */ - PCI_ROM(0x1D6A, 0x00C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2), - PCI_ROM(0x1D6A, 0x94C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2), - PCI_ROM(0x1D6A, 0x93C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2), - PCI_ROM(0x1D6A, 0x04C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2), - PCI_ROM(0x1D6A, 0x14C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2), - PCI_ROM(0x1D6A, 0x12C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2), + PCI_ROM ( 0x1D6A, 0x00C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2 ), + PCI_ROM ( 0x1D6A, 0x94C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2 ), + PCI_ROM ( 0x1D6A, 0x93C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2 ), + PCI_ROM ( 0x1D6A, 0x04C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2 ), + PCI_ROM ( 0x1D6A, 0x14C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2 ), + PCI_ROM ( 0x1D6A, 0x12C0, "AQC13", "Marvell AQtion 10Gbit Network Adapter", ATL_FLAG_A2 ), + PCI_ROM ( 0x1D6A, 0x03C0, "AQC14", 
"Marvell AQtion 5Gbit Network Adapter", ATL_FLAG_A2 ), }; /** Marvell PCI driver */ struct pci_driver atl_driver __pci_driver = { .ids = atl_nics, - .id_count = (sizeof(atl_nics) / sizeof(atl_nics[0])), + .id_count = ( sizeof( atl_nics ) / sizeof ( atl_nics[0] ) ), .probe = atl_probe, .remove = atl_remove, -};
\ No newline at end of file +}; diff --git a/src/drivers/net/marvell/aqc1xx.h b/src/drivers/net/marvell/aqc1xx.h index c3e34e1e7..26bdd00f0 100644 --- a/src/drivers/net/marvell/aqc1xx.h +++ b/src/drivers/net/marvell/aqc1xx.h @@ -2,7 +2,7 @@ * * Marvell AQtion family network card driver definitions. * - * Copyright(C) 2017-2021 Marvell + * Copyright(C) 2017-2024 Marvell * * SPDX-License-Identifier: BSD-2-Clause * @@ -177,14 +177,14 @@ FILE_LICENCE ( BSD2 ); #define ATL_FLAG_A2 0x2 /*write register*/ -#define ATL_WRITE_REG(VAL, REG) writel(VAL, nic->regs + (REG)) -#define ATL_READ_REG(REG) readl(nic->regs + (REG)) /*read register*/ +#define ATL_WRITE_REG( VAL, REG ) writel( VAL, nic->regs + (REG) ) +#define ATL_READ_REG( REG ) readl( nic->regs + (REG) ) /*read register*/ struct atl_desc_tx { uint64_t address; uint32_t status; uint32_t flag; -} __attribute__((packed)); +} __attribute__ (( packed )); #define ATL_DESC_TX_DX_TYPE_VALUE 0x1 @@ -206,7 +206,7 @@ struct atl_desc_tx_wb { uint64_t rsvd1; uint32_t status; uint32_t rsvd4; -} __attribute__((packed)); +} __attribute__ (( packed )); #define ATL_TX_DESC_STATUS_DD 0x00100000UL @@ -214,14 +214,14 @@ struct atl_desc_rx { uint64_t data_addr; uint64_t hdr_addr; -} __attribute__((packed)); +} __attribute__ (( packed )); struct atl_desc_rx_wb { uint64_t rsvd2; uint16_t status; uint16_t pkt_len; uint32_t rsvd4; -} __attribute__((packed)); +} __attribute__ (( packed )); #define ATL_RX_DESC_STATUS_DD 0x0001UL #define ATL_RX_DESC_STATUS_EOP 0x0002UL @@ -237,11 +237,11 @@ struct atl_ring { struct atl_nic; struct atl_hw_ops { - int (*reset) (struct atl_nic *nic); - int (*start) (struct atl_nic *nic); - int (*stop) (struct atl_nic *nic); - int (*get_link) (struct atl_nic *nic); - int (*get_mac) (struct atl_nic *, uint8_t *mac); + int ( *reset ) ( struct atl_nic *nic ); + int ( *start ) ( struct atl_nic *nic ); + int ( *stop ) ( struct atl_nic *nic ); + int ( *get_link ) ( struct atl_nic *nic ); + int ( *get_mac ) ( struct atl_nic 
*, uint8_t *mac ); }; /** An aQuanita network card */ diff --git a/src/drivers/net/marvell/atl2_hw.c b/src/drivers/net/marvell/atl2_hw.c index 0c57a12fc..07822a9c2 100644 --- a/src/drivers/net/marvell/atl2_hw.c +++ b/src/drivers/net/marvell/atl2_hw.c @@ -2,20 +2,20 @@ * * Marvell AQtion family network card driver, hardware-specific functions. * - * Copyright(C) 2017-2021 Marvell + * Copyright(C) 2017-2024 Marvell * * SPDX-License-Identifier: BSD-2-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: - - * 1. Redistributions of source code must retain the above copyright notice, + + * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO,THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR HOLDER OR @@ -31,6 +31,7 @@ FILE_LICENCE ( BSD2 ); +#include <string.h> #include <errno.h> #include <stdio.h> #include <unistd.h> @@ -39,8 +40,7 @@ FILE_LICENCE ( BSD2 ); #include "aqc1xx.h" #include "atl2_hw.h" -static int atl2_hw_boot_completed_ ( struct atl_nic *nic ) -{ +static int atl2_hw_boot_completed_ ( struct atl_nic *nic ) { uint32_t reset_status = ATL_READ_REG ( ATL2_GLB_RST_CTRL2 ); return ( reset_status & ATL2_RESET_STATUS_BOOT_COMPLETED_MASK ) || @@ -49,8 +49,7 @@ static int atl2_hw_boot_completed_ ( struct atl_nic *nic ) } void atl2_hw_read_shared_in_ ( struct atl_nic *nic, uint32_t offset, - uint32_t *data, uint32_t len ) -{ + uint32_t *data, uint32_t len ) { uint32_t i; for (i = 0; i < len; ++i ) @@ -60,8 +59,7 @@ void atl2_hw_read_shared_in_ ( struct atl_nic *nic, uint32_t offset, } void atl2_hw_write_shared_in_ ( struct atl_nic *nic, uint32_t offset, - uint32_t *data, uint32_t len ) -{ + uint32_t *data, uint32_t len ) { uint32_t i; for ( i = 0; i < len; ++i ) @@ -70,17 +68,16 @@ void atl2_hw_write_shared_in_ ( struct atl_nic *nic, uint32_t offset, } } -int atl2_hw_finish_ack_ ( struct atl_nic *nic, uint32_t ms ) -{ +int atl2_hw_finish_ack_ ( struct atl_nic *nic, uint32_t ms ) { uint32_t i; int err = 0; - ATL_WRITE_REG ( ATL_READ_REG(ATL2_HOST_FINISHED_WRITE ) + ATL_WRITE_REG ( ATL_READ_REG ( ATL2_HOST_FINISHED_WRITE ) | 1, ATL2_HOST_FINISHED_WRITE ); - for ( i = 0; i < (ms / 100); ++i ) + for ( i = 0; i < ( ms / 100 ); ++i ) { - if ( ( ATL_READ_REG(ATL2_MCP_BUSY_WRITE ) & 1 ) == 0 ) + if ( ( ATL_READ_REG ( ATL2_MCP_BUSY_WRITE ) & 1 ) == 0 ) { break; } @@ -92,8 +89,7 @@ int atl2_hw_finish_ack_ ( struct atl_nic *nic, uint32_t ms ) return err; } -int atl2_hw_fw_init_ ( struct atl_nic *nic ) -{ +int atl2_hw_fw_init_ ( struct atl_nic *nic ) { uint32_t val; int err = 0; @@ -113,8 +109,7 @@ int atl2_hw_fw_init_ ( struct atl_nic *nic ) return err; } -int atl2_hw_reset ( struct atl_nic *nic ) -{ +int atl2_hw_reset ( struct 
atl_nic *nic ) { int completed = 0; uint32_t status = 0; uint32_t request; @@ -131,7 +126,7 @@ int atl2_hw_reset ( struct atl_nic *nic ) status = ATL_READ_REG ( ATL2_GLB_RST_CTRL2 ); if ( ( ( status & ATL2_RESET_STATUS_BC_STARTED ) && - (status != 0xFFFFFFFFu ) ) ) + ( status != 0xFFFFFFFFu ) ) ) break; udelay ( ATL2_DELAY_10 ); @@ -184,19 +179,17 @@ err_exit: return err; } -int atl2_hw_start ( struct atl_nic *nic ) -{ +int atl2_hw_start ( struct atl_nic *nic ) { uint32_t val; atl2_hw_read_shared_in_ ( nic, ATL2_LINK_OPTS_IN_OFF, &val, 1 ); val = 0x4B00FFE1; atl2_hw_write_shared_in_ ( nic, ATL2_LINK_OPTS_IN_OFF, &val, 1 ); - return atl2_hw_finish_ack_ ( nic, 100000); + return atl2_hw_finish_ack_ ( nic, 100000 ); } -int atl2_hw_stop ( struct atl_nic *nic ) -{ +int atl2_hw_stop ( struct atl_nic *nic ) { uint32_t val; atl2_hw_read_shared_in_ ( nic, ATL2_LINK_OPTS_IN_OFF, &val, 1 ); @@ -206,22 +199,20 @@ int atl2_hw_stop ( struct atl_nic *nic ) return atl2_hw_finish_ack_ ( nic, 100000 ); } -int atl2_hw_get_link ( struct atl_nic *nic ) -{ +int atl2_hw_get_link ( struct atl_nic *nic ) { uint32_t val; val = ATL_READ_REG ( ATL2_MIF_SHARED_BUF_OUT + ATL2_LINK_STS_OUT_OFF ); - return ( (val & 0xf) != 0) && ((val & 0xF0) != 0 ); + return ( ( val & 0xf ) != 0 ) && ( ( val & 0xF0 ) != 0 ); } -int atl2_hw_get_mac ( struct atl_nic *nic, uint8_t *mac ) -{ +int atl2_hw_get_mac ( struct atl_nic *nic, uint8_t *mac ) { uint32_t mac_addr[2] = {0}; atl2_hw_read_shared_in_ ( nic, ATL2_MAC_ADDR_IN_OFF, mac_addr, 2 ); - memcpy ( mac, (uint8_t *)mac_addr, 6 ); + memcpy ( mac, ( uint8_t * )mac_addr, 6 ); return 0; } @@ -232,4 +223,4 @@ struct atl_hw_ops atl2_hw = { .stop = atl2_hw_stop, .get_link = atl2_hw_get_link, .get_mac = atl2_hw_get_mac, -};
\ No newline at end of file +}; diff --git a/src/drivers/net/marvell/atl2_hw.h b/src/drivers/net/marvell/atl2_hw.h index ebd5466e2..d044c21fa 100644 --- a/src/drivers/net/marvell/atl2_hw.h +++ b/src/drivers/net/marvell/atl2_hw.h @@ -1,21 +1,21 @@ /* - * Copyright(C) 2017-2021 Marvell + * Copyright(C) 2017-2024 Marvell * * SPDX-License-Identifier: BSD-2-Clause * - * Redistribution and use in source and binary forms, with or without + * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: - - * 1. Redistributions of source code must retain the above copyright notice, + + * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO,THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR HOLDER OR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; @@ -37,21 +37,21 @@ FILE_LICENCE ( BSD2 ); #define ATL2_HOST_ITR_REQ 0xF00 -#define ATL2_RESET_STATUS_REQ_GSR (1U << 0x0) -#define ATL2_RESET_STATUS_REQ_HOST_BOOT (1U << 0x8) -#define ATL2_RESET_STATUS_REQ_MAC_FAST_BOOT (1U << 0xA) -#define ATL2_RESET_STATUS_REQ_PHY_FAST_BOOT (1U << 0xB) +#define ATL2_RESET_STATUS_REQ_GSR ( 1U << 0x0 ) +#define ATL2_RESET_STATUS_REQ_HOST_BOOT ( 1U << 0x8 ) +#define ATL2_RESET_STATUS_REQ_MAC_FAST_BOOT ( 1U << 0xA ) +#define ATL2_RESET_STATUS_REQ_PHY_FAST_BOOT ( 1U << 0xB ) -#define ATL2_RESET_STATUS_HOST_LOAD_COMPLETED (1U << 0x10) -#define ATL2_RESET_STATUS_REQUIRE_HOST_LOAD (1U << 0x11) -#define ATL2_RESET_STATUS_BC_STARTED (1U << 0x18) -#define ATL2_RESET_STATUS_CRASH_DURING_INIT (1U << 0x1B) -#define ATL2_RESET_STATUS_BC_FAILED (1U << 0x1C) -#define ATL2_RESET_STATUS_FW_FAILED (1U << 0x1D) -#define ATL2_RESET_STATUS_FW_SUCCEED (1U << 0x1F) +#define ATL2_RESET_STATUS_HOST_LOAD_COMPLETED ( 1U << 0x10 ) +#define ATL2_RESET_STATUS_REQUIRE_HOST_LOAD ( 1U << 0x11 ) +#define ATL2_RESET_STATUS_BC_STARTED ( 1U << 0x18 ) +#define ATL2_RESET_STATUS_CRASH_DURING_INIT ( 1U << 0x1B ) +#define ATL2_RESET_STATUS_BC_FAILED ( 1U << 0x1C ) +#define ATL2_RESET_STATUS_FW_FAILED ( 1U << 0x1D ) +#define ATL2_RESET_STATUS_FW_SUCCEED ( 1U << 0x1F ) -#define ATL2_RESET_STATUS_BOOT_FAILED_MASK (ATL2_RESET_STATUS_CRASH_DURING_INIT | ATL2_RESET_STATUS_BC_FAILED | ATL2_RESET_STATUS_FW_FAILED) -#define ATL2_RESET_STATUS_BOOT_COMPLETED_MASK (ATL2_RESET_STATUS_BOOT_FAILED_MASK | ATL2_RESET_STATUS_FW_SUCCEED) +#define ATL2_RESET_STATUS_BOOT_FAILED_MASK ( ATL2_RESET_STATUS_CRASH_DURING_INIT | ATL2_RESET_STATUS_BC_FAILED | ATL2_RESET_STATUS_FW_FAILED ) +#define ATL2_RESET_STATUS_BOOT_COMPLETED_MASK ( 
ATL2_RESET_STATUS_BOOT_FAILED_MASK | ATL2_RESET_STATUS_FW_SUCCEED ) #define ATL2_FW_HOST_INTERRUPT_REQUEST_READY 0x0001 #define ATL2_FW_HOST_INTERRUPT_MAC_READY 0x0004 @@ -91,4 +91,4 @@ FILE_LICENCE ( BSD2 ); #define ATL2_DELAY_10 10 #define ATL2_DELAY_100 100 -#endif
\ No newline at end of file +#endif diff --git a/src/drivers/net/marvell/atl_hw.c b/src/drivers/net/marvell/atl_hw.c index 2dddb7187..fa7f2a9b8 100644 --- a/src/drivers/net/marvell/atl_hw.c +++ b/src/drivers/net/marvell/atl_hw.c @@ -2,20 +2,20 @@ * * Marvell AQtion family network card driver, hardware-specific functions. * - * Copyright(C) 2017-2021 Marvell + * Copyright(C) 2017-2024 Marvell * * SPDX-License-Identifier: BSD-2-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: - - * 1. Redistributions of source code must retain the above copyright notice, + + * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO,THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR HOLDER OR @@ -31,6 +31,7 @@ FILE_LICENCE ( BSD2 ); +#include <string.h> #include <errno.h> #include <stdio.h> #include <unistd.h> @@ -41,8 +42,7 @@ FILE_LICENCE ( BSD2 ); #include <compiler.h> -int atl_hw_reset_flb_ ( struct atl_nic *nic ) -{ +int atl_hw_reset_flb_ ( struct atl_nic *nic ) { uint32_t val; int k = 0; @@ -53,8 +53,8 @@ int atl_hw_reset_flb_ ( struct atl_nic *nic ) val = ATL_READ_REG ( ATL_GLB_NVR_PROV4 ); ATL_WRITE_REG ( val | ATL_GBL_NVR_PROV4_RESET, ATL_GLB_NVR_PROV4 ); - ATL_WRITE_REG( (ATL_READ_REG(ATL_GLB_STD_CTRL) & - ~ATL_GLB_CTRL_RST_DIS) | ATL_GLB_STD_CTRL_RESET, + ATL_WRITE_REG( ( ATL_READ_REG ( ATL_GLB_STD_CTRL ) & + ~ATL_GLB_CTRL_RST_DIS ) | ATL_GLB_STD_CTRL_RESET, ATL_GLB_STD_CTRL ); /* Kickstart MAC */ @@ -94,7 +94,7 @@ int atl_hw_reset_flb_ ( struct atl_nic *nic ) ATL_WRITE_REG ( ATL_GBL_MCP_SEM1_RELEASE, ATL_GLB_MCP_SEM1 ); /* Global software reset*/ - ATL_WRITE_REG ( ATL_READ_REG ( ATL_RX_CTRL ) & + ATL_WRITE_REG ( ATL_READ_REG ( ATL_RX_CTRL ) & ~ATL_RX_CTRL_RST_DIS, ATL_RX_CTRL ); ATL_WRITE_REG ( ATL_READ_REG ( ATL_TX_CTRL ) & ~ATL_TX_CTRL_RST_DIS, ATL_TX_CTRL ); @@ -103,7 +103,7 @@ int atl_hw_reset_flb_ ( struct atl_nic *nic ) ~ATL_MAC_PHY_CTRL_RST_DIS, ATL_MAC_PHY_CTRL ); ATL_WRITE_REG ( ( ATL_READ_REG ( ATL_GLB_STD_CTRL ) & - ~ATL_GLB_CTRL_RST_DIS) | ATL_GLB_STD_CTRL_RESET, + ~ATL_GLB_CTRL_RST_DIS ) | ATL_GLB_STD_CTRL_RESET, ATL_GLB_STD_CTRL ); for (k = 0; k < 1000; k++) { @@ -123,14 +123,13 @@ int atl_hw_reset_flb_ ( struct atl_nic *nic ) return 0; } -int atl_hw_reset_rbl_ ( struct atl_nic *nic ) -{ +int atl_hw_reset_rbl_ ( struct atl_nic *nic ) { uint32_t val, rbl_status; int k; ATL_WRITE_REG ( ATL_GLB_CTRL2_MBOX_ERR_UP_RUN_STALL, ATL_GLB_CTRL2 ); ATL_WRITE_REG ( ATL_GBL_MCP_SEM1_RELEASE, ATL_GLB_MCP_SEM1 ); - ATL_WRITE_REG ( ATL_MIF_PWR_GATING_EN_CTRL_RESET, + ATL_WRITE_REG ( ATL_MIF_PWR_GATING_EN_CTRL_RESET, ATL_MIF_PWR_GATING_EN_CTRL ); /* Alter RBL status */ @@ -141,15 +140,15 @@ int 
atl_hw_reset_rbl_ ( struct atl_nic *nic ) ATL_WRITE_REG ( val | ATL_GBL_NVR_PROV4_RESET, ATL_GLB_NVR_PROV4 ); /* Global software reset*/ - ATL_WRITE_REG ( ATL_READ_REG(ATL_RX_CTRL) & ~ATL_RX_CTRL_RST_DIS, + ATL_WRITE_REG ( ATL_READ_REG ( ATL_RX_CTRL ) & ~ATL_RX_CTRL_RST_DIS, ATL_RX_CTRL ); - ATL_WRITE_REG ( ATL_READ_REG(ATL_TX_CTRL) & ~ATL_TX_CTRL_RST_DIS, + ATL_WRITE_REG ( ATL_READ_REG ( ATL_TX_CTRL ) & ~ATL_TX_CTRL_RST_DIS, ATL_TX_CTRL ); - ATL_WRITE_REG ( ATL_READ_REG(ATL_MAC_PHY_CTRL) & + ATL_WRITE_REG ( ATL_READ_REG ( ATL_MAC_PHY_CTRL ) & ~ATL_MAC_PHY_CTRL_RST_DIS, ATL_MAC_PHY_CTRL ); - ATL_WRITE_REG ( (ATL_READ_REG(ATL_GLB_STD_CTRL) & - ~ATL_GLB_CTRL_RST_DIS) | ATL_GLB_STD_CTRL_RESET, + ATL_WRITE_REG ( ( ATL_READ_REG ( ATL_GLB_STD_CTRL ) & + ~ATL_GLB_CTRL_RST_DIS ) | ATL_GLB_STD_CTRL_RESET, ATL_GLB_STD_CTRL ); ATL_WRITE_REG ( ATL_GLB_CTRL2_MBOX_ERR_UP_RUN_NORMAL, ATL_GLB_CTRL2 ); @@ -186,8 +185,7 @@ int atl_hw_reset_rbl_ ( struct atl_nic *nic ) return 0; } -int atl_hw_reset ( struct atl_nic *nic ) -{ +int atl_hw_reset ( struct atl_nic *nic ) { uint32_t boot_exit_code = 0; uint32_t k; int rbl_enabled; @@ -207,22 +205,22 @@ int atl_hw_reset ( struct atl_nic *nic ) return -ENOTSUP; } - rbl_enabled = (boot_exit_code != 0); + rbl_enabled = ( boot_exit_code != 0 ); fw_ver = ATL_READ_REG ( ATL_FW_VER ); - if ( ((fw_ver >> 24) & 0xFF) >= 4 ) { + if ( ( ( fw_ver >> 24 ) & 0xFF ) >= 4 ) { sem_timeout = ATL_READ_REG ( ATL_SEM_TIMEOUT ); if ( sem_timeout > ATL_SEM_MAX_TIMEOUT ) sem_timeout = ATL_SEM_MAX_TIMEOUT; for ( k = 0; k < sem_timeout; ++k ) { - if ( ATL_READ_REG ( ATL_GLB_MCP_SEM4) ) + if ( ATL_READ_REG ( ATL_GLB_MCP_SEM4 ) ) break; - mdelay (ATL_DELAY_1_MNS); + mdelay ( ATL_DELAY_1_MNS ); } for ( k = 0; k < sem_timeout; ++k ) { - if (ATL_READ_REG ( ATL_GLB_MCP_SEM5) ) + if ( ATL_READ_REG ( ATL_GLB_MCP_SEM5 ) ) break; mdelay ( ATL_DELAY_1_MNS ); @@ -236,35 +234,31 @@ int atl_hw_reset ( struct atl_nic *nic ) return atl_hw_reset_flb_ ( nic ); } -int atl_hw_start ( 
struct atl_nic *nic ) -{ +int atl_hw_start ( struct atl_nic *nic ) { ATL_WRITE_REG ( ATL_LINK_ADV_AUTONEG, ATL_LINK_ADV ); return 0; } -int atl_hw_stop ( struct atl_nic *nic ) -{ +int atl_hw_stop ( struct atl_nic *nic ) { ATL_WRITE_REG ( ATL_SHUT_LINK, ATL_LINK_ADV ); return 0; } -int atl_hw_get_link ( struct atl_nic *nic ) -{ +int atl_hw_get_link ( struct atl_nic *nic ) { return ( ATL_READ_REG ( ATL_LINK_ST) & ATL_LINK_ADV_AUTONEG ) != 0; } int atl_hw_read_mem ( struct atl_nic *nic, uint32_t addr, uint32_t *buffer, - uint32_t size ) -{ + uint32_t size ) { uint32_t i; for ( i = 0; i < 100; ++i ) { - if ( ATL_READ_REG( ATL_SEM_RAM) ) + if ( ATL_READ_REG( ATL_SEM_RAM ) ) break; mdelay ( ATL_DELAY_1_MNS ); } if ( i == 100 ) { - DBGC (nic, "Semaphore Register not set\n" ); + DBGC ( nic, "Semaphore Register not set\n" ); return -EIO; } @@ -280,7 +274,7 @@ int atl_hw_read_mem ( struct atl_nic *nic, uint32_t addr, uint32_t *buffer, udelay ( ATL_DELAY_10_MNS ); } if ( j == 10000 ) { - DBGC (nic, "Reading from CTRL3 Register Failed\n" ); + DBGC ( nic, "Reading from CTRL3 Register Failed\n" ); return -EIO; } @@ -292,14 +286,13 @@ int atl_hw_read_mem ( struct atl_nic *nic, uint32_t addr, uint32_t *buffer, return 0; } -int atl_hw_get_mac ( struct atl_nic *nic, uint8_t *mac ) -{ +int atl_hw_get_mac ( struct atl_nic *nic, uint8_t *mac ) { uint32_t mac_addr[2] = {0}; int err = 0; uint32_t efuse_addr = ATL_READ_REG ( ATL_GLB_MCP_SP26 ); if ( efuse_addr != 0) { - uint32_t mac_efuse_addr = efuse_addr + 40 * sizeof(uint32_t); + uint32_t mac_efuse_addr = efuse_addr + 40 * sizeof ( uint32_t ); err = atl_hw_read_mem ( nic, mac_efuse_addr, mac_addr, 2 ); if ( err != 0 ) return err; @@ -307,7 +300,7 @@ int atl_hw_get_mac ( struct atl_nic *nic, uint8_t *mac ) mac_addr[0] = cpu_to_be32 ( mac_addr[0] ); mac_addr[1] = cpu_to_be32 ( mac_addr[1] ); - memcpy ( mac, (uint8_t *)mac_addr, ATL_MAC_ADDRESS_SIZE ); + memcpy ( mac, ( uint8_t * )mac_addr, ATL_MAC_ADDRESS_SIZE ); } return 0; } @@ -318,4 
+311,4 @@ struct atl_hw_ops atl_hw = { .stop = atl_hw_stop, .get_link = atl_hw_get_link, .get_mac = atl_hw_get_mac, -};
\ No newline at end of file +}; diff --git a/src/drivers/net/marvell/atl_hw.h b/src/drivers/net/marvell/atl_hw.h index 0a20fbfce..efc9f86c1 100644 --- a/src/drivers/net/marvell/atl_hw.h +++ b/src/drivers/net/marvell/atl_hw.h @@ -1,21 +1,21 @@ /* - * Copyright(C) 2017-2021 Marvell + * Copyright(C) 2017-2024 Marvell * * SPDX-License-Identifier: BSD-2-Clause * - * Redistribution and use in source and binary forms, with or without + * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: - - * 1. Redistributions of source code must retain the above copyright notice, + + * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright notice, - * this list of conditions and the following disclaimer in the documentation + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO,THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR HOLDER OR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR HOLDER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; diff --git a/src/drivers/net/mii.c b/src/drivers/net/mii.c index 87605f0cb..85749b941 100644 --- a/src/drivers/net/mii.c +++ b/src/drivers/net/mii.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/myri10ge.c b/src/drivers/net/myri10ge.c index 6d0f723f2..fb9dc01b2 100644 --- a/src/drivers/net/myri10ge.c +++ b/src/drivers/net/myri10ge.c @@ -74,7 +74,7 @@ FILE_LICENCE ( GPL2_ONLY ); */ #include <stdint.h> - +#include <string.h> #include <byteswap.h> #include <errno.h> #include <ipxe/ethernet.h> diff --git a/src/drivers/net/ncm.c b/src/drivers/net/ncm.c index 2c0f91e21..48f9856b0 100644 --- a/src/drivers/net/ncm.c +++ b/src/drivers/net/ncm.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <errno.h> diff --git a/src/drivers/net/ncm.h b/src/drivers/net/ncm.h index 6b0d21cdb..53e96cf72 100644 --- a/src/drivers/net/ncm.h +++ b/src/drivers/net/ncm.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <ipxe/usb.h> diff --git a/src/drivers/net/ne2k_isa.c b/src/drivers/net/ne2k_isa.c index a923fd3c5..8c8188af3 100644 --- a/src/drivers/net/ne2k_isa.c +++ b/src/drivers/net/ne2k_isa.c @@ -370,6 +370,6 @@ ISA_DRIVER ( ne_driver, ne_probe_addrs, ne_probe1, GENERIC_ISAPNP_VENDOR, 0x0600 ); DRIVER ( "ne", nic_driver, isapnp_driver, ne_driver, - ne_probe, ne_disable ); + ne_probe, ne_disable, no_fake_bss ); ISA_ROM("ne","NE1000/2000 and clones"); diff --git a/src/drivers/net/netfront.c b/src/drivers/net/netfront.c index 12713c5b4..ba6a20002 100644 --- a/src/drivers/net/netfront.c +++ 
b/src/drivers/net/netfront.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <stdlib.h> diff --git a/src/drivers/net/netfront.h b/src/drivers/net/netfront.h index de16d5291..0520a0b2a 100644 --- a/src/drivers/net/netfront.h +++ b/src/drivers/net/netfront.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <ipxe/xen.h> #include <xen/io/netif.h> diff --git a/src/drivers/net/netvsc.c b/src/drivers/net/netvsc.c index 5be52fb8e..9b6ee88b4 100644 --- a/src/drivers/net/netvsc.c +++ b/src/drivers/net/netvsc.c @@ -32,6 +32,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); * bus (VMBus). It provides a transport layer for RNDIS packets. */ +#include <string.h> #include <errno.h> #include <unistd.h> #include <byteswap.h> @@ -622,7 +623,7 @@ static int netvsc_buffer_copy ( struct vmbus_xfer_pages *pages, void *data, return -ERANGE; /* Copy data from buffer */ - copy_from_user ( data, buffer->data, offset, len ); + memcpy ( data, ( buffer->data + offset ), len ); return 0; } diff --git a/src/drivers/net/netvsc.h b/src/drivers/net/netvsc.h index 93192357f..41db49af7 100644 --- a/src/drivers/net/netvsc.h +++ b/src/drivers/net/netvsc.h @@ -305,7 +305,7 @@ struct netvsc_buffer { /** Buffer length */ size_t len; /** Buffer */ - userptr_t data; + void *data; /** GPADL ID */ unsigned int gpadl; }; diff --git a/src/drivers/net/ns8390.c b/src/drivers/net/ns8390.c index 8e8d8500f..bc94aabe3 100644 --- a/src/drivers/net/ns8390.c +++ b/src/drivers/net/ns8390.c @@ -597,7 +597,7 @@ static int ns8390_poll(struct nic *nic, int retrieve) /************************************************************************** NS8390_DISABLE - Turn off adapter **************************************************************************/ -static void ns8390_disable ( struct nic *nic ) { +static void ns8390_disable ( struct nic *nic, void *hwdev __unused ) { ns8390_reset(nic); } @@ -1022,7 +1022,7 @@ 
PCI_ROM(0x8e2e, 0x3000, "ktiet32p2", "KTI ET32P2", 0), PCI_DRIVER ( nepci_driver, nepci_nics, PCI_NO_CLASS ); DRIVER ( "NE2000/PCI", nic_driver, pci_driver, nepci_driver, - nepci_probe, ns8390_disable ); + nepci_probe, ns8390_disable, no_fake_bss ); #endif /* INCLUDE_NS8390 */ diff --git a/src/drivers/net/prism2_pci.c b/src/drivers/net/prism2_pci.c index 2feb69522..128bdedc1 100644 --- a/src/drivers/net/prism2_pci.c +++ b/src/drivers/net/prism2_pci.c @@ -44,7 +44,7 @@ static int prism2_pci_probe ( struct nic *nic, struct pci_device *pci ) { return prism2_probe ( nic, hw ); } -static void prism2_pci_disable ( struct nic *nic ) { +static void prism2_pci_disable ( struct nic *nic, void *hwdev __unused ) { prism2_disable ( nic ); } @@ -55,7 +55,7 @@ PCI_ROM(0x1260, 0x3873, "prism2_pci", "Harris Semiconductor Prism2.5 clone", 0), PCI_DRIVER ( prism2_pci_driver, prism2_pci_nics, PCI_NO_CLASS ); DRIVER ( "Prism2/PCI", nic_driver, pci_driver, prism2_pci_driver, - prism2_pci_probe, prism2_pci_disable ); + prism2_pci_probe, prism2_pci_disable, no_fake_bss ); /* * Local variables: diff --git a/src/drivers/net/prism2_plx.c b/src/drivers/net/prism2_plx.c index 770cf3288..dac63ba9f 100644 --- a/src/drivers/net/prism2_plx.c +++ b/src/drivers/net/prism2_plx.c @@ -99,7 +99,7 @@ static int prism2_plx_probe ( struct nic *nic, struct pci_device *pci ) { return prism2_probe ( nic, hw ); } -static void prism2_plx_disable ( struct nic *nic ) { +static void prism2_plx_disable ( struct nic *nic, void *hwdev __unused ) { prism2_disable ( nic ); } @@ -119,9 +119,8 @@ PCI_ROM(0xec80, 0xec00, "f5d6000", "Belkin F5D6000", 0), PCI_DRIVER ( prism2_plx_driver, prism2_plx_nics, PCI_NO_CLASS ); - DRIVER ( "Prism2/PLX", nic_driver, pci_driver, prism2_plx_driver, - prism2_plx_probe, prism2_plx_disable ); + prism2_plx_probe, prism2_plx_disable, no_fake_bss ); /* * Local variables: diff --git a/src/drivers/net/rtl818x/rtl8180.c b/src/drivers/net/rtl818x/rtl8180.c index b3f685419..d92c8ea67 100644 --- 
a/src/drivers/net/rtl818x/rtl8180.c +++ b/src/drivers/net/rtl818x/rtl8180.c @@ -1,6 +1,7 @@ /* Realtek 8180 card: rtl818x driver + rtl8180 RF modules */ FILE_LICENCE(GPL2_OR_LATER); +FILE_SECBOOT(FORBIDDEN); #include <ipxe/pci.h> #include "rtl818x.h" diff --git a/src/drivers/net/rtl818x/rtl8180_grf5101.c b/src/drivers/net/rtl818x/rtl8180_grf5101.c index 2b995030c..9e017fd4f 100644 --- a/src/drivers/net/rtl818x/rtl8180_grf5101.c +++ b/src/drivers/net/rtl818x/rtl8180_grf5101.c @@ -27,6 +27,7 @@ #include "rtl818x.h" FILE_LICENCE(GPL2_ONLY); +FILE_SECBOOT(FORBIDDEN); #define GRF5101_ANTENNA 0xA3 diff --git a/src/drivers/net/rtl818x/rtl8180_max2820.c b/src/drivers/net/rtl818x/rtl8180_max2820.c index ab380fcc7..d3cb15454 100644 --- a/src/drivers/net/rtl818x/rtl8180_max2820.c +++ b/src/drivers/net/rtl818x/rtl8180_max2820.c @@ -27,6 +27,7 @@ #include "rtl818x.h" FILE_LICENCE(GPL2_ONLY); +FILE_SECBOOT(FORBIDDEN); #define MAXIM_ANTENNA 0xb3 diff --git a/src/drivers/net/rtl818x/rtl8180_sa2400.c b/src/drivers/net/rtl818x/rtl8180_sa2400.c index 9bd62bed8..d86e52aac 100644 --- a/src/drivers/net/rtl818x/rtl8180_sa2400.c +++ b/src/drivers/net/rtl818x/rtl8180_sa2400.c @@ -27,6 +27,7 @@ #include "rtl818x.h" FILE_LICENCE(GPL2_ONLY); +FILE_SECBOOT(FORBIDDEN); #define SA2400_ANTENNA 0x91 #define SA2400_DIG_ANAPARAM_PWR1_ON 0x8 diff --git a/src/drivers/net/rtl818x/rtl8185.c b/src/drivers/net/rtl818x/rtl8185.c index 234978cea..beaf62566 100644 --- a/src/drivers/net/rtl818x/rtl8185.c +++ b/src/drivers/net/rtl818x/rtl8185.c @@ -1,6 +1,7 @@ /* Realtek 8185 card: rtl818x driver + rtl8185_rtl8225 RF module */ FILE_LICENCE(GPL2_OR_LATER); +FILE_SECBOOT(FORBIDDEN); #include <ipxe/pci.h> #include "rtl818x.h" diff --git a/src/drivers/net/rtl818x/rtl8185_rtl8225.c b/src/drivers/net/rtl818x/rtl8185_rtl8225.c index 31a740e64..f810215ca 100644 --- a/src/drivers/net/rtl818x/rtl8185_rtl8225.c +++ b/src/drivers/net/rtl818x/rtl8185_rtl8225.c @@ -23,6 +23,7 @@ #include "rtl818x.h" FILE_LICENCE(GPL2_ONLY); 
+FILE_SECBOOT(FORBIDDEN); #define RTL8225_ANAPARAM_ON 0xa0000b59 #define RTL8225_ANAPARAM2_ON 0x860dec11 diff --git a/src/drivers/net/rtl818x/rtl818x.c b/src/drivers/net/rtl818x/rtl818x.c index 599d36fad..81592a7db 100644 --- a/src/drivers/net/rtl818x/rtl818x.c +++ b/src/drivers/net/rtl818x/rtl818x.c @@ -18,8 +18,10 @@ */ FILE_LICENCE(GPL2_ONLY); +FILE_SECBOOT(FORBIDDEN); #include <stdint.h> +#include <string.h> #include <errno.h> #include <stdio.h> #include <unistd.h> diff --git a/src/drivers/net/rtl818x/rtl818x.h b/src/drivers/net/rtl818x/rtl818x.h index ae4b8a96f..f8d19604e 100644 --- a/src/drivers/net/rtl818x/rtl818x.h +++ b/src/drivers/net/rtl818x/rtl818x.h @@ -22,6 +22,7 @@ #include <ipxe/net80211.h> FILE_LICENCE(GPL2_ONLY); +FILE_SECBOOT(FORBIDDEN); struct rtl818x_csr { u8 MAC[6]; diff --git a/src/drivers/net/sfc/efx_hunt.c b/src/drivers/net/sfc/efx_hunt.c index abe3e8320..92c0fda62 100644 --- a/src/drivers/net/sfc/efx_hunt.c +++ b/src/drivers/net/sfc/efx_hunt.c @@ -21,6 +21,7 @@ #include <stdint.h> #include <stdlib.h> #include <stdio.h> +#include <string.h> #include <unistd.h> #include <errno.h> #include <assert.h> diff --git a/src/drivers/net/sfc/sfc_hunt.c b/src/drivers/net/sfc/sfc_hunt.c index 43ac229ab..f763fc9d0 100644 --- a/src/drivers/net/sfc/sfc_hunt.c +++ b/src/drivers/net/sfc/sfc_hunt.c @@ -19,6 +19,7 @@ ***************************************************************************/ #include <stdlib.h> #include <stdio.h> +#include <string.h> #include <unistd.h> #include <errno.h> #include <byteswap.h> diff --git a/src/drivers/net/sis900.c b/src/drivers/net/sis900.c index 8a3ac01bc..59deb8df9 100644 --- a/src/drivers/net/sis900.c +++ b/src/drivers/net/sis900.c @@ -65,12 +65,13 @@ static unsigned int cur_phy; static unsigned int cur_rx; -struct { +struct sis900_bss { BufferDesc txd; BufferDesc rxd[NUM_RX_DESC]; unsigned char txb[TX_BUF_SIZE]; unsigned char rxb[NUM_RX_DESC * RX_BUF_SIZE]; -} sis900_bufs __shared; +}; +#define sis900_bufs NIC_FAKE_BSS ( 
struct sis900_bss ) #define txd sis900_bufs.txd #define rxd sis900_bufs.rxd #define txb sis900_bufs.txb @@ -164,7 +165,7 @@ static void sis900_transmit(struct nic *nic, const char *d, unsigned int t, unsigned int s, const char *p); static int sis900_poll(struct nic *nic, int retrieve); -static void sis900_disable(struct nic *nic); +static void sis900_disable(struct nic *nic, void *hwdev); static void sis900_irq(struct nic *nic, irq_action_t action); @@ -1238,7 +1239,7 @@ sis900_poll(struct nic *nic, int retrieve) */ static void -sis900_disable ( struct nic *nic ) { +sis900_disable ( struct nic *nic, void *hwdev __unused ) { sis900_init(nic); @@ -1291,7 +1292,7 @@ PCI_ROM(0x1039, 0x7016, "sis7016", "SIS7016", 0), PCI_DRIVER ( sis900_driver, sis900_nics, PCI_NO_CLASS ); DRIVER ( "SIS900", nic_driver, pci_driver, sis900_driver, - sis900_probe, sis900_disable ); + sis900_probe, sis900_disable, sis900_bufs ); /* * Local variables: diff --git a/src/drivers/net/skge.c b/src/drivers/net/skge.c index cc7f0b91b..828a2a4c9 100755 --- a/src/drivers/net/skge.c +++ b/src/drivers/net/skge.c @@ -31,6 +31,7 @@ FILE_LICENCE ( GPL2_ONLY ); #include <stdint.h> +#include <string.h> #include <errno.h> #include <stdio.h> #include <unistd.h> diff --git a/src/drivers/net/sky2.c b/src/drivers/net/sky2.c index 4f8ec3e42..db3f6aaa1 100644 --- a/src/drivers/net/sky2.c +++ b/src/drivers/net/sky2.c @@ -28,6 +28,7 @@ FILE_LICENCE ( GPL2_ONLY ); #include <stdint.h> +#include <string.h> #include <errno.h> #include <stdio.h> #include <unistd.h> diff --git a/src/drivers/net/smc9000.c b/src/drivers/net/smc9000.c index c9762d580..420aca9bd 100644 --- a/src/drivers/net/smc9000.c +++ b/src/drivers/net/smc9000.c @@ -939,7 +939,7 @@ ISA_DRIVER ( smc9000_driver, smc9000_probe_addrs, smc9000_probe_addr, GENERIC_ISAPNP_VENDOR, 0x8228 ); DRIVER ( "SMC9000", nic_driver, isa_driver, smc9000_driver, - smc9000_probe, smc9000_disable ); + smc9000_probe, smc9000_disable, no_fake_bss ); ISA_ROM ( "smc9000", "SMC9000" 
); diff --git a/src/drivers/net/smsc75xx.c b/src/drivers/net/smsc75xx.c index 861669edf..8ae65e42a 100644 --- a/src/drivers/net/smsc75xx.c +++ b/src/drivers/net/smsc75xx.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> diff --git a/src/drivers/net/smsc75xx.h b/src/drivers/net/smsc75xx.h index 72339df03..51330993d 100644 --- a/src/drivers/net/smsc75xx.h +++ b/src/drivers/net/smsc75xx.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include "smscusb.h" diff --git a/src/drivers/net/smsc95xx.c b/src/drivers/net/smsc95xx.c index 3ec49584d..16086b33e 100644 --- a/src/drivers/net/smsc95xx.c +++ b/src/drivers/net/smsc95xx.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <unistd.h> @@ -64,92 +65,67 @@ static struct profiler smsc95xx_out_profiler __profiler = */ static int smsc95xx_vm3_fetch_mac ( struct smscusb_device *smscusb ) { struct net_device *netdev = smscusb->netdev; - struct smbios_structure structure; - struct smbios_system_information system; - struct { - char manufacturer[ 10 /* "Honeywell" + NUL */ ]; - char product[ 4 /* "VM3" + NUL */ ]; - char mac[ base16_encoded_len ( ETH_ALEN ) + 1 /* NUL */ ]; - } strings; + const struct smbios_header *structure; + const struct smbios_system_information *system; + const char *manufacturer; + const char *product; + const char *mac; int len; int rc; /* Find system information */ - if ( ( rc = find_smbios_structure ( SMBIOS_TYPE_SYSTEM_INFORMATION, 0, - &structure ) ) != 0 ) { + structure = smbios_structure ( SMBIOS_TYPE_SYSTEM_INFORMATION, 0 ); + if ( ! 
structure ) { DBGC ( smscusb, "SMSC95XX %p could not find system " - "information: %s\n", smscusb, strerror ( rc ) ); - return rc; - } - - /* Read system information */ - if ( ( rc = read_smbios_structure ( &structure, &system, - sizeof ( system ) ) ) != 0 ) { - DBGC ( smscusb, "SMSC95XX %p could not read system " - "information: %s\n", smscusb, strerror ( rc ) ); - return rc; + "information\n", smscusb ); + return -ENOENT; } - - /* NUL-terminate all strings to be fetched */ - memset ( &strings, 0, sizeof ( strings ) ); + system = container_of ( structure, struct smbios_system_information, + header ); /* Fetch system manufacturer name */ - len = read_smbios_string ( &structure, system.manufacturer, - strings.manufacturer, - ( sizeof ( strings.manufacturer ) - 1 ) ); - if ( len < 0 ) { - rc = len; + manufacturer = smbios_string ( structure, system->manufacturer ); + if ( ! manufacturer ) { DBGC ( smscusb, "SMSC95XX %p could not read manufacturer " - "name: %s\n", smscusb, strerror ( rc ) ); - return rc; + "name\n", smscusb ); + return -ENOENT; } /* Fetch system product name */ - len = read_smbios_string ( &structure, system.product, strings.product, - ( sizeof ( strings.product ) - 1 ) ); - if ( len < 0 ) { - rc = len; - DBGC ( smscusb, "SMSC95XX %p could not read product name: " - "%s\n", smscusb, strerror ( rc ) ); - return rc; + product = smbios_string ( structure, system->product ); + if ( ! 
product ) { + DBGC ( smscusb, "SMSC95XX %p could not read product name\n", + smscusb ); + return -ENOENT; } /* Ignore non-VM3 devices */ - if ( ( strcmp ( strings.manufacturer, "Honeywell" ) != 0 ) || - ( strcmp ( strings.product, "VM3" ) != 0 ) ) + if ( ( strcmp ( manufacturer, "Honeywell" ) != 0 ) || + ( strcmp ( product, "VM3" ) != 0 ) ) return -ENOTTY; /* Find OEM strings */ - if ( ( rc = find_smbios_structure ( SMBIOS_TYPE_OEM_STRINGS, 0, - &structure ) ) != 0 ) { - DBGC ( smscusb, "SMSC95XX %p could not find OEM strings: %s\n", - smscusb, strerror ( rc ) ); - return rc; + structure = smbios_structure ( SMBIOS_TYPE_OEM_STRINGS, 0 ); + if ( ! structure ) { + DBGC ( smscusb, "SMSC95XX %p could not find OEM strings\n", + smscusb ); + return -ENOENT; } /* Fetch MAC address */ - len = read_smbios_string ( &structure, SMSC95XX_VM3_OEM_STRING_MAC, - strings.mac, ( sizeof ( strings.mac ) - 1 )); - if ( len < 0 ) { - rc = len; - DBGC ( smscusb, "SMSC95XX %p could not read OEM string: %s\n", - smscusb, strerror ( rc ) ); - return rc; - } - - /* Sanity check */ - if ( len != ( ( int ) ( sizeof ( strings.mac ) - 1 ) ) ) { - DBGC ( smscusb, "SMSC95XX %p invalid MAC address \"%s\"\n", - smscusb, strings.mac ); - return -EINVAL; + mac = smbios_string ( structure, SMSC95XX_VM3_OEM_STRING_MAC ); + if ( ! 
mac ) { + DBGC ( smscusb, "SMSC95XX %p could not read OEM string\n", + smscusb ); + return -ENOENT; } /* Decode MAC address */ - len = base16_decode ( strings.mac, netdev->hw_addr, ETH_ALEN ); + len = base16_decode ( mac, netdev->hw_addr, ETH_ALEN ); if ( len < 0 ) { rc = len; DBGC ( smscusb, "SMSC95XX %p invalid MAC address \"%s\"\n", - smscusb, strings.mac ); + smscusb, mac ); return rc; } diff --git a/src/drivers/net/smsc95xx.h b/src/drivers/net/smsc95xx.h index 0cdf38248..0cb6ab4c7 100644 --- a/src/drivers/net/smsc95xx.h +++ b/src/drivers/net/smsc95xx.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include "smscusb.h" diff --git a/src/drivers/net/smscusb.c b/src/drivers/net/smscusb.c index c639c58c1..486b5953b 100644 --- a/src/drivers/net/smscusb.c +++ b/src/drivers/net/smscusb.c @@ -22,6 +22,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <string.h> #include <errno.h> @@ -459,13 +460,13 @@ int smscusb_fdt_fetch_mac ( struct smscusb_device *smscusb ) { int rc; /* Look for "ethernet[0]" alias */ - if ( ( rc = fdt_alias ( "ethernet", &offset ) != 0 ) && - ( rc = fdt_alias ( "ethernet0", &offset ) != 0 ) ) { + if ( ( rc = fdt_alias ( &sysfdt, "ethernet", &offset ) != 0 ) && + ( rc = fdt_alias ( &sysfdt, "ethernet0", &offset ) != 0 ) ) { return rc; } /* Fetch MAC address */ - if ( ( rc = fdt_mac ( offset, netdev ) ) != 0 ) + if ( ( rc = fdt_mac ( &sysfdt, offset, netdev ) ) != 0 ) return rc; DBGC ( smscusb, "SMSCUSB %p using FDT MAC %s\n", diff --git a/src/drivers/net/smscusb.h b/src/drivers/net/smscusb.h index e866bb747..e4ad61915 100644 --- a/src/drivers/net/smscusb.h +++ b/src/drivers/net/smscusb.h @@ -8,6 +8,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> #include <string.h> diff --git a/src/drivers/net/sundance.c b/src/drivers/net/sundance.c index 8eb09b988..964365ef6 100644 --- a/src/drivers/net/sundance.c +++ 
b/src/drivers/net/sundance.c @@ -233,10 +233,11 @@ static struct netdev_desc rx_ring[RX_RING_SIZE]; /* Create a static buffer of size PKT_BUF_SZ for each RX and TX descriptor. All descriptors point to a part of this buffer */ -struct { +struct sundance_bss { unsigned char txb[PKT_BUF_SZ * TX_RING_SIZE]; unsigned char rxb[RX_RING_SIZE * PKT_BUF_SZ]; -} rx_tx_buf __shared; +}; +#define rx_tx_buf NIC_FAKE_BSS ( struct sundance_bss ) #define rxb rx_tx_buf.rxb #define txb rx_tx_buf.txb @@ -536,7 +537,7 @@ static void sundance_transmit(struct nic *nic, const char *d, /* Destination */ /************************************************************************** DISABLE - Turn off ethernet interface ***************************************************************************/ -static void sundance_disable ( struct nic *nic __unused ) { +static void sundance_disable ( struct nic *nic __unused, void *hwdev __unused) { /* put the card in its initial state */ /* This function serves 3 purposes. * This disables DMA and interrupts so we don't receive @@ -888,7 +889,7 @@ static struct pci_device_id sundance_nics[] = { PCI_DRIVER ( sundance_driver, sundance_nics, PCI_NO_CLASS ); DRIVER ( "SUNDANCE/PCI", nic_driver, pci_driver, sundance_driver, - sundance_probe, sundance_disable ); + sundance_probe, sundance_disable, rx_tx_buf ); /* * Local variables: diff --git a/src/drivers/net/tg3/tg3.c b/src/drivers/net/tg3/tg3.c index 05af22d61..a6736305c 100644 --- a/src/drivers/net/tg3/tg3.c +++ b/src/drivers/net/tg3/tg3.c @@ -3,6 +3,7 @@ FILE_LICENCE ( GPL2_ONLY ); #include <mii.h> #include <stdio.h> +#include <string.h> #include <errno.h> #include <unistd.h> #include <byteswap.h> diff --git a/src/drivers/net/tg3/tg3_hw.c b/src/drivers/net/tg3/tg3_hw.c index 9a70413b6..5c9506dce 100644 --- a/src/drivers/net/tg3/tg3_hw.c +++ b/src/drivers/net/tg3/tg3_hw.c @@ -18,6 +18,7 @@ FILE_LICENCE ( GPL2_ONLY ); #include <mii.h> +#include <string.h> #include <stdio.h> #include <errno.h> #include 
<unistd.h> diff --git a/src/drivers/net/tg3/tg3_phy.c b/src/drivers/net/tg3/tg3_phy.c index e88b0be0f..a2322329e 100644 --- a/src/drivers/net/tg3/tg3_phy.c +++ b/src/drivers/net/tg3/tg3_phy.c @@ -1,6 +1,7 @@ #include <mii.h> #include <stdio.h> +#include <string.h> #include <errno.h> #include <unistd.h> #include <byteswap.h> diff --git a/src/drivers/net/thunderx.c b/src/drivers/net/thunderx.c index 1865a9b91..482474152 100644 --- a/src/drivers/net/thunderx.c +++ b/src/drivers/net/thunderx.c @@ -89,7 +89,7 @@ static __attribute__ (( unused )) void txnic_diag ( struct txnic *vnic ) { ( ( vnic->rq.cons % TXNIC_RQES ) * TXNIC_RQ_STRIDE ), readq ( vnic->regs + TXNIC_QS_RBDR_HEAD(0) ), readq ( vnic->regs + TXNIC_QS_RBDR_STATUS0(0) ) ); - DBGC ( vnic, "TXNIC %s CQ xxxxx(%05llx)/%05x(%05llx) %08llx:%08llx\n", + DBGC ( vnic, "TXNIC %s CQ xxxxx(%05llx)/%05zx(%05llx) %08llx:%08llx\n", vnic->name, readq ( vnic->regs + TXNIC_QS_CQ_TAIL(0) ), ( ( vnic->cq.cons % TXNIC_CQES ) * TXNIC_CQ_STRIDE ), readq ( vnic->regs + TXNIC_QS_CQ_HEAD(0) ), @@ -118,14 +118,14 @@ static int txnic_create_sq ( struct txnic *vnic ) { writeq ( TXNIC_QS_SQ_CFG_RESET, ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) ); /* Configure and enable send queue */ - writeq ( user_to_phys ( vnic->sq.sqe, 0 ), + writeq ( virt_to_phys ( vnic->sq.sqe ), ( vnic->regs + TXNIC_QS_SQ_BASE(0) ) ); writeq ( ( TXNIC_QS_SQ_CFG_ENA | TXNIC_QS_SQ_CFG_QSIZE_1K ), ( vnic->regs + TXNIC_QS_SQ_CFG(0) ) ); DBGC ( vnic, "TXNIC %s SQ at [%08lx,%08lx)\n", - vnic->name, user_to_phys ( vnic->sq.sqe, 0 ), - user_to_phys ( vnic->sq.sqe, TXNIC_SQ_SIZE ) ); + vnic->name, virt_to_phys ( vnic->sq.sqe ), + ( virt_to_phys ( vnic->sq.sqe ) + TXNIC_SQ_SIZE ) ); return 0; } @@ -184,9 +184,8 @@ static void txnic_destroy_sq ( struct txnic *vnic ) { * @ret rc Return status code */ static int txnic_send ( struct txnic *vnic, struct io_buffer *iobuf ) { - struct txnic_sqe sqe; + struct txnic_sqe *sqe; unsigned int sq_idx; - size_t offset; size_t len; /* Get next 
send queue entry */ @@ -196,24 +195,21 @@ static int txnic_send ( struct txnic *vnic, struct io_buffer *iobuf ) { return -ENOBUFS; } sq_idx = ( vnic->sq.prod++ % TXNIC_SQES ); - offset = ( sq_idx * TXNIC_SQ_STRIDE ); + sqe = &vnic->sq.sqe[sq_idx]; /* Populate send descriptor */ len = iob_len ( iobuf ); - memset ( &sqe, 0, sizeof ( sqe ) ); - sqe.hdr.total = cpu_to_le32 ( ( len >= ETH_ZLEN ) ? len : ETH_ZLEN ); - sqe.hdr.subdcnt = ( TXNIC_SQE_SUBDESCS - 1 ); - sqe.hdr.flags = TXNIC_SEND_HDR_FLAGS; - sqe.gather.size = cpu_to_le16 ( len ); - sqe.gather.flags = TXNIC_SEND_GATHER_FLAGS; - sqe.gather.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) ); + memset ( sqe, 0, sizeof ( *sqe ) ); + sqe->hdr.total = cpu_to_le32 ( ( len >= ETH_ZLEN ) ? len : ETH_ZLEN ); + sqe->hdr.subdcnt = ( TXNIC_SQE_SUBDESCS - 1 ); + sqe->hdr.flags = TXNIC_SEND_HDR_FLAGS; + sqe->gather.size = cpu_to_le16 ( len ); + sqe->gather.flags = TXNIC_SEND_GATHER_FLAGS; + sqe->gather.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) ); DBGC2 ( vnic, "TXNIC %s SQE %#03x is [%08lx,%08lx)\n", vnic->name, sq_idx, virt_to_bus ( iobuf->data ), ( virt_to_bus ( iobuf->data ) + len ) ); - /* Copy send descriptor to ring */ - copy_to_user ( vnic->sq.sqe, offset, &sqe, sizeof ( sqe ) ); - /* Ring doorbell */ wmb(); writeq ( TXNIC_SQE_SUBDESCS, ( vnic->regs + TXNIC_QS_SQ_DOOR(0) ) ); @@ -277,7 +273,7 @@ static int txnic_create_rq ( struct txnic *vnic ) { ( vnic->regs + TXNIC_QS_RBDR_CFG(0) ) ); /* Configure and enable receive buffer descriptor ring */ - writeq ( user_to_phys ( vnic->rq.rqe, 0 ), + writeq ( virt_to_phys ( vnic->rq.rqe ), ( vnic->regs + TXNIC_QS_RBDR_BASE(0) ) ); writeq ( ( TXNIC_QS_RBDR_CFG_ENA | TXNIC_QS_RBDR_CFG_QSIZE_8K | TXNIC_QS_RBDR_CFG_LINES ( TXNIC_RQE_SIZE / @@ -288,8 +284,8 @@ static int txnic_create_rq ( struct txnic *vnic ) { writeq ( TXNIC_QS_RQ_CFG_ENA, ( vnic->regs + TXNIC_QS_RQ_CFG(0) ) ); DBGC ( vnic, "TXNIC %s RQ at [%08lx,%08lx)\n", - vnic->name, user_to_phys ( vnic->rq.rqe, 0 ), - 
user_to_phys ( vnic->rq.rqe, TXNIC_RQ_SIZE ) ); + vnic->name, virt_to_phys ( vnic->rq.rqe ), + ( virt_to_phys ( vnic->rq.rqe ) + TXNIC_RQ_SIZE ) ); return 0; } @@ -359,11 +355,10 @@ static void txnic_destroy_rq ( struct txnic *vnic ) { */ static void txnic_refill_rq ( struct txnic *vnic ) { struct io_buffer *iobuf; - struct txnic_rqe rqe; + struct txnic_rqe *rqe; unsigned int rq_idx; unsigned int rq_iobuf_idx; unsigned int refilled = 0; - size_t offset; /* Refill ring */ while ( ( vnic->rq.prod - vnic->rq.cons ) < TXNIC_RQ_FILL ) { @@ -377,16 +372,15 @@ static void txnic_refill_rq ( struct txnic *vnic ) { /* Get next receive descriptor */ rq_idx = ( vnic->rq.prod++ % TXNIC_RQES ); - offset = ( rq_idx * TXNIC_RQ_STRIDE ); + rqe = &vnic->rq.rqe[rq_idx]; /* Populate receive descriptor */ - rqe.rbdre.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) ); + rqe->rbdre.addr = cpu_to_le64 ( virt_to_bus ( iobuf->data ) ); DBGC2 ( vnic, "TXNIC %s RQE %#03x is [%08lx,%08lx)\n", vnic->name, rq_idx, virt_to_bus ( iobuf->data ), ( virt_to_bus ( iobuf->data ) + TXNIC_RQE_SIZE ) ); - /* Copy receive descriptor to ring */ - copy_to_user ( vnic->rq.rqe, offset, &rqe, sizeof ( rqe ) ); + /* Record number of refills for doorbell */ refilled++; /* Record I/O buffer */ @@ -463,14 +457,14 @@ static int txnic_create_cq ( struct txnic *vnic ) { writeq ( TXNIC_QS_CQ_CFG_RESET, ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) ); /* Configure and enable completion queue */ - writeq ( user_to_phys ( vnic->cq.cqe, 0 ), + writeq ( virt_to_phys ( vnic->cq.cqe ), ( vnic->regs + TXNIC_QS_CQ_BASE(0) ) ); writeq ( ( TXNIC_QS_CQ_CFG_ENA | TXNIC_QS_CQ_CFG_QSIZE_256 ), ( vnic->regs + TXNIC_QS_CQ_CFG(0) ) ); DBGC ( vnic, "TXNIC %s CQ at [%08lx,%08lx)\n", - vnic->name, user_to_phys ( vnic->cq.cqe, 0 ), - user_to_phys ( vnic->cq.cqe, TXNIC_CQ_SIZE ) ); + vnic->name, virt_to_phys ( vnic->cq.cqe ), + ( virt_to_phys ( vnic->cq.cqe ) + TXNIC_CQ_SIZE ) ); return 0; } @@ -527,9 +521,8 @@ static void txnic_destroy_cq ( struct 
txnic *vnic ) { * @v vnic Virtual NIC */ static void txnic_poll_cq ( struct txnic *vnic ) { - union txnic_cqe cqe; + union txnic_cqe *cqe; uint64_t status; - size_t offset; unsigned int qcount; unsigned int cq_idx; unsigned int i; @@ -545,22 +538,21 @@ static void txnic_poll_cq ( struct txnic *vnic ) { /* Get completion queue entry */ cq_idx = ( vnic->cq.cons++ % TXNIC_CQES ); - offset = ( cq_idx * TXNIC_CQ_STRIDE ); - copy_from_user ( &cqe, vnic->cq.cqe, offset, sizeof ( cqe ) ); + cqe = &vnic->cq.cqe[cq_idx]; /* Process completion queue entry */ - switch ( cqe.common.cqe_type ) { + switch ( cqe->common.cqe_type ) { case TXNIC_CQE_TYPE_SEND: - txnic_complete_sqe ( vnic, &cqe.send ); + txnic_complete_sqe ( vnic, &cqe->send ); break; case TXNIC_CQE_TYPE_RX: - txnic_complete_rqe ( vnic, &cqe.rx ); + txnic_complete_rqe ( vnic, &cqe->rx ); break; default: DBGC ( vnic, "TXNIC %s unknown completion type %d\n", - vnic->name, cqe.common.cqe_type ); - DBGC_HDA ( vnic, user_to_phys ( vnic->cq.cqe, offset ), - &cqe, sizeof ( cqe ) ); + vnic->name, cqe->common.cqe_type ); + DBGC_HDA ( vnic, virt_to_phys ( cqe ), cqe, + sizeof ( *cqe ) ); break; } } diff --git a/src/drivers/net/thunderx.h b/src/drivers/net/thunderx.h index 410daf6e2..2d04c1271 100644 --- a/src/drivers/net/thunderx.h +++ b/src/drivers/net/thunderx.h @@ -12,7 +12,6 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); #include <stdint.h> #include <ipxe/list.h> #include <ipxe/netdevice.h> -#include <ipxe/uaccess.h> /****************************************************************************** * @@ -188,7 +187,7 @@ struct txnic_sq { /** Consumer counter */ unsigned int cons; /** Send queue entries */ - userptr_t sqe; + struct txnic_sqe *sqe; }; /****************************************************************************** @@ -280,7 +279,7 @@ struct txnic_rq { /** Consumer counter */ unsigned int cons; /** Receive queue entries */ - userptr_t rqe; + struct txnic_rqe *rqe; /** I/O buffers */ struct io_buffer 
*iobuf[TXNIC_RQ_FILL]; }; @@ -381,6 +380,8 @@ union txnic_cqe { struct txnic_cqe_send send; /** Receive completion */ struct txnic_cqe_rx rx; + /** Padding */ + uint8_t pad[512]; }; /** Number of completion queue entries @@ -393,7 +394,7 @@ union txnic_cqe { #define TXNIC_CQ_ALIGN 512 /** Completion queue stride */ -#define TXNIC_CQ_STRIDE 512 +#define TXNIC_CQ_STRIDE sizeof ( union txnic_cqe ) /** Completion queue size */ #define TXNIC_CQ_SIZE ( TXNIC_CQES * TXNIC_CQ_STRIDE ) @@ -403,7 +404,7 @@ struct txnic_cq { /** Consumer counter */ unsigned int cons; /** Completion queue entries */ - userptr_t cqe; + union txnic_cqe *cqe; }; /****************************************************************************** diff --git a/src/drivers/net/tlan.c b/src/drivers/net/tlan.c index 93533b438..01193006f 100644 --- a/src/drivers/net/tlan.c +++ b/src/drivers/net/tlan.c @@ -179,12 +179,13 @@ struct TLanList { } buffer[TLAN_BUFFERS_PER_LIST]; }; -struct { +struct tlan_bss { struct TLanList tx_ring[TLAN_NUM_TX_LISTS]; unsigned char txb[TLAN_MAX_FRAME_SIZE * TLAN_NUM_TX_LISTS]; struct TLanList rx_ring[TLAN_NUM_RX_LISTS]; unsigned char rxb[TLAN_MAX_FRAME_SIZE * TLAN_NUM_RX_LISTS]; -} tlan_buffers __shared; +}; +#define tlan_buffers NIC_FAKE_BSS ( struct tlan_bss ) #define tx_ring tlan_buffers.tx_ring #define txb tlan_buffers.txb #define rx_ring tlan_buffers.rx_ring @@ -717,7 +718,7 @@ static void tlan_transmit(struct nic *nic, const char *d, /* Destination */ /************************************************************************** DISABLE - Turn off ethernet interface ***************************************************************************/ -static void tlan_disable ( struct nic *nic __unused ) { +static void tlan_disable ( struct nic *nic __unused, void *hwdev __unused ) { /* put the card in its initial state */ /* This function serves 3 purposes. 
* This disables DMA and interrupts so we don't receive @@ -1715,7 +1716,7 @@ static struct pci_device_id tlan_nics[] = { PCI_DRIVER ( tlan_driver, tlan_nics, PCI_NO_CLASS ); DRIVER ( "TLAN/PCI", nic_driver, pci_driver, tlan_driver, - tlan_probe, tlan_disable ); + tlan_probe, tlan_disable, tlan_buffers ); /* * Local variables: diff --git a/src/drivers/net/tulip.c b/src/drivers/net/tulip.c index fddebfe5b..1030e3698 100644 --- a/src/drivers/net/tulip.c +++ b/src/drivers/net/tulip.c @@ -426,13 +426,14 @@ struct tulip_private { #define TX_RING_SIZE 2 #define RX_RING_SIZE 4 -struct { +struct tulip_bss { struct tulip_tx_desc tx_ring[TX_RING_SIZE]; unsigned char txb[BUFLEN]; struct tulip_rx_desc rx_ring[RX_RING_SIZE]; unsigned char rxb[RX_RING_SIZE * BUFLEN]; struct tulip_private tpx; -} tulip_bss __shared __attribute__ ((aligned(4))); +}; +#define tulip_bss NIC_FAKE_BSS ( struct tulip_bss ) #define tx_ring tulip_bss.tx_ring #define txb tulip_bss.txb #define rx_ring tulip_bss.rx_ring @@ -494,7 +495,7 @@ static void tulip_reset(struct nic *nic); static void tulip_transmit(struct nic *nic, const char *d, unsigned int t, unsigned int s, const char *p); static int tulip_poll(struct nic *nic, int retrieve); -static void tulip_disable(struct nic *nic); +static void tulip_disable(struct nic *nic, void *hwdev); static void nway_start(struct nic *nic); static void pnic_do_nway(struct nic *nic); static void select_media(struct nic *nic, int startup); @@ -1128,7 +1129,7 @@ static int tulip_poll(struct nic *nic, int retrieve) /*********************************************************************/ /* eth_disable - Disable the interface */ /*********************************************************************/ -static void tulip_disable ( struct nic *nic ) { +static void tulip_disable ( struct nic *nic, void *hwdev __unused ) { whereami("tulip_disable\n"); @@ -1958,7 +1959,7 @@ PCI_ROM(0x8086, 0x0039, "intel21145", "Intel Tulip", 0), PCI_DRIVER ( tulip_driver, tulip_nics, PCI_NO_CLASS 
); DRIVER ( "Tulip", nic_driver, pci_driver, tulip_driver, - tulip_probe, tulip_disable ); + tulip_probe, tulip_disable, tulip_bss ); /* * Local variables: diff --git a/src/drivers/net/vmxnet3.c b/src/drivers/net/vmxnet3.c index 3800d6b72..95e4f79c2 100644 --- a/src/drivers/net/vmxnet3.c +++ b/src/drivers/net/vmxnet3.c @@ -22,8 +22,10 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); #include <stdint.h> +#include <string.h> #include <errno.h> #include <assert.h> #include <byteswap.h> diff --git a/src/drivers/net/vmxnet3.h b/src/drivers/net/vmxnet3.h index 5e1e0cb6e..b6c3bc50d 100644 --- a/src/drivers/net/vmxnet3.h +++ b/src/drivers/net/vmxnet3.h @@ -25,6 +25,7 @@ */ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL ); +FILE_SECBOOT ( PERMITTED ); /** * @file diff --git a/src/drivers/net/vxge/vxge_config.c b/src/drivers/net/vxge/vxge_config.c index f4d217097..8c6ee9e96 100644 --- a/src/drivers/net/vxge/vxge_config.c +++ b/src/drivers/net/vxge/vxge_config.c @@ -16,6 +16,7 @@ FILE_LICENCE(GPL2_ONLY); #include <stdlib.h> #include <stdio.h> +#include <string.h> #include <ipxe/malloc.h> #include <ipxe/pci.h> #include <ipxe/iobuf.h> diff --git a/src/drivers/net/vxge/vxge_traffic.c b/src/drivers/net/vxge/vxge_traffic.c index dbd799015..0adaea2aa 100644 --- a/src/drivers/net/vxge/vxge_traffic.c +++ b/src/drivers/net/vxge/vxge_traffic.c @@ -15,6 +15,7 @@ FILE_LICENCE(GPL2_ONLY); #include <ipxe/netdevice.h> +#include <string.h> #include <errno.h> #include "vxge_traffic.h" diff --git a/src/drivers/net/w89c840.c b/src/drivers/net/w89c840.c index 72ccf3a28..33a6658d2 100644 --- a/src/drivers/net/w89c840.c +++ b/src/drivers/net/w89c840.c @@ -254,10 +254,11 @@ static struct winbond_private static int ioaddr; static unsigned short eeprom [0x40]; -struct { +struct w89c840_bss { char rx_packet[PKT_BUF_SZ * RX_RING_SIZE]; char tx_packet[PKT_BUF_SZ * TX_RING_SIZE]; -} w89c840_buf __shared; +}; +#define w89c840_buf NIC_FAKE_BSS ( struct w89c840_bss ) static int 
eeprom_read(long ioaddr, int location); static int mdio_read(int base_address, int phy_id, int location); @@ -579,7 +580,7 @@ static void w89c840_transmit( /************************************************************************** w89c840_disable - Turn off ethernet interface ***************************************************************************/ -static void w89c840_disable ( struct nic *nic ) { +static void w89c840_disable ( struct nic *nic, void *hwdev __unused ) { w89c840_reset(nic); @@ -956,7 +957,7 @@ static void init_ring(void) DRIVER ( "W89C840F", nic_driver, pci_driver, w89c840_driver, - w89c840_probe, w89c840_disable ); + w89c840_probe, w89c840_disable, w89c840_buf ); /* * Local variables: |
